From ae5c4661ecdff608b6d09704092a1cebbc5bc2ef Mon Sep 17 00:00:00 2001
From: Adam Sawicki
Date: Wed, 2 Jan 2019 10:23:35 +0100
Subject: [PATCH] Updated copyright headers after New Year.

---
 LICENSE.txt                             |   2 +-
 bin/VmaReplay_Release_vs2017.exe        | Bin 214016 -> 243200 bytes
 bin/VulkanSample_Release_vs2017.exe     | Bin 291328 -> 291328 bytes
 docs/html/vk__mem__alloc_8h_source.html |   2 +-
 src/Common.cpp                          |  22 ++++++++++++++++++++++
 src/Common.h                            |  22 ++++++++++++++++++++++
 src/Shaders/Shader.frag                 |   2 +-
 src/Shaders/Shader.vert                 |   2 +-
 src/Shaders/SparseBindingTest.comp      |   2 +-
 src/SparseBindingTest.cpp               |  22 ++++++++++++++++++++++
 src/SparseBindingTest.h                 |  22 ++++++++++++++++++++++
 src/Tests.cpp                           |  22 ++++++++++++++++++++++
 src/Tests.h                             |  22 ++++++++++++++++++++++
 src/VmaReplay/Common.cpp                |  22 ++++++++++++++++++++++
 src/VmaReplay/Common.h                  |  22 ++++++++++++++++++++++
 src/VmaReplay/Constants.cpp             |  22 ++++++++++++++++++++++
 src/VmaReplay/Constants.h               |  22 ++++++++++++++++++++++
 src/VmaReplay/VmaReplay.cpp             |   2 +-
 src/VmaReplay/VmaUsage.cpp              |  22 ++++++++++++++++++++++
 src/VmaReplay/VmaUsage.h                |  22 ++++++++++++++++++++++
 src/VmaUsage.cpp                        |  22 ++++++++++++++++++++++
 src/VmaUsage.h                          |  22 ++++++++++++++++++++++
 src/VulkanSample.cpp                    |   2 +-
 src/vk_mem_alloc.h                      |   2 +-
 24 files changed, 316 insertions(+), 8 deletions(-)

diff --git a/LICENSE.txt b/LICENSE.txt
index dbfe253..67b0d01 100644
--- a/LICENSE.txt
+++ b/LICENSE.txt
@@ -1,4 +1,4 @@
-Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved.
+Copyright (c) 2017-2019 Advanced Micro Devices, Inc. All rights reserved.
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
diff --git a/bin/VmaReplay_Release_vs2017.exe b/bin/VmaReplay_Release_vs2017.exe
index 04f6b77adda85e731f4d40586888fffdf1439c2b..f614897d3d62d0502c7557445e79b2881252b498 100644
GIT binary patch
delta 104854
[base85-encoded binary delta omitted: not human-readable; it carries the rebuilt VmaReplay_Release_vs2017.exe and VulkanSample_Release_vs2017.exe binaries listed in the diffstat above]
z9e79HVXAO;;3;s1S<->GY(71gPU6+75%JD$f#2nthMPZ8e265*R&k*NZyR=X8Ct%e zS3JF`RCHLhN#rxbHex@;d$oISsrXkScZL;MWK))k%ZdDC$9tFfF4^-zJB=CmE}HkR z;$13MUNgslE?oO-y~d(h68|l%O`b1;bO6&GfxiNWRc}97AHIN^TbNrXuQ!!?+wnGwokpaFZYv& zVPRNj4}Db&>X6!7?!_e}JL<`6euMhG;-TAkBiBo@6m!(`ON$#c+TFz9ecE6P7)-_A z8e*$HzUd7qe`{X5D6$NzZp`68a}kij>A89m-d<|G2NxKp+mr4-MQ`AuP=Pll=_m42 zcuW3Wyy%p|8;TE7cyza}Exnt7x@uBWr&mAai=oB8MSjQJMY9&-N;Tr z%e4HN3ApfIk-LmXJ+I1MsimE7(Wb3bYT@`BjZW~XEugPhX5i(q(ab4U_uwJ?dQ0(K z58i6X0Xb}HnGf)#^FHypLl@>s6B;dYZUlXxF@E*C8BLl-n{*V8!fw<)>N}r zYqb>*_2h9bIZJKIl(YPcG|LZJaVC8EWP=hN^?Y&T@Qjr9YM&|DSl`MahoaUwvA%CY zQ9gC4msevpsOF*9P7&J+7om}HHDfcZ(NRxajb@D^{DFJ%mXTu`-H=y;c(WI8F}PwK z_P49|59xVz)?;cEjg(gUSa+RI$56S{uCryBuCiN~KArCp&3bdYaP-Dq*u=?VY;PXT zQyPl7z4=7GEk?vSa2EaCn-2?doIq*tnw{O#UyH}!%Xz`yU}H{+XA8T7M~XGK^X9y& zg?RmTSlSn@Ea1~}kug(hRDVa!D$)8VmFPYPZ)}7qyK5ahi$ANa z1*ResC+$|tO!h@8{hp~jhI_Qmv{c@cd$gq;l;Gvpya}Rog7c}oiSdl6*Ow>p5n6R{ zU)26Ec3PC}r6{Hx?o{R#HkHw;zNr0KZDf02EDcv^CiV_w)V?P5TMbl#VRs-w8?Ce8 z4t|eOB0j%^Cj?J@)tk{7@kttbq*)q|g?n6&G(g10Y##Ql5Iv>=q%YB%o z>EmUqg0sYqZ=^?+kiv)!FFV`fs2$BW}2_ndnmM2jI45>$9wp zynFXospUfC+o@vpKpw69EY|cBuMOml{d>@=OOy@dlUiTI6+?cDY;`^+D^F*FKK9%z?i|G9dGS>7@F0$NUIy{@{98Y9WDp-?>2J*4_^%2SfttHYEE>YIjr+t8LwLIe<|JIts;G$TWD(sHMXRB_N7#=OeMK}U zi3f)Ara{je;bF+7n0?zz$QQd;s5H{OhoO81(~q;x&6OM{s7qBeUC_LK(qBTlp% z#yfSZqCH9~)L+-HS6jb+`Z8)}*$-l>Lb6p(Ob|~F<1U*yfja0C4S4K9tWu6UI&o;$?ByLfZsDRK5LTmd?Zo|(M2nS7O%EXd@UhVhyxAHn+=KMUJP zXblw|NAlZ^YB6IZpT>ub5nqnvEBPn)h$*8u{_$j>h`R^-7jy0rZ;s-R@}KV(-AD6v zeA3OTQ;C+1phQDTs>b48h=*hfd?WA{zV8kdyZJ7P9VU*9 zqB z4<5aSn>P#)e7G!id&Jr$O|V}xyyUhC9M65v4x<$De9y48MSWw1Ya-vnzZ)vjCh^<& zmZ2q!Ch<_tpYBz%Y6^eO;Fo%o+;Sg}@#EigD@l8RqCQS8x$8mxHRr!|DoM)W^*O)N zr=;gBZZvl}t6TqJ$Hp4)dyP}9=zW55_;AewBy3}cb}wb@!kh-q-Mej5|Jq$$ePIs2 zfGLBC%5VJI~?vW=~HTlJku~oV+3NG|Bz=A&DUUl!~<#ZTpm$K4tKA>IxCrE@6~BnEG7~c#edMPEYX0L-sO*@v$(h>qB2Beu?CBXT$V7_%{E&i;1V7;K}Bo-Ao*Q zg2!a`f!2j^8{Rt!kMQpynTXK7zl?Fz2zTZl!2Xyq7g*k7|>=-t8b^ zE5FS3TYhX#*vdnmkPxtkJpKTgFI2rR*|?)zJ89zk(tXg`CF;rtXozq7I6pn;T=sYM zoFMA1(?~^~NMF|Y`UB?2JcmPsV=eE>PsNFc*J4?Yr(1&yoc6&54m`xlu-%2X_vnSS zlp*fl((wYc`@%j)8M62rAN{-JG%w@d;jyE#$61a4mm-HeR0Z#3Uw7gTBn$HfzI%rj zt+eApckJ93xFxYuQ>CPi8$S-krX+Y=mK%?(Fi|0=RQw0fvhx`-vnu(=K9})6E7TI4 zJx}Jwq_@L)CBM*5maC)kVHltMIrq&uY&hcsYIU5olhF#riaGzjYOHx=t@`q=!$e?6(E?4REOu4f? zi9u`}7Nen)FoHatuUA)l>f;1N(vRgRo-^FDgHDC>_(414L3QYXQ`f6KKahIIwot#7 ze*O&^(lns#o`ot*`+J@wo##%8E>H26uGx@c1Bp&Rv_gVs5H{>R6J;_Ip4|;)YB{t! zo@(XB+hk*@8nw7Wg$-Zc4fS(${?n0D-O&VxZnS; z_vsT?sPyWn=oSQ_mtELr@X)?EDiO538j)weR2pB<4g@}hLkb2I_WSQZa*oJzfsS_! 
znHk3ZSTvaD3lthH>0E|qhImV*%=3yYS#AUDjis*6;9U+l-DY*vV6yeLMK{v<0F)Oi zWsKcm4?Rop&{XAgr+BtVtne%XlJ|o93uxpoMUz%|J_(SmIl_T`9e+=6XmmODrs25& zTkbvQVxaMc(&(ADFy4-TM)uq%c0J8IG;9jhLY#gvgtE!6z(t-nZS9sfgLsa#5=3=W zu4K#Jt?$REd+cK{laG!Ow?Bg|k3nb&&nwhU^vycFAl;eHb^6EyzKex!^8XMDcwW(z z#0CDV-cy`k1GQ8hE3l#Oqe*gP5wH*z_oDY;z>rbSV`*Ed$$S1$o9eO_= zo8v_FvwNl3(+VBp6(VI!GZc}YC!p-~ZXxC+{24Mm9(R9P9rdqhBzM!hPpk`GT`Z;d ztFo|au8vv~EqV*gK?fU)T!CHp=VQgQf;VsTS}bK~()=Q)M9iuaY$&4tCK-{6M<)IS zvEmzno8;cH;xeU~^Pkd8_oca0r}?I-xO*MH)&6Ev$_F#k?!dFb=s`8|spNL#u(qlA z*E+0QHvcL1twZU?{7al$$D6m_^Dm^5w?(Tl@1tz<1F3BBWGuX2Y9JcmY?MfRmdAz# zy`x5P7c~Cg{}!X4<@fVv|1I`D%X=g*Mt`L)D>Kpx{AYtaS1--&e?N#7KhKF-@*@kc z3SUyqBC^=5U}*V4YsgL}IJqDv}bNdLV`@C#+x4#4K zNzV5=|MOIbXWN15xo1wUbJ>(=m>z=X-F@*8%1QruQP2<1Ko^3;x!Q^7Bt@^r*EuAA zDMH-yJn!z>DczIkLx_Kspr7XlIm83;GzT;2;Vz}!{pZ12_*12Opg-bW(qHZ0Pa;jBav;Ux3tNY# z!TjppdjaPfV0NObWk7Y*WQ>v*c&nI=DtcX==mZ(v$b|+xT;2LZl&!k;Hu<|t{+7yL zCEqLQee$0QH1uB*WIw&O@Bz?P0WNhRMc$VUD{lP|HFtZx0a z{QXG&mdM{}5#mS@Z|8dIAGCT}nSsH0cKh0EK9`);D^p08i+E}#r+ejS8nARiS1rU% zV+|jDwrZ~A$%b>u?sb&x!9y@xqY`-b-o>?(?9@qIDoq8Y8CWOH$~xJstd&hPM)=u! zb&~wGpmsK+kc3K)1C?~*bUvFlA1bR?(%E-5Wv)vyS;Vv_^Q%|V2O7`X740N>q_%;$ zJmURS+;*bpQMHhU$}+e}wxo?{hoY-UCmFOZKoarsK0X2e8N|+Ro1+Q|L)64)H-QID z{$^*_={Rj5hvJv3_6W|92(B5ALP=rRi(GWmM73mH|(8XuOW zpGQi|H`Xq3u4*#s-?P~|&RU?f*)jte??gOoO1hVYPO{MPKDy7@XdiPn#6mL*eengK zQICZ_@q(DRiMI>ie><*{E4N-FzY70uxnjd69@F#<5>C{@uv`>T&P`NsQMHLTYT6&2 zKNx;ht4AbYXCuqg?d9v$U5?=f=+3`I+>5+*(||e(b604E2&K?uAG+Uaq0OUO;S*8x zB9C_M-CWbNDb#q|e7x#uA1{a6aUY*Ry}#VYXL-}3akFJd=s4XGl_blot?xQ}91bJY z!xWiPJAv#WOM(Q_@q1G_L~a?LNjJ^SjrHRv?m*9d7E`kQWSe2FfV;2I z&D42++h*N{wWjA~F!X)&-6C!?w>SSBEq~av?nm|A#Hw^g9~AW84y}Fu|9DxPf0;K8 z-}wg8z-tO$<&4_>2E)T}qh0e!GGS{Z#FyWl`~7D^i4ICW^9?b6Gmj4MYDw7LoACEQ z%JJV=0H}n2z3xkR>UEj0$sdq;bG%G6zSk~4HFHUodYQD*uf|4+#m2_hMV~EvcS+V# z(_y&MI?%d}v8!zqMz>Y?R(pkqH6|u1K2qa#o$e=%pKE+q<4YQ!)R?Prw8kG)elB*a z7Btd$U1xYkqpCb+E?uyR8XX!pXk4!GEsfw{!+8HGiuwL)j<+T0T9|rJB^+ zRNM7g@QTLu8s}*ot+B7hM2*oJueMh9e$p7P@hgqnH2zcLlP*mx);Lb%P>tO*Cbd$A zuf{9%Xgsd*9gT$=b2X0D=+qdiF-W89yf*x~#!`*1Yh0)CQH{A8$7)Q|=xsr^!EoS8 z&5D+AiC(#Vj^=Gl%JG-8gu?YYj|&-{t0HYs5^){P_O8lPN^*Xd!t&0TtY9mri^9>B zN}f4iVTHyKGai^R^WhoxDf1>x$;q8IbB4~p{6FYVoG{rwVfyr$lO`zTjBlmAVCFL{ z?tg>7UUQy~E>an4yUZ>WpQO>DF-v2P#(a(IG;Y?oU1PaMk4Cmm*^O4{VkufMT4T1x zc^V5fZq`_;u|ne|jkaf%;RKE6_bC37Mz&Y+ff}PF#xRzk1!Z1E)dVGti?MMbw*ScU z4OY_K!yC8iX2;Pl@xDZRC;QZyv+Vs-@5)3g-#2sCwA}0m+uJ)!>9CB<^dYH(eGw%C zgUuf}*4`zFVdhFRV*0e%xxF%Px985BX`eoG#(hkzh%(!j6i1oa!LF?DR57zAsqto; ztkABp_&&uabyQfTd57i;rzv^qg9^)Y6tagE+LkD+(pZ?O_`+opUCg#Z2^<>B6BVC# zMine4UGZ$7!g7r^T>!gA$6ytoqp?t}irz-FH=S&%Z$I{4eyVfK?Yc8?Eih6NmcvW;H3cDE_AclEED4{Z9oXy_6gETymUN z{O)T)`9KrAopQwS-!jgCUfn(>{XmO7vTOtIt#NI8E?>q}!lK{)cYRlmJ)uhI(^1PH z?G@>Rs6y}wn)m8a1w*`g)c-WNQXnX=qeuy?gNp`b>904QC1lVEx%VJ@IpDLj$eVFp zdw9^t%)(!|Fx!}|T@^q3ER#hV6mA{DOw$}Zz0|a*brD-FB*DwY0cODH`A+VDD~7yZY^^mBD{yJn^Ke_uO7*hEqYY&4DfXe zNcwdf^-=?C0g=D=im(|G-h*nqy_TZVBZHGIdXysPKj_&kdej2ZH`Al~ACK^5Op2A@ z?La!bH6~26=uwIc@c+}?LIb(n=4%;m7n40|8P)z^$lyAQqBmob_k!Ge5H**g8KlYT68JnI9bP@s zXm8P@K6Zl70@7jCE35CbM=ena-cCw5NY6#1pH#+L6e;6*q(~Y#yaoyF7CqVY;2ord z!>U*JwDVw|uuCoVm{Xs1u38fa6{Ogr=ru?!&f(fR1`MW=wWf0mp;F zL5iFG{+psylQk^kH|kM=Vj=e))YK{9ljLOUO28<$>p1Ut^p!zHP~t#L#{W~DNY5Gd zKkLcf#$;UT-8%9%=FKv8B4dv#$A9g`d}N&eztAg#p0h?ztWPq7L~)|&H|otE4aSn3 z8k~rS5Rj5x-;aqx;_c+Sm=5LZ+u4z6ab66)I!x6KzPf;*Zs%p_CpiNHnuDej2u3OmGzx@{kBSE;R<%S&VG9(3F_;y@%BxY$${4pl}I^uq8tV9i2?SKDKXtB}@Qa z*C-jJbX0vhtcGEW3^xfv{5+(^77p3XdC2r{WHDO|TAO|%E~;rMw7q#+t-%7KWvLpL zo%U*=+rH!M(wUkP_sX`!6?eLM$?_~ZRBAiAck~&~L_|k3HaMX(dJ0ytli_WLwpEq_ 
z&-)Kn^S@IK8A91o7P#9~CPPNUWF<1dt-h9!5hP%^vs98+Rge^e!(BI(sn%gfS`^S* zoQCTxI(Asm4M2k`(g|Jfv00_l6^lj=fso+#+-qsP6REuiRs3ch8EDZVUF7CXB9Ut6 zJ*-`Z8jC?)%qIPjZv`3a}p53+)452sWOwITC#g&CY!CYOr=*0nQVGWo2m3lA#)}ppLwb< zfz9wF<~%Y|0{jgkj*AeUAerjN&Lk^uZn9{ftP9CJkXia0W*MW$1^sa|r_**L8Ke0T zkf`#MGk+jPujJ!r^<#NBzt+*T}4vvm}r*=aXSrf%m43X%)bN->O&XpW~NhX7D8HQ`;Au(b++LS&lxX z!x$)mtpY;qs*Y$9z?vk7vXJ3{IrXvvG91{a@qDfZK&4Jpr3VCCsx}ZC0%=xmrTH#E zS|9cYq8Hdm%})i=WH%p(USS2mPY0<2{SK77>9K>=@~sz;7UolcWNZbn9`JP_8TlB9 z?qxBNek>9l959^cP+?0^SUUb1rVPh)2w<_sy?*to{L9TOo7oB`5dPE-6vARXcND?a=I#or2~elo)SSoq{n7CtLDC(a+unjFNk5Vr)O z3^&+PmKiMhM#$dpqNI)jd_Km)iPNRquk>fCZ* zKCXwPJAJ0=qZSXTJNqq|rl_x$KCJj<^F)(7%qDH96`Z<1#?6NUj>t!Jen2|fFA(?K zft?~Mj~%-I#8gCiR3bk*&I74Dmw+VkTc*lm1Jbd7fznT2C|R}!Nc!z?9iuX}%~NGM zute#_vmx00Y7amg z9C%de?kZ6Hmq1Dz6YIx<@W(i24$q>&mKUz4$mB~#zowaSvROBy)GJjXSz)0nW!RHSa0^hDx_tm^YSiNa zRem|%(*%!(Ssa?H-5GqB=D0s^yf=UcCWrGs?i?6iq|vt-gB5DNk0fpn;XddGwVrNDdbLR z2epTs^n5bh?61f9=}jCXYZzRshDlVzHi4|onLytBczxbzZxCwOke5EsSf>}T-tZRE z$BT{q&1TJMwC15%-$Lh7^Ce-xeEyPZQn6Wl)n5;o(a`x2I&@s$qI9pnqWG9stxa#% zDp(Z_8rk#=O>dy-ISLlcE0Ko27nhBFb!vJmAJaFObxLl9@7_f7kgA|Tm4W2}rMe#& z6*!3sj2csc9s2Yc47~eIC7ZBK6|4vz>;}jUC5nGwyEr(&Y}uSn=Rnw?XgqeARUKxioQ;5V>{yMm{6*8qIOG`CGGa<6l$G4dz8*2 zdzH@9du2X-DW7cUG~ch{X8`GlXdT2_<9kT<Hz0i{3WplF$H zwrNT25)Ju3btnCKSj7)GqU=3=MBMMytNPT}nRCjO-a#N8f0v85(#@8mD4#OuQQfn2 z8&3yPd(Hz=&6jKWou8_9{?}(}I(2-Zrm8Q#Qv3+4I%(=1gRiKy8;I3G4g~uUa{ff` z)72o;Px94}EhHI~Dc;K0N+V|v%pl42S#3FrRYw+cM=B;`tq3Z9PzGYU1hM$sC}y7=$zn4bvS>K8hg|Tlq^_NZLe7FN6|NkGpyPzbQD>EW z7m$vKpHu<-&N1=oAhVgAJ2POQC5lA`s&vAu8UvRrgOM8T8ry43(U_(&L*rPD*&4q% zrwY*fyehzOAPyJX34*dXrUb0_&#DE61L@caq-)$U&G-IAwajoJ9Xo-v06V7n-oL6f z)NmjjJAt&8I;Q#FRd{O%{BR&9Kq!C+A_Xa6?Bt)SCGaP_@`wpFnBPRkXw)Zs%Ee!U zamg8r=gDM+HlPxKd8lRz(1GAR{;g%ie{)GmpMs1AKui#4L7mmo6GoXqv;O3IB-3l4 z0G4qXf31qJ8F|GajfGEUMtA@V&kSVY$@N%xXT5Nm9Kh^<<6$*Ig-&GQ#i1cMzt|PV z{)rIsx3XG`r}GU)a;_r&n$JirUart^%|g86y03Ir-Uz^=xHGS*NgnjJAmsnevt?B* z0(Fun2e3*g)W?<}l+?6u;f;9#Y+h~%%SjGq0ke3P_rjTn#@vG7O=IE3p`m6>({jUL z_j9AB4K2KAJIw6YF$3R7+<=gMMK^LS9s~0XI>ILt&kZxXyNd7)=sys=iE8sIkyWNT zt3Eml9Y&qi+K;t9Q%|;a20r4y4k7lcEg5Dv z#bJfp8OHt3)Mu6W6M{sW!KQ12RvzE!W_~DN9De+Av6B*Euww|Dw62wx%kny-E2#ng zy{Ew~1}oPIl)%fwGD>QZi7OdqT+>pd!oT*i$ZM*K7G7+;(;Vn(in}6&Jc^gYD@^i7 zAX`N;y7}H~W5h?}o1B}xddKTazAI$gwQY*G^0m_7B%*w3?1Ik%-^!bLOk5y~n;gz! 
zadQ|pE2t{4GC)nH}>fO{ZZi)WyMi zzQY>(PdODIQAN2BZP`@_7ivSKVC8*@MDvkm6IT}g%;*DzGOcgrBU%QrmN+MxWrktZAzQJ^uTt$p zB%l{)&=p|}5Yp0z5Dg`dP2JTPIR%tQ4?|x4+5_nrrud#LrfC2R!HsIjEVUy;H&ivv zeI|_ML9wc*0jCqynm%4!7-_Z%q9MUhm^acz%TeaUCU93|ml15H()aP=g;8dUdNd6# zg+uWBqs+0c;_(Li8DW!DhN0YWW|isg%8++vx~sA#V7fx^P@4JytUkJfA&#L-YQ>?J zwRq~H+3$24g3S;n<1s1?Vp8z&RIpkJrH;kW4ckN&Cy)lmhDoZq zzW`GA7}3Ej8n>X)nStn1(dcg6v%qDQ*|QCHT;`2*n8XQR z=5eXY#X>CEpZVls-`(bniEwL0bK`HzC>=GxML)^6g{<5wJLAuYABIq+?bqeWMok*j z%V$?Sa}Vyz;4aHzmgC#x0jhjHUVMCy+2|g4&$71>@?<>fm>Ixo$>gm$I55XEY9EjWK}3CkKiL#+t2r&}o&ALeiiq*5^14DB`W>`ZMd*!1@A6 zN6>TP;8?TCR7#(v)0gV>-rJ+6o>OUd0O>dmBq~PEHzAmBFi5cz7&IK)HF~~DfvuSv z^dNp-rMu;MG4Nh}Q*P6Fl_Rgf7gYTHFNmf0>fK7u*Qzd+maIOx=E!C ze^HgGCyKCorpCMnDpkCr6jZj`Gma zUZZzFcsqFN>ni^w`pz$8SNG~5^Eow&LY$Hkg1 z21SThn~9BC74&1@!Z3yh5Eb0Vi+9JHP2?7iQy0>q3pe@SDzE16i3{V+mfff#cCC|P zX;_$JE#FrKe+Wp&X&_NyJH@~(?0F^aG*~wT29G9r`e7kX{A&n-UKyr1;`bo5CmGV$ ztYJ0&<@y8C$@^ zZB=2FA>~1(zULj~NKa{VblQc$yFOGE-ML4lk7*UeTIB|_=97b14AvfM?rP5~sr5E% z1K#c?9I{s#$OY1|X|Gs60p})M(pig-43+_>GHMAQFOE$xZ*5YHxC(^jI^M^NMiX)F z!iAmvflx;A{&WGUC;8?F40b}Np?DvU0fq_Nny&6MlsuGw)j?IxnAZL*(&*0uviy_$ zGSrBxL{waS7>l3RLX=E2W17Gpo+TZ^)de1OR0$t1ewv6%&$!y+&dVRF+^;f=8J#_Y@s=$?#hB!CL zjFU=9P~wp9I;E8UqN?Di|E(d~O~&R$(zgcdk5H*|vhooLAuIup4&w3Xz>XDJ7Y<-Cn52&A)(T{6oke2lx}+dwMNNzDgG zC_X~2nSyGqndWKR85Y}t4V1l(4X~J|V;}@%P~E$PVl5GJ!&)K(hDvq9%Ro9}8>)1- z0ja?MEqwyg7i+zd!_Sn zd-2sYOlUM79neYZpyIzuRPlc#idQD<$0YM~J|A{eIt`MPPKzXQ&;2@|D(DPHtA2zy zD(|(ZhvsnLvs!0#eu9?A9v<`s@0tczgvJ+Sq67D_TBiwn;2@a<)7jE+Kf;gT_j29=dz6 zKTxA1=00e)?4L1+v-rU{so@DuX=?KrPsj0yVQ*v|IoYzx#K{NE=EK>Yob^N~(za^z z*k?F{?WSDV!;*~Lerm_ljxpjXuEvfSI>TJ-%9_a8G=$tHC?l<+a{VR$2xNIAL%Av5 z%HNO%=S}e>&OSkq8+3XjnfR{|4r@Dgd8Fx6lgMa@J+IE;RiTx^<=#6BSs*lQ#uUyb zAw)LS4OzpBl{2vvv}JSF4Z)f@@lJ%vI!#^Png(^0gQLD5_9R;3F%Sk14VH}$adzu0 zJWRu^N@!pO#jzBOi1ugE6C*1dxNI*yZXl$lTw&6Mx&Z3`; ztZGnc%kk=NhTc(x_n}J|5lj9Vp}1;d;V?Y9s!?TRjc!aXW=w=@Uc2<52O9_S*}?FH zV5vx1PMO2m{c}0HC12qSpo?pGF+9h7B-k-u6)vHfDLQsCqg)R7`DG(m4_q3gzcb0` zd?AcRaL!YSiT55}hij`|pw7grL+H)8xOQIRy-sFO5M;U>@P2lYb2J@RlbbDBSH<~? 
zThRvIzH4!6!VS@TbZ!f}5FOFNCu@{ovdxfHT4aYIy98Ob#ooboriYPB=#^RY5+SREUXDeU0a-p|+byyj$jTwxY>^d0Rtgz5`QjV>zwe^_ zzwe^3|My)K?+LvfCpjJezxggo-~aF5MJf4duDRC~A3kC>5+MuB*pkr;%;jeAeJpp% ztOuvfn2TiY5JVm)8m?=SX)~Q~rcuEXhV>ai&``4KGqphdJ zlWWXG!Vfk7ljfT|uH*wBH@i_hFi{jgZrU`{LHzo-S)Z7{m1fDUh2~4fGf$dQX2$w(;rXmEwO*;eFG!mCI(01UgD6E=5=K;S(z#|w|1&pxcGD14Q zmk`LsoABF~@FNEe#NA~pWCn(C5CSF60KTE=V&G+jD$9cM<)AGd=F-#r&Lp^yLQXgwft}~fMa10*HZWPhSqNiM z>OA0+nl1vihF=eL4ZSz80wDqVg@j$u_n<94>C!hnzAs~0BnS3KAlG!lp$O!TPS|w- z`hNo$5bQX@HqeAQ>F`qqO}Jqoyk-#XEAg=_DdH~}F zwAHzqoUFZ0)r4P6N3%nR@bL#RCO{L0%s_DciU9>;%RCGU(8a(P7vN5dXyC6)aCU&M z0>1VrJUFW$2Y$8+I-s?iuk`Y@x&V`F@B?emLcei|vq8s$CR~k>44TmOE`HL$5S~F8 z3%Uxp6W+QxpsilI#uEpWjB|UG; zHy~pu5x(|3`W7_d4upKrR>xX$uq}KMqaAX>tqA3yi-E=#v=(R^@bgz0v;U430(yO4 z31i;GDGfPc*EiAsMPM9Y+}mLkbRqD`_t10~F}Q#WcH!m|bRMvBHwpxru-``*q@Xi^ z?aDAS{(;dBY;XvtG-x|;=EvwV&^f?;pTIuoa^Qp$7;_|df$;ifT759d7qj#m)bvl( z9{3T$CeVb35w?LQtalPOyr2ntAe;d00KWetx(0M9aGpn<5qZG2=Wu887YYh=z#Y$3 z1%`;-my}XD@K=O{%NPg1!El$8Q?kW%j$G)1;SskDI)p6{%0OE@xSTf4&g9_Owbv?%?MObtJfTP z(@j8_3?0Hb2=hSuoaHe8lN;R;ghE6Ro7_^B zdW+#?*B(lQYY@^v6F!ZQ37U``??`TSz$3@J{0P(-a>6wTPl2{N-jPF|e*=SUg`BW0 zLMiB^1}^fxQ_gmTuOXa31mWulRiFv~jliy}TxA(Voygj-|Dt!Q1)7N0Zei&oJN1+B(bFA)ve8|)&K>_7(sEkFp9 zI;g@CjzA!J7VvRR7XshbbScn3Q4Lrda5{n?Gg%Js1%yb@_;wl#?x@Z<2XI73^gkKJ zx64@5Bo$$GbR!423kYdQOgOx=fe#azwVd3@+3g#Ig^&~eflvtA=f7r3$F@aXaF#(% z_%uQp=pq-0cFCAPz$5{8YPuBY>}s&{kXs$y$N}zaIJMafQ{p+$?Lk`|+sNVV8H6;* z37<#E0`2orGo^zY`LDUip^b>!;jH!)XhJ8#X3&J02-`th9oWdRtrr~7Dj+A!KsXPY z@L~9sT?Os)CqvEW8WQ5fS z(V(lB;{0E@42~~g2v;MdfcE*hnbQC5!g4etgPs2aP=QWe?v}4er!dc z3BCSpg!>Qu=8w>^+~J>Up`MW{ep!uro3Eoj0d1V&%T0mce+XCU^9 z?I3m_&`ehfYzL1w2Nn)Vz|Qbsi$?+nunHj$v~7dI`aX~86LbdfS@KH5_kN0i=U+nY zKvx0Z-+~H&E(I>#iWY-YmenDP9JRK-hW=lLh^?=oKM*#7E(d=5CMFrsK1V21I!N8I z4W|~IqzD84jiv@|1E#%)Qwwwku>DSC4BF~^WOYokIxX#SLAi1lY8-;A6YzV4Owfe> zA7EkuO&Eia3)&7`zZ-r%po@SNd(aY~D}m$#6bRoPpD&OpeS|iAgfk0r!mS9opuPS; zE^-heN1=^nIJH0%zKXD&lz?6*A*(YGIR*WOa1lC$4Gtn>_}~zxBSeGF0PaU<58CI~ zgH}0&$>&oP2y&o{Jq%_Q7@u>GDV=`q{T$5*IpKW><)D2IJ~&+=`=SDU4mn}kml#lR zzwtTwn9}*j{00LLaza0Z4A3@UlT#=}IOcylh=gx(z9Pcvw?qCsrQe~JkP~`Nqn0)_ zFEG@DT7tF#+x~?40CW=Y`SX}TK^Fn-zpAr_u>EgH3m=#y;Hirk)S#u`i|Y@ZPGHDM z=Ka4g8G$A&M<@hMcpRaK5&*xsjDbls@I^c+tO8vOY#xYLsT!a%gh5=M-{rukL$Q*A zya-qth7~es!Y9JHGJ@Wc*u4sduwtZYdR>>G*a%%H8l736dA zS`;3x!S90bXN1w9tAOic@mvUW5%Afzv_b|AJd=Q%5qyZL3i#)(SpV6s2Jqc3oZ0c%-|Aan^*bQ{gPmP*-G>h0M+loh6Ml=Z4K(3- zguS58cXe_0yN+Pp@Q@hc1R|_H2IOmyd>h`RXlzP!f{q1EI0_*NbQbVA1PACM;D>M( zpoNUpWq|w(j&w&0L5J{Lgd7(bBB~IUgC=a(Qzf?g43K|8n_iqf1s%d32wOoDrX!Ss z&H!FPpt;@ZGC=+X#_hPrgbra8f(JCAD;_^r!C3qRq)$PcKDb}Q<9fm#2=Sl^(-BfY zXAtecQ(~fluOi4DRA6N)?uH>J?A;fGj_!VdRS4T5C(P+bx9#YEBL3mTV8NY=&j-Mi z{sJGRqb7J}Px!zLS?;gTUJ9B9IBL-D*GG~w+- zU2s_hb32$p2&9w&{5?aK&ho(Dlnw;*@5Dn#=ny`MPzIXtG(rVv!U4l^rjQ-r`y+5> z;Q@Xr@Uc;xwFg}YJTwMp4d`+gh+X438%s*S(G$76`dPgMtR4jar>S!XZ7Pbx_>~k9 z1Y1zFgF~m1F@+8lD|Qhxb`Z%JP$XI(efWq-+mu#XMX+dTf^;}3=c0o04_Girg@6ts z9V&K+*g^-zN|y*7>SFzU$sIC${Jwi~&b`S!=j3(2oG3|f@B&E->Wh8o6<&OamhsJL zg67nK(ggX7bPsjJ?`V974H=~bTDaQZoEA`OAa;q_#-5;2%_?@ebtA zP3wcJ2M!^<^SKG%N97e*7-9_Z3MqCC^Bh3iZkR$dzv+LO$@&IU5PpsP8r(+ODeKq= z$3_|lYlo9)ogGcVpQzTy|6rfn;U3QhM(m* z*a{5pG#ZOv&>VHv;2AFCFW{9Vm`0jfy0rf*@=6YzLMmqs{9qpGty=+>kv^!1)yR+U zU|!#2C+u_u9vx*6;XB|k(jA&oS>(mc7z|A!7XLI0tE90UJ65GFRM?jPwiG*kKBzF$X&SeL^;n-Jt>03Xwu}v0)^avuvsN%? 
zFRWx`tJo)7wW@8}mep+6Xt>I#0f9T<+FZNqaB-J#`ShFESZ7atU)+|UA3)X>T*cK~ Udofwe7E8riF}|2u diff --git a/bin/VulkanSample_Release_vs2017.exe b/bin/VulkanSample_Release_vs2017.exe index f5df2b4c2ece0c6084cb415211f3b16d2c82ff54..99b5843de2cf17d3cd9356b0f587f1058f04f1cf 100644 GIT binary patch delta 48741 zcmZr(2V4}#_rIB=3Bu71jwT=|pduhBC@LTzivnT=yT*pSffWS>R6I{H8yyohYLZ`K zG$tlQ&lrs;7OdENFUcwvG}c(||7K?QAn~8iC*IDR@4WZs&CHuOWtYOcPK9@!$_fka z**!eJVa)sz<^PS&P9@tQ{WqRh{0;kEP`sJ_E-e0o{T3Df4}SAkq?XX%Hc>^zU%~H& zF-0ZiW9OF~$9h)oIG$6o9_Yt$VKM#fF?v`Im7)6gf6K!ggk&bGRKpiotIo}DF+jvs$GJM~pm4j6kEb<3cS|Ehu5Dh3Y2#EwoTYiCa(#j<;Fv?_rjOO7LlPmqt1ewi(?38O z%HE^T>&4y{!%?ER(_%0x7dl_=MKrw)`8^wpA=xX*Ghw(Bki zSgmvXBm-FJj|xTm{rU^0CbPxUR)Tt;ms-5l&JFDqqpVZVP%+=S5$Y$dwif(8PgkqT z4kZO<)0zr~L5|6hhm#{K1pOI7f5N2wL{!^E_)MZ8!C+a@8+ezh4<_cT4kk{fVocg3 zvAazYev&2Hv~JYmxktrFkcgrPDqv_Up<7Z)@L#1FcPKQy?6tjibJdLU#euD28M z8$oQ{Fb-`L^BP7Yow%jpNaQcN+Q*|RvA2B^uIVEF*szs&+8 z7C&{WLY`uO=aD$Fvsmez>7M|NQc^KvOwnsJw@J}!b>0xoA8CQ{f?njT!Co$Hz*1vf z!tpH!aji=yG+w;vQh@Kpi$h&wpmn~0e{IDxt{ah)n6K>!&%bNE(f`DY+Mc=)#$C`P zzXD4sMl)$aN4Fe+|KR40j){leriKpbNbReyl7cX8(bw>3(w28ntBNaft%TC4IW-PK z>FgY4?-t@{_u=R-@r3(oF!vZ;0CEwR>U1vQ9@Hf6srnOX`V^bA=KG{E;%~Yr{G@^S zL>Gb1ivAutv{r2A(bFrXJrieX>S_VanG^_&?E`>j()JcNdW7J&vEmtz5VTc%?-3oe zF_!YJg#Hvicu@aJ!H^ahEQvKHlVH%MSpfq^m?rl3T-9KUHDF6Ba%Ylx6OlVIh^}5u z@Ew0K)+@?+Jml5rOxigh=$N2CFRc;hdF7y3@qt%QG+pfI-5ISHOT3d%fANTSlIsF6 zN&L%0w43M9o~b`7{Vq248HT!-FYp*&p#JeHiqEAJAv)=fnm3X_^|GayLt5wp?wOEy=PhPc? zjU~OkUpou1si*jL^GNSD0N6=8Rl;vHH(9?IjDD&u1q6uCnorh_4pXbx!0WvTa$;G| z9C1dApq|@WDmm*R#}q@$bXU;5Xm7QDz=+;;L;;BB81a?4^hTiVWwg!Wvlh{AN1K6Z zq%AKnSl6HeH%JFdM=>t6W%qS|bK_)6eJ!~CI zqWtoCVFH3aaU?tleAKh>Ky+Soj|fFJVsgYl+a6769rY)q2=RxAERUy6Sn{ZJvX&W~ zPA$~nLq+da5t(})(<-Fb8|csKS}m!?%WZa}WN}rr z4~i4FM~}jL9*8&Fxx+Axjv1$Ze;<=a>BvLV+Lsn6G3DCO`Rt5%BBmL>{#u;SEnb=V#pNMhoLeUs;e7ni` z!(X&Ke(zoWrrijHI*EhZ2cSmfMeThM4D1aZ;*hI&tHZVs+q-7gGl2D9z&ho1jh9KA z49*qwtH`ydUi!`*aZg82m-o-GDpl_W!qWAfna7P5A9fVbS7Ph97?dJTh#Q5h#q)8Y z_^G>S8J~oT#4hoT#U=5sVqttkR3RGT<2*9mDd(d7!Id;_;Qzt2?V%O##Rs5bu|dKX z+wCsUIrJAz+7huMAs?MA@6c%#az)hoh7~7SGufZ=w@^tI;{C){(30Ls;a%1{sZ~XB zsysX9Z%o=UNSm~m;or8_K(vHnChcrUY8+EuYeJI!B2Uu}XwqH)8r0P8H<*h6lIVrAE6w!Y_pogw|Cbh-Rh*DGp=R#tUo-K@l$-H7cA zaD1#?_KWi}r)a|-LW2|~+DInt1SO{f+6Y6>Uk%=iVOX;J#De@xfI9_16t3UjJE2!X?}R=HS>?wDJVww{ejn5xbtr#5 zXefJWk`Ax_=9S5Sdzf#H(f zDP9ReBOXS{`<>OIr5PffSSZ~6 zq`V++w<1041g}yJ^krV_&v!6=dYB;%Wrzxf3x`bOkT5gk{&CKwgBjA3LmHYP2Ps6+ z$8C5pwt6AP5ATQ$iUq^n(T^e--W<&@uN)qP(i;Cx1*Pwit{vrUpVIL_3Bq=!8&Uwf zWI()y3<%*$7YO&48FIu)|$+SQ}>E#j5(Ux(flp)&~OAj;TjQC-s&hF)Jlmi89 z75ztLM{oX{_I~>o5Mrhoy4ujN&d?KF>uR8vRA@?0gU=K6J#EqqJor`v(4-w9ZXFe( zO_VXzuh$F%l%q-eo%nc^d%KBp&LLjfk(SN`G0Wc5(xosP(a{~ArcVkCrsW_jmt}nr z<8s_lQ!y*2g*^M-({luShblnrp>GxH<_D&FP(8}dVqsm$B{bWdv zbXkTx{-8jTIOM&NOUaRpqUY#$w$~p~UJwiI7X8M=*sh^TSgA;}#ZSgW*-hK7pu@{I zj1evHQzjz~MVyFP) zNS(xGGdx=lt4HyI*^I_aH^54OT2E0$U$g48=P*WkzC*k|!=tCR9_jcF9+1s@&g&Q> zMe}mC-*K&Dq)h;!%umo}2TR3Bt{igQ3|T04ndxc2k)^qgL&_)5JcH5o^5pzpm@Xve zPDRzCL%}Fp>#w=vQDHHBpI2{~f-iy!5c9(i74x))ixDKUhSeZ*1J^{5Y=4n8VX=W> zE3M`f4r}jZ@1lt8A+uRF)4|Kuf(x5d|NP0Y-sh+JCmn?|4i~oXFINect zyQ%!b0(-URRwlHkjZktr7dA*LU}SB(LI+)t_@T5B>`C}6HsD{5i$5&ZHB^BE6kr_2 z({SzN;!w1({KMke2>o6@Ye^)sO<_I-x_(p9Xo$Ccf0(IpLb@uVrODPm(xg--rZ4Ru z^ea5G0nMiCbp3vLx`sU$1Wc%maFcd{SiLk9EfU`^4MRC%=rVV7Q%qhq!6ShVOqf=?IIGU-MseKjvKly*o;*ipjjH(3F+ zkO5_kCjbZsWGVnV8Q`mMn8pEN3gE#PTttS#p?66Eli{Es4$FwO3L=WLcvLEDxlsn} zf~bON>B0fW6u?3ma8%*&teA<|tN=z*0FU1wb`xii>1e!IOGdb-?82%f*dU|xkx4rm zQgj^TkoU8&k660O)nU|b+HrDOh~IU$xOr8i_w_xrOP5^9g{2w{p6MLH0`a%y|E!9_ 
zum>5o`Tz`~7pvp!FBa2U7^ihkiYy;zNR4ljN+sUvr&FuC9`E^#ZR5neEp!x+g%hJC|fRTMUf!$LTWk#5ZtpBg=)FelMH zH)-cgUZuxev)7c?n3ueg!>YtcqS$eVXVkBp;s|>o(m1)`O>iJkrC>@sJB(Gyz+sG( zEH2sM*?J_W#8Xfsxu7lAHJj2F@Pct1#z?Pbh*x%a*y}iH5=EU9(QlqnPe(De7E#Lg zGr0E2T>JHu=EQj&;V?#;FLr~1UvtV13hFNxocJTt{v@SUe!{fR=P*Wsox|TeTleOa z8ML|W<%0gaphOGi@q#TmjFJAFEEc=8)BBF{23t0J_5NGcWX~unlGlAL` z?WTScYf9R{u2-6VM0Q&2U%9i=AVi!&hp>2JcYpk5uGsvK5SiDQm6VsbtJr|S){v!K6`rXQc3+WeZZ_pOC+swTIwxU6EM%xoca z(Y?KVc;NsV=JARmP035>)M>F#a=pc^d;5Fer_)KQ{yYr_4-5gEo-Kf=+vy_)?Taq> zaS&~d^hMI>iM++;{0~r+aJDb+-wHqg0QK|(6hJu#j8gzXGKW|W$d*3kfUX>nEm_GN zrfjDo=+rRDnL|Ptgi`KI5TERW(8)}I(5X^aaAzP@lLFRG;CePNGqL78D$I~sGQ`RZ zxtq=EvepdAk|ATiQB>K>A+yYoC>i1iT*Qr)_I6q3EGMy|(%r5#OM|6q$BVZso6;TI z{fTIOdHVhU3pgO2T;(mdR>TtVv#O>RO`s9PJylKRW;>;aAis1R9O4+@BuBcv0%jb& zeYuDZx3KTCls45soe+B?7tYWQ5Ap5`6qxbO)2XO+Uz%&!s^Rm;LD6_Prln67w~!!R z7{wc^W-YIqASLu*)&HlD0{K<+IpS`6k)_jh67;4cI=k)W>`CI#Bf0|7oK6RhbP^eA z=4kBBI8HJ{n#qukX2@q8l5U1r%MdR!WHN^|Geho<1VQqD<*$!yMPStLkG0XZhT8KJ zvQ=QCJ*(&fETa7RvFQjI#c?P4*?zl#ngsUA^Td-Up4x6Mp$jP37@bvqwAz5|T#M!G zJH>1{Iq#Ia-Ps};^L4R!th#aefm3Cu(FI!2kS?SdEN8%&WXE>k@XKJhnzZ}Mr^2$` zx&R8&!<+oG@wPGa8DbI1T|9U;+T#@jQ*4SqpU#@d5e6e2Yp@S`u3g%$u57INNz(@a z4D6}@l(rBY{oL-%%(+?t^pOE$IKW2Q%K@`Ez(#56z!{RWxaXX^T^ptR z)BfVEb6>#Deg1hrG(%i_-V=>4-*G+>qoDG47qtjEl{;U)iBP!s_mw$vFU^=CPQN;| zfcund;FyKxuY|oHAUxVO1OgCE$!II6%%PG@NT^itR^W1I1OqPTOPVon_av6#2*_VK zA=^wiLJ1`=bAtr^bz$!v3qgHDF!YD;EfG$AT%buuECP?ZVZia9Cz(otoZ(xXVi<1jKU_FE#3IA*$Z6&P;6$?wqxybg2NbT zf!Ou7XaDawh0S$?<${sai5m=TVms896)fa1Mv4I#ZS;npGZ>r3)^k{24r3(nEw??R zT5(DYjZhwB@PZ#`CpH)|DXj-DXvblUR4F>#@r=6Lfz^lwVr%4rF{@ZX7Ij(hg1b44 zk@Ca=P;e!uum~$dF8F{>p#}pB&bG&}8cpOdMhX%)-SM;+IEBSrD)AJs^5+y5a$SOf zKx;jR>%iix1kUOoxDEi^8R-|%_pWEu8BTGeafV(lIG7g zCnc#+?Eb*L;2R}5IECwUh4rm;Njeh2Oi-i%I?8}&3gCMV_(TEt$bb-t1VEr9E#iO# z1@Nvj7ZI-jMn&)yrJsVhA|s|Nh|ZjasWq#EB<+*|OBFy94!EuWK9>Op6o3T>>{b9X zWIzj6jRH{fPB^RR8U>LhBZ?Fj`#4~>0%#)x`q6NRR&)ag^iu#@88BJlP{;vo6~Oak zZiz+Wy&8?JP9eiae~r8Ct0=j$&{ZG0+n!gF%f)y|Zc~ylbv|@2_)JN5lUXLNl$F{U z#;P?{0ko6>o4BW!q|Z5^hXSyd0YeoIGdLhZ0X$6NTIVPnvch=#IVy;wGNM3X(T1~l z!Y)sMr8dccrV0x!2b@pTwau z6L&oJgOm14kG*`tVcnYsL0m996SY{HMQQ}rx?kak+uy7{b0uv7|=POjhIrq$!<0y z!oG2r5wLIURqp=u20}~4SI-)w8qxFb;RO>u!K$Lzy7TfukTz*I!oO|R=JWDYNNOAt z;k>+rMg#Q7Tn-D?K~B9>b3aI?l@5EIR2o1xsQd(dLybwhX$aLUi6(7K+BI@2STI!x z;`_fl*tHr;^C5sipFo7-RX*hTH00a}Si$ap9Q~n3<1br?(u?-?Cjkpn-YH7@L5z4A zXIo4uG{zq$&UzVZ8^@E?QX_HC%SH}i&6rzav&3KR#G02L_BNcvrU{6!UwKB|3SppH zsGIb8JKm+XfM0}a+RzyXHDN8jlfxKkiZ~bwik#v{L0#p7gL%OiN-Ga!1;=q1BL$Xk zc@=GeI+oviTY}Jz^0DtBB>765_u-B0v)?wmf5^D?~PbGc7Cj-sEXrAp`D9J*&%S5e4 znYJ5fnughhWS|-~vn}yq1HW2|C(G5yJ#Dy>_HD_nG_{agDZN@+tYbAzQ~)1BIpDMc z7|j8V6~Hy}T#csKT6L#tLbQ60OtL`jZ1-i#YA+|hTA=lII+m79+P^Z%WDDd%rdT3h z+ba-HupYCCthPj>?QFcLo^*+{N)71Etjx20|qQf%aq6yEjfjN`T#@>Ud$`m`r znd#iz47nFX9yLJjb~a|pKBBQja8Tf;a9J0`xzx;2xHRBg_L(91GNjN9`O}FBT4#pz zl_Bp7;c8RJ6!pxU)D6Kb1s?UM2p?fUUDE@sGa8FC9Y z$G91yIiyyr)NzvxSzv~^a>#KrWT6ac4iI+1EL?i($SS$Xj2UhmZ-?d~{Er){ZHNY; z9;BN+8t$+`sN1S;Es(wT$g5RmJz7I}KA3zHQ3sMKl%`7u;hHY^B#U|&DTO#Vph)+> zQ?QC%^eK96p-HZS3m#S`?UyMe+W`gk+X$J;p7=O;F3bl$xz{M)Ap$_{c21%LzO5^b zePLHbpm)9CWEnQgx6~^d0ndzB$R6O@ND4XcfV!q0?F*^BB@IrHdS}oC7kYG#QY|0Tq(bn$O;oIHE#oE+b4+8H2zXMd&qY?S}l(bz_zraz;9RJaCrA+ks&HlE5KyOTD)>&T~g82n{EPb%5?- ztkI#7$o=aM6kTG&(d%WjmgIP%h4_&*dFqLDxTZa^^Fm3;Kr+3M2htnIcp(>z0?1q+ z)YSEz1r#n#4o@?rz~F%0kSb{e+2Mn>qgG^MBLLWO0Nju;19HjtjnECGBc;9oc#0W^ z%zA)+#!6q5Y=u^l22BAm`5o-UlqR37hq!D^Y>Jv76hlS_0mA7mL)4}j5*00Gk*|Z$ zBh;BJ4@NW48DblP($E5uMSt3oZ$eNXeET(d7lMYPd@{5d8V33tXoh@K9AT1xIlck^ 
z!TjfK3r2hnKgUekM?Cc>|GB|`F7lsK{O1U1-WJj@KscO`z`h#GJjrAN{O1bsj71&X)B~-+@D-A(Zr*k4&C7fuVm4QZ}=vP5miAgEgI42H4g&RIerPIg9$|+{5122 znCS4g4~D5(igY!Z()4+{wAfdU8?vV(KlD)q4Q$ zj&56&iDCPE$<74iSkMXfgJBScOYff`=K7qP%7mQDALw0te+6(A0CakeG8A4j?nI@vHLY8Yr%l`o57V2 z8oHS-lN%{0+U>fA85%AhU8Yt}52ih!J82=HXxr~EG9x)k!^wC7x!W#M(uriLfZ%!* zOVbw#{K(G&asnm` zb<01III&%0a=a^=q+WNP@kL*ftTdFO?#PoC9bhsb{nJpQ`uaHzxJGp8C>RYP3F+v2 zw4KzXBQMS4228+9dhPp!w1j9gz`w2`u^FfU|9OTf;n?V3m|bUxTLub5$B11wG*6v* zhI8*mzU_uas!yI~Nt;*Jz!$FglF&@hF^*GFM`FxG{+cD0&|4rJViy23mgKKYG!R`U z$=y+3gvq+@V4VTvPqv1A^apJ368CLWvq1m~xaLYz+u{KM6I8EJt<3iSWE= zKwu0F%d&-1xIqN*-Voq3l7pDOT@d-97m9Gr0vZeTF2cPA8ycxuBJ#8s^6ICn$MUL* z1$#r}U@wOUdzmy2`u;d|BlMOO4fe8GT<24-=msdth_&nif;}S{*&CfjBS~x@bhK%4eILOzZ z6VykgePrkW)Ds1hZ39q8gvj#&Xa|12pZquwHKq&(LSWno&hX`uj&&t%h}R$#gG=|5 z5qFAqKCaglT#hGJ~`(n1D{&E(-w6i9v=2GLd|**6UBrgQFaq;nkbH?yYv zUcx`S$;RQpG~*&=+L>Al6g$KfyC6jy9}Y)Rh>rb{P>TV^V7UF{+IT=hfe9$i{q_lSEBMv5!ZDIH0mYznQU(~*gq)m!zCkTW-b5&9Lq4B~ zT55VjH%-^?Pt~8J*HmD=F%d=hpZf(ui^eAHJ($$#3@Tat3-$6C37&*HqX}faN_d|x_GdA8O8Iw^&!8V}% zdkhW(3M{UMsPcj|zVR@AdeM;u(UwU&gEL@)+bb}>2CF|T8~pt7u$)t`D1v7;EHm%g z&?gv|?!;;eijP_eoAxlEXM-#4@5mqx0c&=2%kHpja1_`06O{a#%$kDY26hF4Vv{g< zKA5P3fVL~_P&w7h(ZVxO9pN>JdQ{VGriCxRQB=pS_3+iV zNKh{FcQ^*QJE`-diY+94a#7-dvGq!KgXcQF5%qFf!!xh@>UvaPc-CjZV!2xS;UdHw z+Lk~t3%I|9yvRjE&|uOh4@I=S1|G~@(IGSeFa5;8OFx3X0d z9nz#C4@T8>V#-4%SaO-Bp##_@yOJsdJ&pFRa2n5iOD@br2A3WC>Se^x3@hXGSt!5~ zJs_(K&@ksg7wAX?4+`)1R>6LgWN&mVM1B}Q+-YnzA5BH*g7MoTbOzp^sUowNL5vV< z{Aw8*ssl%Eu@Sw4g#ygqBXFZ1NzV7EneVeLv?>P6#SreptHJD*O$UBr3L#hv0pbap4t|haq@)e+~6U#iZwTa2uz{lI!RP6liRF1GylF9eH&lbWI-VcN2|t z_@*8$IFFpTiSiwG=GNi9%q7{kP^+ew>(K_mGjzfM^u5Y)bSoqmrpTiG;e#v5$y?B# zOGw0R@Q7{l$fVm40F+H7OK(He@|65~8--g|(9*=`GV&n!4(e~~&${#I$w)NiI6yCLMqJID)tMvmM;3G(smH)6WoF> zIjt!spfkMwmKk|?WEmOq2u0a{*^PF=^Dy}c(i*bi5lTjV$X}08kaxJC$FTD)=#eSY zkGx@w|f_T*;4~O`AwEZJUr&?6%@coE7T<8dLsTQ3= zzmUyO(QUkJ7+LlV#X9eMK$j46!EhOZV8}w!P~+8S5Z{5B&~sFXW*g@}M}7#Ek*{B% zw+_<=*KzDOnEdb(wRRBe(cA}M8=?0cCPOKJ-=P((5rm0NE*pFrC{-Yc<-Dh9$x~ySM{jYYtPa@tU zm;NxAswKxdIa-iIN$)>aYSYq>jrFN4pykH`W>W#Wg0C1OR)_EMO(t%!4j-%Et{(R} zf}tC9n(mH*>4d zeFT{KKJcUa6AdT=Xg)xz$p=D1nb43D#!9U}R^|4Rk8AvR6=yS&wV+br0yc=t|ARM= zlgmtR);c%nrD{;)(8pED{ZDb`0|*!Q8XEOKgE+Y!oHXx??V2|N1H%Z(R!u8_>p0XM zQ%Tt$OR`)-HkeQ&*A+8A;(D5#F`-D;r5_j4ukgfP$zh+W>LE;4J3z!#!UZbZX zQ*6MA%@OpyX~z=axby9D70lSM4(7XgX}w@dXtZKI7GY%=g>Z-t-W1P>hu5fjLVTBE zff6*^FHp@Fkz9n!ZCv4<&l)S~5edV%8E)5`^v5{R2>0aG$o{9cF!ks!2!^hLw1t$Z zaZl%;flFFxQ(&nR^sy$bM-TEsjp>@m!ve>;gR))YAzp~5A7AMSr>*R{l8m*$Ezk*AO}+hxP)8sZ}7b!SW__%ckA{!ps^ zEM1DtB?b1lkNvtqs)RRG93()8G-fs)Tg>9tAfT zl)^N!TuR~`u@*K*dOPAM^fg)Rh`&dpNt6@*fNk?hlrxxXmvN{wa6q@oW*1xxhXLWP zxEY+D^>@V`V1fC$D<0wOa71`Y5F-Elk`xyTAECj=x|r=bZ~^MmPSXA6`IA6v+PrQ(E0+OFo8Y^=Fdwr8aW%^;#)5~z|7doa;A z!JP{dr%_(1Hp!apZ_;5U5(_Mo^lH$iEz1EZ!Hkq>0kmX==E`X{K5eB4ovO>EEG^#J0*|-)!#!SeN_A_7+d)3UKU7%pM069%xluea3Xung)D><3o z<^yt-LM{MuvVMUDN8eJ&IY5rQqOVD4`z6hqmNili?o}#M-4xsOz6Qu7{qZFIfpqm@ z{rhhEB`08EuaZKcAk|bbq&Nxc3J`ULArB<$8y=K2kZU@S5c?x#I_1}7y%hj%D?*== z7C|_$;3_LnDec!$Lv*7?d?Q1k$4gs)Y^qF%Z8YT|7o@eDj#SVXhony-OQr=;T4aS( zP)AAvQo;@ptdd68Q8Q#qrKfpJRwMg_U? z(o=V{hUXQkqjaT?T05O&1YQk1cx3~^=Sq?nl zWWYumK=uaX@D{N9YLlgWIsmrO!v*+6mLJ&5MBkCm5#Yx5B%w4na7LQ`Ht`C)aYPy?D~)5SqsA%2QRA54r*Z1+Cm8e>LG(_znFRZ~YvC(n;GBQY zu~DgZ`%#tXc8;bws8V#uMksoO@*WALn{yh_bx%^DjZj)N37$(g&wzh{RU!Pq1*m3J z^%e)?W5zl{U;-~W6)mFqrJKw6&no`29)3zUZ=?zGJ_P%PR9hpsz?859PGTuHdc9Zr z9cb{J12=VP7Hs9Jr0+=QX87Bt^rdoe8t`T<@J6k0*dC@sh&S2?Q-<{# zCPjc^aFb&qZ0}DTQaUlGOX;q+)Yeo@FGWpfLvOz%Lv}h!G9*L%TwhCA_-h}n? 
zv?Im>UN6zZ#G&5TLjE;VOydwXQ^1aFOWY^%sZjJA z6y`S%=p%nn@el1j;ArYpJA3CT#{y(w){xLi4N~P1eh2f~MIsGAAGV6u7h9 zjC4@Uy!94N6EJ-JG{M(LrfTM1N`}*IgEkfR7zBMcm~k9Gn95#4lt?GGZcyVx(28tg zBCnL)Q({aI20DW*BSsaeT@X@BMp!ORoRnYrVA>nkO{l3*?B4xKxfNmVi z0&XS!XkXMXV8dR&l!Qm%0GkMwYgkHVL|}i5!a&%ES|5R7?t?}U^p}Lvd0VMR&|40s zgwlCCcq)RYDtM|ZPgU{Md)lo_=bhv!U!Hf7ryB6oO`f{KkrGd7d0s6|73eL4c=9#R ze9ECJYDm51Y@V{=sYg8J$Ws@1O2q~~m z;1<}`mwXxnUxBP4J7e%rZywZd_JW2=*Z=x~4#9NV#;~AKk?6L#H>x0pwpg%(s~nz4 zb>9SI(s;8iZiBE@7}3Sz1F$=Ko&L-u-?WGL(4YL-9;Y<>6-IynOAy$_33OJeis1;U zGmtdfyLlJgN19>}NEE$+7cjy}RtMaq0oS98J0is$aGtMVqC*slAD2=x>5W=b-Ad_4 zH$>b!VxO=DTKLvTMw-4`Fg+?upn>!WX)-0Yg>Vsw0=xrj&d4UiI%0p+hLm)Kp!Oj# zcErK9l{z@6DS4?WX+`dL#7&Hjarm({oST_CK~=UL4h}P z>ZP>9(lvy>;e9D|H}qlCfz;S1Q|{F%Ivq+T>+dDCWkW}^0PF`FrG+Fv1vmBOE2Z`T z$|5MG$}C&hlt{bMazBH;0s{n8B? zjz*IJ>nGHV^z4EkK$CRrif6!~r?D$;ja6sK8XxqFrGQn5%|K))Z$H@X+~f~y7vI?r<1^nrEv-RgEiG;K@U99 z!53u1jLkf&T20hFvEUF=kG2JqzCH2v7S~Xnlimx@0-BIb1-LQq>5J>Qe2t95dSN@H z`4&QXh4o?;B6E6UZ_Sl@#KAz^*c+#6is})qRcK`&T;-4keNrhMZX)OV;Bei9dbDRB zU_q}x#BL$3D8zoGV-}v}FaW4ZsrV1%U>1~`TaSkSTdHL?tkwP?)3b57yssVfj{KO7 zJwv*}1fN{|GH>+3;=rviKG_DK1HIe)?`_lH$Pki7%3D@pH%?VY2acKZ{r~l()|a$r5+mg&RLsWVh#Q; zrVEJo0GK2KNZbG%m_GMoK=Xg|q5E%sved266%zP1<~Hw-6nI9q4!~Y;H}BX0+{pF2 zk2!xsJ`BL_D2=!c#Qt($U-F7{9Ejb=>RE-9okX^g7}K(@rIgcvq(ol-U5!5Klnwp^ zX9kG+nC&f6F%UP!v!cj=e%RlwS~~Y}2cJ)T2jRx98n2I}q>_kg`>eA!|lh>IMk`)5#*?7D3)AI8E%5iwOPxeDuBkoI|-mi0EdH2b@aX88v zZoNQV_LH({*qxjihg*8C{>#jN5#zrT2z38s5gEJ~>JvL2yEtqIB@|&_NaWN4>`A7L z$DTedYW^ki0LQUOAXOq;#$#OopD}O!Nx80|vu~QdgigwWeqTE9Yzy-gk;da;O)s3_ z!2`Sp2iekTU%b)lut~j*qn|j0=^F{pWy$*OGnj6$4b?3kULB3m!6N%oogN47 zf){u!w4IneEehfm@^T{XARi=-y-V6n!cEAKNw^SQCATKw)@}>#z&f}%uo(2B`f&?y zAT1{2whrrnm07>LWa?ym(q|@BqHa2L<`Aae$~$Dp6zuN0?J}G}z;v}Fu!3@h9`pB4 zL_Y=ZMAJ#;pHJn&shH?ys*?qbtiI&l|KoVTD9LjT2_?d9zN?2l6Tp z$AlR`c0B=2If4n;1cl4!=jCSXgx(2!s-?kX&@?>9dG9rI-z9g=e&Gt*Oy6j_M z7s-oRINISN2vs=lx=h;5##5R(f^zj(e$OF{Whgv@D}r{U-Re7Xem3kXcP7#KI6%`0 z_LAgpA;CI$I2oM}{D*^3g}-o#{Fsj;y{|yM>haIz5XS#Cd7qDqG~O5Lv+78e&A~C6 z4!~bw<$qBn{;j$`~zT`)G8izguzL{=8yrp`Cd!r)|%U=`~w zs&nK-0gSZw=jwRhI!CmH;DlXBmqPqMpW~oQJ#9yFB;)?zENM6omuT$HnAL^7hETG8 z9*%Ko0iqOj9pM@F=fth6v@7R>#$TWR7ti(_%6L|sF}9hH^$5S+Mk%DAlvQ z_l@KKQgH@{FcsgCH;Z9;qbIHOFb6*(tMt$SZAst~+!&wVN^UL1L0Ef~OkRQ)K#co( z3H}@%C7&8_C>lsA3~(~EiaavlL$FNWwiL!&2Xb>M4#xRav_xl;xePCZCHu`~@E(O` z3)ySH%~2N;TZYf0uEcpcgeZ46a~S~#X^C{>*5#lLoYLI?6ixseY~q>1ewx{h7(_UG zf`XsXf*a+6m8>Ay`WYUMCK2rl&?1?@%hS!U=RWd&B~0a;A0vD8oGf1fh$rOb3Jiyh zWb8`VKlqvKT8ZOODS5XN&h3Vg#sp6DdXjzw>i3>3BKR_HyoU^51=Rb-HLI`#Ml+0? 
zzrbq{x?&X8U{4EFVH~!e(tkEi`3hgf=&Z5Z|FA7aYGc3eaB5?;oP2W_``N+0TA1YM zcS+&0`e9hJ=aZ+0@epSofgZA)VZ7tl(fEQEAup>CYZb2Uf53e~M!MdsZ;x;(V-Pq0X&Cpk_TkbFST2G3lg*IkG9>P_0$IT!8D@x$ z4B2IdOyQ8`X2`vPR1h3;*z^|2t23a|Mq}7n*aAW4j2q9vNPwf%&> zNGvbm{;>8KdI^7t4y?Qk{aqj4i$V6IpfzkAaQ2f3Cn#Uq+f< z#o=&QJL@WR#@3_|QrKdS@fVuJ?|PD}*C5I&AU|G*Q+tt|z7E0bcJk^vlqn&>H*hSR zs1Cn@pTTi?)=m5YX0krFaE}&g(?Co1t-J)7A6h4uAl0D~`q?xGs2%(KJ#<$?6%Zt~ z@$4-)14jRlfIA@LOOkR27oZR15KTQZ*4&|oxSx{M_u#y^t?~Ii+(``=rK9iTL|9_X zzK<_M+}P&Yh*>tSVCW{~7XzR%FppT>_RS$3{>j&d#;uvwN!L8sX zcTx?$jol`Y;D0W2!0i=LmM8#x%sE$=tJDZXB-p<7XWMxe)RYR+GS>I7+>(# zQMeUZ)&Mmk%^zXMh*NN`3yo4HElJ|76uyhLl3@s~=<}AWp|&s#CygNE9%0X@1@%ZB z<&vlCmHd{M99$3PPIf}c{xS7PwTWD2@$NdA*}SAvJ=kFx_NX2#g2NuPqUv+iz9M0d zLAAxAS&^eqvVTxL(v42M zQkC^ertp%gdaxZbtYJl+06z}9M33?1N-ZbxPhc!;V0-y{W$vy?}IS2Gm0QNG62n8^M10oc_Ll_XsOGnIe2&LrJ6PVg25uaMv z5rM0VwKxgIlagB85-!I6S_`||gUL&px1V@Cg`e+9`=>AkXOq0Aa6w`h`Ti+*oYkc9 zGf0gheV*YyDBrl{89s_o5}ElN{7o4-_Z<6qzZwqJDz%K6LM@`*3kNDNPMje{kE)zr zU>8(Hn!JErSyxi}0=(7@^2ZC@q}|`K)EX;JT2MUr{`SgJRf2WDvKX8ZS0onWy-e&)I+Z1B=9yf)X zHLc#`6>#&Y>OC$4_tN(Ro`;T*D<2?8_aJE|{4?0(g9#_w{+vboCFoSPB3<3jHk~Hv zN3B|{_*tD~W1>!4y5+y(y*k~k4nFhkTFvdavaCPOX*B$^rEmW~NO>QPS{11W4P#pIzY=eHKb?s@3o}UlhWrl@@@1;VtKHCH{~u zIIEf~ zcN=vhq#nUl z0w+>ghxHYI!|8L?-|u${U^x6%wyL$B@BpAP54j1vrN za4aA|uU!Y^B=$NvNPv6@!-C;|Ka=)WIxWZK{!#ZRlFN4L1+dB(+E6_~+XON?xV9Rx zKMzjr?5n|1z+Y$~cN>C9rV(pxI(ifxLZ_f28e8DQXRC;9X|OG#O&M zMS0*gF4=` zQ&(`shb8fr$s~0=PPd8WH(Z>^I0yB7*dBZ0pdN=q1h`(Po{ip-OB!_*E>9s}JF1&w zD}kJJRL8+$%+X2R5n`ThPU;ZcE1Ar4Qg?$Lh9geuOy~JYa0f?KK;^*8={Mjz(xip6 zx+R)L`a7$e;kYES&{^FbHViL0t9?2wg_$1q6+DaPXqt3); zc2L0!ww`2$tJ(o&l7+771?p;hUh)J9(yAvlYhVx3K$(yJ_E8d>q~S!B{q3XM4OL=7 zyXN?T7kR8zFI5kLG?hP?taJnU>+EFt7vk&7&$Fv1zmR0PtIxu#_$_qm02pAII<*&! zrExm7j=u1xQ^#9}#X)J(gcXTY58<)Qu?qDze@3`lsj(^K8bZ$0UEU%Nqp4o4`KmTKqo?;^PgPT95=NK-xC~_zBXx5x@txrSJr@rja@W zKXfJ!8>ye*1Mmi}58yHJZv=R6KeY}&_9kuo)N$4Y(X_x&lJ5un8d_&NAQLv? zs;Kpy1-&D@%EIb>kYa!6N=qZjR)2LxOYY{Bb16W^ z+J6q=)3ZEhVeekYk+1feOwaQ0vr1v_VaNLy$RCZ>(Tz;DR0|I~`1gJ!{TIHL&tK^_ zSbDW1E&=L1=%S?o>MVF8>Q;by4EBg2>4D%CE6Myob>jlRSft8(+^Pa@AO}}&MK!O? 
z95mZ4?I5${@b3uJ)+ALfvV7YPZbF{0fVd~3A-!dgXVI#n##IXBi3TttJ>aqC!D9NC z0R8&YzgESxi7)I>0Yz2%OPUgxC)rhmg+IZA6y*=w_F%2xk}G*IZ0x_H67B#K_)v@^ zq&Ly@8J5nS3c_Z9U;L9~HBq-gDP(CAwO7wrKs?hp(p$JrmTg$V$-X1}9<;Q9f6-RN zFITfaW+lyoPy_yw7X1Ar{oWnv{ooj|t!2cCcc#3V!Z6F>@Bs6)KY7z>mvSwNPv~>kHT3QEZp72K= zr%HbU5RBpl{G~5pTS3}u#Y!##0vN1^G#LQWH{7IfxxoI^ln*B9OA3Drno2L}Kf8ej z(vDDScYqcrR^(brnKbz|g`bCd12gz)CvC#t=njQ64dlREh(!;BrRGosd>8F5EUgBM zN@IvRSe@8(S~R@+0M|^413*y8;k+(~EC&M#5}>v=CyHsGO2!4NJwk(c>Lwh=NxuQl zLzeyk#Q?3biQkUm7v%=}fi2-Uz$7grzXYqBS}md2vE+KNx}(i{364`PN~I()1g8H{ zq;rV6dBeY;YnE}1uaY?-YF(RhP@L*|A8KYgc5EMQeV8oa65O~z9sR2kPzT4z!-p(K z!j(BHJhI}DWgl`VL_HS0Ad$_~v8_IB4fjBbs!&p?Tvh)|7-8tqoXRaek)z4AwWXs{ot7=gb<(ui6lU@gQAHb zwjoH+AmJ4QAESYZ7#%d>p+*oVDomlG28S7=A2SmjorDpzc}YOfsHjl^1B03fYEaam zK<;l>?M@o*y6gUN*Si0Fp0(=y_N#X7s#E9m>3ypEz{c_KFT;L+$3D9YYmUhKKKnTJ zRkBPS5dgueM*n^GU$MwYLi4cw9H{J{cE^;3u4;_gXFmZJ-?^b^QIm$tq>4Vn&=vX~ z(i=o)_}L_48~@$Q5_B~5dt94IM$1ystBh^9u8P((7y!)Sp4+JTL&Gp9en`V9^%6Mx zler9hFZvXL=HNL_M$Re^RtVY`5x>^Ho8V!48qCQu%|&0KAlql}3lTND&+ZS3j_y8t z2vZH14kG8jJTy;xr>U3}84-t+3Wa6Gc$Ij-9QXy6xoIH`KT!oKa^Qe?-KBqaz{42) zkibKKcU*+e0NnW`smD@eI=)ivOOXjk@SaYQQU3ay**g1l6)-|Z1x#^L=d-a~%bu#7 zBV>Qug;DC86dCDHe>V&1Ky5cGtlRD!ZuWB?Ml?I=H@55SFEYPsr+-^KDw!PD42)(B zv};Mb_8cm#{WJ$AtpX;OzGOE_9UmbR`h0MP8l;r!Q57^&hK~&W-YeLFgMF}VCOJQN zuz|)7O^@+p+65NjIW~B9uS!f*(?;S`ZLnGfNW29Va2J7$WIx zt#1qus~sby3#sypBk{SqTRBI`*pWS9zN!NY2_c#39H6mFHUlrb#u^>V6=RF9;}z^JeEO>R~Tz}KXXO!`-J zCa%P2btabSeX-sb=>7G2pQiUi^gc@O#|>3u)FAE5Vx z^uAKpdWrV##7&xcRPT*qsisr3sgYM}$@6;uh~AIXdw;z*0a==UtNyvs@?pPg^Pj>e z`>62y0868a9pCN$!4Y)e2S-8RA4n0lvkg~V;h6cpV`g|RB+v$?lBVzgl)D{o?G%mz z`)qYbnoOU3&jLTJzy9o;BPsDVkdCAg*AdLz1HlNbE4~0>2npT$`XIT!7S4+uw|X2& z&5oq3w)s#Hlm4#yHcdtje}M#rhaH6E<`{;5XClW4C%s0Sze6&2v7#DV=N4759JdqO4kF)7QR40Ze8s&~_6 z4E|`UJze%Ic;2)JUnYXWcZ*@Y7}8?L%@>Ltg;_@*MhYn@z1KYTO{T=VpVWP0r4xTu^Y^iGc<{Wr zm$rPh`g*L4_kV5GWKj=^cajo>TC%uWmmhNFpiO?|2A$H|ESACN##scpBZ@6u#M z_G2XRnbS+Oi}YJGc;|GIeUauzOf%XE1T$%($K9q<)8znFK3?A7H>3Wo@p8Fe;I3%8 z((KXuW9r5%xwc~TN9S=J&sn#o2!q*Hwx2l7;}cf2QaZ=iDmTKd5Lk}K@AnEIag7F4DXZk zww+gKiI!@ua^E+=1&znt$) zjl_^)q4IMfp84<(Q3r`O!j}3<8)PLUTW#uBQvGs6yNF(H`r#qkX=0BddVL@5&qZ_< zXyc9E(`If8zD6$EEiQCsHw3}t-%7dUpo)E?x;y`Rrw+J<9K3627t!-lwKK$(L+n}8 zjvk{%|L%66uO@x|Tc$7In~1-rn=d7P%O`qjl~NzGlAweker71z1{gYs=paM8iM9-_ zoptQh_U+{8v|5slab$mb%o~lmk<{A_T}yPGp<8wPhOQ!dBWUk9A&erDoiwM3ZldGI zOZ1hb&&P1UHi_u+i{~SV(ev+-i#oO(Qn=N?iE=>je{ihuTKO0(*4qei>pXS+L^&$> z&zFhg)tZSo7$-+}&oFXwxSQ8btubBtBpR4kyY+6e_o?4P{Q7Qv5%Fa?w5rR@^m5km zjHCJ$z7=%$w&c>+Gd(3;+JV>MT60lakAQLvja%=Gwlc5%T0N9GD|R+hwfin$V7f%K z9ejN3J6Pve2w~c8eGK*FOfs~KjHm3<@@hRdjoi#l(CnZbxBhMUc5@PvU6rY$57lz@ z@-;FtcguVF2vm~GsGqu<3NUo7gj^V!o=>+~P1;_MMJwha`mOulaNM@JEpcyLUJ`*ot`9z`@N>3u9fLF+-l!= zt&HlsV=!*%kyF13d7Pbt)x!g1%D_Jd3^ zI?6%QbiMShde`~X8^!fT_|!`sq@IkDDbD{msJ*>d{b_pl9UiWDd-pLtXrW9BP5i){ zBJqo##`?!{GxeVWrF_|&vCLDOzQuTq@h8ThY!lEB#&L`u?x^lH<=%hFF@^^i z%NU~=-?`4{*E3FGv>4yG*63F%YoWhvQxSH{C#+Mi?p5jPrSWvhtg71moaq#y1$ZGCs~&#yFQTgL~v= zEMc6&`YRakXZfRyKaDr-wT!pS`G0@~TNoc_T+XS@LHF$C*I0i~p?q^hMPnBG`~QqhEBvUNs(y_Oj{Uy|q1x8S z7ouaru?Uj-7hy1HmG&WA>czEkaCA$5BX+Ua%~(`_e65@@3hT!*UzbDus?;0;H$kXM z`KJuSPsm<|Xjt>$OUE(J{zNoAktywGjyVl6QFy=oc6e~Ao3_KlgzfOqt~P`2hnsZq zNOZR~9gSM>PN8nwhQ|eNGK&lrJ|<)wq-%#$-?m3}*Cua`>I~H=@e%8gjM#aZIKzlH zMPBNpRxR9tfbo2Wpj{o`fmq2+ZA7;r@eSZ(Osw_(Z^#a;S66;JQKwWL-(|WCu16CF zq=CCsr#6#8yE?N|#^VpUg+LG_sngJ-qrW9K8rvSF1MwPIBvR@$n zy1eaud1K&6c%eeuFg57|Irw)i8B&ZaTTot>@Vh%m?8=omcrasGg3~FAAglBD%F0Dh z9s)tc#^#>+R8JI1;Ub%b^XHV!Nk}IVLK65cB}O`(^NMfB)kwYffeg(mfEboyf9ZgciAqzlN-K;MJ zQtwOD7f0lH-2!I3SP2=KRYFa;OJyFFH|iE@p(ar^d)TF(IV#`OAy-26Zyv82^}A#2 
zumEBkJFMec+ttov@*(s$_j5T|`*pJ4CbqzegwN&V5Zz~}CUT$KuUDUbE^pS6w&11E zzp!I5*^EVi%59Oeb$=V6p2+GLR;K_V!j*AF8>)rJ#tnifYvya=B)dW6)u6LN+Yx4$FB^K4zC=KP1njS#-i9sRojpP3opLIU6(LWq_WItp}yRsu*)HVMW_^HSBBoCN39kUrRND zPWX+mqTy{~iz7g4rPl^=-xKbdvp(X9e##_ z%TS$q;50smUFvUSwB&N5`s1f%sD#yz-)WDN+Od|ZPAzEHw+2|4Pu$-O_hGxfy7f9E zr(q7wI)k=X*Kat3#k*kN`(EO&SnAV%kRSU21Aml%w4v%VKg&_j9{O4SFp#RPT@ zgPQd?BWdfWUOp$|Zy&;5h0;33tNDCKG^9gqnaagsFQk$U(!%&c$}#P7`4BcfIg$@8PlBxb$H@&_sz%=qnP+W7X#u zWpsEEjCY}UxIxXUz)plN@D{C9lrVfHpjb4oZt9f7BifVDBa{MG8D2f!DU-vShT|xq zY-FA0)q9;Xejr`z+i}xw^P~GD}b->7hrZ5pB|}aPuLb2Mio*4&@gc z-m~KU>NfmnjmI@G)zA99zP)sFl}fJi!9){ok4Yxn7u9}0D}ICrEWI|_gYSnF@1;CU zPlj~MRJ6@9SHC3oUJdUj@kT4!!qrV)wP1A()JOA7!z0zhHfy|&tr_2ms5^BW+p~qn z~dmb{Ou0 zfvlbuebC?4Q=7bI;K!cUWK_uZ*A?#b*Uokj;LgC`8hZnsoQ;THU#!pe6O~WcL_10l zGALTMCrun;1WUwW@FW*&1t>|S%`UR;cds%c(u&o#Rg)BfXYZO*N1^2c>2ez<_ z8imszh4|<|KVi+K?}XXCt@tcsg0Y(4Q#hZpi98fSJE?tWB&$@onl-8NLjAZIoXhD{`o>Y4vkx4W+A@ZeDeE$Z+&|dLKRD zj~O`B$C@{!Bi4*w=}R_o&41g3;nB}M4$&=BHwReB!8vipHt26QaS|m*Jr-a^h8rVLOLVJ81X>TJuZ~AMc6>6H*~PFu zHZcyRnq)Z2m-8{uk!-bU3q*rQ*zB>rHt`h75f$6l8XKIDz>)8>iFg$J(z&m7Rd5-L zf7*|Gf&(^uA==j0x?A6bJ3p1;eVjo01a_NyfB+z!Q&G$llAe(6o)ds{6Yf+?9MLOyA3ek_!qXOsaK94KgFboG8HY6{xC!|l>QVdWHj#eZCYn@wkYzr*{n2QsV{co*?VaiS*&o@(J$nkvbx?QmAQL&Sq6W91^h}NSYcT#kdtnF*dkWa7Ar!<`-4BJ|48cVSoCLad?DC!wO=MBik>)A#;)bnkyI#G({Lcqoo$z%VmJ zO@pOa93w@5x*-%t(<`otlVUw49xC>>RS{G*Z5BG|< zK=d|BF{_q>+osg1Y||?hDzb`X~c%L4Yg>*=_8ASY@>QC%8LAL?Zt0vFMeBlA%0tX z@!Q&q-_~CIw)WzG+}ewY|LfevCR`!}TUFP1gux#O+#S8CUFfDJ!lso7Bmk6_6 zx1j9myOu6pa7S5w$MqrLke~P=pbOM#5Zx!zG+oT zudcG&Duf3L5aL=SQ4fQ51J~h|;U_^?1Am89W=x1>QkbAAHR_w*^i6$IN;C$qac_VP zVIs;w(1epvj({e-73DPOqM!123Poigr79~y`=%@@b$JA(8FIqY zR6zUYE-8mO9LY$3)^bIEn^+8*@OhL<(1f3$RD;I)5-|hG z$1R|J6OojVY)M2wWCOg2V$s3`WFx&9Ny4K@LB>E%_!UYHXi7y^cyp74Gmwj%4+Y^K zlq%4^IZDb^t{P<%wU868Mrj0H4cv%SWHV^rv?Qe`onvt9k%%NLKvL2L+9x4dp)-|D zP!vHy_#R3b=oX+o6ODil0uCLI#y~rPm3U2#Kk|r#!Udbt`1%A4m3vMcbuDLc*0-7)m8Mr1|n^OTIWImEtSk)2({CE-a zwxC;p=k9_t&|;ZQTzxl!1MLRBP>$fRvZW4Ke=mXq?Ew}o#~}kvSa&~0i)?`I74VNm zEQEhpiNgT85_o78j(;_nCJ?y~AR;mX-uoabV7W{su;U>#09sTb>W9$)XeaRLHD~~I z4G{lgA^fq7#tlqghq|<+2Dso+G!D8HXnPzJ8FUcvUS#8%KqCuR(UrRMP0>-Ru6Bb> zxR3=StV78MO?VJxK4`*^QOZCQ9%Fe6Fb{dUM<6c(K8?}{+BbhkdA#)-aq136N{EPB z6e|Xe0qaqsKzo3{q9mz9h4{WlsWVFKjd&VyK~0#AQUIFJjj|Xt;o40YgkCr?fI7*C zx`YotgETR?Mn(&!}7)Q{AnTKp*D`*#R+B=9B zbP;gOyM}fF3!5+&Dz?aqOgsp6>4!G)0m^yMEx^w{La4}Awg4MG#Vk{^U|P`tQCIR3 zG$k=dpTx+6CVb*6oMc%0Rs(GN6(%&muoH!hDE~Seh3NIb zk5K6Lq#4+Oa-w!o-yhMoX?a6Bdj{8R%^pimHJfhiLVP1uZ52AlK1NF>ltm|+kTT{( z6Hxg*Y< z@VB0={u~;zWcFn!ubfzI&QW$nbM~{VX3k@FwVHDTu5PzZ%%<1w@m6z=!}Wx})$D6E ztY@#8=)m~aOnf){B+%DPw3$P%orVp}qB=C6|63GZJv2Q@qZu^YMzgM&YcKU~g{GFq z9y@6@Z3K-b95HgS6oH27riEE)>QlldE6q)M&9lZ&TM3%`4uOK^82mFB9kX~x>^lFg0#b`FN&1Bt>e%SQV;8{Zb=;l&8vMV+i`~;j5CNQLON5;Al&v5q+88A&?PC#CK=6?rrG$T@zM$#L%<(tf~NRT zVo)ZnslnLCIVtjRQe>$?JZlh7n02!x!8XEgE(IBk)}>uRc9C$f^)$`F*5jxev#yua z(KZp=q)EiK3CfU8*fv0$q>r`@(KacpdM2tP&8{AVajLYj`X!_-nQiBVkWN}{FJZ3~ zsj)*GDwA>?qEU$StHThKAh|onBU$R|n24XZm40(*Bpr9$iAG5CoSL90X^+!l6f9+E zdpZ?|(Mm4N1Dnmd#!{&^9et2ooFmWxskL*o^DI|EBknVZX~~AjW3szz$vkHpgxsXB zUCK}+si*4@{6mto&o#Yf|5k#guype9{5NPu?ff@FPCxBmsX_4uQR1Q_q$sy0Xt*@O zEgU1Qw92g&>Mh-L%f;3SQh)atG*DU&|JRkyy01f_(llL5QW%b0Y7 zHt98ZN_Co9H(YA%kp=LL9=_;wyE{@%JZb{`Y5cm zlb+}!&_k)Fmmcksnt65h>D!#Cvo^Q0g62#N0>O^4Kr`!dq;+1Q*sZyA)+-cR=A&11 zozhq;cOUep_`ZF5?lTxugF5LXiEq&Xd$x@+jB2?#yZ=(AQ za7p~DL$sS`(Vi(Dm9I#({05?NCA0m8W8@$`t+5h$N(%ypq1w`$fbWo_w6tb0x-RXm zS-(Mq84Ll#`pNILRGm@RFi)iCG9fGW-rJ?Q6BI$1ES~O1*>vqK+Hq!07Kl!GG3mW<6Dy&Hrla}sa zV@Y%jXl4aIb(Mao7wOvo06S=>O4v^CCW(8%>8G1gfRFU7-Z)+VhJuC-ysmR;bFe!( 
z(&YMeIv;4D-dPQI%rTTFMMZa^z18snBf3`-4M04{h)*os>wvb0(SDGg)sObLQx{w# zAAgC#yG9L!K{{CaNO55eI{yBgdn;X@V#WQ}qKQh`_n7%_z6H_^AP^mNMh&N2JS7A2IuPSTF_Bs0Mm_Vk!5RilLlsDYukL8jM7PrGSRMC`)S5&>t<6dNm9} z*(Ea@8W0MTj)d1i?WAYnLFk0!84-r`Qc^^3yWzpK0^$j|qqH$1!>c+k9+fL9n8WGR zLLEL+@@*86e)2K3I4wY4ZpN%mzbVd)k?-DT_WsHcsX;OFB!JK^eZ>OV!y)r5kS@}y zMxJ(As!(u$BTxHewdiV=ZZ`6;3s%cdzmA-Milpqw@DRCrF>1H!ebY6;%87cTZz~oUH^3tZ&pbUlPY+E{?J(o_z)Wx6PNTZttN`6hp z;xYFb(-7(Brej0a%b*p4j1%asW5~aC(@>P~rrgu4yXvP1v8O_2Npa1>V33Y#HV*sC zlppK-O5Qdbj9`-J+dL4pEXi-~hfpu+`xbGizI3a_me9I)Eutrb=zl?U@|$uWv#vJ; zSFo=%+mS|T;jXm1rMH{kbF4`bJwRES*oH;i4C!G@1KKM!j*CHkrO|OiVfs5C7ly0r zCF}S^n9JM7*FvkMQSmjTk@4=*>iGIzvImu%zdvLj%^UbXn6_QqrF-##=trq~!moDS z+@N!a7ioAdO_+uPN?Np9iri~b?;97NV$I}ux~7#z4wCM-ZUimqn;719o3o(FkJIGX zvv_0Ht%I^zcf>`|G_?VuHL#g=5)ie{$#1lwNdb{(Xa_Xw4gmyeYQG)kB7lr_mX;(Y zq0`dUL?iU_;Yk5@vmmCzEM?YxFMX5L!@*xIws01t8)je0qfIzW$H{Fn97a1SNTZhC z@{ta-N%Rmv!7=&K71q&>c}&X%N2x|~7vE)I&#@G6aUR^Iea;x*%!i5gO3A|H{>a|f zLGeW_aI}q*8m5GydEM@c;8eF-C9l7=bh87oTMSW* zwaaE{X8L$thlkK0`K@hbv(8Dq69#u`NOd~KIz0mt=w^x*2-4V&#dZ!5oD5=xSyxZ8 z>y+tz^8xgy{D=C~qCr6hvu*`er0)=v3Rsn_=;Vm(ASvW(^QAvKx3kx>B1ma~CHqc3 zQec;Q=!Mjw%M`mr8vIgVEYy?EbQu@242r2DM3A(?!`Z&6;yz`bI%(ETWR}1!v#zHg zjp*78{VuKT`VVBd#%_LSK*@$~Z4pWLNh;^wZWiEvQvp*HzyW&& zu%82rDj-t<%uiAPKXAZE6%b7UkcEr;yC!r==$gRP`F(0G_M4q z84si6{EyY5r3GTEKwelNojAnD0=XwS^w-dw|&pl@;e|L&&IUBDc?(bk) z{$wGtUr4}Dd#Z%`eS+US2&QQB_#<56zhQ#u|f!LXIt=Ar_wDu8$^ z1yF|rDt0L4|4B`TEN_s?im5>XvWLQ8$0Slpzb#B;s0H#|`ZPpuU(HfJCDk078T|vy z43KQq{{#ch*v^)wb;iyRS|y+tmTC*nK+H3Uoo!Q%Joy#?2$PJoX=tb}R>9D?UOo^| z&Su?r(&M3?&2p7Hhd8%0H8Kf4V@rK{>MIp(U*L;R+;6epkjJk3Xr9 zL=JJZKrTq$!#>!(dPISc3tf`}hR4`#rbT#Ckyl6qhDX`Y-pXytlG97RA1+y8rxPVj z#>~KILCKbJM={!4GBbO*;Pv!|!gcW%>Rw~Kl0glVq9(U@T)vJ9F_waJ)PFsr_2AW9ga$j-1BzJ)_(SRjUy^?3y059g&;vl7tml7d-Y$bJk&P&U6VI3t}t z;V&J`pJaa$A{EO$9-o%3L#{EX;8EZPcp@Xi5^r^@n=03l9M%?Ixv~ix^!5N~P0mfmxS@%*p zH7^Whlzf^u6`}l+ujWT0yS_)LpP}owkxa&TI~^_3k?=u63zBRO(lIA*lF}CRtP=&d z*nno!b(*+enXX~Y#RYCr9pPr(E9vBdFqA2MT+k5plfo8yqKi_}!qHyc>CmM0i3ln_^#bD61tVFErw;G1gUh9Cu%HRTr|z)#V7kKhVvHEq@6dNw%lm)W9B;%VvoV#Woo)s_^|5E|8DS+p1kh@8f$s|~esUU+r`|iY=L|7oB^O0HCA4+r_ zWRZ_wVLz#8sk>9(U9{uy6uhEyG!X^^Ecd#f%L8t zs*|}8XAWbb`l95u>Zs#fnD`9FuHdFJ`9sOmRe2cS*(yy~-N<*SiHZ@=(g%cgvj8O; zZ-RIl2d_$dRyWVBk;mF|sr;arE4)sVdSlvBx$#uiT%U3kz+nK;Ibat9&_U(K5my<4 z64ok^s^w*TaV z;-qswb?{oW8y4>F-aw$SvvaC=2o_GM3R@zzSR3px^P}0EB3{+j$kS*_Mz3uopfx4m zuP;Ja|8+_I4K1wDrIO*lH^G$pM@<3cmF(LbfV@K2fWK%^cOJ#ungv>^Tfdh+Y-twt z&v4ek`awXJf1Ao3x8@I8Wuq+s*HF7^`(N^!q<2VHp z5slmwMA2x{6?!{}Dy6iUTvS^QVFA%vMI) zFNK-Bqt0wn`gMw%#1HF@`%!zoKCsDr{dkZasTX@oQO z<1j`FQD8rESQ&+l<*=q4#z^QZ>8Z&p%9WGox|)1>3a`>*ZrK}3tHn9XvsjfFsYGhI z%{yv8r#QoUhdf_lya_=9ycj}he-C6;TE$_EG+3Iy&AahLPKl?WmI|XCw>6W}W^=|4 z9L7kVQ=}`~yc|O~X)HxmNNBrv6ylV*lyZ17w?2tmznaoqxU9=Vne~jcM(O~JC7iO2 zf~G2rV}4`SpQ5yV1DN&mIgF8Db#S|Pe{dRaeuHh5|1r0Bm zv%`e>xQ`et_4+HqE)>$IA=(;=8jZB*ub{eY&|et@HO}8f<0IAsyH|^HTK`^ zsi}|(PG%!mIBwJ^+U;v0IE>tfH1we@ zPLLMt3`QfR$lZ+`PlIjZbtVAnNQmWtO!;dL=*a<@aL`{da@TX zBntsDq<#9_=e?yI_jg+cB4M%Yqt((a}7IV-f}#j!~U zU64ke=wa7pHgyRshYyxcop@^3u8=+_!TRU$lA|Y$$bNIaa{Fundup6{+S5K8XbdA2 zNXJgrDmiev82KNfjK(xWs?mBfq*QEo4q|>W3|F&mN67?uI=3OfNcU)_{T*+YPJf4- zLyndX{vGY5<;4^7t2EX`&M+M5ScB!xbInqZwPXGckV!A@Pw{KoLa@VgyA5;a1{E+- z0SxB=TX_!$%;NxCxvRADoZdc*6;p#I%Z;Sn=RED(sQfOyq+92fqxI6X^8v76yXw3* z8eFpNd~1x7OFmrGArw*Kdif?o>C*En(|Ip7CrOj9_RnQ~^XDY6tKMtFo=;YWrd#?! z2BIw-Q4Okcs4yK08clpP2&^=PfsnJB-k7;-EW6iQDx! 
zWVeG&OE3z)FX?q70oNKck@^x6t8NL*iNAF1mYZY!IO=~R zo9|voA8&a_S#t`TXO4H^!)6b30YI_IP1lUI%k`Fwk&)I)?QVPbG;s=>?s65zNa!%Y z$mX^~O&Mb`hcQwnz~~@ySjJ#%CR@#6!#IqQAhz80j%vjzF*HfBQy4!LFvfIB>%o|jB7cCC0U~t#s`EkE~Of` z#;_XYau_2eOTXOlcFg1ymUab5r$JN%r?9N+Eer%&>*?GEIzPz?T$CM$G13{S#$E5I zJ8%vFJm^eQjSUK8U(OgpY0smX4LdoEkzk`180i55V1y>u$h{TDahx%e(thHMqdAO` znn?e<>)kV%Q^rz|R$&xi5QDI}l$OC612~M4#M zj=avr5YHb_H-i1z2&waTTys0n}&J$OSX+gtLlnR1uRDM7}Cw zF9(P!pqm2dK^M1ZMZf2O;VPi90vM-C$m4(%72vJ_=1TXwFpz^L(kkF)#4b4q_q3DUZNPaqamwSz6xls0Dj?-UY3_}zyK8x zrU3e@5+-v%f(md@09mSpjE21Z0#rnKA~zye714x?Ftalfv{Na7U{!>U1Foxp^$MWH zB36x4@{0zn8oM~)lsuOLlTy0AC2e~g0OzzWJ@)Yn8A(GjoE2i5 zAB$=5f!M!hBAv~%C6^}wz7s~ks_iaY7%gnJ??N{%9EUJKyp;B2v(ueHSYym^f|U!i z?)5-PTjA@wosn!9>G(jf4;D5-0G$K6Ny!zz*!uv0Ee|&v49mklC7w@jAoQE``dKY> zNAi9?D0g;$tjUk9+!mh*WwUN6B;`#7%eHubAZndk!?t)K%?9YsxRMqu1YniT@fZN6 z77cupSQJRtp#lt|gBCW~`%xnjY0=iK+o_a74CYdU^znHM`!kFJ3Cz>}kfHdL^m{Q8 zxi$gGu*iP_Bo@;haE~xae%ahnhB8cfrzz=}6!9v~ZY8DA96w+B>Q$^=1}~nJTS~iM z`8&ncWnqcU5;n0?`719+Jtwhg0y6B^-ce6O8K?s4CU0-XJIt>T7onQAG-#vRti_LT z7$bcx^##TsI3<9BMktJZIb#f^l>{-yX&lB#i6y_jjwtx?3HGI zdTaM}SEaH3kgl0)NI#gv?V75jOduUY?-&1q2HKfBQyloqpLinF&i)s*e7FnsADN3# zj-6H|HXt?_Ks!1qOqV({&}58e+PQ&vmcqRwf`HQPws&FPJu0szy#-X)Zm}O5=_loW zWRZY8Q?u1_Tm$Z<33=2@X(#1*dREiUD!^X>oKXS8IG~XV_()y|XuRFpbZQo4s|Uzf zE7Z(x04qb?W|GZTXtn(|NB|6?3>_hzjI%;+WV|)1VK2`iOMf zq^*V~3fR+&%&Cr|8}{*3?}WgdNyCwEA8Inij(24?$66rPA>>hYSU zvjV9H5O$6%Tz=}zD!IdgDKL$)M>7%j_9PV!s1F)XIyj<1PKykcE7C2J$sR}K(`w4kq11G=QWViqhnRa2gk4(g&ccE7=uoh?5hF^t=N!cAh8yjg>a349;;>Gwas2 zA(>7nsK?SO4B0Tf)1MKC1b+SG}j^;25P&)ziC6D1?mg&fucx|OM1kA@)6|Fxj#pI~~Y=+z3k zG0E~qbFiZ=dFqYySZG1)eNZC$m8AP1FZ6?HxDRr}s5P14hl1UGt%12HDLmDf41)t! zL(1ervds@|MV-hP*mZTUXT<<;5W)iZhOGBTH&6^IssR8OfdSI10KPKqtAUcLp>3pk zFd!CvfTftCq*GN8?@X?SiBpvx$WbZ-c(2_&x} zu>NFP+7Pv}MgNd1(ZIX=0p&Hoapt0=xm9=riFXWoh$@IpQ#2msldqaWCy6C{o1!%I zh&VSx8E7%dYKAgVbF!xy3TRpnIxgftLHy#!FCK6Kr)YV}mR}IRyn~Jm49|&oENbDg zg?4NCyMLKCXwpW*NVsWKENWzB3o8#t=u*hRtkPZhI+;)jrnai}4`I~nde z$D__pood7Kgmo*v=9?%*l+{7jWLZ4&aSMe>SdMZxn^VOceQNCM>~TZL!FV(fdwn4F z5>RT;Vh^wZdMb@CQ*JR!<|>xhL6NRd6iW^F)*w3)kaKP)*gJ+W5ia{xAQt6Zf)zs6 ztRrG)|sW0l^&bgzvA0%R3Yazo`K0%ehR)2^FzS zK^RrU-AfGcs|uK`07j~SeH<`f1!O3Ib}C>E2aHewO%%ZGR9X!k)O;pKv{ezhC#Gwy z(0wbfHqOwtDY5&tg{#1U_v^wT4%&n@u8y;49Lws(?Y04P6CI06O{wqCr=H?!%n9X zZW5Oi6Kf?fjs(|SVAOaEs=nT$s83!y< z0lg?d`4`{WkpD2iKz>d^(SrXaL|^H_Iw$fr1=T^TNzJw}N`EJYwy02Oc#+*j!Q|hz zXe1tWfjmq@-Y%;gsN>Xg)~zof=~PUJ(EcAJjcHbk9B+rl3cJs9&aEUP6(tLOc+sjW z#C_5;6}1*Vo#TL)M4yI2P##H0L+jCbQl5r-n)_X&9;(Y1$Ax|77VN4cmP zgE%*-UKMinEb(ZMg3v=^-vP}O#-8QEhmy4&&=8^G3@h3$v;iKo?j{LK2NU~pD(Xf| z>8Pf5nl*G5NHExrzNZ!WHy!mxpGZv~{i#G7CiOY43nL%O2B(QFdi4S74501I8?aJB=Gtkd=2Z0fM?+I0W zlL65?jN%L;V%i+Q&ItCaYbcOEd9zI`23>j#x?LRQL0wZLPJXJjB-dZCuci@fNCwqf@Jw<2LPuYuLfcSpfnkTZVHe~K)5Ae1 z3ellI1ZpwOlr#iAMYM`pVCOQDorT^&UN&kN+zdA@8b-0vp|nCkWh*=+ z8KY4Q8byi$gIbVNqtTBjmE?>8#t^b>3~Hdwfo_^6?oScV(Q_w5$@wuTqUOs#A&IDE z)_sKOn@*r|&}S%*<0ND(Y6IhT%vjVI`~FGnU|^6vV^Nr6w|jJ^h7kjcM3}rEivl8{ z>sm62)H{zLOmG`xtKcu(Bz3Y;O{blJ+(CmMHSA~7EgQA&HLwbI z25?u(?GGhYZVQWCu?oHpP{j;*EIui3ya*|W&cCXH>c5ef*{C0yM7rgmh^EH@Y^i7# z1f^nlizf!&;xUNTAslK8uWm#d`cjWt-mjENX%394PsE&q%;=NJJP{qhZj<-Xib78~Hid;0%b@T-~5+Q=Z3%g~ovLuI@obyls z#t#28HJXJcAoSX_HXog}$5#%KsS6=R=xJKB5cStXAh%kFKENXZOy7g?<6lYEdQ`W@ z-d|}|jMnoYv4?kk**lm{d~XyouvF6@>(Pl|d~PYF|RlF@8Rk47>ud;Dl+;6_kU~Sn}~2>WY3Oov%aK zxJBk)M;lR;sp$>mhMYF#u=Zjj^v7({<0cy7^i367cs4n46HRmaGrJPEb^^(~g&MUv zQiV2T0_%Fc=!=pg>7qwAOp*Ef!~5JE+I#l!m#_9}EU6aDxX7kk(B9LC%Wa65 z|74T&+mIk+jVGgTLsIiS`Q|nXw;l_ciizK4n5}N#Mm?(sKzEnz;*f?MyMy$k?;Ygn 
zH7-SbohWV$It9v7#hpQP9m6253@Rn7?jRo|k?nU-0^gY(*^=P9C>(7zMczd{t2x1Y zDzww`RAD$-`2cx&EUZGSHyqTLXBv!c$czf)P3}EFwVj$(0h}LZ(wC$52okiR57A7U z>eRk5jY-8rXuVs+@e%R~%}&Uh(Oi=~G)3%|nG)MgKe2bJXogeHg=e${^h6E3-j*JD zSX)Cw(jTEH$KN{8E_og%BZEAjEP8~JP#bdL5voHEJ79wtZV-_<9USq-K;Mo#VeS-Q zh}NSxA#|$OaU_>vaAp#twS3E@TMV{TKyS9|-qY zK0}-`u*Z@o=%dq07C# zQy;SO6>97>p$bjghunCDLZSCpe~nteJe>R*?SmB5{tXJP;S4}{i8}yZ=XN#}jnK0( z+s--iOv;{{R`NHd&Tr6FgnE4F&xHDO=4?Adaz2!E-l1Bv?p zE}O_Cv06->&U6HrComnozSL3uKgQ8kDO6M#S-XDLh?f_9UUlla0BSz6D9x3tfmTX; z{H2L2{=-kzo0;CfXgOg9)iKoIEY zY%m{@&wK-KyXWVIYbK7O0$YD+YlZRix>8%OF5}+MX5Hg6O?wnL`THYs=_f;Fc)x8A zhmAo|3=9o7y3h#9j(%Cw$^VbCY(?1z=FiWRrI4ggC?NL@4M0XmzhN{rv^8iGPgC)= z0Dm9k(RGY>EJU4@oq$$84G13fmkA@@n#90@+ku{JslG#W=_`24gGaxVtG#TH*zR zv7JF)Me+pP*>zlJ$i#|*L89Iu#zOMkk=zq-Ae|1ZaI7c1uih>m5|8+v_x=3ADEb@Z z9i*ofu8#`ITq~UB%HsY!Kbo*mM{8$qBeE5)Wz7znnw+fhQslv^b=ZeiYktsb+6N&i zJWY00!-3>RH5`KmoAfq#1VZacfi13$R*+w8F)Xo=x3;)53MXmRaa-G8)2I$(8rc($ zYrqn~?&>(aMpsU>-ZBq_7uh7dI*1_i&!j5ADL06d9sU#TH0`m&dU#`vTz0_ut_#na z&G5;XMDb9H_&0r|8%@SI;%<(k8dCq9hj^fuH<->i;)V!KC0L8wqI8m`#VIg75-lE# zw-u6>>rtp}GBe0}25IPwb!Z()a>h}R8hz!A*TbTEkPH5V-%TY!uHdYVrjD*40G%N# z+;9P!P6FIvZl^mp8BWH9Obd!nfpzt8Z7BQ( zUcaixlVF*!ix)P)O4uqdoQ}RDkG=4Bs0LZ!jfX%A_|h9^LY~~-2O8lfS>=QK!6Mx! zAFy~33H8MhkRbH*#gS;VX`wGpK**Pz_JgYLB-Q+J1w3fI_QzXv@pZAL1^oEKPks1_ zhM#!To*Eclz}{hstcia|s4clu3pYmfiB}+A4nEr)h=W{@g(G-j=BB|opQWE>-BI!& z5H~ZFMX+3kJ-+S)j6qBe5Vf$Nksb1CPe4xHRIwC7*)QYZ4zS*YJ|HKJgN|SY4AK#4@4%40$3Bu?tm6OBek{yT4R-F#c8nc zqDxU}6<(CJwFA()sU1ajmRu{@#axAfM;j3Jhz;$(L)w(WWx zkcr~)MDakHa9I4w zoFrBQz!_EPE#g}T2jw1P4EyBG08SJ;P$%A3AkgFGRaC~lgjoOa2e~5Md$%3~@?Q9w%A28v;exsuva0k%;|xkg<(g%+vOjJhm^6i`W;u8aLvUp1$Wgqn$s+*rKWaN1!=B(* zoGMyf2fxe&8L+)zE2ok_LU6d7xZhA@u+31vDFCbS@#N+t>@K8=?}%e4ZkP)phOP=4 zU~}GJoT}G4Bdv>#);Yyl>(aql>zojvbx95|7{!aAY6qNLg4N&~^;qN+vutVCu@2t` zb0>V1DInG8=nlW{0I~)?uA-L*yn~4?De_Tj(T1$*^jQg38H@7A!mo)%8z#dgwJ2&s z9$XFLp~RRG&MDeNc;_j9F1xd#m|vFi%WAj)*E(7v|Ayj#&`UN5=J|weuwx5mi1Z+^ zd;nR={~)m?>Htz?Kk$XaIZKrKgA)@pR^0Kt9yP zVa<~D+o1x^s#A(YW;@N+vO6S+2NIiZ7zA%?Ol(cp*VMNq62vl)NQTzK;kkzs#76*P z2a{(vh9|NP!Koo%K}M;nV&#k{G@7zJ&y$lQep{JNEfmEAjZKf zyrtRhY&9#<*me>4btydgm`{~EF+UsIW|?*H{S0uSm=14! z(idexHOD%q7G>!ThvEaw@Xt_`9#UioA95%@in=u`=3sntXpJoJ!^ym<&KhWm6~7gr02Hbt7XM;Kojw@I(v#XOm89F!{3q4)e8!>QtE_hHwa5 z@PPH)2Dn@FTSNYKDihLs8rQ`2RF6UYgX>AE4g85>1=TdxtZPGybe(oAc^Qa9b3LIe znsrHFjAgMdhC<+Ni~w!O^&u(R8JCh^=No1XmbSy}65{;HTwHY$dlEur!X(CprB@YJ z9>ny0M~$J7RAjb3n*2)-D`jjTm#)YJ31g>e*GZ5C}Aw|uL z!0x#Lp|s^M8H#3ZV&bf$DWPcQHeRaEOQpON$V+9sbPgN{$Wy%ZnwKu}(k))P$x9`? 
zB=gcEUaFv_T+!Ohi*NYN!@Q)Sju)*v^O7wu9pEKrUizJv^t_}P8o)~eH#CHo7IS1c zl&E7W@!To(%wx4<*Jf0?Q{zQjt8tn`cbcP|s zvnlQh3s0k(VuQWBR)$@04K(I4Q)yG&1mS-gkT=cn0mw4{iiJyeQqUZ7#~S2dbDUfk zc2mGqc#=6G2f1oA1+Yn#2qf*+4!-&Kk+#4S3i)s0b&dukx&^Ln%j_t>t09A1;2i%H zGabdieOwMnr-yURw41S~jx4~uZGrtfChOqiB<)kh4k2_CD1m0!C**;oeoL%}G&HFt zt_k}+Lt8>}`z=}45{KC3=waKZ@RhbOiX3f;YnvXo#1%GZG`Z0R2WK$-ZLfeIQpLZP zLhGi8kJ4zshH)S+pq-aKNr(>jWxrj}pUnqSVxNq^XR#^(IFrPCiA{gwlY}>|E4)cU zdL`pv{^H#6%Nk94aV7IH1V8wO!x=X2ekb22V?Ti|ZI&kECH&c{I)LF|T4L;4CU4eL zI7om;RqL5%Z0|h8bi)8oQLxe)*%m)Q4T*m{JQJQzS{p)A!Yz^X;+-09ax2E{-hxlU2&OH6!bWiJ>(NP*bRp} zwXZ^Z@R3+$;2)h5VP;oxF(1j+48ZlNLVNdtyvTrWvF;*6GjTXy>MlD>zRSeknQ-Pr zlT`33XV}4lpiMBq*aD%M9#{T+4RvE9>fF!QQ0FGop?^{i5Bvf?ow#(z9=Y9W(mN;R zZ@(1S?*9=OrwBY*i_V)cc(n50HNKGT@*hrSL-Q{++>an@yTdc>405VFj;g(n&LyIp zBHn;*O^w?2rP+f?NDqikm~yz2zCEzM$rd>C%@_IJe~Et+Ug?$TDSUl~9zloB+ye*Z zZv2vCeHD)0RX93ELvEwpx|ubCrQZ%b#D<($NJm^^>`m>gl#lTIUhpbAmfO^pX{cck zVeynmzFC3IM z|A8~Vg$J_a@^;!|Yf}mW;-A$?nm(yX(q()LwXIm>-nK>|H_X zj=`}`vZgk`D@@$fQgj2<4!FImi( z{U-#8$A+e3GAA|$bB2&tV{i*zp~OE)>{wVhJx2PC#d+uhxiuCy_W1UWMpIM}Q~-ui z1Gz(&k^19sQzvq_ih*~?gmL(k-}hiq6$3GcFay86P5O<;p6=%^!w$wF+90J=Ds&LH zO+*}zcOXLAOu%*B5^sUd#Gvd-kF+BB6L52<7Pl)EM%*IDC*afQ1o&;3<)yV4{ z9Mfe_X z_P)c{NaIPkemna=U`QKv?rh~Ap2t>{ef(@qaUfKwiUIq%2xdS(Py@*{tX!R=$|K0e zNqCa$qN`Lnd^AYySH-09WawA(0j(;}yGmwE#xZ^kpgL8AFXRv=yxvuEb}~Nc=L-3z zGHiKWT!27Zk(S7~Y2Y>|(5Om=r`Rbt%5Mwx4efq>1bc7`W37PpC^;P!eg{2 zE-B`+kBFTjFTcXkPNyI#RVD7fM4C;-ttaT?CoMqQ{X zstH**9mi-JgM3v~=mm|maj6%1G97#QT?GTH$ZNrIOx~sQrrI;`ScHH%OV%9b`Q)h(F?79TzTnauhe%4C+EF*TWmMTC8}l70EO zk<;!|Fj$icU{htOS+|!61<<{BpRSZ$a*DJoz!&}YfQ~A5*5VK*^RJU6z7V^^gFx3p zTt|26kQt6)ly`#;Z5dCgWKkjR2#0^}6ykZV$q;CjuE_n=hUCtM_>ge2Qg7oE__6}U~iOf>M;keL9k)*aV`$ik6#Yi7-R%<7ei~L z!m_dmCnG)N_oP)34%Gd-kJ%YnY3EUR82qXTw}exJ+l%mg*D*&db{4bt98EIjft|yS zR@&M72(`1huMO0xiiueq!c4RxZ|A{t&<4^-gei9ySt>#Uq>!NbxE6l3iQHO%>)?Aw z$hi4;;?`?;Q7NSkYNb9hm=0uFz`S2D=JBRJ50NjDV_?@b=T z`i2uMgcd{LP!&Y;VKCUa$T=N9((s;cfp`x!T|9zAkc|UuJRg%^5u0PUqs?ZK7T_56 zF}%&G5T@I52ZK-+dB&`ZA+?X=7@ObcQNS`X=s0fg(SsEdyHX`x)2X6(_&x|JJC0kq z)}2H9EPPDbQ=!)+z9*nFbRj7xa7*u9vnf+rg;{rn{w5k>9J9&z1Nr_0Y*!|bhbOS! 
[GIT binary patch data for bin/VmaReplay_Release_vs2017.exe and
bin/VulkanSample_Release_vs2017.exe omitted - not human-readable.]

diff --git a/docs/html/vk__mem__alloc_8h_source.html b/docs/html/vk__mem__alloc_8h_source.html
index 3d5c663..bbe571c 100644
--- a/docs/html/vk__mem__alloc_8h_source.html
+++ b/docs/html/vk__mem__alloc_8h_source.html
@@ -65,7 +65,7 @@ $(function() {
vk_mem_alloc.h
1 //
-2 // Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved.
+2 // Copyright (c) 2017-2019 Advanced Micro Devices, Inc. All rights reserved.
3 //
4 // Permission is hereby granted, free of charge, to any person obtaining a copy
5 // of this software and associated documentation files (the "Software"), to deal
6 // in the Software without restriction, including without limitation the rights
7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 // copies of the Software, and to permit persons to whom the Software is
9 // furnished to do so, subject to the following conditions:
10 //
11 // The above copyright notice and this permission notice shall be included in
12 // all copies or substantial portions of the Software.
13 //
14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
20 // THE SOFTWARE.
21 //
22 
23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
25 
26 #ifdef __cplusplus
27 extern "C" {
28 #endif
29 
1635 /*
1636 Define this macro to 0/1 to disable/enable support for recording functionality,
1637 available through VmaAllocatorCreateInfo::pRecordSettings.
1638 */
1639 #ifndef VMA_RECORDING_ENABLED
1640  #ifdef _WIN32
1641  #define VMA_RECORDING_ENABLED 1
1642  #else
1643  #define VMA_RECORDING_ENABLED 0
1644  #endif
1645 #endif
1646 
1647 #ifndef NOMINMAX
1648  #define NOMINMAX // For windows.h
1649 #endif
1650 
1651 #ifndef VULKAN_H_
1652  #include <vulkan/vulkan.h>
1653 #endif
1654 
1655 #if VMA_RECORDING_ENABLED
1656  #include <windows.h>
1657 #endif
1658 
1659 #if !defined(VMA_DEDICATED_ALLOCATION)
1660  #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
1661  #define VMA_DEDICATED_ALLOCATION 1
1662  #else
1663  #define VMA_DEDICATED_ALLOCATION 0
1664  #endif
1665 #endif
1666 
1676 VK_DEFINE_HANDLE(VmaAllocator)
1677 
1678 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
1680  VmaAllocator allocator,
1681  uint32_t memoryType,
1682  VkDeviceMemory memory,
1683  VkDeviceSize size);
1685 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
1686  VmaAllocator allocator,
1687  uint32_t memoryType,
1688  VkDeviceMemory memory,
1689  VkDeviceSize size);
1690 
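/*
Example (a hedged sketch, not part of the original header): callbacks matching
the two typedefs above, useful for logging every VkDeviceMemory block the
library allocates and frees. How they are registered (the VmaDeviceMemoryCallbacks
struct and VmaAllocatorCreateInfo::pDeviceMemoryCallbacks) is assumed here,
since those declarations fall in an elided part of this listing.

    static void MyAllocateDeviceMemoryCallback(
        VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
    {
        printf("Allocated %llu bytes from memory type %u.\n",
            (unsigned long long)size, memoryType);
    }
    static void MyFreeDeviceMemoryCallback(
        VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
    {
        printf("Freed %llu bytes from memory type %u.\n",
            (unsigned long long)size, memoryType);
    }
*/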
1704 
1734 
1737 typedef VkFlags VmaAllocatorCreateFlags;
1738 
1743 typedef struct VmaVulkanFunctions {
1744  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
1745  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
1746  PFN_vkAllocateMemory vkAllocateMemory;
1747  PFN_vkFreeMemory vkFreeMemory;
1748  PFN_vkMapMemory vkMapMemory;
1749  PFN_vkUnmapMemory vkUnmapMemory;
1750  PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
1751  PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
1752  PFN_vkBindBufferMemory vkBindBufferMemory;
1753  PFN_vkBindImageMemory vkBindImageMemory;
1754  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
1755  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
1756  PFN_vkCreateBuffer vkCreateBuffer;
1757  PFN_vkDestroyBuffer vkDestroyBuffer;
1758  PFN_vkCreateImage vkCreateImage;
1759  PFN_vkDestroyImage vkDestroyImage;
1760  PFN_vkCmdCopyBuffer vkCmdCopyBuffer;
1761 #if VMA_DEDICATED_ALLOCATION
1762  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
1763  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
1764 #endif
1765 } VmaVulkanFunctions;
1766 
1768 typedef enum VmaRecordFlagBits {
1774 } VmaRecordFlagBits;
1775 
1778 typedef VkFlags VmaRecordFlags;
1779 
1781 typedef struct VmaRecordSettings
1782 {
1792  const char* pFilePath;
1793 } VmaRecordSettings;
1794 
1796 typedef struct VmaAllocatorCreateInfo
1797 {
1801 
1802  VkPhysicalDevice physicalDevice;
1804 
1805  VkDevice device;
1807 
1810 
1811  const VkAllocationCallbacks* pAllocationCallbacks;
1813 
1853  const VkDeviceSize* pHeapSizeLimit;
1874 
1876 VkResult vmaCreateAllocator(
1877  const VmaAllocatorCreateInfo* pCreateInfo,
1878  VmaAllocator* pAllocator);
1879 
1881 void vmaDestroyAllocator(
1882  VmaAllocator allocator);
1883 
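/*
Example (a minimal sketch): creating and destroying an allocator. Assumes
physicalDevice and device are valid handles obtained from normal Vulkan
initialization.

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;

    VmaAllocator allocator;
    VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
    // ... create buffers, images, allocations ...
    vmaDestroyAllocator(allocator);
*/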
1888 void vmaGetPhysicalDeviceProperties(
1889  VmaAllocator allocator,
1890  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
1891 
1896 void vmaGetMemoryProperties(
1897  VmaAllocator allocator,
1898  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
1899 
1906 void vmaGetMemoryTypeProperties(
1907  VmaAllocator allocator,
1908  uint32_t memoryTypeIndex,
1909  VkMemoryPropertyFlags* pFlags);
1910 
1919 void vmaSetCurrentFrameIndex(
1920  VmaAllocator allocator,
1921  uint32_t frameIndex);
1922 
1925 typedef struct VmaStatInfo
1926 {
1928  uint32_t blockCount;
1934  VkDeviceSize usedBytes;
1936  VkDeviceSize unusedBytes;
1939 } VmaStatInfo;
1940 
1942 typedef struct VmaStats
1943 {
1944  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
1945  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
1946  VmaStatInfo total;
1947 } VmaStats;
1948 
1950 void vmaCalculateStats(
1951  VmaAllocator allocator,
1952  VmaStats* pStats);
1953 
1954 #define VMA_STATS_STRING_ENABLED 1
1955 
1956 #if VMA_STATS_STRING_ENABLED
1957 
1959 
1961 void vmaBuildStatsString(
1962  VmaAllocator allocator,
1963  char** ppStatsString,
1964  VkBool32 detailedMap);
1965 
1966 void vmaFreeStatsString(
1967  VmaAllocator allocator,
1968  char* pStatsString);
1969 
1970 #endif // #if VMA_STATS_STRING_ENABLED
1971 
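/*
Example (sketch): querying global statistics and the JSON stats string.
stats.total aggregates across all memory types and heaps.

    VmaStats stats;
    vmaCalculateStats(allocator, &stats);
    printf("Total used bytes: %llu\n", (unsigned long long)stats.total.usedBytes);

    char* statsString = nullptr;
    vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE = detailed map
    puts(statsString);
    vmaFreeStatsString(allocator, statsString);
*/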
1980 VK_DEFINE_HANDLE(VmaPool)
1981 
1982 typedef enum VmaMemoryUsage
1983 {
2032 } VmaMemoryUsage;
2033 
2043 
2098 
2114 
2124 
2131 
2135 
2136 typedef struct VmaAllocationCreateInfo
2137 {
2150  VkMemoryPropertyFlags requiredFlags;
2155  VkMemoryPropertyFlags preferredFlags;
2163  uint32_t memoryTypeBits;
2176  void* pUserData;
2177 } VmaAllocationCreateInfo;
2178 
2195 VkResult vmaFindMemoryTypeIndex(
2196  VmaAllocator allocator,
2197  uint32_t memoryTypeBits,
2198  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2199  uint32_t* pMemoryTypeIndex);
2200 
2213 VkResult vmaFindMemoryTypeIndexForBufferInfo(
2214  VmaAllocator allocator,
2215  const VkBufferCreateInfo* pBufferCreateInfo,
2216  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2217  uint32_t* pMemoryTypeIndex);
2218 
2231 VkResult vmaFindMemoryTypeIndexForImageInfo(
2232  VmaAllocator allocator,
2233  const VkImageCreateInfo* pImageCreateInfo,
2234  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2235  uint32_t* pMemoryTypeIndex);
2236 
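/*
Example (sketch): picking a memory type for a device-local resource.
VMA_MEMORY_USAGE_GPU_ONLY is one of the VmaMemoryUsage values elided above,
and usage is the corresponding member of VmaAllocationCreateInfo.

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    uint32_t memTypeIndex;
    VkResult res = vmaFindMemoryTypeIndex(
        allocator, UINT32_MAX, &allocCreateInfo, &memTypeIndex); // UINT32_MAX: consider all types
*/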
2257 
2274 
2285 
2291 
2294 typedef VkFlags VmaPoolCreateFlags;
2295 
2298 typedef struct VmaPoolCreateInfo {
2313  VkDeviceSize blockSize;
2341 } VmaPoolCreateInfo;
2342 
2345 typedef struct VmaPoolStats {
2348  VkDeviceSize size;
2351  VkDeviceSize unusedSize;
2364  VkDeviceSize unusedRangeSizeMax;
2367  size_t blockCount;
2368 } VmaPoolStats;
2369 
2376 VkResult vmaCreatePool(
2377  VmaAllocator allocator,
2378  const VmaPoolCreateInfo* pCreateInfo,
2379  VmaPool* pPool);
2380 
2383 void vmaDestroyPool(
2384  VmaAllocator allocator,
2385  VmaPool pool);
2386 
2393 void vmaGetPoolStats(
2394  VmaAllocator allocator,
2395  VmaPool pool,
2396  VmaPoolStats* pPoolStats);
2397 
2404 void vmaMakePoolAllocationsLost(
2405  VmaAllocator allocator,
2406  VmaPool pool,
2407  size_t* pLostAllocationCount);
2408 
2423 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool);
2424 
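/*
Example (sketch): creating a custom pool. memoryTypeIndex is the member of
VmaPoolCreateInfo that selects the Vulkan memory type (declared in an elided
part of this listing); memTypeIndex could come from vmaFindMemoryTypeIndex as
shown above.

    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memTypeIndex;
    poolCreateInfo.blockSize = 128ull * 1024 * 1024; // 128 MiB per block

    VmaPool pool;
    VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);

    VmaPoolStats poolStats;
    vmaGetPoolStats(allocator, pool, &poolStats);

    vmaDestroyPool(allocator, pool);
*/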
2449 VK_DEFINE_HANDLE(VmaAllocation)
2450 
2451 
2453 typedef struct VmaAllocationInfo {
2458  uint32_t memoryType;
2467  VkDeviceMemory deviceMemory;
2472  VkDeviceSize offset;
2477  VkDeviceSize size;
2491  void* pUserData;
2492 } VmaAllocationInfo;
2493 
2504 VkResult vmaAllocateMemory(
2505  VmaAllocator allocator,
2506  const VkMemoryRequirements* pVkMemoryRequirements,
2507  const VmaAllocationCreateInfo* pCreateInfo,
2508  VmaAllocation* pAllocation,
2509  VmaAllocationInfo* pAllocationInfo);
2510 
2530 VkResult vmaAllocateMemoryPages(
2531  VmaAllocator allocator,
2532  const VkMemoryRequirements* pVkMemoryRequirements,
2533  const VmaAllocationCreateInfo* pCreateInfo,
2534  size_t allocationCount,
2535  VmaAllocation* pAllocations,
2536  VmaAllocationInfo* pAllocationInfo);
2537 
2544 VkResult vmaAllocateMemoryForBuffer(
2545  VmaAllocator allocator,
2546  VkBuffer buffer,
2547  const VmaAllocationCreateInfo* pCreateInfo,
2548  VmaAllocation* pAllocation,
2549  VmaAllocationInfo* pAllocationInfo);
2550 
2552 VkResult vmaAllocateMemoryForImage(
2553  VmaAllocator allocator,
2554  VkImage image,
2555  const VmaAllocationCreateInfo* pCreateInfo,
2556  VmaAllocation* pAllocation,
2557  VmaAllocationInfo* pAllocationInfo);
2558 
2563 void vmaFreeMemory(
2564  VmaAllocator allocator,
2565  VmaAllocation allocation);
2566 
2577 void vmaFreeMemoryPages(
2578  VmaAllocator allocator,
2579  size_t allocationCount,
2580  VmaAllocation* pAllocations);
2581 
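/*
Example (sketch): allocating raw memory for a buffer created directly with
Vulkan, then releasing it. VMA_MEMORY_USAGE_GPU_ONLY is an elided enum value,
as noted earlier.

    VkMemoryRequirements memReq;
    vkGetBufferMemoryRequirements(device, buffer, &memReq);

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VmaAllocation allocation;
    VmaAllocationInfo allocInfo;
    VkResult res = vmaAllocateMemory(allocator, &memReq, &allocCreateInfo, &allocation, &allocInfo);
    // ... bind the memory to the buffer and use it ...
    vmaFreeMemory(allocator, allocation);
*/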
2602 VkResult vmaResizeAllocation(
2603  VmaAllocator allocator,
2604  VmaAllocation allocation,
2605  VkDeviceSize newSize);
2606 
2623 void vmaGetAllocationInfo(
2624  VmaAllocator allocator,
2625  VmaAllocation allocation,
2626  VmaAllocationInfo* pAllocationInfo);
2627 
2642 VkBool32 vmaTouchAllocation(
2643  VmaAllocator allocator,
2644  VmaAllocation allocation);
2645 
2659 void vmaSetAllocationUserData(
2660  VmaAllocator allocator,
2661  VmaAllocation allocation,
2662  void* pUserData);
2663 
2674 void vmaCreateLostAllocation(
2675  VmaAllocator allocator,
2676  VmaAllocation* pAllocation);
2677 
2712 VkResult vmaMapMemory(
2713  VmaAllocator allocator,
2714  VmaAllocation allocation,
2715  void** ppData);
2716 
2721 void vmaUnmapMemory(
2722  VmaAllocator allocator,
2723  VmaAllocation allocation);
2724 
2737 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
2738 
2751 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
2752 
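/*
Example (sketch): writing to an allocation through a temporary mapping. The
flush is needed only for memory types without HOST_COHERENT; calling it
unconditionally is harmless.

    void* mappedData;
    if(vmaMapMemory(allocator, allocation, &mappedData) == VK_SUCCESS)
    {
        memcpy(mappedData, srcData, srcDataSize);
        vmaFlushAllocation(allocator, allocation, 0, VK_WHOLE_SIZE);
        vmaUnmapMemory(allocator, allocation);
    }
*/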
2769 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits);
2770 
2777 VK_DEFINE_HANDLE(VmaDefragmentationContext)
2778 
2779 typedef enum VmaDefragmentationFlagBits {
2782 } VmaDefragmentationFlagBits;
2783 typedef VkFlags VmaDefragmentationFlags;
2784 
2789 typedef struct VmaDefragmentationInfo2 {
2813  uint32_t poolCount;
2834  VkDeviceSize maxCpuBytesToMove;
2844  VkDeviceSize maxGpuBytesToMove;
2858  VkCommandBuffer commandBuffer;
2859 } VmaDefragmentationInfo2;
2860 
2865 typedef struct VmaDefragmentationInfo {
2870  VkDeviceSize maxBytesToMove;
2876 } VmaDefragmentationInfo;
2877 
2879 typedef struct VmaDefragmentationStats {
2881  VkDeviceSize bytesMoved;
2883  VkDeviceSize bytesFreed;
2888 } VmaDefragmentationStats;
2889 
2916 VkResult vmaDefragmentationBegin(
2917  VmaAllocator allocator,
2918  const VmaDefragmentationInfo2* pInfo,
2919  VmaDefragmentationStats* pStats,
2920  VmaDefragmentationContext *pContext);
2921 
2927 VkResult vmaDefragmentationEnd(
2928  VmaAllocator allocator,
2929  VmaDefragmentationContext context);
2930 
2971 VkResult vmaDefragment(
2972  VmaAllocator allocator,
2973  VmaAllocation* pAllocations,
2974  size_t allocationCount,
2975  VkBool32* pAllocationsChanged,
2976  const VmaDefragmentationInfo *pDefragmentationInfo,
2977  VmaDefragmentationStats* pDefragmentationStats);
2978 
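/*
Example (sketch): CPU-side defragmentation via the begin/end pair. The members
allocationCount, pAllocations and maxCpuAllocationsToMove of
VmaDefragmentationInfo2 are assumptions here; their declarations fall in
elided parts of this listing.

    VmaDefragmentationInfo2 defragInfo = {};
    defragInfo.allocationCount = (uint32_t)allocCount;
    defragInfo.pAllocations = allocations;
    defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
    defragInfo.maxCpuAllocationsToMove = UINT32_MAX;

    VmaDefragmentationContext defragCtx;
    vmaDefragmentationBegin(allocator, &defragInfo, nullptr, &defragCtx);
    vmaDefragmentationEnd(allocator, defragCtx);
    // Buffers/images bound to moved allocations must then be recreated and rebound.
*/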
2991 VkResult vmaBindBufferMemory(
2992  VmaAllocator allocator,
2993  VmaAllocation allocation,
2994  VkBuffer buffer);
2995 
3008 VkResult vmaBindImageMemory(
3009  VmaAllocator allocator,
3010  VmaAllocation allocation,
3011  VkImage image);
3012 
3039 VkResult vmaCreateBuffer(
3040  VmaAllocator allocator,
3041  const VkBufferCreateInfo* pBufferCreateInfo,
3042  const VmaAllocationCreateInfo* pAllocationCreateInfo,
3043  VkBuffer* pBuffer,
3044  VmaAllocation* pAllocation,
3045  VmaAllocationInfo* pAllocationInfo);
3046 
3058 void vmaDestroyBuffer(
3059  VmaAllocator allocator,
3060  VkBuffer buffer,
3061  VmaAllocation allocation);
3062 
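/*
Example (sketch): the typical way to create a buffer together with its memory,
and to destroy both with one call each. VMA_MEMORY_USAGE_GPU_ONLY is an elided
VmaMemoryUsage value; the Vk* symbols come from vulkan.h.

    VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufferInfo.size = 65536;
    bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkBuffer buffer;
    VmaAllocation allocation;
    VkResult res = vmaCreateBuffer(allocator, &bufferInfo, &allocCreateInfo,
        &buffer, &allocation, nullptr);
    // ... use the buffer ...
    vmaDestroyBuffer(allocator, buffer, allocation);
*/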
3064 VkResult vmaCreateImage(
3065  VmaAllocator allocator,
3066  const VkImageCreateInfo* pImageCreateInfo,
3067  const VmaAllocationCreateInfo* pAllocationCreateInfo,
3068  VkImage* pImage,
3069  VmaAllocation* pAllocation,
3070  VmaAllocationInfo* pAllocationInfo);
3071 
3083 void vmaDestroyImage(
3084  VmaAllocator allocator,
3085  VkImage image,
3086  VmaAllocation allocation);
3087 
3088 #ifdef __cplusplus
3089 }
3090 #endif
3091 
3092 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
3093 
3094 // For Visual Studio IntelliSense.
3095 #if defined(__cplusplus) && defined(__INTELLISENSE__)
3096 #define VMA_IMPLEMENTATION
3097 #endif
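/*
Typical usage (sketch): exactly one translation unit defines VMA_IMPLEMENTATION
before including this header, so the implementation below is compiled once;
every other file includes the header normally. The file name is hypothetical.

    // vma_impl.cpp
    #define VMA_IMPLEMENTATION
    #include "vk_mem_alloc.h"
*/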
3098 
3099 #ifdef VMA_IMPLEMENTATION
3100 #undef VMA_IMPLEMENTATION
3101 
3102 #include <cstdint>
3103 #include <cstdlib>
3104 #include <cstring>
3105 
3106 /*******************************************************************************
3107 CONFIGURATION SECTION
3108 
3109 Define some of these macros before each #include of this header or change them
3110 here if you need other than the default behavior, depending on your environment.
3111 */
3112 
3113 /*
3114 Define this macro to 1 to make the library fetch pointers to Vulkan functions
3115 internally, like:
3116 
3117  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
3118 
3119 Define to 0 if you are going to provide your own pointers to Vulkan functions via
3120 VmaAllocatorCreateInfo::pVulkanFunctions.
3121 */
3122 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
3123 #define VMA_STATIC_VULKAN_FUNCTIONS 1
3124 #endif
3125 
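/*
Example (sketch): with VMA_STATIC_VULKAN_FUNCTIONS defined to 0 before
including this header, function pointers are passed explicitly through
VmaAllocatorCreateInfo::pVulkanFunctions (declared in an elided part of this
listing). Only two members are shown; the rest are filled the same way.

    VmaVulkanFunctions vulkanFunctions = {};
    vulkanFunctions.vkAllocateMemory = vkAllocateMemory;
    vulkanFunctions.vkFreeMemory = vkFreeMemory;
    // ... remaining members of VmaVulkanFunctions ...

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.pVulkanFunctions = &vulkanFunctions;
*/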
3126 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
3127 //#define VMA_USE_STL_CONTAINERS 1
3128 
3129 /* Set this macro to 1 to make the library include and use STL containers:
3130 std::pair, std::vector, std::list, std::unordered_map.
3131 
3132 Set it to 0 or leave it undefined to make the library use its own implementation of
3133 the containers.
3134 */
3135 #if VMA_USE_STL_CONTAINERS
3136  #define VMA_USE_STL_VECTOR 1
3137  #define VMA_USE_STL_UNORDERED_MAP 1
3138  #define VMA_USE_STL_LIST 1
3139 #endif
3140 
3141 #ifndef VMA_USE_STL_SHARED_MUTEX
3142  // Minimum Visual Studio 2015 Update 2
3143  #if defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023918
3144  #define VMA_USE_STL_SHARED_MUTEX 1
3145  #endif
3146 #endif
3147 
3148 #if VMA_USE_STL_VECTOR
3149  #include <vector>
3150 #endif
3151 
3152 #if VMA_USE_STL_UNORDERED_MAP
3153  #include <unordered_map>
3154 #endif
3155 
3156 #if VMA_USE_STL_LIST
3157  #include <list>
3158 #endif
3159 
3160 /*
3161 The following headers are used in this CONFIGURATION section only, so feel free to
3162 remove them if not needed.
3163 */
3164 #include <cassert> // for assert
3165 #include <algorithm> // for min, max
3166 #include <mutex>
3167 #include <atomic> // for std::atomic
3168 
3169 #ifndef VMA_NULL
3170  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
3171  #define VMA_NULL nullptr
3172 #endif
3173 
3174 #if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
3175 #include <cstdlib>
3176 void *aligned_alloc(size_t alignment, size_t size)
3177 {
3178  // alignment must be >= sizeof(void*)
3179  if(alignment < sizeof(void*))
3180  {
3181  alignment = sizeof(void*);
3182  }
3183 
3184  return memalign(alignment, size);
3185 }
3186 #elif defined(__APPLE__) || defined(__ANDROID__)
3187 #include <cstdlib>
3188 void *aligned_alloc(size_t alignment, size_t size)
3189 {
3190  // alignment must be >= sizeof(void*)
3191  if(alignment < sizeof(void*))
3192  {
3193  alignment = sizeof(void*);
3194  }
3195 
3196  void *pointer;
3197  if(posix_memalign(&pointer, alignment, size) == 0)
3198  return pointer;
3199  return VMA_NULL;
3200 }
3201 #endif
3202 
3203 // If your compiler is not compatible with C++11 and the definition of the
3204 // aligned_alloc() function is missing, uncommenting the following line may help:
3205 
3206 //#include <malloc.h>
3207 
3208 // Normal assert to check for programmer's errors, especially in Debug configuration.
3209 #ifndef VMA_ASSERT
3210  #ifdef _DEBUG
3211  #define VMA_ASSERT(expr) assert(expr)
3212  #else
3213  #define VMA_ASSERT(expr)
3214  #endif
3215 #endif
3216 
3217 // Assert that will be called very often, like inside data structures e.g. operator[].
3218 // Making it non-empty can make the program slow.
3219 #ifndef VMA_HEAVY_ASSERT
3220  #ifdef _DEBUG
3221  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
3222  #else
3223  #define VMA_HEAVY_ASSERT(expr)
3224  #endif
3225 #endif
3226 
3227 #ifndef VMA_ALIGN_OF
3228  #define VMA_ALIGN_OF(type) (__alignof(type))
3229 #endif
3230 
3231 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
3232  #if defined(_WIN32)
3233  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
3234  #else
3235  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
3236  #endif
3237 #endif
3238 
3239 #ifndef VMA_SYSTEM_FREE
3240  #if defined(_WIN32)
3241  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
3242  #else
3243  #define VMA_SYSTEM_FREE(ptr) free(ptr)
3244  #endif
3245 #endif
3246 
3247 #ifndef VMA_MIN
3248  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
3249 #endif
3250 
3251 #ifndef VMA_MAX
3252  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
3253 #endif
3254 
3255 #ifndef VMA_SWAP
3256  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
3257 #endif
3258 
3259 #ifndef VMA_SORT
3260  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
3261 #endif
3262 
3263 #ifndef VMA_DEBUG_LOG
3264  #define VMA_DEBUG_LOG(format, ...)
3265  /*
3266  #define VMA_DEBUG_LOG(format, ...) do { \
3267  printf(format, __VA_ARGS__); \
3268  printf("\n"); \
3269  } while(false)
3270  */
3271 #endif
3272 
3273 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
3274 #if VMA_STATS_STRING_ENABLED
3275  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
3276  {
3277  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
3278  }
3279  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
3280  {
3281  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
3282  }
3283  static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
3284  {
3285  snprintf(outStr, strLen, "%p", ptr);
3286  }
3287 #endif
3288 
3289 #ifndef VMA_MUTEX
3290  class VmaMutex
3291  {
3292  public:
3293  void Lock() { m_Mutex.lock(); }
3294  void Unlock() { m_Mutex.unlock(); }
3295  private:
3296  std::mutex m_Mutex;
3297  };
3298  #define VMA_MUTEX VmaMutex
3299 #endif
3300 
3301 // Read-write mutex, where "read" is shared access, "write" is exclusive access.
3302 #ifndef VMA_RW_MUTEX
3303  #if VMA_USE_STL_SHARED_MUTEX
3304  // Use std::shared_mutex from C++17.
3305  #include <shared_mutex>
3306  class VmaRWMutex
3307  {
3308  public:
3309  void LockRead() { m_Mutex.lock_shared(); }
3310  void UnlockRead() { m_Mutex.unlock_shared(); }
3311  void LockWrite() { m_Mutex.lock(); }
3312  void UnlockWrite() { m_Mutex.unlock(); }
3313  private:
3314  std::shared_mutex m_Mutex;
3315  };
3316  #define VMA_RW_MUTEX VmaRWMutex
3317  #elif defined(_WIN32)
3318  // Use SRWLOCK from WinAPI.
3319  class VmaRWMutex
3320  {
3321  public:
3322  VmaRWMutex() { InitializeSRWLock(&m_Lock); }
3323  void LockRead() { AcquireSRWLockShared(&m_Lock); }
3324  void UnlockRead() { ReleaseSRWLockShared(&m_Lock); }
3325  void LockWrite() { AcquireSRWLockExclusive(&m_Lock); }
3326  void UnlockWrite() { ReleaseSRWLockExclusive(&m_Lock); }
3327  private:
3328  SRWLOCK m_Lock;
3329  };
3330  #define VMA_RW_MUTEX VmaRWMutex
3331  #else
3332  // Less efficient fallback: Use normal mutex.
3333  class VmaRWMutex
3334  {
3335  public:
3336  void LockRead() { m_Mutex.Lock(); }
3337  void UnlockRead() { m_Mutex.Unlock(); }
3338  void LockWrite() { m_Mutex.Lock(); }
3339  void UnlockWrite() { m_Mutex.Unlock(); }
3340  private:
3341  VMA_MUTEX m_Mutex;
3342  };
3343  #define VMA_RW_MUTEX VmaRWMutex
3344  #endif // #if VMA_USE_STL_SHARED_MUTEX
3345 #endif // #ifndef VMA_RW_MUTEX
3346 
3347 /*
3348 If providing your own implementation, you need to implement a subset of std::atomic:
3349 
3350 - Constructor(uint32_t desired)
3351 - uint32_t load() const
3352 - void store(uint32_t desired)
3353 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
3354 */
3355 #ifndef VMA_ATOMIC_UINT32
3356  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
3357 #endif
3358 
3359 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
3360 
3364  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
3365 #endif
3366 
3367 #ifndef VMA_DEBUG_ALIGNMENT
3368 
3372  #define VMA_DEBUG_ALIGNMENT (1)
3373 #endif
3374 
3375 #ifndef VMA_DEBUG_MARGIN
3376 
3380  #define VMA_DEBUG_MARGIN (0)
3381 #endif
3382 
3383 #ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
3384 
3388  #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
3389 #endif
3390 
3391 #ifndef VMA_DEBUG_DETECT_CORRUPTION
3392 
3397  #define VMA_DEBUG_DETECT_CORRUPTION (0)
3398 #endif
3399 
3400 #ifndef VMA_DEBUG_GLOBAL_MUTEX
3401 
3405  #define VMA_DEBUG_GLOBAL_MUTEX (0)
3406 #endif
3407 
3408 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
3409 
3413  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
3414 #endif
3415 
3416 #ifndef VMA_SMALL_HEAP_MAX_SIZE
3417  #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
3419 #endif
3420 
3421 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
3422  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
3424 #endif
3425 
3426 #ifndef VMA_CLASS_NO_COPY
3427  #define VMA_CLASS_NO_COPY(className) \
3428  private: \
3429  className(const className&) = delete; \
3430  className& operator=(const className&) = delete;
3431 #endif
3432 
3433 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
3434 
3435 // Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
3436 static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;
3437 
3438 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
3439 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
3440 
3441 /*******************************************************************************
3442 END OF CONFIGURATION
3443 */
3444 
3445 static const uint32_t VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET = 0x10000000u;
3446 
3447 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
3448  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
3449 
3450 // Returns number of bits set to 1 in (v).
3451 static inline uint32_t VmaCountBitsSet(uint32_t v)
3452 {
3453  uint32_t c = v - ((v >> 1) & 0x55555555);
3454  c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
3455  c = ((c >> 4) + c) & 0x0F0F0F0F;
3456  c = ((c >> 8) + c) & 0x00FF00FF;
3457  c = ((c >> 16) + c) & 0x0000FFFF;
3458  return c;
3459 }
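// Example: VmaCountBitsSet(0xB) == 3, because 0xB is binary 1011.
// This is the classic SWAR population count: each step adds up bit counts
// of progressively wider fields (2, 4, 8, 16, 32 bits).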
3460 
3461 // Aligns given value up to the nearest multiple of align. For example: VmaAlignUp(11, 8) = 16.
3462 // Use types like uint32_t, uint64_t as T.
3463 template <typename T>
3464 static inline T VmaAlignUp(T val, T align)
3465 {
3466  return (val + align - 1) / align * align;
3467 }
3468 // Aligns given value down to the nearest multiple of align. For example: VmaAlignDown(11, 8) = 8.
3469 // Use types like uint32_t, uint64_t as T.
3470 template <typename T>
3471 static inline T VmaAlignDown(T val, T align)
3472 {
3473  return val / align * align;
3474 }
3475 
3476 // Division with mathematical rounding to nearest number.
3477 template <typename T>
3478 static inline T VmaRoundDiv(T x, T y)
3479 {
3480  return (x + (y / (T)2)) / y;
3481 }
3482 
3483 /*
3484 Returns true if given number is a power of two.
3485 T must be an unsigned integer type, or a signed integer that is always nonnegative.
3486 Returns true for 0 as well.
3487 */
3488 template <typename T>
3489 inline bool VmaIsPow2(T x)
3490 {
3491  return (x & (x-1)) == 0;
3492 }
3493 
3494 // Returns the smallest power of 2 greater than or equal to v.
3495 static inline uint32_t VmaNextPow2(uint32_t v)
3496 {
3497  v--;
3498  v |= v >> 1;
3499  v |= v >> 2;
3500  v |= v >> 4;
3501  v |= v >> 8;
3502  v |= v >> 16;
3503  v++;
3504  return v;
3505 }
3506 static inline uint64_t VmaNextPow2(uint64_t v)
3507 {
3508  v--;
3509  v |= v >> 1;
3510  v |= v >> 2;
3511  v |= v >> 4;
3512  v |= v >> 8;
3513  v |= v >> 16;
3514  v |= v >> 32;
3515  v++;
3516  return v;
3517 }
3518 
3519 // Returns the largest power of 2 less than or equal to v.
3520 static inline uint32_t VmaPrevPow2(uint32_t v)
3521 {
3522  v |= v >> 1;
3523  v |= v >> 2;
3524  v |= v >> 4;
3525  v |= v >> 8;
3526  v |= v >> 16;
3527  v = v ^ (v >> 1);
3528  return v;
3529 }
3530 static inline uint64_t VmaPrevPow2(uint64_t v)
3531 {
3532  v |= v >> 1;
3533  v |= v >> 2;
3534  v |= v >> 4;
3535  v |= v >> 8;
3536  v |= v >> 16;
3537  v |= v >> 32;
3538  v = v ^ (v >> 1);
3539  return v;
3540 }
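// Examples: VmaNextPow2(300u) == 512 and VmaNextPow2(512u) == 512;
// VmaPrevPow2(300u) == 256 and VmaPrevPow2(512u) == 512.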
3541 
3542 static inline bool VmaStrIsEmpty(const char* pStr)
3543 {
3544  return pStr == VMA_NULL || *pStr == '\0';
3545 }
3546 
3547 static const char* VmaAlgorithmToStr(uint32_t algorithm)
3548 {
3549  switch(algorithm)
3550  {
3551  case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
3552  return "Linear";
3553  case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
3554  return "Buddy";
3555  case 0:
3556  return "Default";
3557  default:
3558  VMA_ASSERT(0);
3559  return "";
3560  }
3561 }
3562 
3563 #ifndef VMA_SORT
3564 
3565 template<typename Iterator, typename Compare>
3566 Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
3567 {
3568  Iterator centerValue = end; --centerValue;
3569  Iterator insertIndex = beg;
3570  for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
3571  {
3572  if(cmp(*memTypeIndex, *centerValue))
3573  {
3574  if(insertIndex != memTypeIndex)
3575  {
3576  VMA_SWAP(*memTypeIndex, *insertIndex);
3577  }
3578  ++insertIndex;
3579  }
3580  }
3581  if(insertIndex != centerValue)
3582  {
3583  VMA_SWAP(*insertIndex, *centerValue);
3584  }
3585  return insertIndex;
3586 }
3587 
3588 template<typename Iterator, typename Compare>
3589 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
3590 {
3591  if(beg < end)
3592  {
3593  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
3594  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
3595  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
3596  }
3597 }
3598 
3599 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
3600 
3601 #endif // #ifndef VMA_SORT
3602 
3603 /*
3604 Returns true if two memory blocks occupy overlapping pages.
3605 ResourceA must be at a lower memory offset than ResourceB.
3606 
3607 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
3608 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
3609 */
3610 static inline bool VmaBlocksOnSamePage(
3611  VkDeviceSize resourceAOffset,
3612  VkDeviceSize resourceASize,
3613  VkDeviceSize resourceBOffset,
3614  VkDeviceSize pageSize)
3615 {
3616  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
3617  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
3618  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
3619  VkDeviceSize resourceBStart = resourceBOffset;
3620  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
3621  return resourceAEndPage == resourceBStartPage;
3622 }
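// Worked example: with pageSize = 4096, a resource at offset 0 with size 1000
// ends on page 0. A second resource starting at offset 1000 lies on the same
// page, so the function returns true (granularity conflict possible); one
// starting at offset 4096 lies on the next page, so it returns false.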
3623 
3624 enum VmaSuballocationType
3625 {
3626  VMA_SUBALLOCATION_TYPE_FREE = 0,
3627  VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
3628  VMA_SUBALLOCATION_TYPE_BUFFER = 2,
3629  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
3630  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
3631  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
3632  VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
3633 };
3634 
3635 /*
3636 Returns true if given suballocation types could conflict and must respect
3637 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is a buffer
3638 or a linear image and the other is an optimal image. If a type is unknown, the
3639 function behaves conservatively.
3640 */
3641 static inline bool VmaIsBufferImageGranularityConflict(
3642  VmaSuballocationType suballocType1,
3643  VmaSuballocationType suballocType2)
3644 {
3645  if(suballocType1 > suballocType2)
3646  {
3647  VMA_SWAP(suballocType1, suballocType2);
3648  }
3649 
3650  switch(suballocType1)
3651  {
3652  case VMA_SUBALLOCATION_TYPE_FREE:
3653  return false;
3654  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
3655  return true;
3656  case VMA_SUBALLOCATION_TYPE_BUFFER:
3657  return
3658  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
3659  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3660  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
3661  return
3662  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
3663  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
3664  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3665  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
3666  return
3667  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3668  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
3669  return false;
3670  default:
3671  VMA_ASSERT(0);
3672  return true;
3673  }
3674 }
3675 
3676 static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
3677 {
3678  uint32_t* pDst = (uint32_t*)((char*)pData + offset);
3679  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
3680  for(size_t i = 0; i < numberCount; ++i, ++pDst)
3681  {
3682  *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
3683  }
3684 }
3685 
3686 static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
3687 {
3688  const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
3689  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
3690  for(size_t i = 0; i < numberCount; ++i, ++pSrc)
3691  {
3692  if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
3693  {
3694  return false;
3695  }
3696  }
3697  return true;
3698 }
3699 
3700 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
3701 struct VmaMutexLock
3702 {
3703  VMA_CLASS_NO_COPY(VmaMutexLock)
3704 public:
3705  VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
3706  m_pMutex(useMutex ? &mutex : VMA_NULL)
3707  { if(m_pMutex) { m_pMutex->Lock(); } }
3708  ~VmaMutexLock()
3709  { if(m_pMutex) { m_pMutex->Unlock(); } }
3710 private:
3711  VMA_MUTEX* m_pMutex;
3712 };
3713 
3714 // Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for reading.
3715 struct VmaMutexLockRead
3716 {
3717  VMA_CLASS_NO_COPY(VmaMutexLockRead)
3718 public:
3719  VmaMutexLockRead(VMA_RW_MUTEX& mutex, bool useMutex) :
3720  m_pMutex(useMutex ? &mutex : VMA_NULL)
3721  { if(m_pMutex) { m_pMutex->LockRead(); } }
3722  ~VmaMutexLockRead() { if(m_pMutex) { m_pMutex->UnlockRead(); } }
3723 private:
3724  VMA_RW_MUTEX* m_pMutex;
3725 };
3726 
3727 // Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for writing.
3728 struct VmaMutexLockWrite
3729 {
3730  VMA_CLASS_NO_COPY(VmaMutexLockWrite)
3731 public:
3732  VmaMutexLockWrite(VMA_RW_MUTEX& mutex, bool useMutex) :
3733  m_pMutex(useMutex ? &mutex : VMA_NULL)
3734  { if(m_pMutex) { m_pMutex->LockWrite(); } }
3735  ~VmaMutexLockWrite() { if(m_pMutex) { m_pMutex->UnlockWrite(); } }
3736 private:
3737  VMA_RW_MUTEX* m_pMutex;
3738 };
3739 
3740 #if VMA_DEBUG_GLOBAL_MUTEX
3741  static VMA_MUTEX gDebugGlobalMutex;
3742  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
3743 #else
3744  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
3745 #endif
3746 
3747 // Minimum size of a free suballocation to register it in the free suballocation collection.
3748 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
3749 
3750 /*
3751 Performs binary search and returns an iterator to the first element that is
3752 greater than or equal to (key), according to comparison (cmp).
3753 
3754 Cmp should return true if its first argument is less than its second argument.
3755 
3756 The returned value is the found element, if present in the collection, or the
3757 place where a new element with value (key) should be inserted.
3758 */
3759 template <typename CmpLess, typename IterT, typename KeyT>
3760 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpLess cmp)
3761 {
3762  size_t down = 0, up = (end - beg);
3763  while(down < up)
3764  {
3765  const size_t mid = (down + up) / 2;
3766  if(cmp(*(beg+mid), key))
3767  {
3768  down = mid + 1;
3769  }
3770  else
3771  {
3772  up = mid;
3773  }
3774  }
3775  return beg + down;
3776 }
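/*
Example (sketch): lower-bound lookup in a sorted array.

    const uint32_t arr[] = { 1, 3, 3, 7, 9 };
    const uint32_t* it = VmaBinaryFindFirstNotLess(
        arr, arr + 5, 3u, [](uint32_t a, uint32_t b) { return a < b; });
    // it points to arr[1], the first element not less than 3.
*/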
3777 
3778 /*
3779 Returns true if all pointers in the array are non-null and unique.
3780 Warning! O(n^2) complexity. Use only inside VMA_HEAVY_ASSERT.
3781 T must be pointer type, e.g. VmaAllocation, VmaPool.
3782 */
3783 template<typename T>
3784 static bool VmaValidatePointerArray(uint32_t count, const T* arr)
3785 {
3786  for(uint32_t i = 0; i < count; ++i)
3787  {
3788  const T iPtr = arr[i];
3789  if(iPtr == VMA_NULL)
3790  {
3791  return false;
3792  }
3793  for(uint32_t j = i + 1; j < count; ++j)
3794  {
3795  if(iPtr == arr[j])
3796  {
3797  return false;
3798  }
3799  }
3800  }
3801  return true;
3802 }
3803 
3805 // Memory allocation
3806 
3807 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
3808 {
3809  if((pAllocationCallbacks != VMA_NULL) &&
3810  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
3811  {
3812  return (*pAllocationCallbacks->pfnAllocation)(
3813  pAllocationCallbacks->pUserData,
3814  size,
3815  alignment,
3816  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
3817  }
3818  else
3819  {
3820  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
3821  }
3822 }
3823 
3824 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
3825 {
3826  if((pAllocationCallbacks != VMA_NULL) &&
3827  (pAllocationCallbacks->pfnFree != VMA_NULL))
3828  {
3829  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
3830  }
3831  else
3832  {
3833  VMA_SYSTEM_FREE(ptr);
3834  }
3835 }
3836 
3837 template<typename T>
3838 static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
3839 {
3840  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
3841 }
3842 
3843 template<typename T>
3844 static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
3845 {
3846  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
3847 }
3848 
3849 #define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
3850 
3851 #define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
3852 
3853 template<typename T>
3854 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
3855 {
3856  ptr->~T();
3857  VmaFree(pAllocationCallbacks, ptr);
3858 }
3859 
3860 template<typename T>
3861 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
3862 {
3863  if(ptr != VMA_NULL)
3864  {
3865  for(size_t i = count; i--; )
3866  {
3867  ptr[i].~T();
3868  }
3869  VmaFree(pAllocationCallbacks, ptr);
3870  }
3871 }
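/*
Example (sketch): vma_new/vma_delete route construction and destruction
through the VkAllocationCallbacks machinery above instead of global new/delete.

    struct MyStruct { int x; };
    MyStruct* p = vma_new(pAllocationCallbacks, MyStruct);
    p->x = 7;
    vma_delete(pAllocationCallbacks, p);
*/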
3872 
3873 // STL-compatible allocator.
3874 template<typename T>
3875 class VmaStlAllocator
3876 {
3877 public:
3878  const VkAllocationCallbacks* const m_pCallbacks;
3879  typedef T value_type;
3880 
3881  VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
3882  template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }
3883 
3884  T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
3885  void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
3886 
3887  template<typename U>
3888  bool operator==(const VmaStlAllocator<U>& rhs) const
3889  {
3890  return m_pCallbacks == rhs.m_pCallbacks;
3891  }
3892  template<typename U>
3893  bool operator!=(const VmaStlAllocator<U>& rhs) const
3894  {
3895  return m_pCallbacks != rhs.m_pCallbacks;
3896  }
3897 
3898  VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
3899 };
3900 
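/*
Example (sketch): VmaStlAllocator adapts VkAllocationCallbacks to the STL
allocator interface, so it can parameterize VmaVector below (or std::vector
when VMA_USE_STL_CONTAINERS is enabled):

    VmaVector< uint32_t, VmaStlAllocator<uint32_t> > v(
        VmaStlAllocator<uint32_t>(pAllocationCallbacks));
    v.push_back(42);
*/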
3901 #if VMA_USE_STL_VECTOR
3902 
3903 #define VmaVector std::vector
3904 
3905 template<typename T, typename allocatorT>
3906 static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
3907 {
3908  vec.insert(vec.begin() + index, item);
3909 }
3910 
3911 template<typename T, typename allocatorT>
3912 static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
3913 {
3914  vec.erase(vec.begin() + index);
3915 }
3916 
3917 #else // #if VMA_USE_STL_VECTOR
3918 
3919 /* Class with an interface compatible with a subset of std::vector.
3920 T must be POD because constructors and destructors are not called and memcpy is
3921 used for these objects. */
3922 template<typename T, typename AllocatorT>
3923 class VmaVector
3924 {
3925 public:
3926  typedef T value_type;
3927 
3928  VmaVector(const AllocatorT& allocator) :
3929  m_Allocator(allocator),
3930  m_pArray(VMA_NULL),
3931  m_Count(0),
3932  m_Capacity(0)
3933  {
3934  }
3935 
3936  VmaVector(size_t count, const AllocatorT& allocator) :
3937  m_Allocator(allocator),
3938  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
3939  m_Count(count),
3940  m_Capacity(count)
3941  {
3942  }
3943 
3944  VmaVector(const VmaVector<T, AllocatorT>& src) :
3945  m_Allocator(src.m_Allocator),
3946  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
3947  m_Count(src.m_Count),
3948  m_Capacity(src.m_Count)
3949  {
3950  if(m_Count != 0)
3951  {
3952  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
3953  }
3954  }
3955 
3956  ~VmaVector()
3957  {
3958  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
3959  }
3960 
3961  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
3962  {
3963  if(&rhs != this)
3964  {
3965  resize(rhs.m_Count);
3966  if(m_Count != 0)
3967  {
3968  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
3969  }
3970  }
3971  return *this;
3972  }
3973 
3974  bool empty() const { return m_Count == 0; }
3975  size_t size() const { return m_Count; }
3976  T* data() { return m_pArray; }
3977  const T* data() const { return m_pArray; }
3978 
3979  T& operator[](size_t index)
3980  {
3981  VMA_HEAVY_ASSERT(index < m_Count);
3982  return m_pArray[index];
3983  }
3984  const T& operator[](size_t index) const
3985  {
3986  VMA_HEAVY_ASSERT(index < m_Count);
3987  return m_pArray[index];
3988  }
3989 
3990  T& front()
3991  {
3992  VMA_HEAVY_ASSERT(m_Count > 0);
3993  return m_pArray[0];
3994  }
3995  const T& front() const
3996  {
3997  VMA_HEAVY_ASSERT(m_Count > 0);
3998  return m_pArray[0];
3999  }
4000  T& back()
4001  {
4002  VMA_HEAVY_ASSERT(m_Count > 0);
4003  return m_pArray[m_Count - 1];
4004  }
4005  const T& back() const
4006  {
4007  VMA_HEAVY_ASSERT(m_Count > 0);
4008  return m_pArray[m_Count - 1];
4009  }
4010 
4011  void reserve(size_t newCapacity, bool freeMemory = false)
4012  {
4013  newCapacity = VMA_MAX(newCapacity, m_Count);
4014 
4015  if((newCapacity < m_Capacity) && !freeMemory)
4016  {
4017  newCapacity = m_Capacity;
4018  }
4019 
4020  if(newCapacity != m_Capacity)
4021  {
4022  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator, newCapacity) : VMA_NULL;
4023  if(m_Count != 0)
4024  {
4025  memcpy(newArray, m_pArray, m_Count * sizeof(T));
4026  }
4027  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
4028  m_Capacity = newCapacity;
4029  m_pArray = newArray;
4030  }
4031  }
4032 
4033  void resize(size_t newCount, bool freeMemory = false)
4034  {
4035  size_t newCapacity = m_Capacity;
4036  if(newCount > m_Capacity)
4037  {
4038  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
4039  }
4040  else if(freeMemory)
4041  {
4042  newCapacity = newCount;
4043  }
4044 
4045  if(newCapacity != m_Capacity)
4046  {
4047  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
4048  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
4049  if(elementsToCopy != 0)
4050  {
4051  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
4052  }
4053  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
4054  m_Capacity = newCapacity;
4055  m_pArray = newArray;
4056  }
4057 
4058  m_Count = newCount;
4059  }
4060 
4061  void clear(bool freeMemory = false)
4062  {
4063  resize(0, freeMemory);
4064  }
4065 
4066  void insert(size_t index, const T& src)
4067  {
4068  VMA_HEAVY_ASSERT(index <= m_Count);
4069  const size_t oldCount = size();
4070  resize(oldCount + 1);
4071  if(index < oldCount)
4072  {
4073  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
4074  }
4075  m_pArray[index] = src;
4076  }
4077 
4078  void remove(size_t index)
4079  {
4080  VMA_HEAVY_ASSERT(index < m_Count);
4081  const size_t oldCount = size();
4082  if(index < oldCount - 1)
4083  {
4084  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
4085  }
4086  resize(oldCount - 1);
4087  }
4088 
4089  void push_back(const T& src)
4090  {
4091  const size_t newIndex = size();
4092  resize(newIndex + 1);
4093  m_pArray[newIndex] = src;
4094  }
4095 
4096  void pop_back()
4097  {
4098  VMA_HEAVY_ASSERT(m_Count > 0);
4099  resize(size() - 1);
4100  }
4101 
4102  void push_front(const T& src)
4103  {
4104  insert(0, src);
4105  }
4106 
4107  void pop_front()
4108  {
4109  VMA_HEAVY_ASSERT(m_Count > 0);
4110  remove(0);
4111  }
4112 
4113  typedef T* iterator;
4114 
4115  iterator begin() { return m_pArray; }
4116  iterator end() { return m_pArray + m_Count; }
4117 
4118 private:
4119  AllocatorT m_Allocator;
4120  T* m_pArray;
4121  size_t m_Count;
4122  size_t m_Capacity;
4123 };
4124 
4125 template<typename T, typename allocatorT>
4126 static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
4127 {
4128  vec.insert(index, item);
4129 }
4130 
4131 template<typename T, typename allocatorT>
4132 static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
4133 {
4134  vec.remove(index);
4135 }
4136 
4137 #endif // #if VMA_USE_STL_VECTOR
4138 
4139 template<typename CmpLess, typename VectorT>
4140 size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
4141 {
4142  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
4143  vector.data(),
4144  vector.data() + vector.size(),
4145  value,
4146  CmpLess()) - vector.data();
4147  VmaVectorInsert(vector, indexToInsert, value);
4148  return indexToInsert;
4149 }
4150 
4151 template<typename CmpLess, typename VectorT>
4152 bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
4153 {
4154  CmpLess comparator;
4155  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
4156  vector.begin(),
4157  vector.end(),
4158  value,
4159  comparator);
4160  if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
4161  {
4162  size_t indexToRemove = it - vector.begin();
4163  VmaVectorRemove(vector, indexToRemove);
4164  return true;
4165  }
4166  return false;
4167 }
4168 
4169 template<typename CmpLess, typename IterT, typename KeyT>
4170 IterT VmaVectorFindSorted(const IterT& beg, const IterT& end, const KeyT& value)
4171 {
4172  CmpLess comparator;
4173  IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
4174  beg, end, value, comparator);
4175  if(it == end ||
4176  (!comparator(*it, value) && !comparator(value, *it)))
4177  {
4178  return it;
4179  }
4180  return end;
4181 }
4182 
4184 // class VmaPoolAllocator
4185 
4186 /*
4187 Allocator for objects of type T using a list of arrays (pools) to speed up
4188 allocation. The number of elements that can be allocated is not bounded, because
4189 the allocator can create multiple blocks.
4190 */
4191 template<typename T>
4192 class VmaPoolAllocator
4193 {
4194  VMA_CLASS_NO_COPY(VmaPoolAllocator)
4195 public:
4196  VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
4197  ~VmaPoolAllocator();
4198  void Clear();
4199  T* Alloc();
4200  void Free(T* ptr);
4201 
4202 private:
4203  union Item
4204  {
4205  uint32_t NextFreeIndex;
4206  T Value;
4207  };
4208 
4209  struct ItemBlock
4210  {
4211  Item* pItems;
4212  uint32_t FirstFreeIndex;
4213  };
4214 
4215  const VkAllocationCallbacks* m_pAllocationCallbacks;
4216  size_t m_ItemsPerBlock;
4217  VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;
4218 
4219  ItemBlock& CreateNewBlock();
4220 };
4221 
4222 template<typename T>
4223 VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
4224  m_pAllocationCallbacks(pAllocationCallbacks),
4225  m_ItemsPerBlock(itemsPerBlock),
4226  m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
4227 {
4228  VMA_ASSERT(itemsPerBlock > 0);
4229 }
4230 
4231 template<typename T>
4232 VmaPoolAllocator<T>::~VmaPoolAllocator()
4233 {
4234  Clear();
4235 }
4236 
4237 template<typename T>
4238 void VmaPoolAllocator<T>::Clear()
4239 {
4240  for(size_t i = m_ItemBlocks.size(); i--; )
4241  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
4242  m_ItemBlocks.clear();
4243 }
4244 
4245 template<typename T>
4246 T* VmaPoolAllocator<T>::Alloc()
4247 {
4248  for(size_t i = m_ItemBlocks.size(); i--; )
4249  {
4250  ItemBlock& block = m_ItemBlocks[i];
4251  // This block has some free items: Use first one.
4252  if(block.FirstFreeIndex != UINT32_MAX)
4253  {
4254  Item* const pItem = &block.pItems[block.FirstFreeIndex];
4255  block.FirstFreeIndex = pItem->NextFreeIndex;
4256  return &pItem->Value;
4257  }
4258  }
4259 
4260  // No block has a free item: Create a new one and use it.
4261  ItemBlock& newBlock = CreateNewBlock();
4262  Item* const pItem = &newBlock.pItems[0];
4263  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
4264  return &pItem->Value;
4265 }
4266 
4267 template<typename T>
4268 void VmaPoolAllocator<T>::Free(T* ptr)
4269 {
4270  // Search all memory blocks to find ptr.
4271  for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
4272  {
4273  ItemBlock& block = m_ItemBlocks[i];
4274 
4275  // Casting to union.
4276  Item* pItemPtr;
4277  memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
4278 
4279  // Check if pItemPtr is in address range of this block.
4280  if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
4281  {
4282  const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
4283  pItemPtr->NextFreeIndex = block.FirstFreeIndex;
4284  block.FirstFreeIndex = index;
4285  return;
4286  }
4287  }
4288  VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
4289 }
4290 
4291 template<typename T>
4292 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
4293 {
4294  ItemBlock newBlock = {
4295  vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };
4296 
4297  m_ItemBlocks.push_back(newBlock);
4298 
4299  // Setup singly-linked list of all free items in this block.
4300  for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
4301  newBlock.pItems[i].NextFreeIndex = i + 1;
4302  newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
4303  return m_ItemBlocks.back();
4304 }
4305 
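/*
Example (sketch): VmaPoolAllocator hands out objects from internally managed
item blocks, avoiding a separate heap allocation per object. Note that Alloc()
returns uninitialized storage (no constructor is run), which is fine for the
POD-like types it is used with.

    VmaPoolAllocator<uint64_t> pool(pAllocationCallbacks, 128); // 128 items per block
    uint64_t* a = pool.Alloc();
    *a = 42;
    pool.Free(a);
    // Remaining blocks are released by Clear() / the destructor.
*/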
4307 // class VmaRawList, VmaList
4308 
4309 #if VMA_USE_STL_LIST
4310 
4311 #define VmaList std::list
4312 
4313 #else // #if VMA_USE_STL_LIST
4314 
4315 template<typename T>
4316 struct VmaListItem
4317 {
4318  VmaListItem* pPrev;
4319  VmaListItem* pNext;
4320  T Value;
4321 };
4322 
4323 // Doubly linked list.
4324 template<typename T>
4325 class VmaRawList
4326 {
4327  VMA_CLASS_NO_COPY(VmaRawList)
4328 public:
4329  typedef VmaListItem<T> ItemType;
4330 
4331  VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
4332  ~VmaRawList();
4333  void Clear();
4334 
4335  size_t GetCount() const { return m_Count; }
4336  bool IsEmpty() const { return m_Count == 0; }
4337 
4338  ItemType* Front() { return m_pFront; }
4339  const ItemType* Front() const { return m_pFront; }
4340  ItemType* Back() { return m_pBack; }
4341  const ItemType* Back() const { return m_pBack; }
4342 
4343  ItemType* PushBack();
4344  ItemType* PushFront();
4345  ItemType* PushBack(const T& value);
4346  ItemType* PushFront(const T& value);
4347  void PopBack();
4348  void PopFront();
4349 
4350  // pItem can be null, meaning PushBack.
4351  ItemType* InsertBefore(ItemType* pItem);
4352  // pItem can be null, meaning PushFront.
4353  ItemType* InsertAfter(ItemType* pItem);
4354 
4355  ItemType* InsertBefore(ItemType* pItem, const T& value);
4356  ItemType* InsertAfter(ItemType* pItem, const T& value);
4357 
4358  void Remove(ItemType* pItem);
4359 
4360 private:
4361  const VkAllocationCallbacks* const m_pAllocationCallbacks;
4362  VmaPoolAllocator<ItemType> m_ItemAllocator;
4363  ItemType* m_pFront;
4364  ItemType* m_pBack;
4365  size_t m_Count;
4366 };
4367 
4368 template<typename T>
4369 VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
4370  m_pAllocationCallbacks(pAllocationCallbacks),
4371  m_ItemAllocator(pAllocationCallbacks, 128),
4372  m_pFront(VMA_NULL),
4373  m_pBack(VMA_NULL),
4374  m_Count(0)
4375 {
4376 }
4377 
4378 template<typename T>
4379 VmaRawList<T>::~VmaRawList()
4380 {
4381  // Intentionally not calling Clear, because that would do unnecessary
4382  // computation just to return all items to m_ItemAllocator as free.
4383 }
4384 
4385 template<typename T>
4386 void VmaRawList<T>::Clear()
4387 {
4388  if(IsEmpty() == false)
4389  {
4390  ItemType* pItem = m_pBack;
4391  while(pItem != VMA_NULL)
4392  {
4393  ItemType* const pPrevItem = pItem->pPrev;
4394  m_ItemAllocator.Free(pItem);
4395  pItem = pPrevItem;
4396  }
4397  m_pFront = VMA_NULL;
4398  m_pBack = VMA_NULL;
4399  m_Count = 0;
4400  }
4401 }
4402 
4403 template<typename T>
4404 VmaListItem<T>* VmaRawList<T>::PushBack()
4405 {
4406  ItemType* const pNewItem = m_ItemAllocator.Alloc();
4407  pNewItem->pNext = VMA_NULL;
4408  if(IsEmpty())
4409  {
4410  pNewItem->pPrev = VMA_NULL;
4411  m_pFront = pNewItem;
4412  m_pBack = pNewItem;
4413  m_Count = 1;
4414  }
4415  else
4416  {
4417  pNewItem->pPrev = m_pBack;
4418  m_pBack->pNext = pNewItem;
4419  m_pBack = pNewItem;
4420  ++m_Count;
4421  }
4422  return pNewItem;
4423 }
4424 
4425 template<typename T>
4426 VmaListItem<T>* VmaRawList<T>::PushFront()
4427 {
4428  ItemType* const pNewItem = m_ItemAllocator.Alloc();
4429  pNewItem->pPrev = VMA_NULL;
4430  if(IsEmpty())
4431  {
4432  pNewItem->pNext = VMA_NULL;
4433  m_pFront = pNewItem;
4434  m_pBack = pNewItem;
4435  m_Count = 1;
4436  }
4437  else
4438  {
4439  pNewItem->pNext = m_pFront;
4440  m_pFront->pPrev = pNewItem;
4441  m_pFront = pNewItem;
4442  ++m_Count;
4443  }
4444  return pNewItem;
4445 }
4446 
4447 template<typename T>
4448 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
4449 {
4450  ItemType* const pNewItem = PushBack();
4451  pNewItem->Value = value;
4452  return pNewItem;
4453 }
4454 
4455 template<typename T>
4456 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
4457 {
4458  ItemType* const pNewItem = PushFront();
4459  pNewItem->Value = value;
4460  return pNewItem;
4461 }
4462 
4463 template<typename T>
4464 void VmaRawList<T>::PopBack()
4465 {
4466  VMA_HEAVY_ASSERT(m_Count > 0);
4467  ItemType* const pBackItem = m_pBack;
4468  ItemType* const pPrevItem = pBackItem->pPrev;
4469  if(pPrevItem != VMA_NULL)
4470  {
4471  pPrevItem->pNext = VMA_NULL;
4472  }
4473  m_pBack = pPrevItem;
4474  m_ItemAllocator.Free(pBackItem);
4475  --m_Count;
4476 }
4477 
4478 template<typename T>
4479 void VmaRawList<T>::PopFront()
4480 {
4481  VMA_HEAVY_ASSERT(m_Count > 0);
4482  ItemType* const pFrontItem = m_pFront;
4483  ItemType* const pNextItem = pFrontItem->pNext;
4484  if(pNextItem != VMA_NULL)
4485  {
4486  pNextItem->pPrev = VMA_NULL;
4487  }
4488  m_pFront = pNextItem;
4489  m_ItemAllocator.Free(pFrontItem);
4490  --m_Count;
4491 }
4492 
4493 template<typename T>
4494 void VmaRawList<T>::Remove(ItemType* pItem)
4495 {
4496  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
4497  VMA_HEAVY_ASSERT(m_Count > 0);
4498 
4499  if(pItem->pPrev != VMA_NULL)
4500  {
4501  pItem->pPrev->pNext = pItem->pNext;
4502  }
4503  else
4504  {
4505  VMA_HEAVY_ASSERT(m_pFront == pItem);
4506  m_pFront = pItem->pNext;
4507  }
4508 
4509  if(pItem->pNext != VMA_NULL)
4510  {
4511  pItem->pNext->pPrev = pItem->pPrev;
4512  }
4513  else
4514  {
4515  VMA_HEAVY_ASSERT(m_pBack == pItem);
4516  m_pBack = pItem->pPrev;
4517  }
4518 
4519  m_ItemAllocator.Free(pItem);
4520  --m_Count;
4521 }
4522 
4523 template<typename T>
4524 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
4525 {
4526  if(pItem != VMA_NULL)
4527  {
4528  ItemType* const prevItem = pItem->pPrev;
4529  ItemType* const newItem = m_ItemAllocator.Alloc();
4530  newItem->pPrev = prevItem;
4531  newItem->pNext = pItem;
4532  pItem->pPrev = newItem;
4533  if(prevItem != VMA_NULL)
4534  {
4535  prevItem->pNext = newItem;
4536  }
4537  else
4538  {
4539  VMA_HEAVY_ASSERT(m_pFront == pItem);
4540  m_pFront = newItem;
4541  }
4542  ++m_Count;
4543  return newItem;
4544  }
4545  else
4546  return PushBack();
4547 }
4548 
4549 template<typename T>
4550 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
4551 {
4552  if(pItem != VMA_NULL)
4553  {
4554  ItemType* const nextItem = pItem->pNext;
4555  ItemType* const newItem = m_ItemAllocator.Alloc();
4556  newItem->pNext = nextItem;
4557  newItem->pPrev = pItem;
4558  pItem->pNext = newItem;
4559  if(nextItem != VMA_NULL)
4560  {
4561  nextItem->pPrev = newItem;
4562  }
4563  else
4564  {
4565  VMA_HEAVY_ASSERT(m_pBack == pItem);
4566  m_pBack = newItem;
4567  }
4568  ++m_Count;
4569  return newItem;
4570  }
4571  else
4572  return PushFront();
4573 }
4574 
4575 template<typename T>
4576 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
4577 {
4578  ItemType* const newItem = InsertBefore(pItem);
4579  newItem->Value = value;
4580  return newItem;
4581 }
4582 
4583 template<typename T>
4584 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
4585 {
4586  ItemType* const newItem = InsertAfter(pItem);
4587  newItem->Value = value;
4588  return newItem;
4589 }
4590 
4591 template<typename T, typename AllocatorT>
4592 class VmaList
4593 {
4594  VMA_CLASS_NO_COPY(VmaList)
4595 public:
4596  class iterator
4597  {
4598  public:
4599  iterator() :
4600  m_pList(VMA_NULL),
4601  m_pItem(VMA_NULL)
4602  {
4603  }
4604 
4605  T& operator*() const
4606  {
4607  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4608  return m_pItem->Value;
4609  }
4610  T* operator->() const
4611  {
4612  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4613  return &m_pItem->Value;
4614  }
4615 
4616  iterator& operator++()
4617  {
4618  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4619  m_pItem = m_pItem->pNext;
4620  return *this;
4621  }
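        // Note: decrementing an end() iterator (m_pItem == VMA_NULL) must yield
        // the last element, hence the fallback to m_pList->Back() below.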
4622  iterator& operator--()
4623  {
4624  if(m_pItem != VMA_NULL)
4625  {
4626  m_pItem = m_pItem->pPrev;
4627  }
4628  else
4629  {
4630  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
4631  m_pItem = m_pList->Back();
4632  }
4633  return *this;
4634  }
4635 
4636  iterator operator++(int)
4637  {
4638  iterator result = *this;
4639  ++*this;
4640  return result;
4641  }
4642  iterator operator--(int)
4643  {
4644  iterator result = *this;
4645  --*this;
4646  return result;
4647  }
4648 
4649  bool operator==(const iterator& rhs) const
4650  {
4651  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4652  return m_pItem == rhs.m_pItem;
4653  }
4654  bool operator!=(const iterator& rhs) const
4655  {
4656  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4657  return m_pItem != rhs.m_pItem;
4658  }
4659 
4660  private:
4661  VmaRawList<T>* m_pList;
4662  VmaListItem<T>* m_pItem;
4663 
4664  iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
4665  m_pList(pList),
4666  m_pItem(pItem)
4667  {
4668  }
4669 
4670  friend class VmaList<T, AllocatorT>;
4671  };
4672 
4673  class const_iterator
4674  {
4675  public:
4676  const_iterator() :
4677  m_pList(VMA_NULL),
4678  m_pItem(VMA_NULL)
4679  {
4680  }
4681 
4682  const_iterator(const iterator& src) :
4683  m_pList(src.m_pList),
4684  m_pItem(src.m_pItem)
4685  {
4686  }
4687 
4688  const T& operator*() const
4689  {
4690  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4691  return m_pItem->Value;
4692  }
4693  const T* operator->() const
4694  {
4695  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4696  return &m_pItem->Value;
4697  }
4698 
4699  const_iterator& operator++()
4700  {
4701  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4702  m_pItem = m_pItem->pNext;
4703  return *this;
4704  }
4705  const_iterator& operator--()
4706  {
4707  if(m_pItem != VMA_NULL)
4708  {
4709  m_pItem = m_pItem->pPrev;
4710  }
4711  else
4712  {
4713  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
4714  m_pItem = m_pList->Back();
4715  }
4716  return *this;
4717  }
4718 
4719  const_iterator operator++(int)
4720  {
4721  const_iterator result = *this;
4722  ++*this;
4723  return result;
4724  }
4725  const_iterator operator--(int)
4726  {
4727  const_iterator result = *this;
4728  --*this;
4729  return result;
4730  }
4731 
4732  bool operator==(const const_iterator& rhs) const
4733  {
4734  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4735  return m_pItem == rhs.m_pItem;
4736  }
4737  bool operator!=(const const_iterator& rhs) const
4738  {
4739  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4740  return m_pItem != rhs.m_pItem;
4741  }
4742 
4743  private:
4744  const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
4745  m_pList(pList),
4746  m_pItem(pItem)
4747  {
4748  }
4749 
4750  const VmaRawList<T>* m_pList;
4751  const VmaListItem<T>* m_pItem;
4752 
4753  friend class VmaList<T, AllocatorT>;
4754  };
4755 
4756  VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }
4757 
4758  bool empty() const { return m_RawList.IsEmpty(); }
4759  size_t size() const { return m_RawList.GetCount(); }
4760 
4761  iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
4762  iterator end() { return iterator(&m_RawList, VMA_NULL); }
4763 
4764  const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
4765  const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
4766 
4767  void clear() { m_RawList.Clear(); }
4768  void push_back(const T& value) { m_RawList.PushBack(value); }
4769  void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
4770  iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
4771 
4772 private:
4773  VmaRawList<T> m_RawList;
4774 };
4775 
4776 #endif // #if VMA_USE_STL_LIST
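
// Illustrative usage sketch for VmaList above (exposition only, not part of the
// library; assumes VmaStlAllocator is constructible from a VkAllocationCallbacks
// pointer, and pAllocationCallbacks is a hypothetical local):
/*
    VmaStlAllocator<int> alloc(pAllocationCallbacks);
    VmaList<int, VmaStlAllocator<int> > list(alloc);
    list.push_back(1);
    list.push_back(2);
    list.push_back(3);
    // Erase while iterating: only the erased iterator is invalidated, because
    // the backing store is a doubly linked list.
    for(VmaList<int, VmaStlAllocator<int> >::iterator it = list.begin(); it != list.end(); )
    {
        if(*it == 2)
        {
            VmaList<int, VmaStlAllocator<int> >::iterator itToErase = it++;
            list.erase(itToErase);
        }
        else
        {
            ++it;
        }
    }
*/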
4777 
4778 ////////////////////////////////////////////////////////////////////////////////
4779 // class VmaMap
4780 
4781 // Unused in this version.
4782 #if 0
4783 
4784 #if VMA_USE_STL_UNORDERED_MAP
4785 
4786 #define VmaPair std::pair
4787 
4788 #define VMA_MAP_TYPE(KeyT, ValueT) \
4789  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
4790 
4791 #else // #if VMA_USE_STL_UNORDERED_MAP
4792 
4793 template<typename T1, typename T2>
4794 struct VmaPair
4795 {
4796  T1 first;
4797  T2 second;
4798 
4799  VmaPair() : first(), second() { }
4800  VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
4801 };
4802 
4803 /* Class compatible with a subset of the interface of std::unordered_map.
4804 KeyT, ValueT must be POD because they will be stored in VmaVector.
4805 */
4806 template<typename KeyT, typename ValueT>
4807 class VmaMap
4808 {
4809 public:
4810  typedef VmaPair<KeyT, ValueT> PairType;
4811  typedef PairType* iterator;
4812 
4813  VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }
4814 
4815  iterator begin() { return m_Vector.begin(); }
4816  iterator end() { return m_Vector.end(); }
4817 
4818  void insert(const PairType& pair);
4819  iterator find(const KeyT& key);
4820  void erase(iterator it);
4821 
4822 private:
4823  VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
4824 };
4825 
4826 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
4827 
4828 template<typename FirstT, typename SecondT>
4829 struct VmaPairFirstLess
4830 {
4831  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
4832  {
4833  return lhs.first < rhs.first;
4834  }
4835  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
4836  {
4837  return lhs.first < rhsFirst;
4838  }
4839 };
4840 
4841 template<typename KeyT, typename ValueT>
4842 void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
4843 {
4844  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
4845  m_Vector.data(),
4846  m_Vector.data() + m_Vector.size(),
4847  pair,
4848  VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
4849  VmaVectorInsert(m_Vector, indexToInsert, pair);
4850 }
4851 
4852 template<typename KeyT, typename ValueT>
4853 VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
4854 {
4855  PairType* it = VmaBinaryFindFirstNotLess(
4856  m_Vector.data(),
4857  m_Vector.data() + m_Vector.size(),
4858  key,
4859  VmaPairFirstLess<KeyT, ValueT>());
4860  if((it != m_Vector.end()) && (it->first == key))
4861  {
4862  return it;
4863  }
4864  else
4865  {
4866  return m_Vector.end();
4867  }
4868 }
4869 
4870 template<typename KeyT, typename ValueT>
4871 void VmaMap<KeyT, ValueT>::erase(iterator it)
4872 {
4873  VmaVectorRemove(m_Vector, it - m_Vector.begin());
4874 }
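
// Illustrative sketch (exposition only; this whole VmaMap section is compiled
// out by the surrounding #if 0). The pairs stay sorted by key inside a
// VmaVector, so insert() is a binary search plus a vector insert and find() is
// a binary search; pAllocationCallbacks below is a hypothetical local:
/*
    VmaStlAllocator< VmaPair<uint32_t, float> > alloc(pAllocationCallbacks);
    VmaMap<uint32_t, float> map(alloc);
    map.insert(VmaPair<uint32_t, float>(10, 1.f));
    map.insert(VmaPair<uint32_t, float>(5, 2.f)); // placed before key 10, keeping order
    VmaMap<uint32_t, float>::iterator it = map.find(5);
    if(it != map.end())
    {
        map.erase(it);
    }
*/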
4875 
4876 #endif // #if VMA_USE_STL_UNORDERED_MAP
4877 
4878 #endif // #if 0
4879 
4880 ////////////////////////////////////////////////////////////////////////////////
4881 
4882 class VmaDeviceMemoryBlock;
4883 
4884 enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
4885 
4886 struct VmaAllocation_T
4887 {
4888  VMA_CLASS_NO_COPY(VmaAllocation_T)
4889 private:
4890  static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;
4891 
4892  enum FLAGS
4893  {
4894  FLAG_USER_DATA_STRING = 0x01,
4895  };
4896 
4897 public:
4898  enum ALLOCATION_TYPE
4899  {
4900  ALLOCATION_TYPE_NONE,
4901  ALLOCATION_TYPE_BLOCK,
4902  ALLOCATION_TYPE_DEDICATED,
4903  };
4904 
4905  VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
4906  m_Alignment(1),
4907  m_Size(0),
4908  m_pUserData(VMA_NULL),
4909  m_LastUseFrameIndex(currentFrameIndex),
4910  m_Type((uint8_t)ALLOCATION_TYPE_NONE),
4911  m_SuballocationType((uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN),
4912  m_MapCount(0),
4913  m_Flags(userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0)
4914  {
4915 #if VMA_STATS_STRING_ENABLED
4916  m_CreationFrameIndex = currentFrameIndex;
4917  m_BufferImageUsage = 0;
4918 #endif
4919  }
4920 
4921  ~VmaAllocation_T()
4922  {
4923  VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");
4924 
4925  // Check if owned string was freed.
4926  VMA_ASSERT(m_pUserData == VMA_NULL);
4927  }
4928 
4929  void InitBlockAllocation(
4930  VmaPool hPool,
4931  VmaDeviceMemoryBlock* block,
4932  VkDeviceSize offset,
4933  VkDeviceSize alignment,
4934  VkDeviceSize size,
4935  VmaSuballocationType suballocationType,
4936  bool mapped,
4937  bool canBecomeLost)
4938  {
4939  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
4940  VMA_ASSERT(block != VMA_NULL);
4941  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
4942  m_Alignment = alignment;
4943  m_Size = size;
4944  m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
4945  m_SuballocationType = (uint8_t)suballocationType;
4946  m_BlockAllocation.m_hPool = hPool;
4947  m_BlockAllocation.m_Block = block;
4948  m_BlockAllocation.m_Offset = offset;
4949  m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
4950  }
4951 
4952  void InitLost()
4953  {
4954  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
4955  VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
4956  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
4957  m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
4958  m_BlockAllocation.m_Block = VMA_NULL;
4959  m_BlockAllocation.m_Offset = 0;
4960  m_BlockAllocation.m_CanBecomeLost = true;
4961  }
4962 
4963  void ChangeBlockAllocation(
4964  VmaAllocator hAllocator,
4965  VmaDeviceMemoryBlock* block,
4966  VkDeviceSize offset);
4967 
4968  void ChangeSize(VkDeviceSize newSize);
4969  void ChangeOffset(VkDeviceSize newOffset);
4970 
4971  // A non-null pMappedData means the allocation was created with the MAPPED flag.
4972  void InitDedicatedAllocation(
4973  uint32_t memoryTypeIndex,
4974  VkDeviceMemory hMemory,
4975  VmaSuballocationType suballocationType,
4976  void* pMappedData,
4977  VkDeviceSize size)
4978  {
4979  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
4980  VMA_ASSERT(hMemory != VK_NULL_HANDLE);
4981  m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
4982  m_Alignment = 0;
4983  m_Size = size;
4984  m_SuballocationType = (uint8_t)suballocationType;
4985  m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
4986  m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
4987  m_DedicatedAllocation.m_hMemory = hMemory;
4988  m_DedicatedAllocation.m_pMappedData = pMappedData;
4989  }
4990 
4991  ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
4992  VkDeviceSize GetAlignment() const { return m_Alignment; }
4993  VkDeviceSize GetSize() const { return m_Size; }
4994  bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
4995  void* GetUserData() const { return m_pUserData; }
4996  void SetUserData(VmaAllocator hAllocator, void* pUserData);
4997  VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }
4998 
4999  VmaDeviceMemoryBlock* GetBlock() const
5000  {
5001  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
5002  return m_BlockAllocation.m_Block;
5003  }
5004  VkDeviceSize GetOffset() const;
5005  VkDeviceMemory GetMemory() const;
5006  uint32_t GetMemoryTypeIndex() const;
5007  bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
5008  void* GetMappedData() const;
5009  bool CanBecomeLost() const;
5010  VmaPool GetPool() const;
5011 
5012  uint32_t GetLastUseFrameIndex() const
5013  {
5014  return m_LastUseFrameIndex.load();
5015  }
5016  bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
5017  {
5018  return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
5019  }
5020  /*
5021  - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
5022  makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
5023  - Else, returns false.
5024 
5025  If hAllocation is already lost, asserts - it must not be called in that case.
5026  If hAllocation was not created with CAN_BECOME_LOST_BIT, asserts.
5027  */
5028  bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
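
    // Worked example of the rule above, with illustrative numbers: for
    // frameInUseCount = 2 and allocator.CurrentFrameIndex = 100, an allocation
    // with LastUseFrameIndex = 97 satisfies 97 + 2 < 100, so MakeLost() marks it
    // VMA_FRAME_INDEX_LOST and returns true; with LastUseFrameIndex = 98,
    // 98 + 2 == 100 fails the comparison, so the allocation is still considered
    // in use and MakeLost() returns false.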
5029 
5030  void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
5031  {
5032  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
5033  outInfo.blockCount = 1;
5034  outInfo.allocationCount = 1;
5035  outInfo.unusedRangeCount = 0;
5036  outInfo.usedBytes = m_Size;
5037  outInfo.unusedBytes = 0;
5038  outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
5039  outInfo.unusedRangeSizeMin = UINT64_MAX;
5040  outInfo.unusedRangeSizeMax = 0;
5041  }
5042 
5043  void BlockAllocMap();
5044  void BlockAllocUnmap();
5045  VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
5046  void DedicatedAllocUnmap(VmaAllocator hAllocator);
5047 
5048 #if VMA_STATS_STRING_ENABLED
5049  uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
5050  uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }
5051 
5052  void InitBufferImageUsage(uint32_t bufferImageUsage)
5053  {
5054  VMA_ASSERT(m_BufferImageUsage == 0);
5055  m_BufferImageUsage = bufferImageUsage;
5056  }
5057 
5058  void PrintParameters(class VmaJsonWriter& json) const;
5059 #endif
5060 
5061 private:
5062  VkDeviceSize m_Alignment;
5063  VkDeviceSize m_Size;
5064  void* m_pUserData;
5065  VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
5066  uint8_t m_Type; // ALLOCATION_TYPE
5067  uint8_t m_SuballocationType; // VmaSuballocationType
5068  // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
5069  // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
5070  uint8_t m_MapCount;
5071  uint8_t m_Flags; // enum FLAGS
5072 
5073  // Allocation out of VmaDeviceMemoryBlock.
5074  struct BlockAllocation
5075  {
5076  VmaPool m_hPool; // Null if belongs to general memory.
5077  VmaDeviceMemoryBlock* m_Block;
5078  VkDeviceSize m_Offset;
5079  bool m_CanBecomeLost;
5080  };
5081 
5082  // Allocation for an object that has its own private VkDeviceMemory.
5083  struct DedicatedAllocation
5084  {
5085  uint32_t m_MemoryTypeIndex;
5086  VkDeviceMemory m_hMemory;
5087  void* m_pMappedData; // Not null means memory is mapped.
5088  };
5089 
5090  union
5091  {
5092  // Allocation out of VmaDeviceMemoryBlock.
5093  BlockAllocation m_BlockAllocation;
5094  // Allocation for an object that has its own private VkDeviceMemory.
5095  DedicatedAllocation m_DedicatedAllocation;
5096  };
5097 
5098 #if VMA_STATS_STRING_ENABLED
5099  uint32_t m_CreationFrameIndex;
5100  uint32_t m_BufferImageUsage; // 0 if unknown.
5101 #endif
5102 
5103  void FreeUserDataString(VmaAllocator hAllocator);
5104 };
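
// Illustrative sketch of the m_MapCount packing documented above (exposition
// only): bit 0x80 marks a persistently mapped allocation, and the low 7 bits
// count outstanding vmaMapMemory()/vmaUnmapMemory() pairs, matching the
// assertion in ~VmaAllocation_T().
/*
    uint8_t mapCount = MAP_COUNT_FLAG_PERSISTENT_MAP; // created with MAPPED flag: 0x80
    mapCount += 1;                                    // vmaMapMemory():   0x81
    mapCount -= 1;                                    // vmaUnmapMemory(): 0x80
    bool persistent  = (mapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0;  // true
    uint8_t userMaps = mapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;        // 0 - destruction is now legal
*/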
5105 
5106 /*
5107 Represents a region of a VmaDeviceMemoryBlock that is either assigned and
5108 returned as an allocated memory block, or free.
5109 */
5110 struct VmaSuballocation
5111 {
5112  VkDeviceSize offset;
5113  VkDeviceSize size;
5114  VmaAllocation hAllocation;
5115  VmaSuballocationType type;
5116 };
5117 
5118 // Comparator for offsets.
5119 struct VmaSuballocationOffsetLess
5120 {
5121  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
5122  {
5123  return lhs.offset < rhs.offset;
5124  }
5125 };
5126 struct VmaSuballocationOffsetGreater
5127 {
5128  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
5129  {
5130  return lhs.offset > rhs.offset;
5131  }
5132 };
5133 
5134 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
5135 
5136 // Cost of one additional allocation lost, expressed as an equivalent number of bytes.
5137 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
5138 
5139 /*
5140 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
5141 
5142 If canMakeOtherLost was false:
5143 - item points to a FREE suballocation.
5144 - itemsToMakeLostCount is 0.
5145 
5146 If canMakeOtherLost was true:
5147 - item points to first of sequence of suballocations, which are either FREE,
5148  or point to VmaAllocations that can become lost.
5149 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
5150  the requested allocation to succeed.
5151 */
5152 struct VmaAllocationRequest
5153 {
5154  VkDeviceSize offset;
5155  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
5156  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
5157  VmaSuballocationList::iterator item;
5158  size_t itemsToMakeLostCount;
5159  void* customData;
5160 
5161  VkDeviceSize CalcCost() const
5162  {
5163  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
5164  }
5165 };
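
// Worked example of CalcCost() with illustrative numbers: a request that would
// make 2 allocations lost, overlapping lost items of sumItemSize = 300000 bytes,
// costs 300000 + 2 * VMA_LOST_ALLOCATION_COST = 300000 + 2 * 1048576 = 2397152
// equivalent bytes, so it compares worse than any request that makes nothing
// lost and has a smaller cost.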
5166 
5167 /*
5168 Data structure used for bookkeeping of allocations and unused ranges of memory
5169 in a single VkDeviceMemory block.
5170 */
5171 class VmaBlockMetadata
5172 {
5173 public:
5174  VmaBlockMetadata(VmaAllocator hAllocator);
5175  virtual ~VmaBlockMetadata() { }
5176  virtual void Init(VkDeviceSize size) { m_Size = size; }
5177 
5178  // Validates all data structures inside this object. If not valid, returns false.
5179  virtual bool Validate() const = 0;
5180  VkDeviceSize GetSize() const { return m_Size; }
5181  virtual size_t GetAllocationCount() const = 0;
5182  virtual VkDeviceSize GetSumFreeSize() const = 0;
5183  virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
5184  // Returns true if this block is empty - contains only a single free suballocation.
5185  virtual bool IsEmpty() const = 0;
5186 
5187  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
5188  // Shouldn't modify blockCount.
5189  virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;
5190 
5191 #if VMA_STATS_STRING_ENABLED
5192  virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
5193 #endif
5194 
5195  // Tries to find a place for suballocation with given parameters inside this block.
5196  // If succeeded, fills pAllocationRequest and returns true.
5197  // If failed, returns false.
5198  virtual bool CreateAllocationRequest(
5199  uint32_t currentFrameIndex,
5200  uint32_t frameInUseCount,
5201  VkDeviceSize bufferImageGranularity,
5202  VkDeviceSize allocSize,
5203  VkDeviceSize allocAlignment,
5204  bool upperAddress,
5205  VmaSuballocationType allocType,
5206  bool canMakeOtherLost,
5207  // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* or VMA_ALLOCATION_INTERNAL_STRATEGY_* flags.
5208  uint32_t strategy,
5209  VmaAllocationRequest* pAllocationRequest) = 0;
5210 
5211  virtual bool MakeRequestedAllocationsLost(
5212  uint32_t currentFrameIndex,
5213  uint32_t frameInUseCount,
5214  VmaAllocationRequest* pAllocationRequest) = 0;
5215 
5216  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;
5217 
5218  virtual VkResult CheckCorruption(const void* pBlockData) = 0;
5219 
5220  // Makes actual allocation based on request. Request must already be checked and valid.
5221  virtual void Alloc(
5222  const VmaAllocationRequest& request,
5223  VmaSuballocationType type,
5224  VkDeviceSize allocSize,
5225  bool upperAddress,
5226  VmaAllocation hAllocation) = 0;
5227 
5228  // Frees suballocation assigned to given memory region.
5229  virtual void Free(const VmaAllocation allocation) = 0;
5230  virtual void FreeAtOffset(VkDeviceSize offset) = 0;
5231 
5232  // Tries to resize (grow or shrink) space for given allocation, in place.
5233  virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize) { return false; }
5234 
5235 protected:
5236  const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }
5237 
5238 #if VMA_STATS_STRING_ENABLED
5239  void PrintDetailedMap_Begin(class VmaJsonWriter& json,
5240  VkDeviceSize unusedBytes,
5241  size_t allocationCount,
5242  size_t unusedRangeCount) const;
5243  void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
5244  VkDeviceSize offset,
5245  VmaAllocation hAllocation) const;
5246  void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
5247  VkDeviceSize offset,
5248  VkDeviceSize size) const;
5249  void PrintDetailedMap_End(class VmaJsonWriter& json) const;
5250 #endif
5251 
5252 private:
5253  VkDeviceSize m_Size;
5254  const VkAllocationCallbacks* m_pAllocationCallbacks;
5255 };
5256 
5257 #define VMA_VALIDATE(cond) do { if(!(cond)) { \
5258  VMA_ASSERT(0 && "Validation failed: " #cond); \
5259  return false; \
5260  } } while(false)
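
// Typical use inside a Validate() implementation (the condition shown is
// hypothetical): one macro invocation per invariant, e.g.
//     VMA_VALIDATE(m_SumFreeSize <= GetSize());
// On failure it asserts with the stringified condition and makes the enclosing
// function return false.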
5261 
5262 class VmaBlockMetadata_Generic : public VmaBlockMetadata
5263 {
5264  VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
5265 public:
5266  VmaBlockMetadata_Generic(VmaAllocator hAllocator);
5267  virtual ~VmaBlockMetadata_Generic();
5268  virtual void Init(VkDeviceSize size);
5269 
5270  virtual bool Validate() const;
5271  virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
5272  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
5273  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
5274  virtual bool IsEmpty() const;
5275 
5276  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
5277  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
5278 
5279 #if VMA_STATS_STRING_ENABLED
5280  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
5281 #endif
5282 
5283  virtual bool CreateAllocationRequest(
5284  uint32_t currentFrameIndex,
5285  uint32_t frameInUseCount,
5286  VkDeviceSize bufferImageGranularity,
5287  VkDeviceSize allocSize,
5288  VkDeviceSize allocAlignment,
5289  bool upperAddress,
5290  VmaSuballocationType allocType,
5291  bool canMakeOtherLost,
5292  uint32_t strategy,
5293  VmaAllocationRequest* pAllocationRequest);
5294 
5295  virtual bool MakeRequestedAllocationsLost(
5296  uint32_t currentFrameIndex,
5297  uint32_t frameInUseCount,
5298  VmaAllocationRequest* pAllocationRequest);
5299 
5300  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5301 
5302  virtual VkResult CheckCorruption(const void* pBlockData);
5303 
5304  virtual void Alloc(
5305  const VmaAllocationRequest& request,
5306  VmaSuballocationType type,
5307  VkDeviceSize allocSize,
5308  bool upperAddress,
5309  VmaAllocation hAllocation);
5310 
5311  virtual void Free(const VmaAllocation allocation);
5312  virtual void FreeAtOffset(VkDeviceSize offset);
5313 
5314  virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize);
5315 
5316  ////////////////////////////////////////////////////////////////////////////////
5317  // For defragmentation
5318 
5319  bool IsBufferImageGranularityConflictPossible(
5320  VkDeviceSize bufferImageGranularity,
5321  VmaSuballocationType& inOutPrevSuballocType) const;
5322 
5323 private:
5324  friend class VmaDefragmentationAlgorithm_Generic;
5325  friend class VmaDefragmentationAlgorithm_Fast;
5326 
5327  uint32_t m_FreeCount;
5328  VkDeviceSize m_SumFreeSize;
5329  VmaSuballocationList m_Suballocations;
5330  // Suballocations that are free and have size greater than a certain threshold.
5331  // Sorted by size, ascending.
5332  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
5333 
5334  bool ValidateFreeSuballocationList() const;
5335 
5336  // Checks if requested suballocation with given parameters can be placed in given pFreeSuballocItem.
5337  // If yes, fills pOffset and returns true. If no, returns false.
5338  bool CheckAllocation(
5339  uint32_t currentFrameIndex,
5340  uint32_t frameInUseCount,
5341  VkDeviceSize bufferImageGranularity,
5342  VkDeviceSize allocSize,
5343  VkDeviceSize allocAlignment,
5344  VmaSuballocationType allocType,
5345  VmaSuballocationList::const_iterator suballocItem,
5346  bool canMakeOtherLost,
5347  VkDeviceSize* pOffset,
5348  size_t* itemsToMakeLostCount,
5349  VkDeviceSize* pSumFreeSize,
5350  VkDeviceSize* pSumItemSize) const;
5351  // Given a free suballocation, merges it with the following one, which must also be free.
5352  void MergeFreeWithNext(VmaSuballocationList::iterator item);
5353  // Releases given suballocation, making it free.
5354  // Merges it with adjacent free suballocations if applicable.
5355  // Returns iterator to new free suballocation at this place.
5356  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
5357  // Given a free suballocation, inserts it into the sorted list
5358  // m_FreeSuballocationsBySize if it is suitable.
5359  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
5360  // Given a free suballocation, removes it from the sorted list
5361  // m_FreeSuballocationsBySize if it is suitable.
5362  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
5363 };
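
// Illustrative note on best-fit search (an assumption about how the sorted list
// is used, for exposition): because m_FreeSuballocationsBySize is sorted by size
// ascending, finding a free range of at least allocSize bytes is a binary search
// for the first element whose size is not less than allocSize; every element
// from that position on is large enough, and the first one wastes the least space.
/*
    // it points to the first registered free suballocation with size >= allocSize,
    // or to the end of the array if none is large enough (cmpBySize is a
    // hypothetical comparator over suballocation sizes):
    VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
        m_FreeSuballocationsBySize.data(),
        m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
        allocSize,
        cmpBySize);
*/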
5364 
5365 /*
5366 Allocations and their references in internal data structure look like this:
5367 
5368 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
5369 
5370  0 +-------+
5371  | |
5372  | |
5373  | |
5374  +-------+
5375  | Alloc | 1st[m_1stNullItemsBeginCount]
5376  +-------+
5377  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
5378  +-------+
5379  | ... |
5380  +-------+
5381  | Alloc | 1st[1st.size() - 1]
5382  +-------+
5383  | |
5384  | |
5385  | |
5386 GetSize() +-------+
5387 
5388 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
5389 
5390  0 +-------+
5391  | Alloc | 2nd[0]
5392  +-------+
5393  | Alloc | 2nd[1]
5394  +-------+
5395  | ... |
5396  +-------+
5397  | Alloc | 2nd[2nd.size() - 1]
5398  +-------+
5399  | |
5400  | |
5401  | |
5402  +-------+
5403  | Alloc | 1st[m_1stNullItemsBeginCount]
5404  +-------+
5405  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
5406  +-------+
5407  | ... |
5408  +-------+
5409  | Alloc | 1st[1st.size() - 1]
5410  +-------+
5411  | |
5412 GetSize() +-------+
5413 
5414 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
5415 
5416  0 +-------+
5417  | |
5418  | |
5419  | |
5420  +-------+
5421  | Alloc | 1st[m_1stNullItemsBeginCount]
5422  +-------+
5423  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
5424  +-------+
5425  | ... |
5426  +-------+
5427  | Alloc | 1st[1st.size() - 1]
5428  +-------+
5429  | |
5430  | |
5431  | |
5432  +-------+
5433  | Alloc | 2nd[2nd.size() - 1]
5434  +-------+
5435  | ... |
5436  +-------+
5437  | Alloc | 2nd[1]
5438  +-------+
5439  | Alloc | 2nd[0]
5440 GetSize() +-------+
5441 
5442 */
5443 class VmaBlockMetadata_Linear : public VmaBlockMetadata
5444 {
5445  VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
5446 public:
5447  VmaBlockMetadata_Linear(VmaAllocator hAllocator);
5448  virtual ~VmaBlockMetadata_Linear();
5449  virtual void Init(VkDeviceSize size);
5450 
5451  virtual bool Validate() const;
5452  virtual size_t GetAllocationCount() const;
5453  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
5454  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
5455  virtual bool IsEmpty() const { return GetAllocationCount() == 0; }
5456 
5457  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
5458  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
5459 
5460 #if VMA_STATS_STRING_ENABLED
5461  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
5462 #endif
5463 
5464  virtual bool CreateAllocationRequest(
5465  uint32_t currentFrameIndex,
5466  uint32_t frameInUseCount,
5467  VkDeviceSize bufferImageGranularity,
5468  VkDeviceSize allocSize,
5469  VkDeviceSize allocAlignment,
5470  bool upperAddress,
5471  VmaSuballocationType allocType,
5472  bool canMakeOtherLost,
5473  uint32_t strategy,
5474  VmaAllocationRequest* pAllocationRequest);
5475 
5476  virtual bool MakeRequestedAllocationsLost(
5477  uint32_t currentFrameIndex,
5478  uint32_t frameInUseCount,
5479  VmaAllocationRequest* pAllocationRequest);
5480 
5481  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5482 
5483  virtual VkResult CheckCorruption(const void* pBlockData);
5484 
5485  virtual void Alloc(
5486  const VmaAllocationRequest& request,
5487  VmaSuballocationType type,
5488  VkDeviceSize allocSize,
5489  bool upperAddress,
5490  VmaAllocation hAllocation);
5491 
5492  virtual void Free(const VmaAllocation allocation);
5493  virtual void FreeAtOffset(VkDeviceSize offset);
5494 
5495 private:
5496  /*
5497  There are two suballocation vectors, used in a ping-pong fashion.
5498  The one with index m_1stVectorIndex is called 1st.
5499  The one with index (m_1stVectorIndex ^ 1) is called 2nd.
5500  2nd can be non-empty only when 1st is not empty.
5501  When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
5502  */
5503  typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;
5504 
5505  enum SECOND_VECTOR_MODE
5506  {
5507  SECOND_VECTOR_EMPTY,
5508  /*
5509  Suballocations in 2nd vector are created later than the ones in 1st, but they
5510  all have smaller offsets.
5511  */
5512  SECOND_VECTOR_RING_BUFFER,
5513  /*
5514  Suballocations in 2nd vector are the upper side of a double stack.
5515  They all have offsets higher than those in 1st vector.
5516  Top of this stack means smaller offsets, but higher indices in this vector.
5517  */
5518  SECOND_VECTOR_DOUBLE_STACK,
5519  };
5520 
5521  VkDeviceSize m_SumFreeSize;
5522  SuballocationVectorType m_Suballocations0, m_Suballocations1;
5523  uint32_t m_1stVectorIndex;
5524  SECOND_VECTOR_MODE m_2ndVectorMode;
5525 
5526  SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
5527  SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
5528  const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
5529  const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
5530 
5531  // Number of items in 1st vector with hAllocation = null at the beginning.
5532  size_t m_1stNullItemsBeginCount;
5533  // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
5534  size_t m_1stNullItemsMiddleCount;
5535  // Number of items in 2nd vector with hAllocation = null.
5536  size_t m_2ndNullItemsCount;
5537 
5538  bool ShouldCompact1st() const;
5539  void CleanupAfterFree();
5540 };
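
// Illustrative walk-through of the ring-buffer mode pictured above (numbers are
// made up for exposition): in a 100-byte block, allocating A(30), B(30), C(30)
// fills 1st = [A, B, C]. After A is freed, a new allocation D(20) that no longer
// fits behind C (only 10 bytes remain) is placed at offset 0 in the 2nd vector
// and m_2ndVectorMode becomes SECOND_VECTOR_RING_BUFFER. Once every element of
// 1st has been freed, the vectors swap roles via m_1stVectorIndex (the ping-pong
// described in the private section) and the former 2nd becomes the new 1st.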
5541 
5542 /*
5543 - GetSize() is the original size of the allocated memory block.
5544 - m_UsableSize is this size aligned down to a power of two.
5545  All allocations and calculations happen relative to m_UsableSize.
5546 - GetUnusableSize() is the difference between them.
5547  It is reported as a separate, unused range, not available for allocations.
5548 
5549 Node at level 0 has size = m_UsableSize.
5550 Each next level contains nodes with size 2 times smaller than current level.
5551 m_LevelCount is the maximum number of levels to use in the current object.
5552 */
5553 class VmaBlockMetadata_Buddy : public VmaBlockMetadata
5554 {
5555  VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
5556 public:
5557  VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
5558  virtual ~VmaBlockMetadata_Buddy();
5559  virtual void Init(VkDeviceSize size);
5560 
5561  virtual bool Validate() const;
5562  virtual size_t GetAllocationCount() const { return m_AllocationCount; }
5563  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
5564  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
5565  virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }
5566 
5567  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
5568  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
5569 
5570 #if VMA_STATS_STRING_ENABLED
5571  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
5572 #endif
5573 
5574  virtual bool CreateAllocationRequest(
5575  uint32_t currentFrameIndex,
5576  uint32_t frameInUseCount,
5577  VkDeviceSize bufferImageGranularity,
5578  VkDeviceSize allocSize,
5579  VkDeviceSize allocAlignment,
5580  bool upperAddress,
5581  VmaSuballocationType allocType,
5582  bool canMakeOtherLost,
5583  uint32_t strategy,
5584  VmaAllocationRequest* pAllocationRequest);
5585 
5586  virtual bool MakeRequestedAllocationsLost(
5587  uint32_t currentFrameIndex,
5588  uint32_t frameInUseCount,
5589  VmaAllocationRequest* pAllocationRequest);
5590 
5591  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5592 
5593  virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }
5594 
5595  virtual void Alloc(
5596  const VmaAllocationRequest& request,
5597  VmaSuballocationType type,
5598  VkDeviceSize allocSize,
5599  bool upperAddress,
5600  VmaAllocation hAllocation);
5601 
5602  virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
5603  virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }
5604 
5605 private:
5606  static const VkDeviceSize MIN_NODE_SIZE = 32;
5607  static const size_t MAX_LEVELS = 30;
5608 
5609  struct ValidationContext
5610  {
5611  size_t calculatedAllocationCount;
5612  size_t calculatedFreeCount;
5613  VkDeviceSize calculatedSumFreeSize;
5614 
5615  ValidationContext() :
5616  calculatedAllocationCount(0),
5617  calculatedFreeCount(0),
5618  calculatedSumFreeSize(0) { }
5619  };
5620 
5621  struct Node
5622  {
5623  VkDeviceSize offset;
5624  enum TYPE
5625  {
5626  TYPE_FREE,
5627  TYPE_ALLOCATION,
5628  TYPE_SPLIT,
5629  TYPE_COUNT
5630  } type;
5631  Node* parent;
5632  Node* buddy;
5633 
5634  union
5635  {
5636  struct
5637  {
5638  Node* prev;
5639  Node* next;
5640  } free;
5641  struct
5642  {
5643  VmaAllocation alloc;
5644  } allocation;
5645  struct
5646  {
5647  Node* leftChild;
5648  } split;
5649  };
5650  };
5651 
5652  // Size of the memory block aligned down to a power of two.
5653  VkDeviceSize m_UsableSize;
5654  uint32_t m_LevelCount;
5655 
5656  Node* m_Root;
5657  struct {
5658  Node* front;
5659  Node* back;
5660  } m_FreeList[MAX_LEVELS];
5661  // Number of nodes in the tree with type == TYPE_ALLOCATION.
5662  size_t m_AllocationCount;
5663  // Number of nodes in the tree with type == TYPE_FREE.
5664  size_t m_FreeCount;
5665  // This includes space wasted due to internal fragmentation. Doesn't include unusable size.
5666  VkDeviceSize m_SumFreeSize;
5667 
5668  VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
5669  void DeleteNode(Node* node);
5670  bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
5671  uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
5672  inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
5673  // Alloc passed just for validation. Can be null.
5674  void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
5675  void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
5676  // Adds node to the front of FreeList at given level.
5677  // node->type must be FREE.
5678  // node->free.prev, next can be undefined.
5679  void AddToFreeListFront(uint32_t level, Node* node);
5680  // Removes node from FreeList at given level.
5681  // node->type must be FREE.
5682  // node->free.prev, next stay untouched.
5683  void RemoveFromFreeList(uint32_t level, Node* node);
5684 
5685 #if VMA_STATS_STRING_ENABLED
5686  void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
5687 #endif
5688 };
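
// Worked example of the level arithmetic above, with illustrative numbers: for a
// 33 MB block, m_UsableSize is aligned down to 32 MB and GetUnusableSize() is
// 1 MB. Then LevelToNodeSize(0) = 32 MB, LevelToNodeSize(1) = 16 MB,
// LevelToNodeSize(2) = 8 MB. An 8 MB request maps to level 2; a 5 MB request
// also lands on level 2 (a level-3 node of 4 MB would be too small) and wastes
// 3 MB to internal fragmentation, which is counted in m_SumFreeSize.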
5689 
5690 /*
5691 Represents a single block of device memory (`VkDeviceMemory`) with all the
5692 data about its regions (aka suballocations, #VmaAllocation), assigned and free.
5693 
5694 Thread-safety: This class must be externally synchronized.
5695 */
5696 class VmaDeviceMemoryBlock
5697 {
5698  VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
5699 public:
5700  VmaBlockMetadata* m_pMetadata;
5701 
5702  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
5703 
5704  ~VmaDeviceMemoryBlock()
5705  {
5706  VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
5707  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
5708  }
5709 
5710  // Always call after construction.
5711  void Init(
5712  VmaAllocator hAllocator,
5713  uint32_t newMemoryTypeIndex,
5714  VkDeviceMemory newMemory,
5715  VkDeviceSize newSize,
5716  uint32_t id,
5717  uint32_t algorithm);
5718  // Always call before destruction.
5719  void Destroy(VmaAllocator allocator);
5720 
5721  VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
5722  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
5723  uint32_t GetId() const { return m_Id; }
5724  void* GetMappedData() const { return m_pMappedData; }
5725 
5726  // Validates all data structures inside this object. If not valid, returns false.
5727  bool Validate() const;
5728 
5729  VkResult CheckCorruption(VmaAllocator hAllocator);
5730 
5731  // ppData can be null.
5732  VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
5733  void Unmap(VmaAllocator hAllocator, uint32_t count);
5734 
5735  VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
5736  VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
5737 
5738  VkResult BindBufferMemory(
5739  const VmaAllocator hAllocator,
5740  const VmaAllocation hAllocation,
5741  VkBuffer hBuffer);
5742  VkResult BindImageMemory(
5743  const VmaAllocator hAllocator,
5744  const VmaAllocation hAllocation,
5745  VkImage hImage);
5746 
5747 private:
5748  uint32_t m_MemoryTypeIndex;
5749  uint32_t m_Id;
5750  VkDeviceMemory m_hMemory;
5751 
5752  /*
5753  Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
5754  Also protects m_MapCount, m_pMappedData.
5755  Allocations, deallocations, any change in m_pMetadata is protected by parent's VmaBlockVector::m_Mutex.
5756  */
5757  VMA_MUTEX m_Mutex;
5758  uint32_t m_MapCount;
5759  void* m_pMappedData;
5760 };
5761 
5762 struct VmaPointerLess
5763 {
5764  bool operator()(const void* lhs, const void* rhs) const
5765  {
5766  return lhs < rhs;
5767  }
5768 };
5769 
5770 struct VmaDefragmentationMove
5771 {
5772  size_t srcBlockIndex;
5773  size_t dstBlockIndex;
5774  VkDeviceSize srcOffset;
5775  VkDeviceSize dstOffset;
5776  VkDeviceSize size;
5777 };
5778 
5779 class VmaDefragmentationAlgorithm;
5780 
5781 /*
5782 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
5783 Vulkan memory type.
5784 
5785 Synchronized internally with a mutex.
5786 */
5787 struct VmaBlockVector
5788 {
5789  VMA_CLASS_NO_COPY(VmaBlockVector)
5790 public:
5791  VmaBlockVector(
5792  VmaAllocator hAllocator,
5793  uint32_t memoryTypeIndex,
5794  VkDeviceSize preferredBlockSize,
5795  size_t minBlockCount,
5796  size_t maxBlockCount,
5797  VkDeviceSize bufferImageGranularity,
5798  uint32_t frameInUseCount,
5799  bool isCustomPool,
5800  bool explicitBlockSize,
5801  uint32_t algorithm);
5802  ~VmaBlockVector();
5803 
5804  VkResult CreateMinBlocks();
5805 
5806  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
5807  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
5808  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
5809  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
5810  uint32_t GetAlgorithm() const { return m_Algorithm; }
5811 
5812  void GetPoolStats(VmaPoolStats* pStats);
5813 
5814  bool IsEmpty() const { return m_Blocks.empty(); }
5815  bool IsCorruptionDetectionEnabled() const;
5816 
5817  VkResult Allocate(
5818  VmaPool hCurrentPool,
5819  uint32_t currentFrameIndex,
5820  VkDeviceSize size,
5821  VkDeviceSize alignment,
5822  const VmaAllocationCreateInfo& createInfo,
5823  VmaSuballocationType suballocType,
5824  size_t allocationCount,
5825  VmaAllocation* pAllocations);
5826 
5827  void Free(
5828  VmaAllocation hAllocation);
5829 
5830  // Adds statistics of this BlockVector to pStats.
5831  void AddStats(VmaStats* pStats);
5832 
5833 #if VMA_STATS_STRING_ENABLED
5834  void PrintDetailedMap(class VmaJsonWriter& json);
5835 #endif
5836 
5837  void MakePoolAllocationsLost(
5838  uint32_t currentFrameIndex,
5839  size_t* pLostAllocationCount);
5840  VkResult CheckCorruption();
5841 
5842  // Saves results in pCtx->res.
5843  void Defragment(
5844  class VmaBlockVectorDefragmentationContext* pCtx,
5845  VmaDefragmentationStats* pStats,
5846  VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
5847  VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
5848  VkCommandBuffer commandBuffer);
5849  void DefragmentationEnd(
5850  class VmaBlockVectorDefragmentationContext* pCtx,
5851  VmaDefragmentationStats* pStats);
5852 
5853  ////////////////////////////////////////////////////////////////////////////////
5854  // To be used only while the m_Mutex is locked. Used during defragmentation.
5855 
5856  size_t GetBlockCount() const { return m_Blocks.size(); }
5857  VmaDeviceMemoryBlock* GetBlock(size_t index) const { return m_Blocks[index]; }
5858  size_t CalcAllocationCount() const;
5859  bool IsBufferImageGranularityConflictPossible() const;
5860 
5861 private:
5862  friend class VmaDefragmentationAlgorithm_Generic;
5863 
5864  const VmaAllocator m_hAllocator;
5865  const uint32_t m_MemoryTypeIndex;
5866  const VkDeviceSize m_PreferredBlockSize;
5867  const size_t m_MinBlockCount;
5868  const size_t m_MaxBlockCount;
5869  const VkDeviceSize m_BufferImageGranularity;
5870  const uint32_t m_FrameInUseCount;
5871  const bool m_IsCustomPool;
5872  const bool m_ExplicitBlockSize;
5873  const uint32_t m_Algorithm;
5874  /* There can be at most one block that is completely empty - a hysteresis
5875  to avoid the pessimistic case of alternating creation and destruction of
5876  a VkDeviceMemory. */
5877  bool m_HasEmptyBlock;
5878  VMA_RW_MUTEX m_Mutex;
5879  // Incrementally sorted by sumFreeSize, ascending.
5880  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
5881  uint32_t m_NextBlockId;
5882 
5883  VkDeviceSize CalcMaxBlockSize() const;
5884 
5885  // Finds and removes given block from vector.
5886  void Remove(VmaDeviceMemoryBlock* pBlock);
5887 
5888  // Performs single step in sorting m_Blocks. They may not be fully sorted
5889  // after this call.
5890  void IncrementallySortBlocks();
5891 
5892  VkResult AllocatePage(
5893  VmaPool hCurrentPool,
5894  uint32_t currentFrameIndex,
5895  VkDeviceSize size,
5896  VkDeviceSize alignment,
5897  const VmaAllocationCreateInfo& createInfo,
5898  VmaSuballocationType suballocType,
5899  VmaAllocation* pAllocation);
5900 
5901  // To be used only without the CAN_MAKE_OTHER_LOST flag.
5902  VkResult AllocateFromBlock(
5903  VmaDeviceMemoryBlock* pBlock,
5904  VmaPool hCurrentPool,
5905  uint32_t currentFrameIndex,
5906  VkDeviceSize size,
5907  VkDeviceSize alignment,
5908  VmaAllocationCreateFlags allocFlags,
5909  void* pUserData,
5910  VmaSuballocationType suballocType,
5911  uint32_t strategy,
5912  VmaAllocation* pAllocation);
5913 
5914  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
5915 
5916  // Saves result to pCtx->res.
5917  void ApplyDefragmentationMovesCpu(
5918  class VmaBlockVectorDefragmentationContext* pDefragCtx,
5919  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves);
5920  // Saves result to pCtx->res.
5921  void ApplyDefragmentationMovesGpu(
5922  class VmaBlockVectorDefragmentationContext* pDefragCtx,
5923  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
5924  VkCommandBuffer commandBuffer);
5925 
5926  /*
5927  Used during defragmentation. pDefragmentationStats is optional: it is an
5928  in/out parameter, updated with new data.
5929  */
5930  void FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats);
5931 };
5932 
5933 struct VmaPool_T
5934 {
5935  VMA_CLASS_NO_COPY(VmaPool_T)
5936 public:
5937  VmaBlockVector m_BlockVector;
5938 
5939  VmaPool_T(
5940  VmaAllocator hAllocator,
5941  const VmaPoolCreateInfo& createInfo,
5942  VkDeviceSize preferredBlockSize);
5943  ~VmaPool_T();
5944 
5945  uint32_t GetId() const { return m_Id; }
5946  void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }
5947 
5948 #if VMA_STATS_STRING_ENABLED
5949  //void PrintDetailedMap(class VmaStringBuilder& sb);
5950 #endif
5951 
5952 private:
5953  uint32_t m_Id;
5954 };
5955 
5956 /*
5957 Performs defragmentation:
5958 
5959 - Updates `pBlockVector->m_pMetadata`.
5960 - Updates allocations by calling ChangeBlockAllocation() or ChangeOffset().
5961 - Does not move actual data, only returns requested moves as `moves`.
5962 */
5963 class VmaDefragmentationAlgorithm
5964 {
5965  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm)
5966 public:
5967  VmaDefragmentationAlgorithm(
5968  VmaAllocator hAllocator,
5969  VmaBlockVector* pBlockVector,
5970  uint32_t currentFrameIndex) :
5971  m_hAllocator(hAllocator),
5972  m_pBlockVector(pBlockVector),
5973  m_CurrentFrameIndex(currentFrameIndex)
5974  {
5975  }
5976  virtual ~VmaDefragmentationAlgorithm()
5977  {
5978  }
5979 
5980  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) = 0;
5981  virtual void AddAll() = 0;
5982 
5983  virtual VkResult Defragment(
5984  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
5985  VkDeviceSize maxBytesToMove,
5986  uint32_t maxAllocationsToMove) = 0;
5987 
5988  virtual VkDeviceSize GetBytesMoved() const = 0;
5989  virtual uint32_t GetAllocationsMoved() const = 0;
5990 
5991 protected:
5992  VmaAllocator const m_hAllocator;
5993  VmaBlockVector* const m_pBlockVector;
5994  const uint32_t m_CurrentFrameIndex;
5995 
5996  struct AllocationInfo
5997  {
5998  VmaAllocation m_hAllocation;
5999  VkBool32* m_pChanged;
6000 
6001  AllocationInfo() :
6002  m_hAllocation(VK_NULL_HANDLE),
6003  m_pChanged(VMA_NULL)
6004  {
6005  }
6006  AllocationInfo(VmaAllocation hAlloc, VkBool32* pChanged) :
6007  m_hAllocation(hAlloc),
6008  m_pChanged(pChanged)
6009  {
6010  }
6011  };
6012 };
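
// Minimal sketch of the driver loop this interface implies (an assumption for
// exposition; locals such as pAllocationCallbacks are hypothetical):
/*
    VmaDefragmentationAlgorithm* const pAlgo = ...; // _Generic or _Fast, see below
    pAlgo->AddAll(); // or AddAllocation() per allocation to consider
    VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> > moves(
        VmaStlAllocator<VmaDefragmentationMove>(pAllocationCallbacks));
    VkResult res = pAlgo->Defragment(moves, maxBytesToMove, maxAllocationsToMove);
    // On success the metadata and VmaAllocation objects are already updated;
    // the caller still has to copy the bytes described by each returned
    // VmaDefragmentationMove (srcBlockIndex/srcOffset -> dstBlockIndex/dstOffset).
*/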
6013 
6014 class VmaDefragmentationAlgorithm_Generic : public VmaDefragmentationAlgorithm
6015 {
6016  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Generic)
6017 public:
6018  VmaDefragmentationAlgorithm_Generic(
6019  VmaAllocator hAllocator,
6020  VmaBlockVector* pBlockVector,
6021  uint32_t currentFrameIndex,
6022  bool overlappingMoveSupported);
6023  virtual ~VmaDefragmentationAlgorithm_Generic();
6024 
6025  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
6026  virtual void AddAll() { m_AllAllocations = true; }
6027 
6028  virtual VkResult Defragment(
6029  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6030  VkDeviceSize maxBytesToMove,
6031  uint32_t maxAllocationsToMove);
6032 
6033  virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
6034  virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
6035 
6036 private:
6037  uint32_t m_AllocationCount;
6038  bool m_AllAllocations;
6039 
6040  VkDeviceSize m_BytesMoved;
6041  uint32_t m_AllocationsMoved;
6042 
6043  struct AllocationInfoSizeGreater
6044  {
6045  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
6046  {
6047  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
6048  }
6049  };
6050 
6051  struct AllocationInfoOffsetGreater
6052  {
6053  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
6054  {
6055  return lhs.m_hAllocation->GetOffset() > rhs.m_hAllocation->GetOffset();
6056  }
6057  };
6058 
6059  struct BlockInfo
6060  {
6061  size_t m_OriginalBlockIndex;
6062  VmaDeviceMemoryBlock* m_pBlock;
6063  bool m_HasNonMovableAllocations;
6064  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
6065 
6066  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
6067  m_OriginalBlockIndex(SIZE_MAX),
6068  m_pBlock(VMA_NULL),
6069  m_HasNonMovableAllocations(true),
6070  m_Allocations(pAllocationCallbacks)
6071  {
6072  }
6073 
6074  void CalcHasNonMovableAllocations()
6075  {
6076  const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
6077  const size_t defragmentAllocCount = m_Allocations.size();
6078  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
6079  }
6080 
6081  void SortAllocationsBySizeDescending()
6082  {
6083  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
6084  }
6085 
6086  void SortAllocationsByOffsetDescending()
6087  {
6088  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoOffsetGreater());
6089  }
6090  };
6091 
6092  struct BlockPointerLess
6093  {
6094  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
6095  {
6096  return pLhsBlockInfo->m_pBlock < pRhsBlock;
6097  }
6098  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
6099  {
6100  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
6101  }
6102  };
6103 
6104  // 1. Blocks with some non-movable allocations go first.
6105  // 2. Blocks with smaller sumFreeSize go first.
6106  struct BlockInfoCompareMoveDestination
6107  {
6108  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
6109  {
6110  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
6111  {
6112  return true;
6113  }
6114  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
6115  {
6116  return false;
6117  }
6118  if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
6119  {
6120  return true;
6121  }
6122  return false;
6123  }
6124  };
6125 
6126  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
6127  BlockInfoVector m_Blocks;
6128 
6129  VkResult DefragmentRound(
6130  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6131  VkDeviceSize maxBytesToMove,
6132  uint32_t maxAllocationsToMove);
6133 
6134  size_t CalcBlocksWithNonMovableCount() const;
6135 
6136  static bool MoveMakesSense(
6137  size_t dstBlockIndex, VkDeviceSize dstOffset,
6138  size_t srcBlockIndex, VkDeviceSize srcOffset);
6139 };
6140 
6141 class VmaDefragmentationAlgorithm_Fast : public VmaDefragmentationAlgorithm
6142 {
6143  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Fast)
6144 public:
6145  VmaDefragmentationAlgorithm_Fast(
6146  VmaAllocator hAllocator,
6147  VmaBlockVector* pBlockVector,
6148  uint32_t currentFrameIndex,
6149  bool overlappingMoveSupported);
6150  virtual ~VmaDefragmentationAlgorithm_Fast();
6151 
6152  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) { ++m_AllocationCount; }
6153  virtual void AddAll() { m_AllAllocations = true; }
6154 
6155  virtual VkResult Defragment(
6156  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6157  VkDeviceSize maxBytesToMove,
6158  uint32_t maxAllocationsToMove);
6159 
6160  virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
6161  virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
6162 
6163 private:
6164  struct BlockInfo
6165  {
6166  size_t origBlockIndex;
6167  };
6168 
6169  class FreeSpaceDatabase
6170  {
6171  public:
6172  FreeSpaceDatabase()
6173  {
6174  FreeSpace s = {};
6175  s.blockInfoIndex = SIZE_MAX;
6176  for(size_t i = 0; i < MAX_COUNT; ++i)
6177  {
6178  m_FreeSpaces[i] = s;
6179  }
6180  }
6181 
6182  void Register(size_t blockInfoIndex, VkDeviceSize offset, VkDeviceSize size)
6183  {
6184  if(size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
6185  {
6186  return;
6187  }
6188 
6189  // Find first invalid or the smallest structure.
6190  size_t bestIndex = SIZE_MAX;
6191  for(size_t i = 0; i < MAX_COUNT; ++i)
6192  {
6193  // Empty structure.
6194  if(m_FreeSpaces[i].blockInfoIndex == SIZE_MAX)
6195  {
6196  bestIndex = i;
6197  break;
6198  }
6199  if(m_FreeSpaces[i].size < size &&
6200  (bestIndex == SIZE_MAX || m_FreeSpaces[bestIndex].size > m_FreeSpaces[i].size))
6201  {
6202  bestIndex = i;
6203  }
6204  }
6205 
6206  if(bestIndex != SIZE_MAX)
6207  {
6208  m_FreeSpaces[bestIndex].blockInfoIndex = blockInfoIndex;
6209  m_FreeSpaces[bestIndex].offset = offset;
6210  m_FreeSpaces[bestIndex].size = size;
6211  }
6212  }
6213 
6214  bool Fetch(VkDeviceSize alignment, VkDeviceSize size,
6215  size_t& outBlockInfoIndex, VkDeviceSize& outDstOffset)
6216  {
6217  size_t bestIndex = SIZE_MAX;
6218  VkDeviceSize bestFreeSpaceAfter = 0;
6219  for(size_t i = 0; i < MAX_COUNT; ++i)
6220  {
6221  // Structure is valid.
6222  if(m_FreeSpaces[i].blockInfoIndex != SIZE_MAX)
6223  {
6224  const VkDeviceSize dstOffset = VmaAlignUp(m_FreeSpaces[i].offset, alignment);
6225  // Allocation fits into this structure.
6226  if(dstOffset + size <= m_FreeSpaces[i].offset + m_FreeSpaces[i].size)
6227  {
6228  const VkDeviceSize freeSpaceAfter = (m_FreeSpaces[i].offset + m_FreeSpaces[i].size) -
6229  (dstOffset + size);
6230  if(bestIndex == SIZE_MAX || freeSpaceAfter > bestFreeSpaceAfter)
6231  {
6232  bestIndex = i;
6233  bestFreeSpaceAfter = freeSpaceAfter;
6234  }
6235  }
6236  }
6237  }
6238 
6239  if(bestIndex != SIZE_MAX)
6240  {
6241  outBlockInfoIndex = m_FreeSpaces[bestIndex].blockInfoIndex;
6242  outDstOffset = VmaAlignUp(m_FreeSpaces[bestIndex].offset, alignment);
6243 
6244  if(bestFreeSpaceAfter >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
6245  {
6246  // Leave this structure for remaining empty space.
6247  const VkDeviceSize alignmentPlusSize = (outDstOffset - m_FreeSpaces[bestIndex].offset) + size;
6248  m_FreeSpaces[bestIndex].offset += alignmentPlusSize;
6249  m_FreeSpaces[bestIndex].size -= alignmentPlusSize;
6250  }
6251  else
6252  {
6253  // This structure becomes invalid.
6254  m_FreeSpaces[bestIndex].blockInfoIndex = SIZE_MAX;
6255  }
6256 
6257  return true;
6258  }
6259 
6260  return false;
6261  }
6262 
6263  private:
6264  static const size_t MAX_COUNT = 4;
6265 
6266  struct FreeSpace
6267  {
6268  size_t blockInfoIndex; // SIZE_MAX means this structure is invalid.
6269  VkDeviceSize offset;
6270  VkDeviceSize size;
6271  } m_FreeSpaces[MAX_COUNT];
6272  };
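
    // Illustrative walk-through (numbers made up for exposition): after
    // Register(blockInfoIndex = 0, offset = 100, size = 1000), a call to
    // Fetch(alignment = 256, size = 500) aligns the offset up to 256 and checks
    // 256 + 500 <= 100 + 1000, so it succeeds with outDstOffset = 256. The used
    // span is (256 - 100) + 500 = 656 bytes, so the entry shrinks to
    // offset = 756, size = 344, and stays registered as long as 344 is at least
    // VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER; otherwise it would be
    // invalidated by setting blockInfoIndex = SIZE_MAX.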
6273 
6274  const bool m_OverlappingMoveSupported;
6275 
6276  uint32_t m_AllocationCount;
6277  bool m_AllAllocations;
6278 
6279  VkDeviceSize m_BytesMoved;
6280  uint32_t m_AllocationsMoved;
6281 
6282  VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> > m_BlockInfos;
6283 
6284  void PreprocessMetadata();
6285  void PostprocessMetadata();
6286  void InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc);
6287 };
6288 
6289 struct VmaBlockDefragmentationContext
6290 {
6291  enum BLOCK_FLAG
6292  {
6293  BLOCK_FLAG_USED = 0x00000001,
6294  };
6295  uint32_t flags;
6296  VkBuffer hBuffer;
6297 
6298  VmaBlockDefragmentationContext() :
6299  flags(0),
6300  hBuffer(VK_NULL_HANDLE)
6301  {
6302  }
6303 };
6304 
6305 class VmaBlockVectorDefragmentationContext
6306 {
6307  VMA_CLASS_NO_COPY(VmaBlockVectorDefragmentationContext)
6308 public:
6309  VkResult res;
6310  bool mutexLocked;
6311  VmaVector< VmaBlockDefragmentationContext, VmaStlAllocator<VmaBlockDefragmentationContext> > blockContexts;
6312 
6313  VmaBlockVectorDefragmentationContext(
6314  VmaAllocator hAllocator,
6315  VmaPool hCustomPool, // Optional.
6316  VmaBlockVector* pBlockVector,
6317  uint32_t currFrameIndex,
6318  uint32_t flags);
6319  ~VmaBlockVectorDefragmentationContext();
6320 
6321  VmaPool GetCustomPool() const { return m_hCustomPool; }
6322  VmaBlockVector* GetBlockVector() const { return m_pBlockVector; }
6323  VmaDefragmentationAlgorithm* GetAlgorithm() const { return m_pAlgorithm; }
6324 
6325  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
6326  void AddAll() { m_AllAllocations = true; }
6327 
6328  void Begin(bool overlappingMoveSupported);
6329 
6330 private:
6331  const VmaAllocator m_hAllocator;
6332  // Null if not from custom pool.
6333  const VmaPool m_hCustomPool;
6334  // Redundant, kept for convenience so it does not have to be fetched from m_hCustomPool->m_BlockVector or m_hAllocator->m_pBlockVectors.
6335  VmaBlockVector* const m_pBlockVector;
6336  const uint32_t m_CurrFrameIndex;
6337  const uint32_t m_AlgorithmFlags;
6338  // Owner of this object.
6339  VmaDefragmentationAlgorithm* m_pAlgorithm;
6340 
6341  struct AllocInfo
6342  {
6343  VmaAllocation hAlloc;
6344  VkBool32* pChanged;
6345  };
6346  // Used between constructor and Begin.
6347  VmaVector< AllocInfo, VmaStlAllocator<AllocInfo> > m_Allocations;
6348  bool m_AllAllocations;
6349 };
6350 
6351 struct VmaDefragmentationContext_T
6352 {
6353 private:
6354  VMA_CLASS_NO_COPY(VmaDefragmentationContext_T)
6355 public:
6356  VmaDefragmentationContext_T(
6357  VmaAllocator hAllocator,
6358  uint32_t currFrameIndex,
6359  uint32_t flags,
6360  VmaDefragmentationStats* pStats);
6361  ~VmaDefragmentationContext_T();
6362 
6363  void AddPools(uint32_t poolCount, VmaPool* pPools);
6364  void AddAllocations(
6365  uint32_t allocationCount,
6366  VmaAllocation* pAllocations,
6367  VkBool32* pAllocationsChanged);
6368 
6369  /*
6370  Returns:
6371  - `VK_SUCCESS` if succeeded and object can be destroyed immediately.
6372  - `VK_NOT_READY` if succeeded but the object must remain alive until vmaDefragmentationEnd().
6373  - Negative value if error occurred and object can be destroyed immediately.
6374  */
6375  VkResult Defragment(
6376  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
6377  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
6378  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats);
6379 
6380 private:
6381  const VmaAllocator m_hAllocator;
6382  const uint32_t m_CurrFrameIndex;
6383  const uint32_t m_Flags;
6384  VmaDefragmentationStats* const m_pStats;
6385  // Owner of these objects.
6386  VmaBlockVectorDefragmentationContext* m_DefaultPoolContexts[VK_MAX_MEMORY_TYPES];
6387  // Owner of these objects.
6388  VmaVector< VmaBlockVectorDefragmentationContext*, VmaStlAllocator<VmaBlockVectorDefragmentationContext*> > m_CustomPoolContexts;
6389 };
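// A minimal sketch of driving this context through the public API
// (illustrative; `allocator`, `allocs`, and `count` are assumed to exist in
// the caller's code, and no command buffer is passed, so only CPU-side moves
// are allowed):
//
//   VmaDefragmentationInfo2 info = {};
//   info.allocationCount = count;
//   info.pAllocations = allocs;
//   info.maxCpuBytesToMove = VK_WHOLE_SIZE;
//   info.maxCpuAllocationsToMove = UINT32_MAX;
//   VmaDefragmentationContext defragCtx = VK_NULL_HANDLE;
//   VkResult res = vmaDefragmentationBegin(allocator, &info, VMA_NULL, &defragCtx);
//   // VK_SUCCESS: defragmentation already finished. VK_NOT_READY: work still
//   // pending (e.g. recorded into info.commandBuffer) and must complete first.
//   // Either way, vmaDefragmentationEnd() destroys the context:
//   vmaDefragmentationEnd(allocator, defragCtx);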
6390 
6391 #if VMA_RECORDING_ENABLED
6392 
6393 class VmaRecorder
6394 {
6395 public:
6396  VmaRecorder();
6397  VkResult Init(const VmaRecordSettings& settings, bool useMutex);
6398  void WriteConfiguration(
6399  const VkPhysicalDeviceProperties& devProps,
6400  const VkPhysicalDeviceMemoryProperties& memProps,
6401  bool dedicatedAllocationExtensionEnabled);
6402  ~VmaRecorder();
6403 
6404  void RecordCreateAllocator(uint32_t frameIndex);
6405  void RecordDestroyAllocator(uint32_t frameIndex);
6406  void RecordCreatePool(uint32_t frameIndex,
6407  const VmaPoolCreateInfo& createInfo,
6408  VmaPool pool);
6409  void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
6410  void RecordAllocateMemory(uint32_t frameIndex,
6411  const VkMemoryRequirements& vkMemReq,
6412  const VmaAllocationCreateInfo& createInfo,
6413  VmaAllocation allocation);
6414  void RecordAllocateMemoryPages(uint32_t frameIndex,
6415  const VkMemoryRequirements& vkMemReq,
6416  const VmaAllocationCreateInfo& createInfo,
6417  uint64_t allocationCount,
6418  const VmaAllocation* pAllocations);
6419  void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
6420  const VkMemoryRequirements& vkMemReq,
6421  bool requiresDedicatedAllocation,
6422  bool prefersDedicatedAllocation,
6423  const VmaAllocationCreateInfo& createInfo,
6424  VmaAllocation allocation);
6425  void RecordAllocateMemoryForImage(uint32_t frameIndex,
6426  const VkMemoryRequirements& vkMemReq,
6427  bool requiresDedicatedAllocation,
6428  bool prefersDedicatedAllocation,
6429  const VmaAllocationCreateInfo& createInfo,
6430  VmaAllocation allocation);
6431  void RecordFreeMemory(uint32_t frameIndex,
6432  VmaAllocation allocation);
6433  void RecordFreeMemoryPages(uint32_t frameIndex,
6434  uint64_t allocationCount,
6435  const VmaAllocation* pAllocations);
6436  void RecordResizeAllocation(
6437  uint32_t frameIndex,
6438  VmaAllocation allocation,
6439  VkDeviceSize newSize);
6440  void RecordSetAllocationUserData(uint32_t frameIndex,
6441  VmaAllocation allocation,
6442  const void* pUserData);
6443  void RecordCreateLostAllocation(uint32_t frameIndex,
6444  VmaAllocation allocation);
6445  void RecordMapMemory(uint32_t frameIndex,
6446  VmaAllocation allocation);
6447  void RecordUnmapMemory(uint32_t frameIndex,
6448  VmaAllocation allocation);
6449  void RecordFlushAllocation(uint32_t frameIndex,
6450  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
6451  void RecordInvalidateAllocation(uint32_t frameIndex,
6452  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
6453  void RecordCreateBuffer(uint32_t frameIndex,
6454  const VkBufferCreateInfo& bufCreateInfo,
6455  const VmaAllocationCreateInfo& allocCreateInfo,
6456  VmaAllocation allocation);
6457  void RecordCreateImage(uint32_t frameIndex,
6458  const VkImageCreateInfo& imageCreateInfo,
6459  const VmaAllocationCreateInfo& allocCreateInfo,
6460  VmaAllocation allocation);
6461  void RecordDestroyBuffer(uint32_t frameIndex,
6462  VmaAllocation allocation);
6463  void RecordDestroyImage(uint32_t frameIndex,
6464  VmaAllocation allocation);
6465  void RecordTouchAllocation(uint32_t frameIndex,
6466  VmaAllocation allocation);
6467  void RecordGetAllocationInfo(uint32_t frameIndex,
6468  VmaAllocation allocation);
6469  void RecordMakePoolAllocationsLost(uint32_t frameIndex,
6470  VmaPool pool);
6471  void RecordDefragmentationBegin(uint32_t frameIndex,
6472  const VmaDefragmentationInfo2& info,
6473  VmaDefragmentationContext ctx);
6474  void RecordDefragmentationEnd(uint32_t frameIndex,
6475  VmaDefragmentationContext ctx);
6476 
6477 private:
6478  struct CallParams
6479  {
6480  uint32_t threadId;
6481  double time;
6482  };
6483 
6484  class UserDataString
6485  {
6486  public:
6487  UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
6488  const char* GetString() const { return m_Str; }
6489 
6490  private:
6491  char m_PtrStr[17];
6492  const char* m_Str;
6493  };
6494 
6495  bool m_UseMutex;
6496  VmaRecordFlags m_Flags;
6497  FILE* m_File;
6498  VMA_MUTEX m_FileMutex;
6499  int64_t m_Freq;
6500  int64_t m_StartCounter;
6501 
6502  void GetBasicParams(CallParams& outParams);
6503 
6504  // T must be a pointer type, e.g. VmaAllocation, VmaPool.
6505  template<typename T>
6506  void PrintPointerList(uint64_t count, const T* pItems)
6507  {
6508  if(count)
6509  {
6510  fprintf(m_File, "%p", pItems[0]);
6511  for(uint64_t i = 1; i < count; ++i)
6512  {
6513  fprintf(m_File, " %p", pItems[i]);
6514  }
6515  }
6516  }
6517 
6518  void PrintPointerList(uint64_t count, const VmaAllocation* pItems);
6519  void Flush();
6520 };
6521 
6522 #endif // #if VMA_RECORDING_ENABLED
6523 
6524 // Main allocator object.
6525 struct VmaAllocator_T
6526 {
6527  VMA_CLASS_NO_COPY(VmaAllocator_T)
6528 public:
6529  bool m_UseMutex;
6530  bool m_UseKhrDedicatedAllocation;
6531  VkDevice m_hDevice;
6532  bool m_AllocationCallbacksSpecified;
6533  VkAllocationCallbacks m_AllocationCallbacks;
6534  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
6535 
6536  // Number of bytes still available within the heap size limit, or VK_WHOLE_SIZE if that heap has no limit.
6537  VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
6538  VMA_MUTEX m_HeapSizeLimitMutex;
6539 
6540  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
6541  VkPhysicalDeviceMemoryProperties m_MemProps;
6542 
6543  // Default pools.
6544  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
6545 
6546  // Each vector is sorted by memory (handle value).
6547  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
6548  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
6549  VMA_RW_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
6550 
6551  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
6552  VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
6553  ~VmaAllocator_T();
6554 
6555  const VkAllocationCallbacks* GetAllocationCallbacks() const
6556  {
6557  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
6558  }
6559  const VmaVulkanFunctions& GetVulkanFunctions() const
6560  {
6561  return m_VulkanFunctions;
6562  }
6563 
6564  VkDeviceSize GetBufferImageGranularity() const
6565  {
6566  return VMA_MAX(
6567  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
6568  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
6569  }
6570 
6571  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
6572  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
6573 
6574  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
6575  {
6576  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
6577  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
6578  }
6579  // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
6580  bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
6581  {
6582  return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
6583  VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
6584  }
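    // Worked example (illustrative): a memory type with propertyFlags =
    // HOST_VISIBLE | HOST_CACHED, masked with (HOST_VISIBLE | HOST_COHERENT),
    // leaves only HOST_VISIBLE, so the comparison holds and the type is
    // treated as non-coherent; GetMemoryTypeMinAlignment() below then widens
    // the minimum alignment to nonCoherentAtomSize.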
6585  // Minimum alignment for all allocations in specific memory type.
6586  VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
6587  {
6588  return IsMemoryTypeNonCoherent(memTypeIndex) ?
6589  VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
6590  (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
6591  }
6592 
6593  bool IsIntegratedGpu() const
6594  {
6595  return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
6596  }
6597 
6598 #if VMA_RECORDING_ENABLED
6599  VmaRecorder* GetRecorder() const { return m_pRecorder; }
6600 #endif
6601 
6602  void GetBufferMemoryRequirements(
6603  VkBuffer hBuffer,
6604  VkMemoryRequirements& memReq,
6605  bool& requiresDedicatedAllocation,
6606  bool& prefersDedicatedAllocation) const;
6607  void GetImageMemoryRequirements(
6608  VkImage hImage,
6609  VkMemoryRequirements& memReq,
6610  bool& requiresDedicatedAllocation,
6611  bool& prefersDedicatedAllocation) const;
6612 
6613  // Main allocation function.
6614  VkResult AllocateMemory(
6615  const VkMemoryRequirements& vkMemReq,
6616  bool requiresDedicatedAllocation,
6617  bool prefersDedicatedAllocation,
6618  VkBuffer dedicatedBuffer,
6619  VkImage dedicatedImage,
6620  const VmaAllocationCreateInfo& createInfo,
6621  VmaSuballocationType suballocType,
6622  size_t allocationCount,
6623  VmaAllocation* pAllocations);
6624 
6625  // Main deallocation function.
6626  void FreeMemory(
6627  size_t allocationCount,
6628  const VmaAllocation* pAllocations);
6629 
6630  VkResult ResizeAllocation(
6631  const VmaAllocation alloc,
6632  VkDeviceSize newSize);
6633 
6634  void CalculateStats(VmaStats* pStats);
6635 
6636 #if VMA_STATS_STRING_ENABLED
6637  void PrintDetailedMap(class VmaJsonWriter& json);
6638 #endif
6639 
6640  VkResult DefragmentationBegin(
6641  const VmaDefragmentationInfo2& info,
6642  VmaDefragmentationStats* pStats,
6643  VmaDefragmentationContext* pContext);
6644  VkResult DefragmentationEnd(
6645  VmaDefragmentationContext context);
6646 
6647  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
6648  bool TouchAllocation(VmaAllocation hAllocation);
6649 
6650  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
6651  void DestroyPool(VmaPool pool);
6652  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
6653 
6654  void SetCurrentFrameIndex(uint32_t frameIndex);
6655  uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }
6656 
6657  void MakePoolAllocationsLost(
6658  VmaPool hPool,
6659  size_t* pLostAllocationCount);
6660  VkResult CheckPoolCorruption(VmaPool hPool);
6661  VkResult CheckCorruption(uint32_t memoryTypeBits);
6662 
6663  void CreateLostAllocation(VmaAllocation* pAllocation);
6664 
6665  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
6666  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
6667 
6668  VkResult Map(VmaAllocation hAllocation, void** ppData);
6669  void Unmap(VmaAllocation hAllocation);
6670 
6671  VkResult BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer);
6672  VkResult BindImageMemory(VmaAllocation hAllocation, VkImage hImage);
6673 
6674  void FlushOrInvalidateAllocation(
6675  VmaAllocation hAllocation,
6676  VkDeviceSize offset, VkDeviceSize size,
6677  VMA_CACHE_OPERATION op);
6678 
6679  void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);
6680 
6681 private:
6682  VkDeviceSize m_PreferredLargeHeapBlockSize;
6683 
6684  VkPhysicalDevice m_PhysicalDevice;
6685  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
6686 
6687  VMA_RW_MUTEX m_PoolsMutex;
6688  // Protected by m_PoolsMutex. Sorted by pointer value.
6689  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
6690  uint32_t m_NextPoolId;
6691 
6692  VmaVulkanFunctions m_VulkanFunctions;
6693 
6694 #if VMA_RECORDING_ENABLED
6695  VmaRecorder* m_pRecorder;
6696 #endif
6697 
6698  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
6699 
6700  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
6701 
6702  VkResult AllocateMemoryOfType(
6703  VkDeviceSize size,
6704  VkDeviceSize alignment,
6705  bool dedicatedAllocation,
6706  VkBuffer dedicatedBuffer,
6707  VkImage dedicatedImage,
6708  const VmaAllocationCreateInfo& createInfo,
6709  uint32_t memTypeIndex,
6710  VmaSuballocationType suballocType,
6711  size_t allocationCount,
6712  VmaAllocation* pAllocations);
6713 
6714  // Helper function only to be used inside AllocateDedicatedMemory.
6715  VkResult AllocateDedicatedMemoryPage(
6716  VkDeviceSize size,
6717  VmaSuballocationType suballocType,
6718  uint32_t memTypeIndex,
6719  const VkMemoryAllocateInfo& allocInfo,
6720  bool map,
6721  bool isUserDataString,
6722  void* pUserData,
6723  VmaAllocation* pAllocation);
6724 
6725  // Allocates and registers new VkDeviceMemory specifically for dedicated allocations.
6726  VkResult AllocateDedicatedMemory(
6727  VkDeviceSize size,
6728  VmaSuballocationType suballocType,
6729  uint32_t memTypeIndex,
6730  bool map,
6731  bool isUserDataString,
6732  void* pUserData,
6733  VkBuffer dedicatedBuffer,
6734  VkImage dedicatedImage,
6735  size_t allocationCount,
6736  VmaAllocation* pAllocations);
6737 
6738  // Frees dedicated allocation: unregisters it from m_pDedicatedAllocations and releases its VkDeviceMemory.
6739  void FreeDedicatedMemory(VmaAllocation allocation);
6740 };
6741 
6742 ////////////////////////////////////////////////////////////////////////////////
6743 // Memory allocation #2 after VmaAllocator_T definition
6744 
6745 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
6746 {
6747  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
6748 }
6749 
6750 static void VmaFree(VmaAllocator hAllocator, void* ptr)
6751 {
6752  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
6753 }
6754 
6755 template<typename T>
6756 static T* VmaAllocate(VmaAllocator hAllocator)
6757 {
6758  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
6759 }
6760 
6761 template<typename T>
6762 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
6763 {
6764  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
6765 }
6766 
6767 template<typename T>
6768 static void vma_delete(VmaAllocator hAllocator, T* ptr)
6769 {
6770  if(ptr != VMA_NULL)
6771  {
6772  ptr->~T();
6773  VmaFree(hAllocator, ptr);
6774  }
6775 }
6776 
6777 template<typename T>
6778 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
6779 {
6780  if(ptr != VMA_NULL)
6781  {
6782  for(size_t i = count; i--; )
6783  ptr[i].~T();
6784  VmaFree(hAllocator, ptr);
6785  }
6786 }
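// A minimal usage sketch of the helpers above (illustrative; hAllocator is
// assumed to be a valid VmaAllocator). Note that VmaAllocateArray() returns
// raw, unconstructed memory, while vma_delete_array() runs destructors, so
// this pairing is only safe as-is for trivially constructible element types:
//
//   int* arr = VmaAllocateArray<int>(hAllocator, 16);
//   // ... fill and use arr[0..15] ...
//   vma_delete_array(hAllocator, arr, 16);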
6787 
6788 ////////////////////////////////////////////////////////////////////////////////
6789 // VmaStringBuilder
6790 
6791 #if VMA_STATS_STRING_ENABLED
6792 
6793 class VmaStringBuilder
6794 {
6795 public:
6796  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
6797  size_t GetLength() const { return m_Data.size(); }
6798  const char* GetData() const { return m_Data.data(); }
6799 
6800  void Add(char ch) { m_Data.push_back(ch); }
6801  void Add(const char* pStr);
6802  void AddNewLine() { Add('\n'); }
6803  void AddNumber(uint32_t num);
6804  void AddNumber(uint64_t num);
6805  void AddPointer(const void* ptr);
6806 
6807 private:
6808  VmaVector< char, VmaStlAllocator<char> > m_Data;
6809 };
6810 
6811 void VmaStringBuilder::Add(const char* pStr)
6812 {
6813  const size_t strLen = strlen(pStr);
6814  if(strLen > 0)
6815  {
6816  const size_t oldCount = m_Data.size();
6817  m_Data.resize(oldCount + strLen);
6818  memcpy(m_Data.data() + oldCount, pStr, strLen);
6819  }
6820 }
6821 
6822 void VmaStringBuilder::AddNumber(uint32_t num)
6823 {
6824  char buf[11];
6825  VmaUint32ToStr(buf, sizeof(buf), num);
6826  Add(buf);
6827 }
6828 
6829 void VmaStringBuilder::AddNumber(uint64_t num)
6830 {
6831  char buf[21];
6832  VmaUint64ToStr(buf, sizeof(buf), num);
6833  Add(buf);
6834 }
6835 
6836 void VmaStringBuilder::AddPointer(const void* ptr)
6837 {
6838  char buf[21];
6839  VmaPtrToStr(buf, sizeof(buf), ptr);
6840  Add(buf);
6841 }
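// A minimal usage sketch of VmaStringBuilder (illustrative; `allocator` is an
// assumed valid VmaAllocator). GetData() is not null-terminated, hence the
// explicit length when printing:
//
//   VmaStringBuilder sb(allocator);
//   sb.Add("Allocations: ");
//   sb.AddNumber((uint32_t)42);
//   sb.AddNewLine();
//   printf("%.*s", (int)sb.GetLength(), sb.GetData());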
6842 
6843 #endif // #if VMA_STATS_STRING_ENABLED
6844 
6845 ////////////////////////////////////////////////////////////////////////////////
6846 // VmaJsonWriter
6847 
6848 #if VMA_STATS_STRING_ENABLED
6849 
6850 class VmaJsonWriter
6851 {
6852  VMA_CLASS_NO_COPY(VmaJsonWriter)
6853 public:
6854  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
6855  ~VmaJsonWriter();
6856 
6857  void BeginObject(bool singleLine = false);
6858  void EndObject();
6859 
6860  void BeginArray(bool singleLine = false);
6861  void EndArray();
6862 
6863  void WriteString(const char* pStr);
6864  void BeginString(const char* pStr = VMA_NULL);
6865  void ContinueString(const char* pStr);
6866  void ContinueString(uint32_t n);
6867  void ContinueString(uint64_t n);
6868  void ContinueString_Pointer(const void* ptr);
6869  void EndString(const char* pStr = VMA_NULL);
6870 
6871  void WriteNumber(uint32_t n);
6872  void WriteNumber(uint64_t n);
6873  void WriteBool(bool b);
6874  void WriteNull();
6875 
6876 private:
6877  static const char* const INDENT;
6878 
6879  enum COLLECTION_TYPE
6880  {
6881  COLLECTION_TYPE_OBJECT,
6882  COLLECTION_TYPE_ARRAY,
6883  };
6884  struct StackItem
6885  {
6886  COLLECTION_TYPE type;
6887  uint32_t valueCount;
6888  bool singleLineMode;
6889  };
6890 
6891  VmaStringBuilder& m_SB;
6892  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
6893  bool m_InsideString;
6894 
6895  void BeginValue(bool isString);
6896  void WriteIndent(bool oneLess = false);
6897 };
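// A minimal sketch of the writer's calling convention (illustrative). Inside
// an object, calls alternate key/value; BeginValue() below enforces that keys
// are strings:
//
//   VmaJsonWriter json(pAllocationCallbacks, sb); // sb is a VmaStringBuilder
//   json.BeginObject();
//   json.WriteString("TotalBytes");      // key
//   json.WriteNumber((uint64_t)1048576); // value
//   json.EndObject();
//   // Emits: { "TotalBytes": 1048576 } with newlines and INDENT applied.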
6898 
6899 const char* const VmaJsonWriter::INDENT = "  ";
6900 
6901 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
6902  m_SB(sb),
6903  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
6904  m_InsideString(false)
6905 {
6906 }
6907 
6908 VmaJsonWriter::~VmaJsonWriter()
6909 {
6910  VMA_ASSERT(!m_InsideString);
6911  VMA_ASSERT(m_Stack.empty());
6912 }
6913 
6914 void VmaJsonWriter::BeginObject(bool singleLine)
6915 {
6916  VMA_ASSERT(!m_InsideString);
6917 
6918  BeginValue(false);
6919  m_SB.Add('{');
6920 
6921  StackItem item;
6922  item.type = COLLECTION_TYPE_OBJECT;
6923  item.valueCount = 0;
6924  item.singleLineMode = singleLine;
6925  m_Stack.push_back(item);
6926 }
6927 
6928 void VmaJsonWriter::EndObject()
6929 {
6930  VMA_ASSERT(!m_InsideString);
6931 
6932  WriteIndent(true);
6933  m_SB.Add('}');
6934 
6935  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
6936  m_Stack.pop_back();
6937 }
6938 
6939 void VmaJsonWriter::BeginArray(bool singleLine)
6940 {
6941  VMA_ASSERT(!m_InsideString);
6942 
6943  BeginValue(false);
6944  m_SB.Add('[');
6945 
6946  StackItem item;
6947  item.type = COLLECTION_TYPE_ARRAY;
6948  item.valueCount = 0;
6949  item.singleLineMode = singleLine;
6950  m_Stack.push_back(item);
6951 }
6952 
6953 void VmaJsonWriter::EndArray()
6954 {
6955  VMA_ASSERT(!m_InsideString);
6956 
6957  WriteIndent(true);
6958  m_SB.Add(']');
6959 
6960  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
6961  m_Stack.pop_back();
6962 }
6963 
6964 void VmaJsonWriter::WriteString(const char* pStr)
6965 {
6966  BeginString(pStr);
6967  EndString();
6968 }
6969 
6970 void VmaJsonWriter::BeginString(const char* pStr)
6971 {
6972  VMA_ASSERT(!m_InsideString);
6973 
6974  BeginValue(true);
6975  m_SB.Add('"');
6976  m_InsideString = true;
6977  if(pStr != VMA_NULL && pStr[0] != '\0')
6978  {
6979  ContinueString(pStr);
6980  }
6981 }
6982 
6983 void VmaJsonWriter::ContinueString(const char* pStr)
6984 {
6985  VMA_ASSERT(m_InsideString);
6986 
6987  const size_t strLen = strlen(pStr);
6988  for(size_t i = 0; i < strLen; ++i)
6989  {
6990  char ch = pStr[i];
6991  if(ch == '\\')
6992  {
6993  m_SB.Add("\\\\");
6994  }
6995  else if(ch == '"')
6996  {
6997  m_SB.Add("\\\"");
6998  }
6999  else if(ch >= 32)
7000  {
7001  m_SB.Add(ch);
7002  }
7003  else switch(ch)
7004  {
7005  case '\b':
7006  m_SB.Add("\\b");
7007  break;
7008  case '\f':
7009  m_SB.Add("\\f");
7010  break;
7011  case '\n':
7012  m_SB.Add("\\n");
7013  break;
7014  case '\r':
7015  m_SB.Add("\\r");
7016  break;
7017  case '\t':
7018  m_SB.Add("\\t");
7019  break;
7020  default:
7021  VMA_ASSERT(0 && "Character not currently supported.");
7022  break;
7023  }
7024  }
7025 }
7026 
7027 void VmaJsonWriter::ContinueString(uint32_t n)
7028 {
7029  VMA_ASSERT(m_InsideString);
7030  m_SB.AddNumber(n);
7031 }
7032 
7033 void VmaJsonWriter::ContinueString(uint64_t n)
7034 {
7035  VMA_ASSERT(m_InsideString);
7036  m_SB.AddNumber(n);
7037 }
7038 
7039 void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
7040 {
7041  VMA_ASSERT(m_InsideString);
7042  m_SB.AddPointer(ptr);
7043 }
7044 
7045 void VmaJsonWriter::EndString(const char* pStr)
7046 {
7047  VMA_ASSERT(m_InsideString);
7048  if(pStr != VMA_NULL && pStr[0] != '\0')
7049  {
7050  ContinueString(pStr);
7051  }
7052  m_SB.Add('"');
7053  m_InsideString = false;
7054 }
7055 
7056 void VmaJsonWriter::WriteNumber(uint32_t n)
7057 {
7058  VMA_ASSERT(!m_InsideString);
7059  BeginValue(false);
7060  m_SB.AddNumber(n);
7061 }
7062 
7063 void VmaJsonWriter::WriteNumber(uint64_t n)
7064 {
7065  VMA_ASSERT(!m_InsideString);
7066  BeginValue(false);
7067  m_SB.AddNumber(n);
7068 }
7069 
7070 void VmaJsonWriter::WriteBool(bool b)
7071 {
7072  VMA_ASSERT(!m_InsideString);
7073  BeginValue(false);
7074  m_SB.Add(b ? "true" : "false");
7075 }
7076 
7077 void VmaJsonWriter::WriteNull()
7078 {
7079  VMA_ASSERT(!m_InsideString);
7080  BeginValue(false);
7081  m_SB.Add("null");
7082 }
7083 
7084 void VmaJsonWriter::BeginValue(bool isString)
7085 {
7086  if(!m_Stack.empty())
7087  {
7088  StackItem& currItem = m_Stack.back();
7089  if(currItem.type == COLLECTION_TYPE_OBJECT &&
7090  currItem.valueCount % 2 == 0)
7091  {
7092  VMA_ASSERT(isString);
7093  }
7094 
7095  if(currItem.type == COLLECTION_TYPE_OBJECT &&
7096  currItem.valueCount % 2 != 0)
7097  {
7098  m_SB.Add(": ");
7099  }
7100  else if(currItem.valueCount > 0)
7101  {
7102  m_SB.Add(", ");
7103  WriteIndent();
7104  }
7105  else
7106  {
7107  WriteIndent();
7108  }
7109  ++currItem.valueCount;
7110  }
7111 }
7112 
7113 void VmaJsonWriter::WriteIndent(bool oneLess)
7114 {
7115  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
7116  {
7117  m_SB.AddNewLine();
7118 
7119  size_t count = m_Stack.size();
7120  if(count > 0 && oneLess)
7121  {
7122  --count;
7123  }
7124  for(size_t i = 0; i < count; ++i)
7125  {
7126  m_SB.Add(INDENT);
7127  }
7128  }
7129 }
7130 
7131 #endif // #if VMA_STATS_STRING_ENABLED
7132 
7133 ////////////////////////////////////////////////////////////////////////////////
7134 
7135 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
7136 {
7137  if(IsUserDataString())
7138  {
7139  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
7140 
7141  FreeUserDataString(hAllocator);
7142 
7143  if(pUserData != VMA_NULL)
7144  {
7145  const char* const newStrSrc = (char*)pUserData;
7146  const size_t newStrLen = strlen(newStrSrc);
7147  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
7148  memcpy(newStrDst, newStrSrc, newStrLen + 1);
7149  m_pUserData = newStrDst;
7150  }
7151  }
7152  else
7153  {
7154  m_pUserData = pUserData;
7155  }
7156 }
7157 
7158 void VmaAllocation_T::ChangeBlockAllocation(
7159  VmaAllocator hAllocator,
7160  VmaDeviceMemoryBlock* block,
7161  VkDeviceSize offset)
7162 {
7163  VMA_ASSERT(block != VMA_NULL);
7164  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
7165 
7166  // Move mapping reference counter from old block to new block.
7167  if(block != m_BlockAllocation.m_Block)
7168  {
7169  uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
7170  if(IsPersistentMap())
7171  ++mapRefCount;
7172  m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
7173  block->Map(hAllocator, mapRefCount, VMA_NULL);
7174  }
7175 
7176  m_BlockAllocation.m_Block = block;
7177  m_BlockAllocation.m_Offset = offset;
7178 }
7179 
7180 void VmaAllocation_T::ChangeSize(VkDeviceSize newSize)
7181 {
7182  VMA_ASSERT(newSize > 0);
7183  m_Size = newSize;
7184 }
7185 
7186 void VmaAllocation_T::ChangeOffset(VkDeviceSize newOffset)
7187 {
7188  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
7189  m_BlockAllocation.m_Offset = newOffset;
7190 }
7191 
7192 VkDeviceSize VmaAllocation_T::GetOffset() const
7193 {
7194  switch(m_Type)
7195  {
7196  case ALLOCATION_TYPE_BLOCK:
7197  return m_BlockAllocation.m_Offset;
7198  case ALLOCATION_TYPE_DEDICATED:
7199  return 0;
7200  default:
7201  VMA_ASSERT(0);
7202  return 0;
7203  }
7204 }
7205 
7206 VkDeviceMemory VmaAllocation_T::GetMemory() const
7207 {
7208  switch(m_Type)
7209  {
7210  case ALLOCATION_TYPE_BLOCK:
7211  return m_BlockAllocation.m_Block->GetDeviceMemory();
7212  case ALLOCATION_TYPE_DEDICATED:
7213  return m_DedicatedAllocation.m_hMemory;
7214  default:
7215  VMA_ASSERT(0);
7216  return VK_NULL_HANDLE;
7217  }
7218 }
7219 
7220 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
7221 {
7222  switch(m_Type)
7223  {
7224  case ALLOCATION_TYPE_BLOCK:
7225  return m_BlockAllocation.m_Block->GetMemoryTypeIndex();
7226  case ALLOCATION_TYPE_DEDICATED:
7227  return m_DedicatedAllocation.m_MemoryTypeIndex;
7228  default:
7229  VMA_ASSERT(0);
7230  return UINT32_MAX;
7231  }
7232 }
7233 
7234 void* VmaAllocation_T::GetMappedData() const
7235 {
7236  switch(m_Type)
7237  {
7238  case ALLOCATION_TYPE_BLOCK:
7239  if(m_MapCount != 0)
7240  {
7241  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
7242  VMA_ASSERT(pBlockData != VMA_NULL);
7243  return (char*)pBlockData + m_BlockAllocation.m_Offset;
7244  }
7245  else
7246  {
7247  return VMA_NULL;
7248  }
7249  break;
7250  case ALLOCATION_TYPE_DEDICATED:
7251  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
7252  return m_DedicatedAllocation.m_pMappedData;
7253  default:
7254  VMA_ASSERT(0);
7255  return VMA_NULL;
7256  }
7257 }
7258 
7259 bool VmaAllocation_T::CanBecomeLost() const
7260 {
7261  switch(m_Type)
7262  {
7263  case ALLOCATION_TYPE_BLOCK:
7264  return m_BlockAllocation.m_CanBecomeLost;
7265  case ALLOCATION_TYPE_DEDICATED:
7266  return false;
7267  default:
7268  VMA_ASSERT(0);
7269  return false;
7270  }
7271 }
7272 
7273 VmaPool VmaAllocation_T::GetPool() const
7274 {
7275  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
7276  return m_BlockAllocation.m_hPool;
7277 }
7278 
7279 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
7280 {
7281  VMA_ASSERT(CanBecomeLost());
7282 
7283  /*
7284  Warning: This is a carefully designed algorithm.
7285  Do not modify unless you really know what you're doing :)
7286  */
7287  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
7288  for(;;)
7289  {
7290  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
7291  {
7292  VMA_ASSERT(0);
7293  return false;
7294  }
7295  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
7296  {
7297  return false;
7298  }
7299  else // Last use time earlier than current time.
7300  {
7301  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
7302  {
7303  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
7304  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
7305  return true;
7306  }
7307  }
7308  }
7309 }
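// The loop above is a compare-and-swap retry: CompareExchangeLastUseFrameIndex()
// succeeds only if the atomic still equals localLastUseFrameIndex. If another
// thread touched the allocation in the meantime, the exchange fails, the local
// copy is refreshed with the new value, and the age check runs again.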
7310 
7311 #if VMA_STATS_STRING_ENABLED
7312 
7313 // Correspond to values of enum VmaSuballocationType.
7314 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
7315  "FREE",
7316  "UNKNOWN",
7317  "BUFFER",
7318  "IMAGE_UNKNOWN",
7319  "IMAGE_LINEAR",
7320  "IMAGE_OPTIMAL",
7321 };
7322 
7323 void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
7324 {
7325  json.WriteString("Type");
7326  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);
7327 
7328  json.WriteString("Size");
7329  json.WriteNumber(m_Size);
7330 
7331  if(m_pUserData != VMA_NULL)
7332  {
7333  json.WriteString("UserData");
7334  if(IsUserDataString())
7335  {
7336  json.WriteString((const char*)m_pUserData);
7337  }
7338  else
7339  {
7340  json.BeginString();
7341  json.ContinueString_Pointer(m_pUserData);
7342  json.EndString();
7343  }
7344  }
7345 
7346  json.WriteString("CreationFrameIndex");
7347  json.WriteNumber(m_CreationFrameIndex);
7348 
7349  json.WriteString("LastUseFrameIndex");
7350  json.WriteNumber(GetLastUseFrameIndex());
7351 
7352  if(m_BufferImageUsage != 0)
7353  {
7354  json.WriteString("Usage");
7355  json.WriteNumber(m_BufferImageUsage);
7356  }
7357 }
7358 
7359 #endif
7360 
7361 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
7362 {
7363  VMA_ASSERT(IsUserDataString());
7364  if(m_pUserData != VMA_NULL)
7365  {
7366  char* const oldStr = (char*)m_pUserData;
7367  const size_t oldStrLen = strlen(oldStr);
7368  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
7369  m_pUserData = VMA_NULL;
7370  }
7371 }
7372 
7373 void VmaAllocation_T::BlockAllocMap()
7374 {
7375  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
7376 
7377  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
7378  {
7379  ++m_MapCount;
7380  }
7381  else
7382  {
7383  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
7384  }
7385 }
7386 
7387 void VmaAllocation_T::BlockAllocUnmap()
7388 {
7389  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
7390 
7391  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
7392  {
7393  --m_MapCount;
7394  }
7395  else
7396  {
7397  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
7398  }
7399 }
7400 
7401 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
7402 {
7403  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
7404 
7405  if(m_MapCount != 0)
7406  {
7407  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
7408  {
7409  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
7410  *ppData = m_DedicatedAllocation.m_pMappedData;
7411  ++m_MapCount;
7412  return VK_SUCCESS;
7413  }
7414  else
7415  {
7416  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
7417  return VK_ERROR_MEMORY_MAP_FAILED;
7418  }
7419  }
7420  else
7421  {
7422  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
7423  hAllocator->m_hDevice,
7424  m_DedicatedAllocation.m_hMemory,
7425  0, // offset
7426  VK_WHOLE_SIZE,
7427  0, // flags
7428  ppData);
7429  if(result == VK_SUCCESS)
7430  {
7431  m_DedicatedAllocation.m_pMappedData = *ppData;
7432  m_MapCount = 1;
7433  }
7434  return result;
7435  }
7436 }
7437 
7438 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
7439 {
7440  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
7441 
7442  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
7443  {
7444  --m_MapCount;
7445  if(m_MapCount == 0)
7446  {
7447  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
7448  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
7449  hAllocator->m_hDevice,
7450  m_DedicatedAllocation.m_hMemory);
7451  }
7452  }
7453  else
7454  {
7455  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
7456  }
7457 }
7458 
7459 #if VMA_STATS_STRING_ENABLED
7460 
7461 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
7462 {
7463  json.BeginObject();
7464 
7465  json.WriteString("Blocks");
7466  json.WriteNumber(stat.blockCount);
7467 
7468  json.WriteString("Allocations");
7469  json.WriteNumber(stat.allocationCount);
7470 
7471  json.WriteString("UnusedRanges");
7472  json.WriteNumber(stat.unusedRangeCount);
7473 
7474  json.WriteString("UsedBytes");
7475  json.WriteNumber(stat.usedBytes);
7476 
7477  json.WriteString("UnusedBytes");
7478  json.WriteNumber(stat.unusedBytes);
7479 
7480  if(stat.allocationCount > 1)
7481  {
7482  json.WriteString("AllocationSize");
7483  json.BeginObject(true);
7484  json.WriteString("Min");
7485  json.WriteNumber(stat.allocationSizeMin);
7486  json.WriteString("Avg");
7487  json.WriteNumber(stat.allocationSizeAvg);
7488  json.WriteString("Max");
7489  json.WriteNumber(stat.allocationSizeMax);
7490  json.EndObject();
7491  }
7492 
7493  if(stat.unusedRangeCount > 1)
7494  {
7495  json.WriteString("UnusedRangeSize");
7496  json.BeginObject(true);
7497  json.WriteString("Min");
7498  json.WriteNumber(stat.unusedRangeSizeMin);
7499  json.WriteString("Avg");
7500  json.WriteNumber(stat.unusedRangeSizeAvg);
7501  json.WriteString("Max");
7502  json.WriteNumber(stat.unusedRangeSizeMax);
7503  json.EndObject();
7504  }
7505 
7506  json.EndObject();
7507 }
7508 
7509 #endif // #if VMA_STATS_STRING_ENABLED
7510 
7511 struct VmaSuballocationItemSizeLess
7512 {
7513  bool operator()(
7514  const VmaSuballocationList::iterator lhs,
7515  const VmaSuballocationList::iterator rhs) const
7516  {
7517  return lhs->size < rhs->size;
7518  }
7519  bool operator()(
7520  const VmaSuballocationList::iterator lhs,
7521  VkDeviceSize rhsSize) const
7522  {
7523  return lhs->size < rhsSize;
7524  }
7525 };
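// The second operator() overload makes this comparator heterogeneous: it lets
// VmaBinaryFindFirstNotLess() compare stored list iterators directly against a
// plain VkDeviceSize key, so the by-size binary search in
// CreateAllocationRequest() needs no temporary suballocation object.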
7526 
7527 
7528 ////////////////////////////////////////////////////////////////////////////////
7529 // class VmaBlockMetadata
7530 
7531 VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
7532  m_Size(0),
7533  m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
7534 {
7535 }
7536 
7537 #if VMA_STATS_STRING_ENABLED
7538 
7539 void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
7540  VkDeviceSize unusedBytes,
7541  size_t allocationCount,
7542  size_t unusedRangeCount) const
7543 {
7544  json.BeginObject();
7545 
7546  json.WriteString("TotalBytes");
7547  json.WriteNumber(GetSize());
7548 
7549  json.WriteString("UnusedBytes");
7550  json.WriteNumber(unusedBytes);
7551 
7552  json.WriteString("Allocations");
7553  json.WriteNumber((uint64_t)allocationCount);
7554 
7555  json.WriteString("UnusedRanges");
7556  json.WriteNumber((uint64_t)unusedRangeCount);
7557 
7558  json.WriteString("Suballocations");
7559  json.BeginArray();
7560 }
7561 
7562 void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
7563  VkDeviceSize offset,
7564  VmaAllocation hAllocation) const
7565 {
7566  json.BeginObject(true);
7567 
7568  json.WriteString("Offset");
7569  json.WriteNumber(offset);
7570 
7571  hAllocation->PrintParameters(json);
7572 
7573  json.EndObject();
7574 }
7575 
7576 void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
7577  VkDeviceSize offset,
7578  VkDeviceSize size) const
7579 {
7580  json.BeginObject(true);
7581 
7582  json.WriteString("Offset");
7583  json.WriteNumber(offset);
7584 
7585  json.WriteString("Type");
7586  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);
7587 
7588  json.WriteString("Size");
7589  json.WriteNumber(size);
7590 
7591  json.EndObject();
7592 }
7593 
7594 void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
7595 {
7596  json.EndArray();
7597  json.EndObject();
7598 }
7599 
7600 #endif // #if VMA_STATS_STRING_ENABLED
7601 
7602 ////////////////////////////////////////////////////////////////////////////////
7603 // class VmaBlockMetadata_Generic
7604 
7605 VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
7606  VmaBlockMetadata(hAllocator),
7607  m_FreeCount(0),
7608  m_SumFreeSize(0),
7609  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
7610  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
7611 {
7612 }
7613 
7614 VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
7615 {
7616 }
7617 
7618 void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
7619 {
7620  VmaBlockMetadata::Init(size);
7621 
7622  m_FreeCount = 1;
7623  m_SumFreeSize = size;
7624 
7625  VmaSuballocation suballoc = {};
7626  suballoc.offset = 0;
7627  suballoc.size = size;
7628  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
7629  suballoc.hAllocation = VK_NULL_HANDLE;
7630 
7631  VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
7632  m_Suballocations.push_back(suballoc);
7633  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
7634  --suballocItem;
7635  m_FreeSuballocationsBySize.push_back(suballocItem);
7636 }
7637 
7638 bool VmaBlockMetadata_Generic::Validate() const
7639 {
7640  VMA_VALIDATE(!m_Suballocations.empty());
7641 
7642  // Expected offset of new suballocation as calculated from previous ones.
7643  VkDeviceSize calculatedOffset = 0;
7644  // Expected number of free suballocations as calculated from traversing their list.
7645  uint32_t calculatedFreeCount = 0;
7646  // Expected sum size of free suballocations as calculated from traversing their list.
7647  VkDeviceSize calculatedSumFreeSize = 0;
7648  // Expected number of free suballocations that should be registered in
7649  // m_FreeSuballocationsBySize calculated from traversing their list.
7650  size_t freeSuballocationsToRegister = 0;
7651  // True if previous visited suballocation was free.
7652  bool prevFree = false;
7653 
7654  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
7655  suballocItem != m_Suballocations.cend();
7656  ++suballocItem)
7657  {
7658  const VmaSuballocation& subAlloc = *suballocItem;
7659 
7660  // Actual offset of this suballocation doesn't match expected one.
7661  VMA_VALIDATE(subAlloc.offset == calculatedOffset);
7662 
7663  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
7664  // Two adjacent free suballocations are invalid. They should be merged.
7665  VMA_VALIDATE(!prevFree || !currFree);
7666 
7667  VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));
7668 
7669  if(currFree)
7670  {
7671  calculatedSumFreeSize += subAlloc.size;
7672  ++calculatedFreeCount;
7673  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
7674  {
7675  ++freeSuballocationsToRegister;
7676  }
7677 
7678  // Margin required between allocations - every free space must be at least that large.
7679  VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
7680  }
7681  else
7682  {
7683  VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
7684  VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);
7685 
7686  // Margin required between allocations - previous allocation must be free.
7687  VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
7688  }
7689 
7690  calculatedOffset += subAlloc.size;
7691  prevFree = currFree;
7692  }
7693 
7694  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
7695  // match expected one.
7696  VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);
7697 
7698  VkDeviceSize lastSize = 0;
7699  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
7700  {
7701  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
7702 
7703  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
7704  VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
7705  // They must be sorted by size ascending.
7706  VMA_VALIDATE(suballocItem->size >= lastSize);
7707 
7708  lastSize = suballocItem->size;
7709  }
7710 
7711  // Check if totals match calculated values.
7712  VMA_VALIDATE(ValidateFreeSuballocationList());
7713  VMA_VALIDATE(calculatedOffset == GetSize());
7714  VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
7715  VMA_VALIDATE(calculatedFreeCount == m_FreeCount);
7716 
7717  return true;
7718 }
7719 
7720 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
7721 {
7722  if(!m_FreeSuballocationsBySize.empty())
7723  {
7724  return m_FreeSuballocationsBySize.back()->size;
7725  }
7726  else
7727  {
7728  return 0;
7729  }
7730 }
7731 
7732 bool VmaBlockMetadata_Generic::IsEmpty() const
7733 {
7734  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
7735 }
7736 
7737 void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
7738 {
7739  outInfo.blockCount = 1;
7740 
7741  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
7742  outInfo.allocationCount = rangeCount - m_FreeCount;
7743  outInfo.unusedRangeCount = m_FreeCount;
7744 
7745  outInfo.unusedBytes = m_SumFreeSize;
7746  outInfo.usedBytes = GetSize() - outInfo.unusedBytes;
7747 
7748  outInfo.allocationSizeMin = UINT64_MAX;
7749  outInfo.allocationSizeMax = 0;
7750  outInfo.unusedRangeSizeMin = UINT64_MAX;
7751  outInfo.unusedRangeSizeMax = 0;
7752 
7753  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
7754  suballocItem != m_Suballocations.cend();
7755  ++suballocItem)
7756  {
7757  const VmaSuballocation& suballoc = *suballocItem;
7758  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
7759  {
7760  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
7761  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
7762  }
7763  else
7764  {
7765  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
7766  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
7767  }
7768  }
7769 }
7770 
7771 void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
7772 {
7773  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
7774 
7775  inoutStats.size += GetSize();
7776  inoutStats.unusedSize += m_SumFreeSize;
7777  inoutStats.allocationCount += rangeCount - m_FreeCount;
7778  inoutStats.unusedRangeCount += m_FreeCount;
7779  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
7780 }
7781 
7782 #if VMA_STATS_STRING_ENABLED
7783 
7784 void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
7785 {
7786  PrintDetailedMap_Begin(json,
7787  m_SumFreeSize, // unusedBytes
7788  m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
7789  m_FreeCount); // unusedRangeCount
7790 
7791  size_t i = 0;
7792  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
7793  suballocItem != m_Suballocations.cend();
7794  ++suballocItem, ++i)
7795  {
7796  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
7797  {
7798  PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
7799  }
7800  else
7801  {
7802  PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
7803  }
7804  }
7805 
7806  PrintDetailedMap_End(json);
7807 }
7808 
7809 #endif // #if VMA_STATS_STRING_ENABLED
7810 
7811 bool VmaBlockMetadata_Generic::CreateAllocationRequest(
7812  uint32_t currentFrameIndex,
7813  uint32_t frameInUseCount,
7814  VkDeviceSize bufferImageGranularity,
7815  VkDeviceSize allocSize,
7816  VkDeviceSize allocAlignment,
7817  bool upperAddress,
7818  VmaSuballocationType allocType,
7819  bool canMakeOtherLost,
7820  uint32_t strategy,
7821  VmaAllocationRequest* pAllocationRequest)
7822 {
7823  VMA_ASSERT(allocSize > 0);
7824  VMA_ASSERT(!upperAddress);
7825  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
7826  VMA_ASSERT(pAllocationRequest != VMA_NULL);
7827  VMA_HEAVY_ASSERT(Validate());
7828 
7829  // There is not enough total free space in this block to fulfill the request: Early return.
7830  if(canMakeOtherLost == false &&
7831  m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
7832  {
7833  return false;
7834  }
7835 
7836  // New algorithm, efficiently searching freeSuballocationsBySize.
7837  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
7838  if(freeSuballocCount > 0)
7839  {
7840  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
7841  {
7842  // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
7843  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
7844  m_FreeSuballocationsBySize.data(),
7845  m_FreeSuballocationsBySize.data() + freeSuballocCount,
7846  allocSize + 2 * VMA_DEBUG_MARGIN,
7847  VmaSuballocationItemSizeLess());
7848  size_t index = it - m_FreeSuballocationsBySize.data();
7849  for(; index < freeSuballocCount; ++index)
7850  {
7851  if(CheckAllocation(
7852  currentFrameIndex,
7853  frameInUseCount,
7854  bufferImageGranularity,
7855  allocSize,
7856  allocAlignment,
7857  allocType,
7858  m_FreeSuballocationsBySize[index],
7859  false, // canMakeOtherLost
7860  &pAllocationRequest->offset,
7861  &pAllocationRequest->itemsToMakeLostCount,
7862  &pAllocationRequest->sumFreeSize,
7863  &pAllocationRequest->sumItemSize))
7864  {
7865  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
7866  return true;
7867  }
7868  }
7869  }
7870  else if(strategy == VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET)
7871  {
7872  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
7873  it != m_Suballocations.end();
7874  ++it)
7875  {
7876  if(it->type == VMA_SUBALLOCATION_TYPE_FREE && CheckAllocation(
7877  currentFrameIndex,
7878  frameInUseCount,
7879  bufferImageGranularity,
7880  allocSize,
7881  allocAlignment,
7882  allocType,
7883  it,
7884  false, // canMakeOtherLost
7885  &pAllocationRequest->offset,
7886  &pAllocationRequest->itemsToMakeLostCount,
7887  &pAllocationRequest->sumFreeSize,
7888  &pAllocationRequest->sumItemSize))
7889  {
7890  pAllocationRequest->item = it;
7891  return true;
7892  }
7893  }
7894  }
7895  else // WORST_FIT, FIRST_FIT
7896  {
7897  // Search starting from biggest suballocations.
7898  for(size_t index = freeSuballocCount; index--; )
7899  {
7900  if(CheckAllocation(
7901  currentFrameIndex,
7902  frameInUseCount,
7903  bufferImageGranularity,
7904  allocSize,
7905  allocAlignment,
7906  allocType,
7907  m_FreeSuballocationsBySize[index],
7908  false, // canMakeOtherLost
7909  &pAllocationRequest->offset,
7910  &pAllocationRequest->itemsToMakeLostCount,
7911  &pAllocationRequest->sumFreeSize,
7912  &pAllocationRequest->sumItemSize))
7913  {
7914  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
7915  return true;
7916  }
7917  }
7918  }
7919  }
7920 
7921  if(canMakeOtherLost)
7922  {
7923  // Brute-force algorithm. TODO: Come up with something better.
7924 
7925  pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
7926  pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;
7927 
7928  VmaAllocationRequest tmpAllocRequest = {};
7929  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
7930  suballocIt != m_Suballocations.end();
7931  ++suballocIt)
7932  {
7933  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
7934  suballocIt->hAllocation->CanBecomeLost())
7935  {
7936  if(CheckAllocation(
7937  currentFrameIndex,
7938  frameInUseCount,
7939  bufferImageGranularity,
7940  allocSize,
7941  allocAlignment,
7942  allocType,
7943  suballocIt,
7944  canMakeOtherLost,
7945  &tmpAllocRequest.offset,
7946  &tmpAllocRequest.itemsToMakeLostCount,
7947  &tmpAllocRequest.sumFreeSize,
7948  &tmpAllocRequest.sumItemSize))
7949  {
7950  tmpAllocRequest.item = suballocIt;
7951 
7952  if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost() ||
7953  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
7954  {
7955  *pAllocationRequest = tmpAllocRequest;
7956  }
7957  }
7958  }
7959  }
7960 
7961  if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
7962  {
7963  return true;
7964  }
7965  }
7966 
7967  return false;
7968 }
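// Recap of the three search branches above: BEST_FIT binary-searches
// m_FreeSuballocationsBySize for the first range of sufficient size and scans
// upward from there; MIN_OFFSET (an internal strategy) walks the suballocation
// list in address order; WORST_FIT/FIRST_FIT iterate the by-size array from
// the largest cached range downward.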
7969 
7970 bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
7971  uint32_t currentFrameIndex,
7972  uint32_t frameInUseCount,
7973  VmaAllocationRequest* pAllocationRequest)
7974 {
7975  while(pAllocationRequest->itemsToMakeLostCount > 0)
7976  {
7977  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
7978  {
7979  ++pAllocationRequest->item;
7980  }
7981  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
7982  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
7983  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
7984  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
7985  {
7986  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
7987  --pAllocationRequest->itemsToMakeLostCount;
7988  }
7989  else
7990  {
7991  return false;
7992  }
7993  }
7994 
7995  VMA_HEAVY_ASSERT(Validate());
7996  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
7997  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
7998 
7999  return true;
8000 }
8001 
8002 uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
8003 {
8004  uint32_t lostAllocationCount = 0;
8005  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
8006  it != m_Suballocations.end();
8007  ++it)
8008  {
8009  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
8010  it->hAllocation->CanBecomeLost() &&
8011  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
8012  {
8013  it = FreeSuballocation(it);
8014  ++lostAllocationCount;
8015  }
8016  }
8017  return lostAllocationCount;
8018 }
8019 
8020 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
8021 {
8022  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
8023  it != m_Suballocations.end();
8024  ++it)
8025  {
8026  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
8027  {
8028  if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
8029  {
8030  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
8031  return VK_ERROR_VALIDATION_FAILED_EXT;
8032  }
8033  if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
8034  {
8035  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
8036  return VK_ERROR_VALIDATION_FAILED_EXT;
8037  }
8038  }
8039  }
8040 
8041  return VK_SUCCESS;
8042 }
8043 
8044 void VmaBlockMetadata_Generic::Alloc(
8045  const VmaAllocationRequest& request,
8046  VmaSuballocationType type,
8047  VkDeviceSize allocSize,
8048  bool upperAddress,
8049  VmaAllocation hAllocation)
8050 {
8051  VMA_ASSERT(!upperAddress);
8052  VMA_ASSERT(request.item != m_Suballocations.end());
8053  VmaSuballocation& suballoc = *request.item;
8054  // Given suballocation is a free block.
8055  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8056  // Given offset is inside this suballocation.
8057  VMA_ASSERT(request.offset >= suballoc.offset);
8058  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
8059  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
8060  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
8061 
8062  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
8063  // it to become used.
8064  UnregisterFreeSuballocation(request.item);
8065 
8066  suballoc.offset = request.offset;
8067  suballoc.size = allocSize;
8068  suballoc.type = type;
8069  suballoc.hAllocation = hAllocation;
8070 
8071  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
8072  if(paddingEnd)
8073  {
8074  VmaSuballocation paddingSuballoc = {};
8075  paddingSuballoc.offset = request.offset + allocSize;
8076  paddingSuballoc.size = paddingEnd;
8077  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8078  VmaSuballocationList::iterator next = request.item;
8079  ++next;
8080  const VmaSuballocationList::iterator paddingEndItem =
8081  m_Suballocations.insert(next, paddingSuballoc);
8082  RegisterFreeSuballocation(paddingEndItem);
8083  }
8084 
8085  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
8086  if(paddingBegin)
8087  {
8088  VmaSuballocation paddingSuballoc = {};
8089  paddingSuballoc.offset = request.offset - paddingBegin;
8090  paddingSuballoc.size = paddingBegin;
8091  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8092  const VmaSuballocationList::iterator paddingBeginItem =
8093  m_Suballocations.insert(request.item, paddingSuballoc);
8094  RegisterFreeSuballocation(paddingBeginItem);
8095  }
8096 
8097  // Update totals.
8098  m_FreeCount = m_FreeCount - 1;
8099  if(paddingBegin > 0)
8100  {
8101  ++m_FreeCount;
8102  }
8103  if(paddingEnd > 0)
8104  {
8105  ++m_FreeCount;
8106  }
8107  m_SumFreeSize -= allocSize;
8108 }
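// Worked example (illustrative numbers): suppose request.item is a free range
// at offset 0 with size 1024, and the request resolved to offset 256 with
// allocSize 512. Then paddingBegin = 256 and paddingEnd = 256, so Alloc()
// leaves the list as [free 0..256) [used 256..768) [free 768..1024),
// m_FreeCount grows from 1 to 2, and m_SumFreeSize drops by exactly 512.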
8109 
8110 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
8111 {
8112  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
8113  suballocItem != m_Suballocations.end();
8114  ++suballocItem)
8115  {
8116  VmaSuballocation& suballoc = *suballocItem;
8117  if(suballoc.hAllocation == allocation)
8118  {
8119  FreeSuballocation(suballocItem);
8120  VMA_HEAVY_ASSERT(Validate());
8121  return;
8122  }
8123  }
8124  VMA_ASSERT(0 && "Not found!");
8125 }
8126 
8127 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
8128 {
8129  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
8130  suballocItem != m_Suballocations.end();
8131  ++suballocItem)
8132  {
8133  VmaSuballocation& suballoc = *suballocItem;
8134  if(suballoc.offset == offset)
8135  {
8136  FreeSuballocation(suballocItem);
8137  return;
8138  }
8139  }
8140  VMA_ASSERT(0 && "Not found!");
8141 }
8142 
8143 bool VmaBlockMetadata_Generic::ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize)
8144 {
8145  typedef VmaSuballocationList::iterator iter_type;
8146  for(iter_type suballocItem = m_Suballocations.begin();
8147  suballocItem != m_Suballocations.end();
8148  ++suballocItem)
8149  {
8150  VmaSuballocation& suballoc = *suballocItem;
8151  if(suballoc.hAllocation == alloc)
8152  {
8153  iter_type nextItem = suballocItem;
8154  ++nextItem;
8155 
8156  // Should have been ensured at a higher level.
8157  VMA_ASSERT(newSize != alloc->GetSize() && newSize > 0);
8158 
8159  // Shrinking.
8160  if(newSize < alloc->GetSize())
8161  {
8162  const VkDeviceSize sizeDiff = suballoc.size - newSize;
8163 
8164  // There is next item.
8165  if(nextItem != m_Suballocations.end())
8166  {
8167  // Next item is free.
8168  if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8169  {
8170  // Grow this next item backward.
8171  UnregisterFreeSuballocation(nextItem);
8172  nextItem->offset -= sizeDiff;
8173  nextItem->size += sizeDiff;
8174  RegisterFreeSuballocation(nextItem);
8175  }
8176  // Next item is not free.
8177  else
8178  {
8179  // Create free item after current one.
8180  VmaSuballocation newFreeSuballoc;
8181  newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
8182  newFreeSuballoc.offset = suballoc.offset + newSize;
8183  newFreeSuballoc.size = sizeDiff;
8184  newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8185  iter_type newFreeSuballocIt = m_Suballocations.insert(nextItem, newFreeSuballoc);
8186  RegisterFreeSuballocation(newFreeSuballocIt);
8187 
8188  ++m_FreeCount;
8189  }
8190  }
8191  // This is the last item.
8192  else
8193  {
8194  // Create free item at the end.
8195  VmaSuballocation newFreeSuballoc;
8196  newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
8197  newFreeSuballoc.offset = suballoc.offset + newSize;
8198  newFreeSuballoc.size = sizeDiff;
8199  newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8200  m_Suballocations.push_back(newFreeSuballoc);
8201 
8202  iter_type newFreeSuballocIt = m_Suballocations.end();
8203  RegisterFreeSuballocation(--newFreeSuballocIt);
8204 
8205  ++m_FreeCount;
8206  }
8207 
8208  suballoc.size = newSize;
8209  m_SumFreeSize += sizeDiff;
8210  }
8211  // Growing.
8212  else
8213  {
8214  const VkDeviceSize sizeDiff = newSize - suballoc.size;
8215 
8216  // There is next item.
8217  if(nextItem != m_Suballocations.end())
8218  {
8219  // Next item is free.
8220  if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8221  {
8222  // There is not enough free space, including margin.
8223  if(nextItem->size < sizeDiff + VMA_DEBUG_MARGIN)
8224  {
8225  return false;
8226  }
8227 
8228  // There is more free space than required.
8229  if(nextItem->size > sizeDiff)
8230  {
8231  // Move and shrink this next item.
8232  UnregisterFreeSuballocation(nextItem);
8233  nextItem->offset += sizeDiff;
8234  nextItem->size -= sizeDiff;
8235  RegisterFreeSuballocation(nextItem);
8236  }
8237  // There is exactly the amount of free space required.
8238  else
8239  {
8240  // Remove this next free item.
8241  UnregisterFreeSuballocation(nextItem);
8242  m_Suballocations.erase(nextItem);
8243  --m_FreeCount;
8244  }
8245  }
8246  // Next item is not free - there is no space to grow.
8247  else
8248  {
8249  return false;
8250  }
8251  }
8252  // This is the last item - there is no space to grow.
8253  else
8254  {
8255  return false;
8256  }
8257 
8258  suballoc.size = newSize;
8259  m_SumFreeSize -= sizeDiff;
8260  }
8261 
8262  // We cannot call Validate() here because alloc object is updated to new size outside of this call.
8263  return true;
8264  }
8265  }
8266  VMA_ASSERT(0 && "Not found!");
8267  return false;
8268 }
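
// Worked example (illustrative): for a suballocation {offset = 0, size = 256}
// and newSize = 192, shrinking either grows an adjacent free item backward by
// 64 bytes or inserts a new free item {offset = 192, size = 64}. Growing to
// newSize = 320 succeeds only if the item immediately after is free and at
// least 64 + VMA_DEBUG_MARGIN bytes large; in-place resize never moves the
// allocation.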

bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
{
    VkDeviceSize lastSize = 0;
    for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
    {
        const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];

        VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
        VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
        VMA_VALIDATE(it->size >= lastSize);
        lastSize = it->size;
    }
    return true;
}
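
// Invariant being checked: m_FreeSuballocationsBySize holds iterators to all
// registered free suballocations sorted by ascending size, e.g. sizes
// [64, 128, 128, 4096], so lookups can use binary search (see
// UnregisterFreeSuballocation() below).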

bool VmaBlockMetadata_Generic::CheckAllocation(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    VmaSuballocationType allocType,
    VmaSuballocationList::const_iterator suballocItem,
    bool canMakeOtherLost,
    VkDeviceSize* pOffset,
    size_t* itemsToMakeLostCount,
    VkDeviceSize* pSumFreeSize,
    VkDeviceSize* pSumItemSize) const
{
    VMA_ASSERT(allocSize > 0);
    VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(suballocItem != m_Suballocations.cend());
    VMA_ASSERT(pOffset != VMA_NULL);

    *itemsToMakeLostCount = 0;
    *pSumFreeSize = 0;
    *pSumItemSize = 0;

    if(canMakeOtherLost)
    {
        if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            *pSumFreeSize = suballocItem->size;
        }
        else
        {
            if(suballocItem->hAllocation->CanBecomeLost() &&
                suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
            {
                ++*itemsToMakeLostCount;
                *pSumItemSize = suballocItem->size;
            }
            else
            {
                return false;
            }
        }

        // Remaining size is too small for this request: Early return.
        if(GetSize() - suballocItem->offset < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation.
        *pOffset = suballocItem->offset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if(VMA_DEBUG_MARGIN > 0)
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        *pOffset = VmaAlignUp(*pOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        // Now that we have final *pOffset, check if we are past suballocItem.
        // If yes, return false - this function should be called for another suballocItem as starting point.
        if(*pOffset >= suballocItem->offset + suballocItem->size)
        {
            return false;
        }

        // Calculate padding at the beginning based on current offset.
        const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;

        // Calculate required margin at the end.
        const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;

        const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
        // Another early return check.
        if(suballocItem->offset + totalSize > GetSize())
        {
            return false;
        }

        // Advance lastSuballocItem until desired size is reached.
        // Update itemsToMakeLostCount.
        VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
        if(totalSize > suballocItem->size)
        {
            VkDeviceSize remainingSize = totalSize - suballocItem->size;
            while(remainingSize > 0)
            {
                ++lastSuballocItem;
                if(lastSuballocItem == m_Suballocations.cend())
                {
                    return false;
                }
                if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
                {
                    *pSumFreeSize += lastSuballocItem->size;
                }
                else
                {
                    VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
                    if(lastSuballocItem->hAllocation->CanBecomeLost() &&
                        lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                    {
                        ++*itemsToMakeLostCount;
                        *pSumItemSize += lastSuballocItem->size;
                    }
                    else
                    {
                        return false;
                    }
                }
                remainingSize = (lastSuballocItem->size < remainingSize) ?
                    remainingSize - lastSuballocItem->size : 0;
            }
        }

        // Check next suballocations for BufferImageGranularity conflicts.
        // If conflict exists, we must mark more allocations lost or fail.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
                        if(nextSuballoc.hAllocation->CanBecomeLost() &&
                            nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                        {
                            ++*itemsToMakeLostCount;
                        }
                        else
                        {
                            return false;
                        }
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }
    else
    {
        const VmaSuballocation& suballoc = *suballocItem;
        VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

        *pSumFreeSize = suballoc.size;

        // Size of this suballocation is too small for this request: Early return.
        if(suballoc.size < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation.
        *pOffset = suballoc.offset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if(VMA_DEBUG_MARGIN > 0)
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        *pOffset = VmaAlignUp(*pOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        // Calculate padding at the beginning based on current offset.
        const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;

        // Calculate required margin at the end.
        const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;

        // Fail if requested size plus margin before and after is bigger than size of this suballocation.
        if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
        {
            return false;
        }

        // Check next suballocations for BufferImageGranularity conflicts.
        // If conflict exists, allocation cannot be made here.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        return false;
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }

    // All tests passed: Success. pOffset is already filled.
    return true;
}
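
// Offset computation example (illustrative): for a free suballocation at
// offset = 1000 with VMA_DEBUG_MARGIN = 16 and allocAlignment = 256, the
// candidate offset becomes VmaAlignUp(1016, 256) = 1024, so paddingBegin = 24,
// and the request fits only if 24 + allocSize + 16 <= suballoc.size.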

void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
{
    VMA_ASSERT(item != m_Suballocations.end());
    VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);

    VmaSuballocationList::iterator nextItem = item;
    ++nextItem;
    VMA_ASSERT(nextItem != m_Suballocations.end());
    VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);

    item->size += nextItem->size;
    --m_FreeCount;
    m_Suballocations.erase(nextItem);
}
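
// Example: merging free items {offset = 0, size = 64} and {offset = 64,
// size = 32} yields a single free item {offset = 0, size = 96} and decrements
// m_FreeCount. The caller is responsible for (un)registering the items in
// m_FreeSuballocationsBySize, as FreeSuballocation() below does.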

VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
{
    // Change this suballocation to be marked as free.
    VmaSuballocation& suballoc = *suballocItem;
    suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    suballoc.hAllocation = VK_NULL_HANDLE;

    // Update totals.
    ++m_FreeCount;
    m_SumFreeSize += suballoc.size;

    // Merge with previous and/or next suballocation if it's also free.
    bool mergeWithNext = false;
    bool mergeWithPrev = false;

    VmaSuballocationList::iterator nextItem = suballocItem;
    ++nextItem;
    if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
    {
        mergeWithNext = true;
    }

    VmaSuballocationList::iterator prevItem = suballocItem;
    if(suballocItem != m_Suballocations.begin())
    {
        --prevItem;
        if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            mergeWithPrev = true;
        }
    }

    if(mergeWithNext)
    {
        UnregisterFreeSuballocation(nextItem);
        MergeFreeWithNext(suballocItem);
    }

    if(mergeWithPrev)
    {
        UnregisterFreeSuballocation(prevItem);
        MergeFreeWithNext(prevItem);
        RegisterFreeSuballocation(prevItem);
        return prevItem;
    }
    else
    {
        RegisterFreeSuballocation(suballocItem);
        return suballocItem;
    }
}
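
// Coalescing sketch: freeing B in [A:free][B:used][C:free] first unregisters
// C and merges it into B, then unregisters A, merges the combined B+C into A,
// and re-registers A - leaving a single free item spanning all three ranges.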

void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
{
    VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(item->size > 0);

    // You may want to enable this validation at the beginning or at the end of
    // this function, depending on what you want to check.
    VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());

    if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    {
        if(m_FreeSuballocationsBySize.empty())
        {
            m_FreeSuballocationsBySize.push_back(item);
        }
        else
        {
            VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
        }
    }

    //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
}
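
// Free items smaller than VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER are
// deliberately kept out of m_FreeSuballocationsBySize: they are too small to
// satisfy typical requests, and skipping them keeps the sorted vector short.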


void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
{
    VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(item->size > 0);

    // You may want to enable this validation at the beginning or at the end of
    // this function, depending on what you want to check.
    VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());

    if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    {
        VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
            m_FreeSuballocationsBySize.data(),
            m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
            item,
            VmaSuballocationItemSizeLess());
        for(size_t index = it - m_FreeSuballocationsBySize.data();
            index < m_FreeSuballocationsBySize.size();
            ++index)
        {
            if(m_FreeSuballocationsBySize[index] == item)
            {
                VmaVectorRemove(m_FreeSuballocationsBySize, index);
                return;
            }
            VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
        }
        VMA_ASSERT(0 && "Not found.");
    }

    //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
}
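
// Lookup strategy: VmaBinaryFindFirstNotLess jumps to the first entry with
// size >= item->size, then a short linear scan over equal-sized entries finds
// the exact iterator - O(log n) plus the number of duplicates of that size.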

bool VmaBlockMetadata_Generic::IsBufferImageGranularityConflictPossible(
    VkDeviceSize bufferImageGranularity,
    VmaSuballocationType& inOutPrevSuballocType) const
{
    if(bufferImageGranularity == 1 || IsEmpty())
    {
        return false;
    }

    VkDeviceSize minAlignment = VK_WHOLE_SIZE;
    bool typeConflictFound = false;
    for(VmaSuballocationList::const_iterator it = m_Suballocations.cbegin();
        it != m_Suballocations.cend();
        ++it)
    {
        const VmaSuballocationType suballocType = it->type;
        if(suballocType != VMA_SUBALLOCATION_TYPE_FREE)
        {
            minAlignment = VMA_MIN(minAlignment, it->hAllocation->GetAlignment());
            if(VmaIsBufferImageGranularityConflict(inOutPrevSuballocType, suballocType))
            {
                typeConflictFound = true;
            }
            inOutPrevSuballocType = suballocType;
        }
    }

    return typeConflictFound || minAlignment >= bufferImageGranularity;
}
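
// Used as a conservative check: it scans all used suballocations in offset
// order and reports a possible conflict if two consecutive used
// suballocations have types that conflict under bufferImageGranularity, or if
// the smallest allocation alignment in the block already reaches
// bufferImageGranularity.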

////////////////////////////////////////////////////////////////////////////////
// class VmaBlockMetadata_Linear

VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_SumFreeSize(0),
    m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_1stVectorIndex(0),
    m_2ndVectorMode(SECOND_VECTOR_EMPTY),
    m_1stNullItemsBeginCount(0),
    m_1stNullItemsMiddleCount(0),
    m_2ndNullItemsCount(0)
{
}

VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
{
}

void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
{
    VmaBlockMetadata::Init(size);
    m_SumFreeSize = size;
}

bool VmaBlockMetadata_Linear::Validate() const
{
    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
    VMA_VALIDATE(!suballocations1st.empty() ||
        suballocations2nd.empty() ||
        m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);

    if(!suballocations1st.empty())
    {
        // Null item at the beginning should be accounted into m_1stNullItemsBeginCount.
        VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
        // Null item at the end should be just pop_back().
        VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
    }
    if(!suballocations2nd.empty())
    {
        // Null item at the end should be just pop_back().
        VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
    }

    VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
    VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());

    VkDeviceSize sumUsedSize = 0;
    const size_t suballoc1stCount = suballocations1st.size();
    VkDeviceSize offset = VMA_DEBUG_MARGIN;

    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        const size_t suballoc2ndCount = suballocations2nd.size();
        size_t nullItem2ndCount = 0;
        for(size_t i = 0; i < suballoc2ndCount; ++i)
        {
            const VmaSuballocation& suballoc = suballocations2nd[i];
            const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

            VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
            VMA_VALIDATE(suballoc.offset >= offset);

            if(!currFree)
            {
                VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
                VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
                sumUsedSize += suballoc.size;
            }
            else
            {
                ++nullItem2ndCount;
            }

            offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
        }

        VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
    }

    for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
    {
        const VmaSuballocation& suballoc = suballocations1st[i];
        VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
            suballoc.hAllocation == VK_NULL_HANDLE);
    }

    size_t nullItem1stCount = m_1stNullItemsBeginCount;

    for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
    {
        const VmaSuballocation& suballoc = suballocations1st[i];
        const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

        VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
        VMA_VALIDATE(suballoc.offset >= offset);
        VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);

        if(!currFree)
        {
            VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
            VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
            sumUsedSize += suballoc.size;
        }
        else
        {
            ++nullItem1stCount;
        }

        offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
    }
    VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);

    if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        const size_t suballoc2ndCount = suballocations2nd.size();
        size_t nullItem2ndCount = 0;
        for(size_t i = suballoc2ndCount; i--; )
        {
            const VmaSuballocation& suballoc = suballocations2nd[i];
            const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

            VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
            VMA_VALIDATE(suballoc.offset >= offset);

            if(!currFree)
            {
                VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
                VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
                sumUsedSize += suballoc.size;
            }
            else
            {
                ++nullItem2ndCount;
            }

            offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
        }

        VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
    }

    VMA_VALIDATE(offset <= GetSize());
    VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);

    return true;
}
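
// Layout recap (illustrative): the linear metadata keeps two suballocation
// vectors. With SECOND_VECTOR_EMPTY only the 1st vector is used, growing
// toward higher offsets. SECOND_VECTOR_RING_BUFFER places the 2nd vector's
// allocations before the 1st (wrap-around), so the block looks like
// [2nd ... free ... 1st ... free]. SECOND_VECTOR_DOUBLE_STACK grows the 2nd
// vector down from the end of the block: [1st ... free ... 2nd].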

size_t VmaBlockMetadata_Linear::GetAllocationCount() const
{
    return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
        AccessSuballocations2nd().size() - m_2ndNullItemsCount;
}

VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
{
    const VkDeviceSize size = GetSize();

    /*
    We don't consider gaps inside allocation vectors with freed allocations because
    they are not suitable for reuse in linear allocator. We consider only space that
    is available for new allocations.
    */
    if(IsEmpty())
    {
        return size;
    }

    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();

    switch(m_2ndVectorMode)
    {
    case SECOND_VECTOR_EMPTY:
        /*
        Available space is after end of 1st, as well as before beginning of 1st (which
        would make it a ring buffer).
        */
        {
            const size_t suballocations1stCount = suballocations1st.size();
            VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
            const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
            const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
            return VMA_MAX(
                firstSuballoc.offset,
                size - (lastSuballoc.offset + lastSuballoc.size));
        }
        break;

    case SECOND_VECTOR_RING_BUFFER:
        /*
        Available space is only between end of 2nd and beginning of 1st.
        */
        {
            const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
            const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
            const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
            return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
        }
        break;

    case SECOND_VECTOR_DOUBLE_STACK:
        /*
        Available space is only between end of 1st and top of 2nd.
        */
        {
            const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
            const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
            const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
            return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
        }
        break;

    default:
        VMA_ASSERT(0);
        return 0;
    }
}
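
// Example (SECOND_VECTOR_EMPTY): in a 1024-byte block whose 1st vector spans
// offsets [128, 640), the maximum unused range is VMA_MAX(128, 1024 - 640) =
// 384 bytes at the end; the 128-byte gap at the front would only be usable by
// wrapping around, i.e. by switching to ring-buffer mode.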

void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
{
    const VkDeviceSize size = GetSize();
    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    const size_t suballoc1stCount = suballocations1st.size();
    const size_t suballoc2ndCount = suballocations2nd.size();

    outInfo.blockCount = 1;
    outInfo.allocationCount = (uint32_t)GetAllocationCount();
    outInfo.unusedRangeCount = 0;
    outInfo.usedBytes = 0;
    outInfo.allocationSizeMin = UINT64_MAX;
    outInfo.allocationSizeMax = 0;
    outInfo.unusedRangeSizeMin = UINT64_MAX;
    outInfo.unusedRangeSizeMax = 0;

    VkDeviceSize lastOffset = 0;

    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
        size_t nextAlloc2ndIndex = 0;
        while(lastOffset < freeSpace2ndTo1stEnd)
        {
            // Find next non-null allocation or move nextAllocIndex to the end.
            while(nextAlloc2ndIndex < suballoc2ndCount &&
                suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
            {
                ++nextAlloc2ndIndex;
            }

            // Found non-null allocation.
            if(nextAlloc2ndIndex < suballoc2ndCount)
            {
                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];

                // 1. Process free space before this allocation.
                if(lastOffset < suballoc.offset)
                {
                    // There is free space from lastOffset to suballoc.offset.
                    const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
                    ++outInfo.unusedRangeCount;
                    outInfo.unusedBytes += unusedRangeSize;
                    outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
                    outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
                }

                // 2. Process this allocation.
                // There is allocation with suballoc.offset, suballoc.size.
                outInfo.usedBytes += suballoc.size;
                outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
                outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);

                // 3. Prepare for next iteration.
                lastOffset = suballoc.offset + suballoc.size;
                ++nextAlloc2ndIndex;
            }
            // We are at the end.
            else
            {
                // There is free space from lastOffset to freeSpace2ndTo1stEnd.
                if(lastOffset < freeSpace2ndTo1stEnd)
                {
                    const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
                    ++outInfo.unusedRangeCount;
                    outInfo.unusedBytes += unusedRangeSize;
                    outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
                    outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
                }

                // End of loop.
                lastOffset = freeSpace2ndTo1stEnd;
            }
        }
    }

    size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    const VkDeviceSize freeSpace1stTo2ndEnd =
        m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    while(lastOffset < freeSpace1stTo2ndEnd)
    {
        // Find next non-null allocation or move nextAllocIndex to the end.
        while(nextAlloc1stIndex < suballoc1stCount &&
            suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
        {
            ++nextAlloc1stIndex;
        }

        // Found non-null allocation.
        if(nextAlloc1stIndex < suballoc1stCount)
        {
            const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];

            // 1. Process free space before this allocation.
            if(lastOffset < suballoc.offset)
            {
                // There is free space from lastOffset to suballoc.offset.
                const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
                ++outInfo.unusedRangeCount;
                outInfo.unusedBytes += unusedRangeSize;
                outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
                outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
            }

            // 2. Process this allocation.
            // There is allocation with suballoc.offset, suballoc.size.
            outInfo.usedBytes += suballoc.size;
            outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
            outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);

            // 3. Prepare for next iteration.
            lastOffset = suballoc.offset + suballoc.size;
            ++nextAlloc1stIndex;
        }
        // We are at the end.
        else
        {
            // There is free space from lastOffset to freeSpace1stTo2ndEnd.
            if(lastOffset < freeSpace1stTo2ndEnd)
            {
                const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
                ++outInfo.unusedRangeCount;
                outInfo.unusedBytes += unusedRangeSize;
                outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
                outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
            }

            // End of loop.
            lastOffset = freeSpace1stTo2ndEnd;
        }
    }

    if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
        while(lastOffset < size)
        {
            // Find next non-null allocation or move nextAllocIndex to the end.
            while(nextAlloc2ndIndex != SIZE_MAX &&
                suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
            {
                --nextAlloc2ndIndex;
            }

            // Found non-null allocation.
            if(nextAlloc2ndIndex != SIZE_MAX)
            {
                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];

                // 1. Process free space before this allocation.
                if(lastOffset < suballoc.offset)
                {
                    // There is free space from lastOffset to suballoc.offset.
                    const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
                    ++outInfo.unusedRangeCount;
                    outInfo.unusedBytes += unusedRangeSize;
                    outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
                    outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
                }

                // 2. Process this allocation.
                // There is allocation with suballoc.offset, suballoc.size.
                outInfo.usedBytes += suballoc.size;
                outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
                outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);

                // 3. Prepare for next iteration.
                lastOffset = suballoc.offset + suballoc.size;
                --nextAlloc2ndIndex;
            }
            // We are at the end.
            else
            {
                // There is free space from lastOffset to size.
                if(lastOffset < size)
                {
                    const VkDeviceSize unusedRangeSize = size - lastOffset;
                    ++outInfo.unusedRangeCount;
                    outInfo.unusedBytes += unusedRangeSize;
                    outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
                    outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
                }

                // End of loop.
                lastOffset = size;
            }
        }
    }

    outInfo.unusedBytes = size - outInfo.usedBytes;
}
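
// All three scans above share one pattern: walk the non-null allocations of a
// vector in address order, emit an unused range for any gap before each
// allocation, then account for the allocation itself. The same pattern is
// repeated in AddPoolStats() and PrintDetailedMap() below.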

void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
{
    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    const VkDeviceSize size = GetSize();
    const size_t suballoc1stCount = suballocations1st.size();
    const size_t suballoc2ndCount = suballocations2nd.size();

    inoutStats.size += size;

    VkDeviceSize lastOffset = 0;

    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
        size_t nextAlloc2ndIndex = 0;
        while(lastOffset < freeSpace2ndTo1stEnd)
        {
            // Find next non-null allocation or move nextAlloc2ndIndex to the end.
            while(nextAlloc2ndIndex < suballoc2ndCount &&
                suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
            {
                ++nextAlloc2ndIndex;
            }

            // Found non-null allocation.
            if(nextAlloc2ndIndex < suballoc2ndCount)
            {
                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];

                // 1. Process free space before this allocation.
                if(lastOffset < suballoc.offset)
                {
                    // There is free space from lastOffset to suballoc.offset.
                    const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
                    inoutStats.unusedSize += unusedRangeSize;
                    ++inoutStats.unusedRangeCount;
                    inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
                }

                // 2. Process this allocation.
                // There is allocation with suballoc.offset, suballoc.size.
                ++inoutStats.allocationCount;

                // 3. Prepare for next iteration.
                lastOffset = suballoc.offset + suballoc.size;
                ++nextAlloc2ndIndex;
            }
            // We are at the end.
            else
            {
                if(lastOffset < freeSpace2ndTo1stEnd)
                {
                    // There is free space from lastOffset to freeSpace2ndTo1stEnd.
                    const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
                    inoutStats.unusedSize += unusedRangeSize;
                    ++inoutStats.unusedRangeCount;
                    inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
                }

                // End of loop.
                lastOffset = freeSpace2ndTo1stEnd;
            }
        }
    }

    size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    const VkDeviceSize freeSpace1stTo2ndEnd =
        m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    while(lastOffset < freeSpace1stTo2ndEnd)
    {
        // Find next non-null allocation or move nextAllocIndex to the end.
        while(nextAlloc1stIndex < suballoc1stCount &&
            suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
        {
            ++nextAlloc1stIndex;
        }

        // Found non-null allocation.
        if(nextAlloc1stIndex < suballoc1stCount)
        {
            const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];

            // 1. Process free space before this allocation.
            if(lastOffset < suballoc.offset)
            {
                // There is free space from lastOffset to suballoc.offset.
                const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
                inoutStats.unusedSize += unusedRangeSize;
                ++inoutStats.unusedRangeCount;
                inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
            }

            // 2. Process this allocation.
            // There is allocation with suballoc.offset, suballoc.size.
            ++inoutStats.allocationCount;

            // 3. Prepare for next iteration.
            lastOffset = suballoc.offset + suballoc.size;
            ++nextAlloc1stIndex;
        }
        // We are at the end.
        else
        {
            if(lastOffset < freeSpace1stTo2ndEnd)
            {
                // There is free space from lastOffset to freeSpace1stTo2ndEnd.
                const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
                inoutStats.unusedSize += unusedRangeSize;
                ++inoutStats.unusedRangeCount;
                inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
            }

            // End of loop.
            lastOffset = freeSpace1stTo2ndEnd;
        }
    }

    if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
        while(lastOffset < size)
        {
            // Find next non-null allocation or move nextAlloc2ndIndex to the end.
            while(nextAlloc2ndIndex != SIZE_MAX &&
                suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
            {
                --nextAlloc2ndIndex;
            }

            // Found non-null allocation.
            if(nextAlloc2ndIndex != SIZE_MAX)
            {
                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];

                // 1. Process free space before this allocation.
                if(lastOffset < suballoc.offset)
                {
                    // There is free space from lastOffset to suballoc.offset.
                    const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
                    inoutStats.unusedSize += unusedRangeSize;
                    ++inoutStats.unusedRangeCount;
                    inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
                }

                // 2. Process this allocation.
                // There is allocation with suballoc.offset, suballoc.size.
                ++inoutStats.allocationCount;

                // 3. Prepare for next iteration.
                lastOffset = suballoc.offset + suballoc.size;
                --nextAlloc2ndIndex;
            }
            // We are at the end.
            else
            {
                if(lastOffset < size)
                {
                    // There is free space from lastOffset to size.
                    const VkDeviceSize unusedRangeSize = size - lastOffset;
                    inoutStats.unusedSize += unusedRangeSize;
                    ++inoutStats.unusedRangeCount;
                    inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
                }

                // End of loop.
                lastOffset = size;
            }
        }
    }
}

#if VMA_STATS_STRING_ENABLED
void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
{
    const VkDeviceSize size = GetSize();
    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    const size_t suballoc1stCount = suballocations1st.size();
    const size_t suballoc2ndCount = suballocations2nd.size();

    // FIRST PASS

    size_t unusedRangeCount = 0;
    VkDeviceSize usedBytes = 0;

    VkDeviceSize lastOffset = 0;

    size_t alloc2ndCount = 0;
    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
        size_t nextAlloc2ndIndex = 0;
        while(lastOffset < freeSpace2ndTo1stEnd)
        {
            // Find next non-null allocation or move nextAlloc2ndIndex to the end.
            while(nextAlloc2ndIndex < suballoc2ndCount &&
                suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
            {
                ++nextAlloc2ndIndex;
            }

            // Found non-null allocation.
            if(nextAlloc2ndIndex < suballoc2ndCount)
            {
                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];

                // 1. Process free space before this allocation.
                if(lastOffset < suballoc.offset)
                {
                    // There is free space from lastOffset to suballoc.offset.
                    ++unusedRangeCount;
                }

                // 2. Process this allocation.
                // There is allocation with suballoc.offset, suballoc.size.
                ++alloc2ndCount;
                usedBytes += suballoc.size;

                // 3. Prepare for next iteration.
                lastOffset = suballoc.offset + suballoc.size;
                ++nextAlloc2ndIndex;
            }
            // We are at the end.
            else
            {
                if(lastOffset < freeSpace2ndTo1stEnd)
                {
                    // There is free space from lastOffset to freeSpace2ndTo1stEnd.
                    ++unusedRangeCount;
                }

                // End of loop.
                lastOffset = freeSpace2ndTo1stEnd;
            }
        }
    }

    size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    size_t alloc1stCount = 0;
    const VkDeviceSize freeSpace1stTo2ndEnd =
        m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    while(lastOffset < freeSpace1stTo2ndEnd)
    {
        // Find next non-null allocation or move nextAllocIndex to the end.
        while(nextAlloc1stIndex < suballoc1stCount &&
            suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
        {
            ++nextAlloc1stIndex;
        }

        // Found non-null allocation.
        if(nextAlloc1stIndex < suballoc1stCount)
        {
            const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];

            // 1. Process free space before this allocation.
            if(lastOffset < suballoc.offset)
            {
                // There is free space from lastOffset to suballoc.offset.
                ++unusedRangeCount;
            }

            // 2. Process this allocation.
            // There is allocation with suballoc.offset, suballoc.size.
            ++alloc1stCount;
            usedBytes += suballoc.size;

            // 3. Prepare for next iteration.
            lastOffset = suballoc.offset + suballoc.size;
            ++nextAlloc1stIndex;
        }
        // We are at the end.
        else
        {
            if(lastOffset < freeSpace1stTo2ndEnd)
            {
                // There is free space from lastOffset to freeSpace1stTo2ndEnd.
                ++unusedRangeCount;
            }

            // End of loop.
            lastOffset = freeSpace1stTo2ndEnd;
        }
    }

    if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
        while(lastOffset < size)
        {
            // Find next non-null allocation or move nextAlloc2ndIndex to the end.
            while(nextAlloc2ndIndex != SIZE_MAX &&
                suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
            {
                --nextAlloc2ndIndex;
            }

            // Found non-null allocation.
            if(nextAlloc2ndIndex != SIZE_MAX)
            {
                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];

                // 1. Process free space before this allocation.
                if(lastOffset < suballoc.offset)
                {
                    // There is free space from lastOffset to suballoc.offset.
                    ++unusedRangeCount;
                }

                // 2. Process this allocation.
                // There is allocation with suballoc.offset, suballoc.size.
                ++alloc2ndCount;
                usedBytes += suballoc.size;

                // 3. Prepare for next iteration.
                lastOffset = suballoc.offset + suballoc.size;
                --nextAlloc2ndIndex;
            }
            // We are at the end.
            else
            {
                if(lastOffset < size)
                {
                    // There is free space from lastOffset to size.
                    ++unusedRangeCount;
                }

                // End of loop.
                lastOffset = size;
            }
        }
    }

    const VkDeviceSize unusedBytes = size - usedBytes;
    PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);

    // SECOND PASS
    lastOffset = 0;

    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
        size_t nextAlloc2ndIndex = 0;
        while(lastOffset < freeSpace2ndTo1stEnd)
        {
            // Find next non-null allocation or move nextAlloc2ndIndex to the end.
            while(nextAlloc2ndIndex < suballoc2ndCount &&
                suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
            {
                ++nextAlloc2ndIndex;
            }

            // Found non-null allocation.
            if(nextAlloc2ndIndex < suballoc2ndCount)
            {
                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];

                // 1. Process free space before this allocation.
                if(lastOffset < suballoc.offset)
                {
                    // There is free space from lastOffset to suballoc.offset.
                    const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
                    PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
                }

                // 2. Process this allocation.
                // There is allocation with suballoc.offset, suballoc.size.
                PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);

                // 3. Prepare for next iteration.
                lastOffset = suballoc.offset + suballoc.size;
                ++nextAlloc2ndIndex;
            }
            // We are at the end.
            else
            {
                if(lastOffset < freeSpace2ndTo1stEnd)
                {
                    // There is free space from lastOffset to freeSpace2ndTo1stEnd.
                    const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
                    PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
                }

                // End of loop.
                lastOffset = freeSpace2ndTo1stEnd;
            }
        }
    }

    nextAlloc1stIndex = m_1stNullItemsBeginCount;
    while(lastOffset < freeSpace1stTo2ndEnd)
    {
        // Find next non-null allocation or move nextAllocIndex to the end.
        while(nextAlloc1stIndex < suballoc1stCount &&
            suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
        {
            ++nextAlloc1stIndex;
        }

        // Found non-null allocation.
        if(nextAlloc1stIndex < suballoc1stCount)
        {
            const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];

            // 1. Process free space before this allocation.
            if(lastOffset < suballoc.offset)
            {
                // There is free space from lastOffset to suballoc.offset.
                const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
                PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
            }

            // 2. Process this allocation.
            // There is allocation with suballoc.offset, suballoc.size.
            PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);

            // 3. Prepare for next iteration.
            lastOffset = suballoc.offset + suballoc.size;
            ++nextAlloc1stIndex;
        }
        // We are at the end.
        else
        {
            if(lastOffset < freeSpace1stTo2ndEnd)
            {
                // There is free space from lastOffset to freeSpace1stTo2ndEnd.
                const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
                PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
            }

            // End of loop.
            lastOffset = freeSpace1stTo2ndEnd;
        }
    }

    if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
        while(lastOffset < size)
        {
            // Find next non-null allocation or move nextAlloc2ndIndex to the end.
            while(nextAlloc2ndIndex != SIZE_MAX &&
                suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
            {
                --nextAlloc2ndIndex;
            }

            // Found non-null allocation.
            if(nextAlloc2ndIndex != SIZE_MAX)
            {
                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];

                // 1. Process free space before this allocation.
                if(lastOffset < suballoc.offset)
                {
                    // There is free space from lastOffset to suballoc.offset.
                    const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
                    PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
                }

                // 2. Process this allocation.
                // There is allocation with suballoc.offset, suballoc.size.
                PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);

                // 3. Prepare for next iteration.
                lastOffset = suballoc.offset + suballoc.size;
                --nextAlloc2ndIndex;
            }
            // We are at the end.
            else
            {
                if(lastOffset < size)
                {
                    // There is free space from lastOffset to size.
                    const VkDeviceSize unusedRangeSize = size - lastOffset;
                    PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
                }

                // End of loop.
                lastOffset = size;
            }
        }
    }

    PrintDetailedMap_End(json);
}
#endif // #if VMA_STATS_STRING_ENABLED
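
// Usage sketch (assuming VMA_STATS_STRING_ENABLED is 1): the JSON produced
// here is exposed through the public API, e.g.:
//
//     char* statsString = VMA_NULL;
//     vmaBuildStatsString(allocator, &statsString, VK_TRUE /*detailedMap*/);
//     // ... write statsString to a file for offline inspection ...
//     vmaFreeStatsString(allocator, statsString);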

bool VmaBlockMetadata_Linear::CreateAllocationRequest(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    bool upperAddress,
    VmaSuballocationType allocType,
    bool canMakeOtherLost,
    uint32_t strategy,
    VmaAllocationRequest* pAllocationRequest)
{
    VMA_ASSERT(allocSize > 0);
    VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(pAllocationRequest != VMA_NULL);
    VMA_HEAVY_ASSERT(Validate());

    const VkDeviceSize size = GetSize();
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(upperAddress)
    {
        if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
        {
            VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
            return false;
        }

        // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
        if(allocSize > size)
        {
            return false;
        }
        VkDeviceSize resultBaseOffset = size - allocSize;
        if(!suballocations2nd.empty())
        {
            const VmaSuballocation& lastSuballoc = suballocations2nd.back();
            resultBaseOffset = lastSuballoc.offset - allocSize;
            if(allocSize > lastSuballoc.offset)
            {
                return false;
            }
        }

        // Start from offset equal to end of free space.
        VkDeviceSize resultOffset = resultBaseOffset;

        // Apply VMA_DEBUG_MARGIN at the end.
        if(VMA_DEBUG_MARGIN > 0)
        {
            if(resultOffset < VMA_DEBUG_MARGIN)
            {
                return false;
            }
            resultOffset -= VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        resultOffset = VmaAlignDown(resultOffset, allocAlignment);

        // Check next suballocations from 2nd for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1 && !suballocations2nd.empty())
        {
            bool bufferImageGranularityConflict = false;
            for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
            {
                const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
                if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on next page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
            }
        }

        // There is enough free space.
        const VkDeviceSize endOf1st = !suballocations1st.empty() ?
            suballocations1st.back().offset + suballocations1st.back().size :
            0;
        if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
        {
            // Check previous suballocations for BufferImageGranularity conflicts.
            // If conflict exists, allocation cannot be made here.
            if(bufferImageGranularity > 1)
            {
                for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
                {
                    const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
                    if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
                    {
                        if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
                        {
                            return false;
                        }
                    }
                    else
                    {
                        // Already on previous page.
                        break;
                    }
                }
            }

            // All tests passed: Success.
            pAllocationRequest->offset = resultOffset;
            pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
            pAllocationRequest->sumItemSize = 0;
            // pAllocationRequest->item unused.
            pAllocationRequest->itemsToMakeLostCount = 0;
            return true;
        }
    }
    else // !upperAddress
    {
        if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
        {
            // Try to allocate at the end of 1st vector.

            VkDeviceSize resultBaseOffset = 0;
            if(!suballocations1st.empty())
            {
                const VmaSuballocation& lastSuballoc = suballocations1st.back();
                resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
            }

            // Start from offset equal to beginning of free space.
            VkDeviceSize resultOffset = resultBaseOffset;

            // Apply VMA_DEBUG_MARGIN at the beginning.
            if(VMA_DEBUG_MARGIN > 0)
            {
                resultOffset += VMA_DEBUG_MARGIN;
            }

            // Apply alignment.
            resultOffset = VmaAlignUp(resultOffset, allocAlignment);

            // Check previous suballocations for BufferImageGranularity conflicts.
            // Make bigger alignment if necessary.
            if(bufferImageGranularity > 1 && !suballocations1st.empty())
            {
                bool bufferImageGranularityConflict = false;
                for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
                {
                    const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
                    if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
                    {
                        if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                        {
                            bufferImageGranularityConflict = true;
                            break;
                        }
                    }
                    else
                        // Already on previous page.
                        break;
                }
                if(bufferImageGranularityConflict)
                {
                    resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
                }
            }

            const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
                suballocations2nd.back().offset : size;

            // There is enough free space at the end after alignment.
            if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
            {
                // Check next suballocations for BufferImageGranularity conflicts.
                // If conflict exists, allocation cannot be made here.
                if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
                {
                    for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
                    {
                        const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
                        if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                        {
                            if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                            {
                                return false;
                            }
                        }
                        else
                        {
                            // Already on next page.
                            break;
                        }
                    }
                }

                // All tests passed: Success.
                pAllocationRequest->offset = resultOffset;
                pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
                pAllocationRequest->sumItemSize = 0;
                // pAllocationRequest->item unused.
                pAllocationRequest->itemsToMakeLostCount = 0;
                return true;
            }
        }

        // Wrap-around to end of 2nd vector. Try to allocate there, watching for the
        // beginning of 1st vector as the end of free space.
        if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
        {
            VMA_ASSERT(!suballocations1st.empty());

            VkDeviceSize resultBaseOffset = 0;
            if(!suballocations2nd.empty())
            {
                const VmaSuballocation& lastSuballoc = suballocations2nd.back();
                resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
            }

            // Start from offset equal to beginning of free space.
            VkDeviceSize resultOffset = resultBaseOffset;

            // Apply VMA_DEBUG_MARGIN at the beginning.
            if(VMA_DEBUG_MARGIN > 0)
            {
                resultOffset += VMA_DEBUG_MARGIN;
            }

            // Apply alignment.
            resultOffset = VmaAlignUp(resultOffset, allocAlignment);

            // Check previous suballocations for BufferImageGranularity conflicts.
            // Make bigger alignment if necessary.
            if(bufferImageGranularity > 1 && !suballocations2nd.empty())
            {
                bool bufferImageGranularityConflict = false;
                for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
                {
                    const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
                    if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
                    {
                        if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                        {
                            bufferImageGranularityConflict = true;
                            break;
                        }
                    }
                    else
                        // Already on previous page.
                        break;
                }
                if(bufferImageGranularityConflict)
                {
                    resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
                }
            }

            pAllocationRequest->itemsToMakeLostCount = 0;
            pAllocationRequest->sumItemSize = 0;
            size_t index1st = m_1stNullItemsBeginCount;

            if(canMakeOtherLost)
            {
                while(index1st < suballocations1st.size() &&
                    resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
                {
                    // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
                    const VmaSuballocation& suballoc = suballocations1st[index1st];
                    if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
                    {
                        // No problem.
                    }
                    else
                    {
                        VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
                        if(suballoc.hAllocation->CanBecomeLost() &&
                            suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                        {
                            ++pAllocationRequest->itemsToMakeLostCount;
                            pAllocationRequest->sumItemSize += suballoc.size;
                        }
                        else
                        {
                            return false;
                        }
                    }
                    ++index1st;
                }

                // Check next suballocations for BufferImageGranularity conflicts.
                // If conflict exists, we must mark more allocations lost or fail.
                if(bufferImageGranularity > 1)
                {
                    while(index1st < suballocations1st.size())
                    {
                        const VmaSuballocation& suballoc = suballocations1st[index1st];
                        if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
                        {
                            if(suballoc.hAllocation != VK_NULL_HANDLE)
                            {
                                // Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
                                if(suballoc.hAllocation->CanBecomeLost() &&
                                    suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                                {
                                    ++pAllocationRequest->itemsToMakeLostCount;
                                    pAllocationRequest->sumItemSize += suballoc.size;
                                }
                                else
                                {
                                    return false;
                                }
                            }
                        }
                        else
                        {
                            // Already on next page.
                            break;
                        }
                        ++index1st;
                    }
                }
            }

            // There is enough free space at the end after alignment.
            if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN < size) ||
                (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
            {
                // Check next suballocations for BufferImageGranularity conflicts.
                // If conflict exists, allocation cannot be made here.
                if(bufferImageGranularity > 1)
                {
                    for(size_t nextSuballocIndex = index1st;
                        nextSuballocIndex < suballocations1st.size();
                        nextSuballocIndex++)
                    {
                        const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
                        if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                        {
                            if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                            {
                                return false;
                            }
                        }
                        else
                        {
                            // Already on next page.
                            break;
                        }
                    }
                }

                // All tests passed: Success.
                pAllocationRequest->offset = resultOffset;
                pAllocationRequest->sumFreeSize =
                    (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
                    - resultBaseOffset
                    - pAllocationRequest->sumItemSize;
                // pAllocationRequest->item unused.
                return true;
            }
        }
    }

    return false;
}
9991 
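/*
How the linear allocator lays out memory - a short illustration:

1st vector only (SECOND_VECTOR_EMPTY), allocations grow towards the end:

    | A | B | C | ......................... |
    ^ suballocations1st                     ^ end of block

Ring buffer (SECOND_VECTOR_RING_BUFFER): when items at the front of 1st are
freed, new allocations wrap around into the 2nd vector before them:

    | E | F | ..... | B | C | D |
    ^ 2nd vector    ^ 1st vector (A already freed)

Double stack (SECOND_VECTOR_DOUBLE_STACK): 2nd grows down from the end.

A pool using this algorithm could be created as below - a minimal sketch,
assuming an already chosen memTypeIndex (not a complete program):

    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memTypeIndex;
    poolCreateInfo.flags = VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT;
    poolCreateInfo.blockSize = 64ull * 1024 * 1024;
    poolCreateInfo.maxBlockCount = 1; // Required for ring-buffer behavior.
    VmaPool pool;
    VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
*/
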
9992 bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
9993  uint32_t currentFrameIndex,
9994  uint32_t frameInUseCount,
9995  VmaAllocationRequest* pAllocationRequest)
9996 {
9997  if(pAllocationRequest->itemsToMakeLostCount == 0)
9998  {
9999  return true;
10000  }
10001 
10002  VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);
10003 
10004  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10005  size_t index1st = m_1stNullItemsBeginCount;
10006  size_t madeLostCount = 0;
10007  while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
10008  {
10009  VMA_ASSERT(index1st < suballocations1st.size());
10010  VmaSuballocation& suballoc = suballocations1st[index1st];
10011  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
10012  {
10013  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
10014  VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
10015  if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
10016  {
10017  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10018  suballoc.hAllocation = VK_NULL_HANDLE;
10019  m_SumFreeSize += suballoc.size;
10020  ++m_1stNullItemsMiddleCount;
10021  ++madeLostCount;
10022  }
10023  else
10024  {
10025  return false;
10026  }
10027  }
10028  ++index1st;
10029  }
10030 
10031  CleanupAfterFree();
10033  //VMA_HEAVY_ASSERT(Validate()); // Already called by CleanupAfterFree().
10033 
10034  return true;
10035 }
10036 
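/*
Lost allocations in short: an allocation created with
VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT may be reclaimed by a later
allocation that uses VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT, but only
if it has not been used for more than frameInUseCount frames, i.e. when
GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex, as checked above.

A minimal usage sketch of the public API (error handling omitted):

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT |
        VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT;
    // ... pass allocCreateInfo to e.g. vmaCreateBuffer().

    // Each frame, check whether the allocation is still valid before use:
    VmaAllocationInfo allocInfo;
    vmaGetAllocationInfo(allocator, alloc, &allocInfo);
    if(allocInfo.deviceMemory == VK_NULL_HANDLE)
    {
        // Allocation became lost - recreate the resource.
    }
*/
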
10037 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
10038 {
10039  uint32_t lostAllocationCount = 0;
10040 
10041  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10042  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
10043  {
10044  VmaSuballocation& suballoc = suballocations1st[i];
10045  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
10046  suballoc.hAllocation->CanBecomeLost() &&
10047  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
10048  {
10049  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10050  suballoc.hAllocation = VK_NULL_HANDLE;
10051  ++m_1stNullItemsMiddleCount;
10052  m_SumFreeSize += suballoc.size;
10053  ++lostAllocationCount;
10054  }
10055  }
10056 
10057  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10058  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
10059  {
10060  VmaSuballocation& suballoc = suballocations2nd[i];
10061  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
10062  suballoc.hAllocation->CanBecomeLost() &&
10063  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
10064  {
10065  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10066  suballoc.hAllocation = VK_NULL_HANDLE;
10067  ++m_2ndNullItemsCount;
10068  ++lostAllocationCount;
10069  }
10070  }
10071 
10072  if(lostAllocationCount)
10073  {
10074  CleanupAfterFree();
10075  }
10076 
10077  return lostAllocationCount;
10078 }
10079 
10080 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
10081 {
10082  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10083  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
10084  {
10085  const VmaSuballocation& suballoc = suballocations1st[i];
10086  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
10087  {
10088  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
10089  {
10090  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
10091  return VK_ERROR_VALIDATION_FAILED_EXT;
10092  }
10093  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
10094  {
10095  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
10096  return VK_ERROR_VALIDATION_FAILED_EXT;
10097  }
10098  }
10099  }
10100 
10101  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10102  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
10103  {
10104  const VmaSuballocation& suballoc = suballocations2nd[i];
10105  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
10106  {
10107  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
10108  {
10109  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
10110  return VK_ERROR_VALIDATION_FAILED_EXT;
10111  }
10112  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
10113  {
10114  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
10115  return VK_ERROR_VALIDATION_FAILED_EXT;
10116  }
10117  }
10118  }
10119 
10120  return VK_SUCCESS;
10121 }
10122 
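/*
Corruption detection in short: when VMA_DEBUG_DETECT_CORRUPTION is enabled and
VMA_DEBUG_MARGIN > 0, a 4-byte magic number is written into the margin before
and after every allocation. CheckCorruption() above re-reads those values; a
mismatch means the application wrote outside the bounds of an allocation.

It is enabled at compile time, before including this header - a sketch:

    #define VMA_DEBUG_MARGIN 16
    #define VMA_DEBUG_DETECT_CORRUPTION 1
    #include "vk_mem_alloc.h"

vmaCheckCorruption(allocator, UINT32_MAX) can then be called periodically;
it only works for memory types that are HOST_VISIBLE and HOST_COHERENT.
*/
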
10123 void VmaBlockMetadata_Linear::Alloc(
10124  const VmaAllocationRequest& request,
10125  VmaSuballocationType type,
10126  VkDeviceSize allocSize,
10127  bool upperAddress,
10128  VmaAllocation hAllocation)
10129 {
10130  const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };
10131 
10132  if(upperAddress)
10133  {
10134  VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
10135  "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
10136  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10137  suballocations2nd.push_back(newSuballoc);
10138  m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
10139  }
10140  else
10141  {
10142  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10143 
10144  // First allocation.
10145  if(suballocations1st.empty())
10146  {
10147  suballocations1st.push_back(newSuballoc);
10148  }
10149  else
10150  {
10151  // New allocation at the end of 1st vector.
10152  if(request.offset >= suballocations1st.back().offset + suballocations1st.back().size)
10153  {
10154  // Check if it fits before the end of the block.
10155  VMA_ASSERT(request.offset + allocSize <= GetSize());
10156  suballocations1st.push_back(newSuballoc);
10157  }
10158  // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
10159  else if(request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset)
10160  {
10161  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10162 
10163  switch(m_2ndVectorMode)
10164  {
10165  case SECOND_VECTOR_EMPTY:
10166  // First allocation from second part ring buffer.
10167  VMA_ASSERT(suballocations2nd.empty());
10168  m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
10169  break;
10170  case SECOND_VECTOR_RING_BUFFER:
10171  // 2-part ring buffer is already started.
10172  VMA_ASSERT(!suballocations2nd.empty());
10173  break;
10174  case SECOND_VECTOR_DOUBLE_STACK:
10175  VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
10176  break;
10177  default:
10178  VMA_ASSERT(0);
10179  }
10180 
10181  suballocations2nd.push_back(newSuballoc);
10182  }
10183  else
10184  {
10185  VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
10186  }
10187  }
10188  }
10189 
10190  m_SumFreeSize -= newSuballoc.size;
10191 }
10192 
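/*
Summary of the placement cases handled by Alloc() above:
- upperAddress: push onto 2nd vector, which grows down from the end of the
  block (double stack mode).
- Otherwise, if the request lies past the last item of 1st: append to 1st.
- Otherwise it must fit before the first used item of 1st: append to 2nd,
  turning the block into a 2-part ring buffer if it was not one already.
Any other offset indicates an internal inconsistency, hence the final assert.
*/
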
10193 void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
10194 {
10195  FreeAtOffset(allocation->GetOffset());
10196 }
10197 
10198 void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
10199 {
10200  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10201  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10202 
10203  if(!suballocations1st.empty())
10204  {
10205  // Freed suballocation is the first one in 1st vector: mark it as the next empty item at the beginning.
10206  VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
10207  if(firstSuballoc.offset == offset)
10208  {
10209  firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10210  firstSuballoc.hAllocation = VK_NULL_HANDLE;
10211  m_SumFreeSize += firstSuballoc.size;
10212  ++m_1stNullItemsBeginCount;
10213  CleanupAfterFree();
10214  return;
10215  }
10216  }
10217 
10218  // Last allocation in 2-part ring buffer or top of upper stack (same logic).
10219  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
10220  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
10221  {
10222  VmaSuballocation& lastSuballoc = suballocations2nd.back();
10223  if(lastSuballoc.offset == offset)
10224  {
10225  m_SumFreeSize += lastSuballoc.size;
10226  suballocations2nd.pop_back();
10227  CleanupAfterFree();
10228  return;
10229  }
10230  }
10231  // Last allocation in 1st vector.
10232  else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
10233  {
10234  VmaSuballocation& lastSuballoc = suballocations1st.back();
10235  if(lastSuballoc.offset == offset)
10236  {
10237  m_SumFreeSize += lastSuballoc.size;
10238  suballocations1st.pop_back();
10239  CleanupAfterFree();
10240  return;
10241  }
10242  }
10243 
10244  // Item from the middle of 1st vector.
10245  {
10246  VmaSuballocation refSuballoc;
10247  refSuballoc.offset = offset;
10248  // Remaining members stay uninitialized intentionally for better performance.
10249  SuballocationVectorType::iterator it = VmaVectorFindSorted<VmaSuballocationOffsetLess>(
10250  suballocations1st.begin() + m_1stNullItemsBeginCount,
10251  suballocations1st.end(),
10252  refSuballoc);
10253  if(it != suballocations1st.end())
10254  {
10255  it->type = VMA_SUBALLOCATION_TYPE_FREE;
10256  it->hAllocation = VK_NULL_HANDLE;
10257  ++m_1stNullItemsMiddleCount;
10258  m_SumFreeSize += it->size;
10259  CleanupAfterFree();
10260  return;
10261  }
10262  }
10263 
10264  if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
10265  {
10266  // Item from the middle of 2nd vector.
10267  VmaSuballocation refSuballoc;
10268  refSuballoc.offset = offset;
10269  // Remaining members stay uninitialized intentionally for better performance.
10270  SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
10271  VmaVectorFindSorted<VmaSuballocationOffsetLess>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc) :
10272  VmaVectorFindSorted<VmaSuballocationOffsetGreater>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc);
10273  if(it != suballocations2nd.end())
10274  {
10275  it->type = VMA_SUBALLOCATION_TYPE_FREE;
10276  it->hAllocation = VK_NULL_HANDLE;
10277  ++m_2ndNullItemsCount;
10278  m_SumFreeSize += it->size;
10279  CleanupAfterFree();
10280  return;
10281  }
10282  }
10283 
10284  VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
10285 }
10286 
10287 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
10288 {
10289  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
10290  const size_t suballocCount = AccessSuballocations1st().size();
10291  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
10292 }
10293 
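/*
The compaction heuristic above triggers when the 1st vector holds more than 32
suballocations and null (freed) items outnumber live ones by at least 3:2.
Worked example: suballocCount = 100, nullItemCount = 60 ->
60 * 2 = 120 >= (100 - 60) * 3 = 120, so CleanupAfterFree() will compact.
With nullItemCount = 59: 118 < 123, so compaction is deferred.
*/
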
10294 void VmaBlockMetadata_Linear::CleanupAfterFree()
10295 {
10296  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10297  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10298 
10299  if(IsEmpty())
10300  {
10301  suballocations1st.clear();
10302  suballocations2nd.clear();
10303  m_1stNullItemsBeginCount = 0;
10304  m_1stNullItemsMiddleCount = 0;
10305  m_2ndNullItemsCount = 0;
10306  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
10307  }
10308  else
10309  {
10310  const size_t suballoc1stCount = suballocations1st.size();
10311  const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
10312  VMA_ASSERT(nullItem1stCount <= suballoc1stCount);
10313 
10314  // Find more null items at the beginning of 1st vector.
10315  while(m_1stNullItemsBeginCount < suballoc1stCount &&
10316  suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
10317  {
10318  ++m_1stNullItemsBeginCount;
10319  --m_1stNullItemsMiddleCount;
10320  }
10321 
10322  // Find more null items at the end of 1st vector.
10323  while(m_1stNullItemsMiddleCount > 0 &&
10324  suballocations1st.back().hAllocation == VK_NULL_HANDLE)
10325  {
10326  --m_1stNullItemsMiddleCount;
10327  suballocations1st.pop_back();
10328  }
10329 
10330  // Find more null items at the end of 2nd vector.
10331  while(m_2ndNullItemsCount > 0 &&
10332  suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
10333  {
10334  --m_2ndNullItemsCount;
10335  suballocations2nd.pop_back();
10336  }
10337 
10338  if(ShouldCompact1st())
10339  {
10340  const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
10341  size_t srcIndex = m_1stNullItemsBeginCount;
10342  for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
10343  {
10344  while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
10345  {
10346  ++srcIndex;
10347  }
10348  if(dstIndex != srcIndex)
10349  {
10350  suballocations1st[dstIndex] = suballocations1st[srcIndex];
10351  }
10352  ++srcIndex;
10353  }
10354  suballocations1st.resize(nonNullItemCount);
10355  m_1stNullItemsBeginCount = 0;
10356  m_1stNullItemsMiddleCount = 0;
10357  }
10358 
10359  // 2nd vector became empty.
10360  if(suballocations2nd.empty())
10361  {
10362  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
10363  }
10364 
10365  // 1st vector became empty.
10366  if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
10367  {
10368  suballocations1st.clear();
10369  m_1stNullItemsBeginCount = 0;
10370 
10371  if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10372  {
10373  // Swap 1st with 2nd. Now 2nd is empty.
10374  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
10375  m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
10376  while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
10377  suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
10378  {
10379  ++m_1stNullItemsBeginCount;
10380  --m_1stNullItemsMiddleCount;
10381  }
10382  m_2ndNullItemsCount = 0;
10383  m_1stVectorIndex ^= 1;
10384  }
10385  }
10386  }
10387 
10388  VMA_HEAVY_ASSERT(Validate());
10389 }
10390 
10391 
10392 ////////////////////////////////////////////////////////////////////////////////
10393 // class VmaBlockMetadata_Buddy
10394 
10395 VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
10396  VmaBlockMetadata(hAllocator),
10397  m_Root(VMA_NULL),
10398  m_AllocationCount(0),
10399  m_FreeCount(1),
10400  m_SumFreeSize(0)
10401 {
10402  memset(m_FreeList, 0, sizeof(m_FreeList));
10403 }
10404 
10405 VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
10406 {
10407  DeleteNode(m_Root);
10408 }
10409 
10410 void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
10411 {
10412  VmaBlockMetadata::Init(size);
10413 
10414  m_UsableSize = VmaPrevPow2(size);
10415  m_SumFreeSize = m_UsableSize;
10416 
10417  // Calculate m_LevelCount.
10418  m_LevelCount = 1;
10419  while(m_LevelCount < MAX_LEVELS &&
10420  LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
10421  {
10422  ++m_LevelCount;
10423  }
10424 
10425  Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
10426  rootNode->offset = 0;
10427  rootNode->type = Node::TYPE_FREE;
10428  rootNode->parent = VMA_NULL;
10429  rootNode->buddy = VMA_NULL;
10430 
10431  m_Root = rootNode;
10432  AddToFreeListFront(0, rootNode);
10433 }
10434 
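/*
The buddy metadata works on power-of-2 node sizes only: Init() rounds the
block size down to m_UsableSize = VmaPrevPow2(size), and the remainder becomes
unusable space (reported by GetUnusableSize()). Level 0 is the whole usable
block; each deeper level halves the node size, down to MIN_NODE_SIZE.

Worked example (exact depth depends on MIN_NODE_SIZE): a 48 MiB block gives
m_UsableSize = 32 MiB; node sizes per level are then 32 MiB, 16 MiB, 8 MiB,
and so on, i.e. LevelToNodeSize(level) == m_UsableSize >> level.
*/
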
10435 bool VmaBlockMetadata_Buddy::Validate() const
10436 {
10437  // Validate tree.
10438  ValidationContext ctx;
10439  if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
10440  {
10441  VMA_VALIDATE(false && "ValidateNode failed.");
10442  }
10443  VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
10444  VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);
10445 
10446  // Validate free node lists.
10447  for(uint32_t level = 0; level < m_LevelCount; ++level)
10448  {
10449  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
10450  m_FreeList[level].front->free.prev == VMA_NULL);
10451 
10452  for(Node* node = m_FreeList[level].front;
10453  node != VMA_NULL;
10454  node = node->free.next)
10455  {
10456  VMA_VALIDATE(node->type == Node::TYPE_FREE);
10457 
10458  if(node->free.next == VMA_NULL)
10459  {
10460  VMA_VALIDATE(m_FreeList[level].back == node);
10461  }
10462  else
10463  {
10464  VMA_VALIDATE(node->free.next->free.prev == node);
10465  }
10466  }
10467  }
10468 
10469  // Validate that free lists at higher levels are empty.
10470  for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
10471  {
10472  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
10473  }
10474 
10475  return true;
10476 }
10477 
10478 VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
10479 {
10480  for(uint32_t level = 0; level < m_LevelCount; ++level)
10481  {
10482  if(m_FreeList[level].front != VMA_NULL)
10483  {
10484  return LevelToNodeSize(level);
10485  }
10486  }
10487  return 0;
10488 }
10489 
10490 void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
10491 {
10492  const VkDeviceSize unusableSize = GetUnusableSize();
10493 
10494  outInfo.blockCount = 1;
10495 
10496  outInfo.allocationCount = outInfo.unusedRangeCount = 0;
10497  outInfo.usedBytes = outInfo.unusedBytes = 0;
10498 
10499  outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
10500  outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
10501  outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.
10502 
10503  CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));
10504 
10505  if(unusableSize > 0)
10506  {
10507  ++outInfo.unusedRangeCount;
10508  outInfo.unusedBytes += unusableSize;
10509  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
10510  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
10511  }
10512 }
10513 
10514 void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
10515 {
10516  const VkDeviceSize unusableSize = GetUnusableSize();
10517 
10518  inoutStats.size += GetSize();
10519  inoutStats.unusedSize += m_SumFreeSize + unusableSize;
10520  inoutStats.allocationCount += m_AllocationCount;
10521  inoutStats.unusedRangeCount += m_FreeCount;
10522  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
10523 
10524  if(unusableSize > 0)
10525  {
10526  ++inoutStats.unusedRangeCount;
10527  // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations.
10528  }
10529 }
10530 
10531 #if VMA_STATS_STRING_ENABLED
10532 
10533 void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
10534 {
10535  // TODO optimize
10536  VmaStatInfo stat;
10537  CalcAllocationStatInfo(stat);
10538 
10539  PrintDetailedMap_Begin(
10540  json,
10541  stat.unusedBytes,
10542  stat.allocationCount,
10543  stat.unusedRangeCount);
10544 
10545  PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));
10546 
10547  const VkDeviceSize unusableSize = GetUnusableSize();
10548  if(unusableSize > 0)
10549  {
10550  PrintDetailedMap_UnusedRange(json,
10551  m_UsableSize, // offset
10552  unusableSize); // size
10553  }
10554 
10555  PrintDetailedMap_End(json);
10556 }
10557 
10558 #endif // #if VMA_STATS_STRING_ENABLED
10559 
10560 bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
10561  uint32_t currentFrameIndex,
10562  uint32_t frameInUseCount,
10563  VkDeviceSize bufferImageGranularity,
10564  VkDeviceSize allocSize,
10565  VkDeviceSize allocAlignment,
10566  bool upperAddress,
10567  VmaSuballocationType allocType,
10568  bool canMakeOtherLost,
10569  uint32_t strategy,
10570  VmaAllocationRequest* pAllocationRequest)
10571 {
10572  VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
10573 
10574  // Simple way to respect bufferImageGranularity. May be optimized some day.
10575  // Whenever the allocation might contain an OPTIMAL-tiling image, inflate both alignment and size to bufferImageGranularity.
10576  if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
10577  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
10578  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
10579  {
10580  allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
10581  allocSize = VMA_MAX(allocSize, bufferImageGranularity);
10582  }
10583 
10584  if(allocSize > m_UsableSize)
10585  {
10586  return false;
10587  }
10588 
10589  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
10590  for(uint32_t level = targetLevel + 1; level--; )
10591  {
10592  for(Node* freeNode = m_FreeList[level].front;
10593  freeNode != VMA_NULL;
10594  freeNode = freeNode->free.next)
10595  {
10596  if(freeNode->offset % allocAlignment == 0)
10597  {
10598  pAllocationRequest->offset = freeNode->offset;
10599  pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
10600  pAllocationRequest->sumItemSize = 0;
10601  pAllocationRequest->itemsToMakeLostCount = 0;
10602  pAllocationRequest->customData = (void*)(uintptr_t)level;
10603  return true;
10604  }
10605  }
10606  }
10607 
10608  return false;
10609 }
10610 
10611 bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
10612  uint32_t currentFrameIndex,
10613  uint32_t frameInUseCount,
10614  VmaAllocationRequest* pAllocationRequest)
10615 {
10616  /*
10617  Lost allocations are not supported in buddy allocator at the moment.
10618  Support might be added in the future.
10619  */
10620  return pAllocationRequest->itemsToMakeLostCount == 0;
10621 }
10622 
10623 uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
10624 {
10625  /*
10626  Lost allocations are not supported in buddy allocator at the moment.
10627  Support might be added in the future.
10628  */
10629  return 0;
10630 }
10631 
10632 void VmaBlockMetadata_Buddy::Alloc(
10633  const VmaAllocationRequest& request,
10634  VmaSuballocationType type,
10635  VkDeviceSize allocSize,
10636  bool upperAddress,
10637  VmaAllocation hAllocation)
10638 {
10639  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
10640  uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;
10641 
10642  Node* currNode = m_FreeList[currLevel].front;
10643  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
10644  while(currNode->offset != request.offset)
10645  {
10646  currNode = currNode->free.next;
10647  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
10648  }
10649 
10650  // Go down, splitting free nodes.
10651  while(currLevel < targetLevel)
10652  {
10653  // currNode is already first free node at currLevel.
10654  // Remove it from the list of free nodes at this level.
10655  RemoveFromFreeList(currLevel, currNode);
10656 
10657  const uint32_t childrenLevel = currLevel + 1;
10658 
10659  // Create two free sub-nodes.
10660  Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
10661  Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();
10662 
10663  leftChild->offset = currNode->offset;
10664  leftChild->type = Node::TYPE_FREE;
10665  leftChild->parent = currNode;
10666  leftChild->buddy = rightChild;
10667 
10668  rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
10669  rightChild->type = Node::TYPE_FREE;
10670  rightChild->parent = currNode;
10671  rightChild->buddy = leftChild;
10672 
10673  // Convert currNode to split type.
10674  currNode->type = Node::TYPE_SPLIT;
10675  currNode->split.leftChild = leftChild;
10676 
10677  // Add child nodes to free list. Order is important!
10678  AddToFreeListFront(childrenLevel, rightChild);
10679  AddToFreeListFront(childrenLevel, leftChild);
10680 
10681  ++m_FreeCount;
10682  //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2.
10683  ++currLevel;
10684  currNode = m_FreeList[currLevel].front;
10685 
10686  /*
10687  We can be sure that currNode, as left child of node previously split,
10688  also fulfills the alignment requirement.
10689  */
10690  }
10691 
10692  // Remove from free list.
10693  VMA_ASSERT(currLevel == targetLevel &&
10694  currNode != VMA_NULL &&
10695  currNode->type == Node::TYPE_FREE);
10696  RemoveFromFreeList(currLevel, currNode);
10697 
10698  // Convert to allocation node.
10699  currNode->type = Node::TYPE_ALLOCATION;
10700  currNode->allocation.alloc = hAllocation;
10701 
10702  ++m_AllocationCount;
10703  --m_FreeCount;
10704  m_SumFreeSize -= allocSize;
10705 }
10706 
10707 void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
10708 {
10709  if(node->type == Node::TYPE_SPLIT)
10710  {
10711  DeleteNode(node->split.leftChild->buddy);
10712  DeleteNode(node->split.leftChild);
10713  }
10714 
10715  vma_delete(GetAllocationCallbacks(), node);
10716 }
10717 
10718 bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
10719 {
10720  VMA_VALIDATE(level < m_LevelCount);
10721  VMA_VALIDATE(curr->parent == parent);
10722  VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
10723  VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
10724  switch(curr->type)
10725  {
10726  case Node::TYPE_FREE:
10727  // curr->free.prev, next are validated separately.
10728  ctx.calculatedSumFreeSize += levelNodeSize;
10729  ++ctx.calculatedFreeCount;
10730  break;
10731  case Node::TYPE_ALLOCATION:
10732  ++ctx.calculatedAllocationCount;
10733  VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE);
10734  ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
10735  break;
10736  case Node::TYPE_SPLIT:
10737  {
10738  const uint32_t childrenLevel = level + 1;
10739  const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
10740  const Node* const leftChild = curr->split.leftChild;
10741  VMA_VALIDATE(leftChild != VMA_NULL);
10742  VMA_VALIDATE(leftChild->offset == curr->offset);
10743  if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
10744  {
10745  VMA_VALIDATE(false && "ValidateNode for left child failed.");
10746  }
10747  const Node* const rightChild = leftChild->buddy;
10748  VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
10749  if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
10750  {
10751  VMA_VALIDATE(false && "ValidateNode for right child failed.");
10752  }
10753  }
10754  break;
10755  default:
10756  return false;
10757  }
10758 
10759  return true;
10760 }
10761 
10762 uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
10763 {
10764  // I know this could be optimized somehow e.g. by using std::log2p1 from C++20.
10765  uint32_t level = 0;
10766  VkDeviceSize currLevelNodeSize = m_UsableSize;
10767  VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
10768  while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
10769  {
10770  ++level;
10771  currLevelNodeSize = nextLevelNodeSize;
10772  nextLevelNodeSize = currLevelNodeSize >> 1;
10773  }
10774  return level;
10775 }
10776 
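/*
Worked example for AllocSizeToLevel(), assuming m_UsableSize = 1 MiB:
for allocSize = 3000, the loop descends while the allocation still fits in the
next (halved) node size: 1 MiB, 512 KiB, ..., 4096 all hold 3000 bytes, but
2048 does not, so the loop stops at the level whose node size is 4096.
The allocation is then placed in a 4096-byte node, wasting 1096 bytes of it.
*/
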
10777 void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
10778 {
10779  // Find node and level.
10780  Node* node = m_Root;
10781  VkDeviceSize nodeOffset = 0;
10782  uint32_t level = 0;
10783  VkDeviceSize levelNodeSize = LevelToNodeSize(0);
10784  while(node->type == Node::TYPE_SPLIT)
10785  {
10786  const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
10787  if(offset < nodeOffset + nextLevelSize)
10788  {
10789  node = node->split.leftChild;
10790  }
10791  else
10792  {
10793  node = node->split.leftChild->buddy;
10794  nodeOffset += nextLevelSize;
10795  }
10796  ++level;
10797  levelNodeSize = nextLevelSize;
10798  }
10799 
10800  VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
10801  VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);
10802 
10803  ++m_FreeCount;
10804  --m_AllocationCount;
10805  m_SumFreeSize += alloc->GetSize();
10806 
10807  node->type = Node::TYPE_FREE;
10808 
10809  // Join free nodes if possible.
10810  while(level > 0 && node->buddy->type == Node::TYPE_FREE)
10811  {
10812  RemoveFromFreeList(level, node->buddy);
10813  Node* const parent = node->parent;
10814 
10815  vma_delete(GetAllocationCallbacks(), node->buddy);
10816  vma_delete(GetAllocationCallbacks(), node);
10817  parent->type = Node::TYPE_FREE;
10818 
10819  node = parent;
10820  --level;
10821  //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
10822  --m_FreeCount;
10823  }
10824 
10825  AddToFreeListFront(level, node);
10826 }
10827 
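/*
FreeAtOffset() above walks the tree from the root, choosing the left or right
child depending on which half contains the offset, until it reaches the
allocation node. After marking it free, the merge loop repeatedly checks the
node's buddy: if both halves of a parent are free, the two children are
deleted and the parent itself becomes a single free node one level up - the
classic buddy coalescing step. The surviving node is finally pushed to the
front of the free list for its level.
*/
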
10828 void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
10829 {
10830  switch(node->type)
10831  {
10832  case Node::TYPE_FREE:
10833  ++outInfo.unusedRangeCount;
10834  outInfo.unusedBytes += levelNodeSize;
10835  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
10836  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, levelNodeSize);
10837  break;
10838  case Node::TYPE_ALLOCATION:
10839  {
10840  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
10841  ++outInfo.allocationCount;
10842  outInfo.usedBytes += allocSize;
10843  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
10844  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, allocSize);
10845 
10846  const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
10847  if(unusedRangeSize > 0)
10848  {
10849  ++outInfo.unusedRangeCount;
10850  outInfo.unusedBytes += unusedRangeSize;
10851  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
10852  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
10853  }
10854  }
10855  break;
10856  case Node::TYPE_SPLIT:
10857  {
10858  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
10859  const Node* const leftChild = node->split.leftChild;
10860  CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
10861  const Node* const rightChild = leftChild->buddy;
10862  CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
10863  }
10864  break;
10865  default:
10866  VMA_ASSERT(0);
10867  }
10868 }
10869 
10870 void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
10871 {
10872  VMA_ASSERT(node->type == Node::TYPE_FREE);
10873 
10874  // List is empty.
10875  Node* const frontNode = m_FreeList[level].front;
10876  if(frontNode == VMA_NULL)
10877  {
10878  VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
10879  node->free.prev = node->free.next = VMA_NULL;
10880  m_FreeList[level].front = m_FreeList[level].back = node;
10881  }
10882  else
10883  {
10884  VMA_ASSERT(frontNode->free.prev == VMA_NULL);
10885  node->free.prev = VMA_NULL;
10886  node->free.next = frontNode;
10887  frontNode->free.prev = node;
10888  m_FreeList[level].front = node;
10889  }
10890 }
10891 
10892 void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
10893 {
10894  VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
10895 
10896  // It is at the front.
10897  if(node->free.prev == VMA_NULL)
10898  {
10899  VMA_ASSERT(m_FreeList[level].front == node);
10900  m_FreeList[level].front = node->free.next;
10901  }
10902  else
10903  {
10904  Node* const prevFreeNode = node->free.prev;
10905  VMA_ASSERT(prevFreeNode->free.next == node);
10906  prevFreeNode->free.next = node->free.next;
10907  }
10908 
10909  // It is at the back.
10910  if(node->free.next == VMA_NULL)
10911  {
10912  VMA_ASSERT(m_FreeList[level].back == node);
10913  m_FreeList[level].back = node->free.prev;
10914  }
10915  else
10916  {
10917  Node* const nextFreeNode = node->free.next;
10918  VMA_ASSERT(nextFreeNode->free.prev == node);
10919  nextFreeNode->free.prev = node->free.prev;
10920  }
10921 }
10922 
10923 #if VMA_STATS_STRING_ENABLED
10924 void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
10925 {
10926  switch(node->type)
10927  {
10928  case Node::TYPE_FREE:
10929  PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
10930  break;
10931  case Node::TYPE_ALLOCATION:
10932  {
10933  PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
10934  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
10935  if(allocSize < levelNodeSize)
10936  {
10937  PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize);
10938  }
10939  }
10940  break;
10941  case Node::TYPE_SPLIT:
10942  {
10943  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
10944  const Node* const leftChild = node->split.leftChild;
10945  PrintDetailedMapNode(json, leftChild, childrenNodeSize);
10946  const Node* const rightChild = leftChild->buddy;
10947  PrintDetailedMapNode(json, rightChild, childrenNodeSize);
10948  }
10949  break;
10950  default:
10951  VMA_ASSERT(0);
10952  }
10953 }
10954 #endif // #if VMA_STATS_STRING_ENABLED
10955 
10956 
10957 ////////////////////////////////////////////////////////////////////////////////
10958 // class VmaDeviceMemoryBlock
10959 
10960 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
10961  m_pMetadata(VMA_NULL),
10962  m_MemoryTypeIndex(UINT32_MAX),
10963  m_Id(0),
10964  m_hMemory(VK_NULL_HANDLE),
10965  m_MapCount(0),
10966  m_pMappedData(VMA_NULL)
10967 {
10968 }
10969 
10970 void VmaDeviceMemoryBlock::Init(
10971  VmaAllocator hAllocator,
10972  uint32_t newMemoryTypeIndex,
10973  VkDeviceMemory newMemory,
10974  VkDeviceSize newSize,
10975  uint32_t id,
10976  uint32_t algorithm)
10977 {
10978  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
10979 
10980  m_MemoryTypeIndex = newMemoryTypeIndex;
10981  m_Id = id;
10982  m_hMemory = newMemory;
10983 
10984  switch(algorithm)
10985  {
10986  case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
10987  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
10988  break;
10989  case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
10990  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
10991  break;
10992  default:
10993  VMA_ASSERT(0);
10994  // Fall-through.
10995  case 0:
10996  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
10997  }
10998  m_pMetadata->Init(newSize);
10999 }
11000 
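/*
The algorithm passed to Init() comes from the pool creation flags, so a block
uses linear, buddy, or the default generic (best-fit) metadata. A sketch of
selecting the buddy algorithm for a custom pool (memTypeIndex assumed chosen):

    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memTypeIndex;
    poolCreateInfo.flags = VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT;
    VmaPool pool;
    VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
*/
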
11001 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
11002 {
11003  // This is the most important assert in the entire library.
11004  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
11005  VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
11006 
11007  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
11008  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
11009  m_hMemory = VK_NULL_HANDLE;
11010 
11011  vma_delete(allocator, m_pMetadata);
11012  m_pMetadata = VMA_NULL;
11013 }
11014 
11015 bool VmaDeviceMemoryBlock::Validate() const
11016 {
11017  VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
11018  (m_pMetadata->GetSize() != 0));
11019 
11020  return m_pMetadata->Validate();
11021 }
11022 
11023 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
11024 {
11025  void* pData = nullptr;
11026  VkResult res = Map(hAllocator, 1, &pData);
11027  if(res != VK_SUCCESS)
11028  {
11029  return res;
11030  }
11031 
11032  res = m_pMetadata->CheckCorruption(pData);
11033 
11034  Unmap(hAllocator, 1);
11035 
11036  return res;
11037 }
11038 
11039 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
11040 {
11041  if(count == 0)
11042  {
11043  return VK_SUCCESS;
11044  }
11045 
11046  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11047  if(m_MapCount != 0)
11048  {
11049  m_MapCount += count;
11050  VMA_ASSERT(m_pMappedData != VMA_NULL);
11051  if(ppData != VMA_NULL)
11052  {
11053  *ppData = m_pMappedData;
11054  }
11055  return VK_SUCCESS;
11056  }
11057  else
11058  {
11059  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
11060  hAllocator->m_hDevice,
11061  m_hMemory,
11062  0, // offset
11063  VK_WHOLE_SIZE,
11064  0, // flags
11065  &m_pMappedData);
11066  if(result == VK_SUCCESS)
11067  {
11068  if(ppData != VMA_NULL)
11069  {
11070  *ppData = m_pMappedData;
11071  }
11072  m_MapCount = count;
11073  }
11074  return result;
11075  }
11076 }
11077 
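/*
Map()/Unmap() above implement reference-counted persistent mapping: the block
is mapped with vkMapMemory only when m_MapCount goes from 0 to 1 and unmapped
only when it returns to 0, so many allocations in one block can be mapped
concurrently. Through the public API this is what vmaMapMemory/vmaUnmapMemory
do for a block-based allocation - a sketch:

    void* pData = VMA_NULL;
    VkResult res = vmaMapMemory(allocator, alloc, &pData);
    if(res == VK_SUCCESS)
    {
        memcpy(pData, srcData, srcSize); // srcData/srcSize: application data.
        vmaUnmapMemory(allocator, alloc);
    }
*/
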
11078 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
11079 {
11080  if(count == 0)
11081  {
11082  return;
11083  }
11084 
11085  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11086  if(m_MapCount >= count)
11087  {
11088  m_MapCount -= count;
11089  if(m_MapCount == 0)
11090  {
11091  m_pMappedData = VMA_NULL;
11092  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
11093  }
11094  }
11095  else
11096  {
11097  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
11098  }
11099 }
11100 
11101 VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
11102 {
11103  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
11104  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
11105 
11106  void* pData;
11107  VkResult res = Map(hAllocator, 1, &pData);
11108  if(res != VK_SUCCESS)
11109  {
11110  return res;
11111  }
11112 
11113  VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
11114  VmaWriteMagicValue(pData, allocOffset + allocSize);
11115 
11116  Unmap(hAllocator, 1);
11117 
11118  return VK_SUCCESS;
11119 }
11120 
11121 VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
11122 {
11123  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
11124  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
11125 
11126  void* pData;
11127  VkResult res = Map(hAllocator, 1, &pData);
11128  if(res != VK_SUCCESS)
11129  {
11130  return res;
11131  }
11132 
11133  if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
11134  {
11135  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
11136  }
11137  else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
11138  {
11139  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
11140  }
11141 
11142  Unmap(hAllocator, 1);
11143 
11144  return VK_SUCCESS;
11145 }
11146 
11147 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
11148  const VmaAllocator hAllocator,
11149  const VmaAllocation hAllocation,
11150  VkBuffer hBuffer)
11151 {
11152  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
11153  hAllocation->GetBlock() == this);
11154  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
11155  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11156  return hAllocator->GetVulkanFunctions().vkBindBufferMemory(
11157  hAllocator->m_hDevice,
11158  hBuffer,
11159  m_hMemory,
11160  hAllocation->GetOffset());
11161 }
11162 
11163 VkResult VmaDeviceMemoryBlock::BindImageMemory(
11164  const VmaAllocator hAllocator,
11165  const VmaAllocation hAllocation,
11166  VkImage hImage)
11167 {
11168  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
11169  hAllocation->GetBlock() == this);
11170  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
11171  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11172  return hAllocator->GetVulkanFunctions().vkBindImageMemory(
11173  hAllocator->m_hDevice,
11174  hImage,
11175  m_hMemory,
11176  hAllocation->GetOffset());
11177 }
11178 
11179 static void InitStatInfo(VmaStatInfo& outInfo)
11180 {
11181  memset(&outInfo, 0, sizeof(outInfo));
11182  outInfo.allocationSizeMin = UINT64_MAX;
11183  outInfo.unusedRangeSizeMin = UINT64_MAX;
11184 }
11185 
11186 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
11187 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
11188 {
11189  inoutInfo.blockCount += srcInfo.blockCount;
11190  inoutInfo.allocationCount += srcInfo.allocationCount;
11191  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
11192  inoutInfo.usedBytes += srcInfo.usedBytes;
11193  inoutInfo.unusedBytes += srcInfo.unusedBytes;
11194  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
11195  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
11196  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
11197  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
11198 }
11199 
11200 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
11201 {
11202  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
11203  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
11204  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
11205  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
11206 }
11207 
11208 VmaPool_T::VmaPool_T(
11209  VmaAllocator hAllocator,
11210  const VmaPoolCreateInfo& createInfo,
11211  VkDeviceSize preferredBlockSize) :
11212  m_BlockVector(
11213  hAllocator,
11214  createInfo.memoryTypeIndex,
11215  createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
11216  createInfo.minBlockCount,
11217  createInfo.maxBlockCount,
11218  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
11219  createInfo.frameInUseCount,
11220  true, // isCustomPool
11221  createInfo.blockSize != 0, // explicitBlockSize
11222  createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm
11223  m_Id(0)
11224 {
11225 }
11226 
11227 VmaPool_T::~VmaPool_T()
11228 {
11229 }
11230 
11231 #if VMA_STATS_STRING_ENABLED
11232 
11233 #endif // #if VMA_STATS_STRING_ENABLED
11234 
11235 VmaBlockVector::VmaBlockVector(
11236  VmaAllocator hAllocator,
11237  uint32_t memoryTypeIndex,
11238  VkDeviceSize preferredBlockSize,
11239  size_t minBlockCount,
11240  size_t maxBlockCount,
11241  VkDeviceSize bufferImageGranularity,
11242  uint32_t frameInUseCount,
11243  bool isCustomPool,
11244  bool explicitBlockSize,
11245  uint32_t algorithm) :
11246  m_hAllocator(hAllocator),
11247  m_MemoryTypeIndex(memoryTypeIndex),
11248  m_PreferredBlockSize(preferredBlockSize),
11249  m_MinBlockCount(minBlockCount),
11250  m_MaxBlockCount(maxBlockCount),
11251  m_BufferImageGranularity(bufferImageGranularity),
11252  m_FrameInUseCount(frameInUseCount),
11253  m_IsCustomPool(isCustomPool),
11254  m_ExplicitBlockSize(explicitBlockSize),
11255  m_Algorithm(algorithm),
11256  m_HasEmptyBlock(false),
11257  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
11258  m_NextBlockId(0)
11259 {
11260 }
11261 
11262 VmaBlockVector::~VmaBlockVector()
11263 {
11264  for(size_t i = m_Blocks.size(); i--; )
11265  {
11266  m_Blocks[i]->Destroy(m_hAllocator);
11267  vma_delete(m_hAllocator, m_Blocks[i]);
11268  }
11269 }
11270 
11271 VkResult VmaBlockVector::CreateMinBlocks()
11272 {
11273  for(size_t i = 0; i < m_MinBlockCount; ++i)
11274  {
11275  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
11276  if(res != VK_SUCCESS)
11277  {
11278  return res;
11279  }
11280  }
11281  return VK_SUCCESS;
11282 }
11283 
11284 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
11285 {
11286  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
11287 
11288  const size_t blockCount = m_Blocks.size();
11289 
11290  pStats->size = 0;
11291  pStats->unusedSize = 0;
11292  pStats->allocationCount = 0;
11293  pStats->unusedRangeCount = 0;
11294  pStats->unusedRangeSizeMax = 0;
11295  pStats->blockCount = blockCount;
11296 
11297  for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
11298  {
11299  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
11300  VMA_ASSERT(pBlock);
11301  VMA_HEAVY_ASSERT(pBlock->Validate());
11302  pBlock->m_pMetadata->AddPoolStats(*pStats);
11303  }
11304 }
11305 
11306 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
11307 {
11308  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
11309  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
11310  (VMA_DEBUG_MARGIN > 0) &&
11311  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
11312 }
11313 
11314 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
11315 
11316 VkResult VmaBlockVector::Allocate(
11317  VmaPool hCurrentPool,
11318  uint32_t currentFrameIndex,
11319  VkDeviceSize size,
11320  VkDeviceSize alignment,
11321  const VmaAllocationCreateInfo& createInfo,
11322  VmaSuballocationType suballocType,
11323  size_t allocationCount,
11324  VmaAllocation* pAllocations)
11325 {
11326  size_t allocIndex;
11327  VkResult res = VK_SUCCESS;
11328 
11329  {
11330  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
11331  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
11332  {
11333  res = AllocatePage(
11334  hCurrentPool,
11335  currentFrameIndex,
11336  size,
11337  alignment,
11338  createInfo,
11339  suballocType,
11340  pAllocations + allocIndex);
11341  if(res != VK_SUCCESS)
11342  {
11343  break;
11344  }
11345  }
11346  }
11347 
11348  if(res != VK_SUCCESS)
11349  {
11350  // Free all already created allocations.
11351  while(allocIndex--)
11352  {
11353  Free(pAllocations[allocIndex]);
11354  }
11355  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
11356  }
11357 
11358  return res;
11359 }
11360 
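/*
Allocate() above is transactional for multi-allocation requests: pages are
allocated one by one under a single write lock, and if any page fails, all
pages created so far are freed and the output array is zeroed, so the caller
never receives a partially filled result. This is the backend of the public
vmaAllocateMemoryPages() function.
*/
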
11361 VkResult VmaBlockVector::AllocatePage(
11362  VmaPool hCurrentPool,
11363  uint32_t currentFrameIndex,
11364  VkDeviceSize size,
11365  VkDeviceSize alignment,
11366  const VmaAllocationCreateInfo& createInfo,
11367  VmaSuballocationType suballocType,
11368  VmaAllocation* pAllocation)
11369 {
11370  const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
11371  bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
11372  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
11373  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
11374  const bool canCreateNewBlock =
11375  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
11376  (m_Blocks.size() < m_MaxBlockCount);
11377  uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
11378 
11379  // If linearAlgorithm is used, canMakeOtherLost is available only when used as ring buffer.
11380  // Which in turn is available only when maxBlockCount = 1.
11381  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
11382  {
11383  canMakeOtherLost = false;
11384  }
11385 
11386  // Upper address can only be used with linear allocator and within single memory block.
11387  if(isUpperAddress &&
11388  (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
11389  {
11390  return VK_ERROR_FEATURE_NOT_PRESENT;
11391  }
11392 
11393  // Validate strategy.
11394  switch(strategy)
11395  {
11396  case 0:
11397  strategy = VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT;
11398  break;
11399  case VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT:
11400  case VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT:
11401  case VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT:
11402  break;
11403  default:
11404  return VK_ERROR_FEATURE_NOT_PRESENT;
11405  }
11406 
11407  // Early reject: requested allocation size is larger than the maximum block size for this block vector.
11408  if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
11409  {
11410  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11411  }
11412 
11413  /*
11414  Under certain conditions, this whole section can be skipped for optimization, so
11415  we move on directly to trying to allocate with canMakeOtherLost. That's the case
11416  e.g. for custom pools with linear algorithm.
11417  */
11418  if(!canMakeOtherLost || canCreateNewBlock)
11419  {
11420  // 1. Search existing allocations. Try to allocate without making other allocations lost.
11421  VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
11422  allocFlagsCopy &= ~VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT;
11423 
11424  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
11425  {
11426  // Use only last block.
11427  if(!m_Blocks.empty())
11428  {
11429  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
11430  VMA_ASSERT(pCurrBlock);
11431  VkResult res = AllocateFromBlock(
11432  pCurrBlock,
11433  hCurrentPool,
11434  currentFrameIndex,
11435  size,
11436  alignment,
11437  allocFlagsCopy,
11438  createInfo.pUserData,
11439  suballocType,
11440  strategy,
11441  pAllocation);
11442  if(res == VK_SUCCESS)
11443  {
11444  VMA_DEBUG_LOG(" Returned from last block #%u", (uint32_t)(m_Blocks.size() - 1));
11445  return VK_SUCCESS;
11446  }
11447  }
11448  }
11449  else
11450  {
11451  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
11452  {
11453  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
11454  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
11455  {
11456  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11457  VMA_ASSERT(pCurrBlock);
11458  VkResult res = AllocateFromBlock(
11459  pCurrBlock,
11460  hCurrentPool,
11461  currentFrameIndex,
11462  size,
11463  alignment,
11464  allocFlagsCopy,
11465  createInfo.pUserData,
11466  suballocType,
11467  strategy,
11468  pAllocation);
11469  if(res == VK_SUCCESS)
11470  {
11471  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
11472  return VK_SUCCESS;
11473  }
11474  }
11475  }
11476  else // WORST_FIT, FIRST_FIT
11477  {
11478  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
11479  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
11480  {
11481  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11482  VMA_ASSERT(pCurrBlock);
11483  VkResult res = AllocateFromBlock(
11484  pCurrBlock,
11485  hCurrentPool,
11486  currentFrameIndex,
11487  size,
11488  alignment,
11489  allocFlagsCopy,
11490  createInfo.pUserData,
11491  suballocType,
11492  strategy,
11493  pAllocation);
11494  if(res == VK_SUCCESS)
11495  {
11496  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
11497  return VK_SUCCESS;
11498  }
11499  }
11500  }
11501  }
11502 
11503  // 2. Try to create new block.
11504  if(canCreateNewBlock)
11505  {
11506  // Calculate optimal size for new block.
11507  VkDeviceSize newBlockSize = m_PreferredBlockSize;
11508  uint32_t newBlockSizeShift = 0;
11509  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
11510 
11511  if(!m_ExplicitBlockSize)
11512  {
11513  // Allocate 1/8, 1/4, 1/2 as first blocks.
11514  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
11515  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
11516  {
11517  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
11518  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
11519  {
11520  newBlockSize = smallerNewBlockSize;
11521  ++newBlockSizeShift;
11522  }
11523  else
11524  {
11525  break;
11526  }
11527  }
11528  }
11529 
11530  size_t newBlockIndex = 0;
11531  VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
11532  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
11533  if(!m_ExplicitBlockSize)
11534  {
11535  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
11536  {
11537  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
11538  if(smallerNewBlockSize >= size)
11539  {
11540  newBlockSize = smallerNewBlockSize;
11541  ++newBlockSizeShift;
11542  res = CreateBlock(newBlockSize, &newBlockIndex);
11543  }
11544  else
11545  {
11546  break;
11547  }
11548  }
11549  }
11550 
11551  if(res == VK_SUCCESS)
11552  {
11553  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
11554  VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
11555 
11556  res = AllocateFromBlock(
11557  pBlock,
11558  hCurrentPool,
11559  currentFrameIndex,
11560  size,
11561  alignment,
11562  allocFlagsCopy,
11563  createInfo.pUserData,
11564  suballocType,
11565  strategy,
11566  pAllocation);
11567  if(res == VK_SUCCESS)
11568  {
11569  VMA_DEBUG_LOG(" Created new block Size=%llu", newBlockSize);
11570  return VK_SUCCESS;
11571  }
11572  else
11573  {
11574  // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
11575  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11576  }
11577  }
11578  }
11579  }
11580 
11581  // 3. Try to allocate from existing blocks with making other allocations lost.
11582  if(canMakeOtherLost)
11583  {
11584  uint32_t tryIndex = 0;
11585  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
11586  {
11587  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
11588  VmaAllocationRequest bestRequest = {};
11589  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
11590 
11591  // 1. Search existing allocations.
11592  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
11593  {
11594  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
11595  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
11596  {
11597  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11598  VMA_ASSERT(pCurrBlock);
11599  VmaAllocationRequest currRequest = {};
11600  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
11601  currentFrameIndex,
11602  m_FrameInUseCount,
11603  m_BufferImageGranularity,
11604  size,
11605  alignment,
11606  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
11607  suballocType,
11608  canMakeOtherLost,
11609  strategy,
11610  &currRequest))
11611  {
11612  const VkDeviceSize currRequestCost = currRequest.CalcCost();
11613  if(pBestRequestBlock == VMA_NULL ||
11614  currRequestCost < bestRequestCost)
11615  {
11616  pBestRequestBlock = pCurrBlock;
11617  bestRequest = currRequest;
11618  bestRequestCost = currRequestCost;
11619 
11620  if(bestRequestCost == 0)
11621  {
11622  break;
11623  }
11624  }
11625  }
11626  }
11627  }
11628  else // WORST_FIT, FIRST_FIT
11629  {
11630  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
11631  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
11632  {
11633  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11634  VMA_ASSERT(pCurrBlock);
11635  VmaAllocationRequest currRequest = {};
11636  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
11637  currentFrameIndex,
11638  m_FrameInUseCount,
11639  m_BufferImageGranularity,
11640  size,
11641  alignment,
11642  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
11643  suballocType,
11644  canMakeOtherLost,
11645  strategy,
11646  &currRequest))
11647  {
11648  const VkDeviceSize currRequestCost = currRequest.CalcCost();
11649  if(pBestRequestBlock == VMA_NULL ||
11650  currRequestCost < bestRequestCost ||
11651  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
11652  {
11653  pBestRequestBlock = pCurrBlock;
11654  bestRequest = currRequest;
11655  bestRequestCost = currRequestCost;
11656 
11657  if(bestRequestCost == 0 ||
11658  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
11659  {
11660  break;
11661  }
11662  }
11663  }
11664  }
11665  }
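 // Note on cost: a request's cost reflects how much existing data would have
 // to be sacrificed (made lost) to satisfy it. A cost of 0 therefore means the
 // request fits into genuinely free space, which is why both searches above
 // can stop early as soon as they find such a block.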
11666 
11667  if(pBestRequestBlock != VMA_NULL)
11668  {
11669  if(mapped)
11670  {
11671  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
11672  if(res != VK_SUCCESS)
11673  {
11674  return res;
11675  }
11676  }
11677 
11678  if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
11679  currentFrameIndex,
11680  m_FrameInUseCount,
11681  &bestRequest))
11682  {
11683  // We no longer have an empty block.
11684  if(pBestRequestBlock->m_pMetadata->IsEmpty())
11685  {
11686  m_HasEmptyBlock = false;
11687  }
11688  // Allocate from this pBlock.
11689  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
11690  pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, isUpperAddress, *pAllocation);
11691  (*pAllocation)->InitBlockAllocation(
11692  hCurrentPool,
11693  pBestRequestBlock,
11694  bestRequest.offset,
11695  alignment,
11696  size,
11697  suballocType,
11698  mapped,
11699  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
11700  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
11701  VMA_DEBUG_LOG("    Returned from existing block");
11702  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
11703  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
11704  {
11705  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
11706  }
11707  if(IsCorruptionDetectionEnabled())
11708  {
11709  VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
11710  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
11711  }
11712  return VK_SUCCESS;
11713  }
11714  // else: Some allocations must have been touched while we were here. Next try.
11715  }
11716  else
11717  {
11718  // Could not find place in any of the blocks - break outer loop.
11719  break;
11720  }
11721  }
11722  /* Maximum number of tries exceeded - a very unlikely event when many other
11723  threads are simultaneously touching allocations, making it impossible to make
11724  them lost at the same time as we try to allocate. */
11725  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
11726  {
11727  return VK_ERROR_TOO_MANY_OBJECTS;
11728  }
11729  }
11730 
11731  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11732 }
11733 
11734 void VmaBlockVector::Free(
11735  VmaAllocation hAllocation)
11736 {
11737  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
11738 
11739  // Scope for lock.
11740  {
11741  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
11742 
11743  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
11744 
11745  if(IsCorruptionDetectionEnabled())
11746  {
11747  VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
11748  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
11749  }
11750 
11751  if(hAllocation->IsPersistentMap())
11752  {
11753  pBlock->Unmap(m_hAllocator, 1);
11754  }
11755 
11756  pBlock->m_pMetadata->Free(hAllocation);
11757  VMA_HEAVY_ASSERT(pBlock->Validate());
11758 
11759  VMA_DEBUG_LOG("  Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
11760 
11761  // pBlock became empty after this deallocation.
11762  if(pBlock->m_pMetadata->IsEmpty())
11763  {
11764  // We already have an empty block - we don't want two, so delete this one.
11765  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
11766  {
11767  pBlockToDelete = pBlock;
11768  Remove(pBlock);
11769  }
11770  // We now have first empty block.
11771  else
11772  {
11773  m_HasEmptyBlock = true;
11774  }
11775  }
11776  // pBlock didn't become empty, but we have another empty block - find and free that one.
11777  // (This is optional, heuristics.)
11778  else if(m_HasEmptyBlock)
11779  {
11780  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
11781  if(pLastBlock->m_pMetadata->IsEmpty() && m_Blocks.size() > m_MinBlockCount)
11782  {
11783  pBlockToDelete = pLastBlock;
11784  m_Blocks.pop_back();
11785  m_HasEmptyBlock = false;
11786  }
11787  }
11788 
11789  IncrementallySortBlocks();
11790  }
11791 
11792  // Destruction of an empty block. Deferred until this point, outside of the
11793  // mutex lock, for performance reasons.
11794  if(pBlockToDelete != VMA_NULL)
11795  {
11796  VMA_DEBUG_LOG("    Deleted empty block");
11797  pBlockToDelete->Destroy(m_hAllocator);
11798  vma_delete(m_hAllocator, pBlockToDelete);
11799  }
11800 }
11801 
11802 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
11803 {
11804  VkDeviceSize result = 0;
11805  for(size_t i = m_Blocks.size(); i--; )
11806  {
11807  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
11808  if(result >= m_PreferredBlockSize)
11809  {
11810  break;
11811  }
11812  }
11813  return result;
11814 }
11815 
11816 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
11817 {
11818  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
11819  {
11820  if(m_Blocks[blockIndex] == pBlock)
11821  {
11822  VmaVectorRemove(m_Blocks, blockIndex);
11823  return;
11824  }
11825  }
11826  VMA_ASSERT(0);
11827 }
11828 
11829 void VmaBlockVector::IncrementallySortBlocks()
11830 {
11831  if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
11832  {
11833  // Bubble sort only until first swap.
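 // Illustrative: with per-block free sizes {8, 4, 16} one pass swaps the first
 // out-of-order pair and returns, yielding {4, 8, 16}. Called repeatedly
 // (e.g. after every Free()), this keeps m_Blocks approximately sorted by
 // ascending free size at O(n) per call, without ever paying for a full sort.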
11834  for(size_t i = 1; i < m_Blocks.size(); ++i)
11835  {
11836  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
11837  {
11838  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
11839  return;
11840  }
11841  }
11842  }
11843 }
11844 
11845 VkResult VmaBlockVector::AllocateFromBlock(
11846  VmaDeviceMemoryBlock* pBlock,
11847  VmaPool hCurrentPool,
11848  uint32_t currentFrameIndex,
11849  VkDeviceSize size,
11850  VkDeviceSize alignment,
11851  VmaAllocationCreateFlags allocFlags,
11852  void* pUserData,
11853  VmaSuballocationType suballocType,
11854  uint32_t strategy,
11855  VmaAllocation* pAllocation)
11856 {
11857  VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
11858  const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
11859  const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
11860  const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
11861 
11862  VmaAllocationRequest currRequest = {};
11863  if(pBlock->m_pMetadata->CreateAllocationRequest(
11864  currentFrameIndex,
11865  m_FrameInUseCount,
11866  m_BufferImageGranularity,
11867  size,
11868  alignment,
11869  isUpperAddress,
11870  suballocType,
11871  false, // canMakeOtherLost
11872  strategy,
11873  &currRequest))
11874  {
11875  // Allocate from pCurrBlock.
11876  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
11877 
11878  if(mapped)
11879  {
11880  VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
11881  if(res != VK_SUCCESS)
11882  {
11883  return res;
11884  }
11885  }
11886 
11887  // We no longer have an empty block.
11888  if(pBlock->m_pMetadata->IsEmpty())
11889  {
11890  m_HasEmptyBlock = false;
11891  }
11892 
11893  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
11894  pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, isUpperAddress, *pAllocation);
11895  (*pAllocation)->InitBlockAllocation(
11896  hCurrentPool,
11897  pBlock,
11898  currRequest.offset,
11899  alignment,
11900  size,
11901  suballocType,
11902  mapped,
11903  (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
11904  VMA_HEAVY_ASSERT(pBlock->Validate());
11905  (*pAllocation)->SetUserData(m_hAllocator, pUserData);
11906  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
11907  {
11908  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
11909  }
11910  if(IsCorruptionDetectionEnabled())
11911  {
11912  VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
11913  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
11914  }
11915  return VK_SUCCESS;
11916  }
11917  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11918 }
11919 
11920 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
11921 {
11922  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
11923  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
11924  allocInfo.allocationSize = blockSize;
11925  VkDeviceMemory mem = VK_NULL_HANDLE;
11926  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
11927  if(res < 0)
11928  {
11929  return res;
11930  }
11931 
11932  // New VkDeviceMemory successfully created.
11933 
11934  // Create a new block object for it.
11935  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
11936  pBlock->Init(
11937  m_hAllocator,
11938  m_MemoryTypeIndex,
11939  mem,
11940  allocInfo.allocationSize,
11941  m_NextBlockId++,
11942  m_Algorithm);
11943 
11944  m_Blocks.push_back(pBlock);
11945  if(pNewBlockIndex != VMA_NULL)
11946  {
11947  *pNewBlockIndex = m_Blocks.size() - 1;
11948  }
11949 
11950  return VK_SUCCESS;
11951 }
11952 
11953 void VmaBlockVector::ApplyDefragmentationMovesCpu(
11954  class VmaBlockVectorDefragmentationContext* pDefragCtx,
11955  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves)
11956 {
11957  const size_t blockCount = m_Blocks.size();
11958  const bool isNonCoherent = m_hAllocator->IsMemoryTypeNonCoherent(m_MemoryTypeIndex);
11959 
11960  enum BLOCK_FLAG
11961  {
11962  BLOCK_FLAG_USED = 0x00000001,
11963  BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION = 0x00000002,
11964  };
11965 
11966  struct BlockInfo
11967  {
11968  uint32_t flags;
11969  void* pMappedData;
11970  };
11971  VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> >
11972  blockInfo(blockCount, VmaStlAllocator<BlockInfo>(m_hAllocator->GetAllocationCallbacks()));
11973  memset(blockInfo.data(), 0, blockCount * sizeof(BlockInfo));
11974 
11975  // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
11976  const size_t moveCount = moves.size();
11977  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
11978  {
11979  const VmaDefragmentationMove& move = moves[moveIndex];
11980  blockInfo[move.srcBlockIndex].flags |= BLOCK_FLAG_USED;
11981  blockInfo[move.dstBlockIndex].flags |= BLOCK_FLAG_USED;
11982  }
11983 
11984  VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
11985 
11986  // Go over all blocks. Get mapped pointer or map if necessary.
11987  for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
11988  {
11989  BlockInfo& currBlockInfo = blockInfo[blockIndex];
11990  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
11991  if((currBlockInfo.flags & BLOCK_FLAG_USED) != 0)
11992  {
11993  currBlockInfo.pMappedData = pBlock->GetMappedData();
11994  // It is not originally mapped - map it.
11995  if(currBlockInfo.pMappedData == VMA_NULL)
11996  {
11997  pDefragCtx->res = pBlock->Map(m_hAllocator, 1, &currBlockInfo.pMappedData);
11998  if(pDefragCtx->res == VK_SUCCESS)
11999  {
12000  currBlockInfo.flags |= BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION;
12001  }
12002  }
12003  }
12004  }
12005 
12006  // Go over all moves. Do actual data transfer.
12007  if(pDefragCtx->res == VK_SUCCESS)
12008  {
12009  const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
12010  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
12011 
12012  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
12013  {
12014  const VmaDefragmentationMove& move = moves[moveIndex];
12015 
12016  const BlockInfo& srcBlockInfo = blockInfo[move.srcBlockIndex];
12017  const BlockInfo& dstBlockInfo = blockInfo[move.dstBlockIndex];
12018 
12019  VMA_ASSERT(srcBlockInfo.pMappedData && dstBlockInfo.pMappedData);
12020 
12021  // Invalidate source.
12022  if(isNonCoherent)
12023  {
12024  VmaDeviceMemoryBlock* const pSrcBlock = m_Blocks[move.srcBlockIndex];
12025  memRange.memory = pSrcBlock->GetDeviceMemory();
12026  memRange.offset = VmaAlignDown(move.srcOffset, nonCoherentAtomSize);
12027  memRange.size = VMA_MIN(
12028  VmaAlignUp(move.size + (move.srcOffset - memRange.offset), nonCoherentAtomSize),
12029  pSrcBlock->m_pMetadata->GetSize() - memRange.offset);
12030  (*m_hAllocator->GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
12031  }
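 // Illustrative: with nonCoherentAtomSize = 64, srcOffset = 100 and
 // move.size = 50, the invalidated range is [64, 192): the offset is aligned
 // down to 64 and the length (50 + 36 bytes of slack) is aligned up to 128,
 // clamped so it never extends past the end of the block.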
12032 
12033  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
12034  memmove(
12035  reinterpret_cast<char*>(dstBlockInfo.pMappedData) + move.dstOffset,
12036  reinterpret_cast<char*>(srcBlockInfo.pMappedData) + move.srcOffset,
12037  static_cast<size_t>(move.size));
12038 
12039  if(IsCorruptionDetectionEnabled())
12040  {
12041  VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset - VMA_DEBUG_MARGIN);
12042  VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset + move.size);
12043  }
12044 
12045  // Flush destination.
12046  if(isNonCoherent)
12047  {
12048  VmaDeviceMemoryBlock* const pDstBlock = m_Blocks[move.dstBlockIndex];
12049  memRange.memory = pDstBlock->GetDeviceMemory();
12050  memRange.offset = VmaAlignDown(move.dstOffset, nonCoherentAtomSize);
12051  memRange.size = VMA_MIN(
12052  VmaAlignUp(move.size + (move.dstOffset - memRange.offset), nonCoherentAtomSize),
12053  pDstBlock->m_pMetadata->GetSize() - memRange.offset);
12054  (*m_hAllocator->GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
12055  }
12056  }
12057  }
12058 
12059  // Go over all blocks in reverse order. Unmap those that were mapped just for defragmentation.
12060  // This is done regardless of whether pCtx->res == VK_SUCCESS.
12061  for(size_t blockIndex = blockCount; blockIndex--; )
12062  {
12063  const BlockInfo& currBlockInfo = blockInfo[blockIndex];
12064  if((currBlockInfo.flags & BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION) != 0)
12065  {
12066  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
12067  pBlock->Unmap(m_hAllocator, 1);
12068  }
12069  }
12070 }
12071 
12072 void VmaBlockVector::ApplyDefragmentationMovesGpu(
12073  class VmaBlockVectorDefragmentationContext* pDefragCtx,
12074  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
12075  VkCommandBuffer commandBuffer)
12076 {
12077  const size_t blockCount = m_Blocks.size();
12078 
12079  pDefragCtx->blockContexts.resize(blockCount);
12080  memset(pDefragCtx->blockContexts.data(), 0, blockCount * sizeof(VmaBlockDefragmentationContext));
12081 
12082  // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
12083  const size_t moveCount = moves.size();
12084  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
12085  {
12086  const VmaDefragmentationMove& move = moves[moveIndex];
12087  pDefragCtx->blockContexts[move.srcBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
12088  pDefragCtx->blockContexts[move.dstBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
12089  }
12090 
12091  VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
12092 
12093  // Go over all blocks. Create and bind buffer for whole block if necessary.
12094  {
12095  VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
12096  bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT |
12097  VK_BUFFER_USAGE_TRANSFER_DST_BIT;
12098 
12099  for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
12100  {
12101  VmaBlockDefragmentationContext& currBlockCtx = pDefragCtx->blockContexts[blockIndex];
12102  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
12103  if((currBlockCtx.flags & VmaBlockDefragmentationContext::BLOCK_FLAG_USED) != 0)
12104  {
12105  bufCreateInfo.size = pBlock->m_pMetadata->GetSize();
12106  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkCreateBuffer)(
12107  m_hAllocator->m_hDevice, &bufCreateInfo, m_hAllocator->GetAllocationCallbacks(), &currBlockCtx.hBuffer);
12108  if(pDefragCtx->res == VK_SUCCESS)
12109  {
12110  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkBindBufferMemory)(
12111  m_hAllocator->m_hDevice, currBlockCtx.hBuffer, pBlock->GetDeviceMemory(), 0);
12112  }
12113  }
12114  }
12115  }
12116 
12117  // Go over all moves. Post data transfer commands to command buffer.
12118  if(pDefragCtx->res == VK_SUCCESS)
12119  {
12120  const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
12121  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
12122 
12123  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
12124  {
12125  const VmaDefragmentationMove& move = moves[moveIndex];
12126 
12127  const VmaBlockDefragmentationContext& srcBlockCtx = pDefragCtx->blockContexts[move.srcBlockIndex];
12128  const VmaBlockDefragmentationContext& dstBlockCtx = pDefragCtx->blockContexts[move.dstBlockIndex];
12129 
12130  VMA_ASSERT(srcBlockCtx.hBuffer && dstBlockCtx.hBuffer);
12131 
12132  VkBufferCopy region = {
12133  move.srcOffset,
12134  move.dstOffset,
12135  move.size };
12136  (*m_hAllocator->GetVulkanFunctions().vkCmdCopyBuffer)(
12137  commandBuffer, srcBlockCtx.hBuffer, dstBlockCtx.hBuffer, 1, &region);
12138  }
12139  }
12140 
12141  // Save buffers to defrag context for later destruction.
12142  if(pDefragCtx->res == VK_SUCCESS && moveCount > 0)
12143  {
12144  pDefragCtx->res = VK_NOT_READY;
12145  }
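 // VK_NOT_READY propagates to the caller: the recorded vkCmdCopyBuffer
 // commands still have to be submitted and must finish executing before
 // DefragmentationEnd() destroys these temporary buffers.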
12146 }
12147 
12148 void VmaBlockVector::FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats)
12149 {
12150  m_HasEmptyBlock = false;
12151  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
12152  {
12153  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
12154  if(pBlock->m_pMetadata->IsEmpty())
12155  {
12156  if(m_Blocks.size() > m_MinBlockCount)
12157  {
12158  if(pDefragmentationStats != VMA_NULL)
12159  {
12160  ++pDefragmentationStats->deviceMemoryBlocksFreed;
12161  pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
12162  }
12163 
12164  VmaVectorRemove(m_Blocks, blockIndex);
12165  pBlock->Destroy(m_hAllocator);
12166  vma_delete(m_hAllocator, pBlock);
12167  }
12168  else
12169  {
12170  m_HasEmptyBlock = true;
12171  }
12172  }
12173  }
12174 }
12175 
12176 #if VMA_STATS_STRING_ENABLED
12177 
12178 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
12179 {
12180  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12181 
12182  json.BeginObject();
12183 
12184  if(m_IsCustomPool)
12185  {
12186  json.WriteString("MemoryTypeIndex");
12187  json.WriteNumber(m_MemoryTypeIndex);
12188 
12189  json.WriteString("BlockSize");
12190  json.WriteNumber(m_PreferredBlockSize);
12191 
12192  json.WriteString("BlockCount");
12193  json.BeginObject(true);
12194  if(m_MinBlockCount > 0)
12195  {
12196  json.WriteString("Min");
12197  json.WriteNumber((uint64_t)m_MinBlockCount);
12198  }
12199  if(m_MaxBlockCount < SIZE_MAX)
12200  {
12201  json.WriteString("Max");
12202  json.WriteNumber((uint64_t)m_MaxBlockCount);
12203  }
12204  json.WriteString("Cur");
12205  json.WriteNumber((uint64_t)m_Blocks.size());
12206  json.EndObject();
12207 
12208  if(m_FrameInUseCount > 0)
12209  {
12210  json.WriteString("FrameInUseCount");
12211  json.WriteNumber(m_FrameInUseCount);
12212  }
12213 
12214  if(m_Algorithm != 0)
12215  {
12216  json.WriteString("Algorithm");
12217  json.WriteString(VmaAlgorithmToStr(m_Algorithm));
12218  }
12219  }
12220  else
12221  {
12222  json.WriteString("PreferredBlockSize");
12223  json.WriteNumber(m_PreferredBlockSize);
12224  }
12225 
12226  json.WriteString("Blocks");
12227  json.BeginObject();
12228  for(size_t i = 0; i < m_Blocks.size(); ++i)
12229  {
12230  json.BeginString();
12231  json.ContinueString(m_Blocks[i]->GetId());
12232  json.EndString();
12233 
12234  m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
12235  }
12236  json.EndObject();
12237 
12238  json.EndObject();
12239 }
12240 
12241 #endif // #if VMA_STATS_STRING_ENABLED
12242 
12243 void VmaBlockVector::Defragment(
12244  class VmaBlockVectorDefragmentationContext* pCtx,
12245  VmaDefragmentationStats* pStats,
12246  VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
12247  VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
12248  VkCommandBuffer commandBuffer)
12249 {
12250  pCtx->res = VK_SUCCESS;
12251 
12252  const VkMemoryPropertyFlags memPropFlags =
12253  m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags;
12254  const bool isHostVisible = (memPropFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0;
12255  const bool isHostCoherent = (memPropFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0;
12256 
12257  const bool canDefragmentOnCpu = maxCpuBytesToMove > 0 && maxCpuAllocationsToMove > 0 &&
12258  isHostVisible;
12259  const bool canDefragmentOnGpu = maxGpuBytesToMove > 0 && maxGpuAllocationsToMove > 0 &&
12260  (VMA_DEBUG_DETECT_CORRUPTION == 0 || !(isHostVisible && isHostCoherent));
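 // The GPU path is excluded for host-visible + host-coherent memory when
 // corruption detection is on, presumably because the magic values around
 // allocations are written and validated through mapped CPU pointers.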
12261 
12262  // There are options to defragment this memory type.
12263  if(canDefragmentOnCpu || canDefragmentOnGpu)
12264  {
12265  bool defragmentOnGpu;
12266  // There is only one option to defragment this memory type.
12267  if(canDefragmentOnGpu != canDefragmentOnCpu)
12268  {
12269  defragmentOnGpu = canDefragmentOnGpu;
12270  }
12271  // Both options are available: use heuristics to choose the better one.
12272  else
12273  {
12274  defragmentOnGpu = (memPropFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0 ||
12275  m_hAllocator->IsIntegratedGpu();
12276  }
12277 
12278  bool overlappingMoveSupported = !defragmentOnGpu;
12279 
12280  if(m_hAllocator->m_UseMutex)
12281  {
12282  m_Mutex.LockWrite();
12283  pCtx->mutexLocked = true;
12284  }
12285 
12286  pCtx->Begin(overlappingMoveSupported);
12287 
12288  // Defragment.
12289 
12290  const VkDeviceSize maxBytesToMove = defragmentOnGpu ? maxGpuBytesToMove : maxCpuBytesToMove;
12291  const uint32_t maxAllocationsToMove = defragmentOnGpu ? maxGpuAllocationsToMove : maxCpuAllocationsToMove;
12292  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> > moves =
12293  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >(VmaStlAllocator<VmaDefragmentationMove>(m_hAllocator->GetAllocationCallbacks()));
12294  pCtx->res = pCtx->GetAlgorithm()->Defragment(moves, maxBytesToMove, maxAllocationsToMove);
12295 
12296  // Accumulate statistics.
12297  if(pStats != VMA_NULL)
12298  {
12299  const VkDeviceSize bytesMoved = pCtx->GetAlgorithm()->GetBytesMoved();
12300  const uint32_t allocationsMoved = pCtx->GetAlgorithm()->GetAllocationsMoved();
12301  pStats->bytesMoved += bytesMoved;
12302  pStats->allocationsMoved += allocationsMoved;
12303  VMA_ASSERT(bytesMoved <= maxBytesToMove);
12304  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
12305  if(defragmentOnGpu)
12306  {
12307  maxGpuBytesToMove -= bytesMoved;
12308  maxGpuAllocationsToMove -= allocationsMoved;
12309  }
12310  else
12311  {
12312  maxCpuBytesToMove -= bytesMoved;
12313  maxCpuAllocationsToMove -= allocationsMoved;
12314  }
12315  }
12316 
12317  if(pCtx->res >= VK_SUCCESS)
12318  {
12319  if(defragmentOnGpu)
12320  {
12321  ApplyDefragmentationMovesGpu(pCtx, moves, commandBuffer);
12322  }
12323  else
12324  {
12325  ApplyDefragmentationMovesCpu(pCtx, moves);
12326  }
12327  }
12328  }
12329 }
12330 
12331 void VmaBlockVector::DefragmentationEnd(
12332  class VmaBlockVectorDefragmentationContext* pCtx,
12333  VmaDefragmentationStats* pStats)
12334 {
12335  // Destroy buffers.
12336  for(size_t blockIndex = pCtx->blockContexts.size(); blockIndex--; )
12337  {
12338  VmaBlockDefragmentationContext& blockCtx = pCtx->blockContexts[blockIndex];
12339  if(blockCtx.hBuffer)
12340  {
12341  (*m_hAllocator->GetVulkanFunctions().vkDestroyBuffer)(
12342  m_hAllocator->m_hDevice, blockCtx.hBuffer, m_hAllocator->GetAllocationCallbacks());
12343  }
12344  }
12345 
12346  if(pCtx->res >= VK_SUCCESS)
12347  {
12348  FreeEmptyBlocks(pStats);
12349  }
12350 
12351  if(pCtx->mutexLocked)
12352  {
12353  VMA_ASSERT(m_hAllocator->m_UseMutex);
12354  m_Mutex.UnlockWrite();
12355  }
12356 }
12357 
12358 size_t VmaBlockVector::CalcAllocationCount() const
12359 {
12360  size_t result = 0;
12361  for(size_t i = 0; i < m_Blocks.size(); ++i)
12362  {
12363  result += m_Blocks[i]->m_pMetadata->GetAllocationCount();
12364  }
12365  return result;
12366 }
12367 
12368 bool VmaBlockVector::IsBufferImageGranularityConflictPossible() const
12369 {
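 // bufferImageGranularity == 1 means the device imposes no extra alignment
 // between linear and non-linear resources placed in the same memory, so a
 // granularity conflict is impossible by definition.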
12370  if(m_BufferImageGranularity == 1)
12371  {
12372  return false;
12373  }
12374  VmaSuballocationType lastSuballocType = VMA_SUBALLOCATION_TYPE_FREE;
12375  for(size_t i = 0, count = m_Blocks.size(); i < count; ++i)
12376  {
12377  VmaDeviceMemoryBlock* const pBlock = m_Blocks[i];
12378  VMA_ASSERT(m_Algorithm == 0);
12379  VmaBlockMetadata_Generic* const pMetadata = (VmaBlockMetadata_Generic*)pBlock->m_pMetadata;
12380  if(pMetadata->IsBufferImageGranularityConflictPossible(m_BufferImageGranularity, lastSuballocType))
12381  {
12382  return true;
12383  }
12384  }
12385  return false;
12386 }
12387 
12388 void VmaBlockVector::MakePoolAllocationsLost(
12389  uint32_t currentFrameIndex,
12390  size_t* pLostAllocationCount)
12391 {
12392  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
12393  size_t lostAllocationCount = 0;
12394  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12395  {
12396  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
12397  VMA_ASSERT(pBlock);
12398  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
12399  }
12400  if(pLostAllocationCount != VMA_NULL)
12401  {
12402  *pLostAllocationCount = lostAllocationCount;
12403  }
12404 }
12405 
12406 VkResult VmaBlockVector::CheckCorruption()
12407 {
12408  if(!IsCorruptionDetectionEnabled())
12409  {
12410  return VK_ERROR_FEATURE_NOT_PRESENT;
12411  }
12412 
12413  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12414  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12415  {
12416  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
12417  VMA_ASSERT(pBlock);
12418  VkResult res = pBlock->CheckCorruption(m_hAllocator);
12419  if(res != VK_SUCCESS)
12420  {
12421  return res;
12422  }
12423  }
12424  return VK_SUCCESS;
12425 }
12426 
12427 void VmaBlockVector::AddStats(VmaStats* pStats)
12428 {
12429  const uint32_t memTypeIndex = m_MemoryTypeIndex;
12430  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
12431 
12432  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12433 
12434  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12435  {
12436  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
12437  VMA_ASSERT(pBlock);
12438  VMA_HEAVY_ASSERT(pBlock->Validate());
12439  VmaStatInfo allocationStatInfo;
12440  pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
12441  VmaAddStatInfo(pStats->total, allocationStatInfo);
12442  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
12443  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
12444  }
12445 }
12446 
12447 ////////////////////////////////////////////////////////////////////////////////
12448 // VmaDefragmentationAlgorithm_Generic members definition
12449 
12450 VmaDefragmentationAlgorithm_Generic::VmaDefragmentationAlgorithm_Generic(
12451  VmaAllocator hAllocator,
12452  VmaBlockVector* pBlockVector,
12453  uint32_t currentFrameIndex,
12454  bool overlappingMoveSupported) :
12455  VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
12456  m_AllAllocations(false),
12457  m_AllocationCount(0),
12458  m_BytesMoved(0),
12459  m_AllocationsMoved(0),
12460  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
12461 {
12462  // Create block info for each block.
12463  const size_t blockCount = m_pBlockVector->m_Blocks.size();
12464  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
12465  {
12466  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
12467  pBlockInfo->m_OriginalBlockIndex = blockIndex;
12468  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
12469  m_Blocks.push_back(pBlockInfo);
12470  }
12471 
12472  // Sort them by m_pBlock pointer value.
12473  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
12474 }
12475 
12476 VmaDefragmentationAlgorithm_Generic::~VmaDefragmentationAlgorithm_Generic()
12477 {
12478  for(size_t i = m_Blocks.size(); i--; )
12479  {
12480  vma_delete(m_hAllocator, m_Blocks[i]);
12481  }
12482 }
12483 
12484 void VmaDefragmentationAlgorithm_Generic::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
12485 {
12486  // Now that we are inside VmaBlockVector::m_Mutex, we can make the final check whether this allocation was lost.
12487  if(hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
12488  {
12489  VmaDeviceMemoryBlock* pBlock = hAlloc->GetBlock();
12490  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
12491  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
12492  {
12493  AllocationInfo allocInfo = AllocationInfo(hAlloc, pChanged);
12494  (*it)->m_Allocations.push_back(allocInfo);
12495  }
12496  else
12497  {
12498  VMA_ASSERT(0);
12499  }
12500 
12501  ++m_AllocationCount;
12502  }
12503 }
12504 
12505 VkResult VmaDefragmentationAlgorithm_Generic::DefragmentRound(
12506  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
12507  VkDeviceSize maxBytesToMove,
12508  uint32_t maxAllocationsToMove)
12509 {
12510  if(m_Blocks.empty())
12511  {
12512  return VK_SUCCESS;
12513  }
12514 
12515  // This is a choice based on research.
12516  // Option 1:
12517  uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT;
12518  // Option 2:
12519  //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT;
12520  // Option 3:
12521  //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT;
12522 
12523  size_t srcBlockMinIndex = 0;
12524  // With FAST_ALGORITHM, move allocations only from the last of the blocks that contain non-movable allocations.
12525  /*
12526  if(m_AlgorithmFlags & VMA_DEFRAGMENTATION_FAST_ALGORITHM_BIT)
12527  {
12528  const size_t blocksWithNonMovableCount = CalcBlocksWithNonMovableCount();
12529  if(blocksWithNonMovableCount > 0)
12530  {
12531  srcBlockMinIndex = blocksWithNonMovableCount - 1;
12532  }
12533  }
12534  */
12535 
12536  size_t srcBlockIndex = m_Blocks.size() - 1;
12537  size_t srcAllocIndex = SIZE_MAX;
12538  for(;;)
12539  {
12540  // 1. Find next allocation to move.
12541  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
12542  // 1.2. Then start from last to first m_Allocations.
12543  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
12544  {
12545  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
12546  {
12547  // Finished: no more allocations to process.
12548  if(srcBlockIndex == srcBlockMinIndex)
12549  {
12550  return VK_SUCCESS;
12551  }
12552  else
12553  {
12554  --srcBlockIndex;
12555  srcAllocIndex = SIZE_MAX;
12556  }
12557  }
12558  else
12559  {
12560  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
12561  }
12562  }
12563 
12564  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
12565  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
12566 
12567  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
12568  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
12569  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
12570  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
12571 
12572  // 2. Try to find new place for this allocation in preceding or current block.
12573  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
12574  {
12575  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
12576  VmaAllocationRequest dstAllocRequest;
12577  if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
12578  m_CurrentFrameIndex,
12579  m_pBlockVector->GetFrameInUseCount(),
12580  m_pBlockVector->GetBufferImageGranularity(),
12581  size,
12582  alignment,
12583  false, // upperAddress
12584  suballocType,
12585  false, // canMakeOtherLost
12586  strategy,
12587  &dstAllocRequest) &&
12588  MoveMakesSense(
12589  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
12590  {
12591  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
12592 
12593  // Reached limit on number of allocations or bytes to move.
12594  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
12595  (m_BytesMoved + size > maxBytesToMove))
12596  {
12597  return VK_SUCCESS;
12598  }
12599 
12600  VmaDefragmentationMove move;
12601  move.srcBlockIndex = pSrcBlockInfo->m_OriginalBlockIndex;
12602  move.dstBlockIndex = pDstBlockInfo->m_OriginalBlockIndex;
12603  move.srcOffset = srcOffset;
12604  move.dstOffset = dstAllocRequest.offset;
12605  move.size = size;
12606  moves.push_back(move);
12607 
12608  pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
12609  dstAllocRequest,
12610  suballocType,
12611  size,
12612  false, // upperAddress
12613  allocInfo.m_hAllocation);
12614  pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);
12615 
12616  allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
12617 
12618  if(allocInfo.m_pChanged != VMA_NULL)
12619  {
12620  *allocInfo.m_pChanged = VK_TRUE;
12621  }
12622 
12623  ++m_AllocationsMoved;
12624  m_BytesMoved += size;
12625 
12626  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
12627 
12628  break;
12629  }
12630  }
12631 
12632  // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round.
12633 
12634  if(srcAllocIndex > 0)
12635  {
12636  --srcAllocIndex;
12637  }
12638  else
12639  {
12640  if(srcBlockIndex > 0)
12641  {
12642  --srcBlockIndex;
12643  srcAllocIndex = SIZE_MAX;
12644  }
12645  else
12646  {
12647  return VK_SUCCESS;
12648  }
12649  }
12650  }
12651 }
12652 
12653 size_t VmaDefragmentationAlgorithm_Generic::CalcBlocksWithNonMovableCount() const
12654 {
12655  size_t result = 0;
12656  for(size_t i = 0; i < m_Blocks.size(); ++i)
12657  {
12658  if(m_Blocks[i]->m_HasNonMovableAllocations)
12659  {
12660  ++result;
12661  }
12662  }
12663  return result;
12664 }
12665 
12666 VkResult VmaDefragmentationAlgorithm_Generic::Defragment(
12667  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
12668  VkDeviceSize maxBytesToMove,
12669  uint32_t maxAllocationsToMove)
12670 {
12671  if(!m_AllAllocations && m_AllocationCount == 0)
12672  {
12673  return VK_SUCCESS;
12674  }
12675 
12676  const size_t blockCount = m_Blocks.size();
12677  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
12678  {
12679  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
12680 
12681  if(m_AllAllocations)
12682  {
12683  VmaBlockMetadata_Generic* pMetadata = (VmaBlockMetadata_Generic*)pBlockInfo->m_pBlock->m_pMetadata;
12684  for(VmaSuballocationList::const_iterator it = pMetadata->m_Suballocations.begin();
12685  it != pMetadata->m_Suballocations.end();
12686  ++it)
12687  {
12688  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
12689  {
12690  AllocationInfo allocInfo = AllocationInfo(it->hAllocation, VMA_NULL);
12691  pBlockInfo->m_Allocations.push_back(allocInfo);
12692  }
12693  }
12694  }
12695 
12696  pBlockInfo->CalcHasNonMovableAllocations();
12697 
12698  // This is a choice based on research.
12699  // Option 1:
12700  pBlockInfo->SortAllocationsByOffsetDescending();
12701  // Option 2:
12702  //pBlockInfo->SortAllocationsBySizeDescending();
12703  }
12704 
12705  // Sort m_Blocks this time by the main criterion: from most "destination" to most "source" blocks.
12706  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
12707 
12708  // This is a choice based on research.
12709  const uint32_t roundCount = 2;
12710 
12711  // Execute defragmentation rounds (the main part).
12712  VkResult result = VK_SUCCESS;
12713  for(uint32_t round = 0; (round < roundCount) && (result == VK_SUCCESS); ++round)
12714  {
12715  result = DefragmentRound(moves, maxBytesToMove, maxAllocationsToMove);
12716  }
12717 
12718  return result;
12719 }
12720 
12721 bool VmaDefragmentationAlgorithm_Generic::MoveMakesSense(
12722  size_t dstBlockIndex, VkDeviceSize dstOffset,
12723  size_t srcBlockIndex, VkDeviceSize srcOffset)
12724 {
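 // A move "makes sense" only if it transports the allocation strictly towards
 // the front: to an earlier block, or to a lower offset within the same block.
 // This asymmetry guarantees forward progress and termination of the rounds.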
12725  if(dstBlockIndex < srcBlockIndex)
12726  {
12727  return true;
12728  }
12729  if(dstBlockIndex > srcBlockIndex)
12730  {
12731  return false;
12732  }
12733  if(dstOffset < srcOffset)
12734  {
12735  return true;
12736  }
12737  return false;
12738 }
12739 
12740 ////////////////////////////////////////////////////////////////////////////////
12741 // VmaDefragmentationAlgorithm_Fast
12742 
12743 VmaDefragmentationAlgorithm_Fast::VmaDefragmentationAlgorithm_Fast(
12744  VmaAllocator hAllocator,
12745  VmaBlockVector* pBlockVector,
12746  uint32_t currentFrameIndex,
12747  bool overlappingMoveSupported) :
12748  VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
12749  m_OverlappingMoveSupported(overlappingMoveSupported),
12750  m_AllocationCount(0),
12751  m_AllAllocations(false),
12752  m_BytesMoved(0),
12753  m_AllocationsMoved(0),
12754  m_BlockInfos(VmaStlAllocator<BlockInfo>(hAllocator->GetAllocationCallbacks()))
12755 {
12756  VMA_ASSERT(VMA_DEBUG_MARGIN == 0);
12757 
12758 }
12759 
12760 VmaDefragmentationAlgorithm_Fast::~VmaDefragmentationAlgorithm_Fast()
12761 {
12762 }
12763 
12764 VkResult VmaDefragmentationAlgorithm_Fast::Defragment(
12765  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
12766  VkDeviceSize maxBytesToMove,
12767  uint32_t maxAllocationsToMove)
12768 {
12769  VMA_ASSERT(m_AllAllocations || m_pBlockVector->CalcAllocationCount() == m_AllocationCount);
12770 
12771  const size_t blockCount = m_pBlockVector->GetBlockCount();
12772  if(blockCount == 0 || maxBytesToMove == 0 || maxAllocationsToMove == 0)
12773  {
12774  return VK_SUCCESS;
12775  }
12776 
12777  PreprocessMetadata();
12778 
12779  // Sort blocks in order from most "destination" to most "source".
12780 
12781  m_BlockInfos.resize(blockCount);
12782  for(size_t i = 0; i < blockCount; ++i)
12783  {
12784  m_BlockInfos[i].origBlockIndex = i;
12785  }
12786 
12787  VMA_SORT(m_BlockInfos.begin(), m_BlockInfos.end(), [this](const BlockInfo& lhs, const BlockInfo& rhs) -> bool {
12788  return m_pBlockVector->GetBlock(lhs.origBlockIndex)->m_pMetadata->GetSumFreeSize() <
12789  m_pBlockVector->GetBlock(rhs.origBlockIndex)->m_pMetadata->GetSumFreeSize();
12790  });
12791 
12792  // THE MAIN ALGORITHM
12793 
12794  FreeSpaceDatabase freeSpaceDb;
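 // freeSpaceDb remembers gaps left behind in already-processed destination
 // blocks (trailing space and skipped-over holes) so that later, smaller
 // allocations can be packed into them instead of only appending at dstOffset.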
12795 
12796  size_t dstBlockInfoIndex = 0;
12797  size_t dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
12798  VmaDeviceMemoryBlock* pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
12799  VmaBlockMetadata_Generic* pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
12800  VkDeviceSize dstBlockSize = pDstMetadata->GetSize();
12801  VkDeviceSize dstOffset = 0;
12802 
12803  bool end = false;
12804  for(size_t srcBlockInfoIndex = 0; !end && srcBlockInfoIndex < blockCount; ++srcBlockInfoIndex)
12805  {
12806  const size_t srcOrigBlockIndex = m_BlockInfos[srcBlockInfoIndex].origBlockIndex;
12807  VmaDeviceMemoryBlock* const pSrcBlock = m_pBlockVector->GetBlock(srcOrigBlockIndex);
12808  VmaBlockMetadata_Generic* const pSrcMetadata = (VmaBlockMetadata_Generic*)pSrcBlock->m_pMetadata;
12809  for(VmaSuballocationList::iterator srcSuballocIt = pSrcMetadata->m_Suballocations.begin();
12810  !end && srcSuballocIt != pSrcMetadata->m_Suballocations.end(); )
12811  {
12812  VmaAllocation_T* const pAlloc = srcSuballocIt->hAllocation;
12813  const VkDeviceSize srcAllocAlignment = pAlloc->GetAlignment();
12814  const VkDeviceSize srcAllocSize = srcSuballocIt->size;
12815  if(m_AllocationsMoved == maxAllocationsToMove ||
12816  m_BytesMoved + srcAllocSize > maxBytesToMove)
12817  {
12818  end = true;
12819  break;
12820  }
12821  const VkDeviceSize srcAllocOffset = srcSuballocIt->offset;
12822 
12823  // Try to place it in one of the free spaces from the database.
12824  size_t freeSpaceInfoIndex;
12825  VkDeviceSize dstAllocOffset;
12826  if(freeSpaceDb.Fetch(srcAllocAlignment, srcAllocSize,
12827  freeSpaceInfoIndex, dstAllocOffset))
12828  {
12829  size_t freeSpaceOrigBlockIndex = m_BlockInfos[freeSpaceInfoIndex].origBlockIndex;
12830  VmaDeviceMemoryBlock* pFreeSpaceBlock = m_pBlockVector->GetBlock(freeSpaceOrigBlockIndex);
12831  VmaBlockMetadata_Generic* pFreeSpaceMetadata = (VmaBlockMetadata_Generic*)pFreeSpaceBlock->m_pMetadata;
12832  VkDeviceSize freeSpaceBlockSize = pFreeSpaceMetadata->GetSize();
12833 
12834  // Same block
12835  if(freeSpaceInfoIndex == srcBlockInfoIndex)
12836  {
12837  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
12838 
12839  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
12840 
12841  VmaSuballocation suballoc = *srcSuballocIt;
12842  suballoc.offset = dstAllocOffset;
12843  suballoc.hAllocation->ChangeOffset(dstAllocOffset);
12844  m_BytesMoved += srcAllocSize;
12845  ++m_AllocationsMoved;
12846 
12847  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
12848  ++nextSuballocIt;
12849  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
12850  srcSuballocIt = nextSuballocIt;
12851 
12852  InsertSuballoc(pFreeSpaceMetadata, suballoc);
12853 
12854  VmaDefragmentationMove move = {
12855  srcOrigBlockIndex, freeSpaceOrigBlockIndex,
12856  srcAllocOffset, dstAllocOffset,
12857  srcAllocSize };
12858  moves.push_back(move);
12859  }
12860  // Different block
12861  else
12862  {
12863  // MOVE OPTION 2: Move the allocation to a different block.
12864 
12865  VMA_ASSERT(freeSpaceInfoIndex < srcBlockInfoIndex);
12866 
12867  VmaSuballocation suballoc = *srcSuballocIt;
12868  suballoc.offset = dstAllocOffset;
12869  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pFreeSpaceBlock, dstAllocOffset);
12870  m_BytesMoved += srcAllocSize;
12871  ++m_AllocationsMoved;
12872 
12873  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
12874  ++nextSuballocIt;
12875  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
12876  srcSuballocIt = nextSuballocIt;
12877 
12878  InsertSuballoc(pFreeSpaceMetadata, suballoc);
12879 
12880  VmaDefragmentationMove move = {
12881  srcOrigBlockIndex, freeSpaceOrigBlockIndex,
12882  srcAllocOffset, dstAllocOffset,
12883  srcAllocSize };
12884  moves.push_back(move);
12885  }
12886  }
12887  else
12888  {
12889  dstAllocOffset = VmaAlignUp(dstOffset, srcAllocAlignment);
12890 
12891  // If the allocation doesn't fit before the end of dstBlock, advance to the next block.
12892  while(dstBlockInfoIndex < srcBlockInfoIndex &&
12893  dstAllocOffset + srcAllocSize > dstBlockSize)
12894  {
12895  // But before that, register remaining free space at the end of dst block.
12896  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, dstBlockSize - dstOffset);
12897 
12898  ++dstBlockInfoIndex;
12899  dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
12900  pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
12901  pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
12902  dstBlockSize = pDstMetadata->GetSize();
12903  dstOffset = 0;
12904  dstAllocOffset = 0;
12905  }
12906 
12907  // Same block
12908  if(dstBlockInfoIndex == srcBlockInfoIndex)
12909  {
12910  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
12911 
12912  const bool overlap = dstAllocOffset + srcAllocSize > srcAllocOffset;
12913 
12914  bool skipOver = overlap;
12915  if(overlap && m_OverlappingMoveSupported && dstAllocOffset < srcAllocOffset)
12916  {
12917  // If the destination and source places overlap, skip the move if it would
12918  // shift the allocation by less than 1/64 of its size.
12919  skipOver = (srcAllocOffset - dstAllocOffset) * 64 < srcAllocSize;
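 // Illustrative: a 64 MiB allocation is moved in place only if its offset
 // decreases by at least 1 MiB (1/64 of its size); a smaller gain does not
 // justify the cost of the copy.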
12920  }
12921 
12922  if(skipOver)
12923  {
12924  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, srcAllocOffset - dstOffset);
12925 
12926  dstOffset = srcAllocOffset + srcAllocSize;
12927  ++srcSuballocIt;
12928  }
12929  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
12930  else
12931  {
12932  srcSuballocIt->offset = dstAllocOffset;
12933  srcSuballocIt->hAllocation->ChangeOffset(dstAllocOffset);
12934  dstOffset = dstAllocOffset + srcAllocSize;
12935  m_BytesMoved += srcAllocSize;
12936  ++m_AllocationsMoved;
12937  ++srcSuballocIt;
12938  VmaDefragmentationMove move = {
12939  srcOrigBlockIndex, dstOrigBlockIndex,
12940  srcAllocOffset, dstAllocOffset,
12941  srcAllocSize };
12942  moves.push_back(move);
12943  }
12944  }
12945  // Different block
12946  else
12947  {
12948  // MOVE OPTION 2: Move the allocation to a different block.
12949 
12950  VMA_ASSERT(dstBlockInfoIndex < srcBlockInfoIndex);
12951  VMA_ASSERT(dstAllocOffset + srcAllocSize <= dstBlockSize);
12952 
12953  VmaSuballocation suballoc = *srcSuballocIt;
12954  suballoc.offset = dstAllocOffset;
12955  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlock, dstAllocOffset);
12956  dstOffset = dstAllocOffset + srcAllocSize;
12957  m_BytesMoved += srcAllocSize;
12958  ++m_AllocationsMoved;
12959 
12960  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
12961  ++nextSuballocIt;
12962  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
12963  srcSuballocIt = nextSuballocIt;
12964 
12965  pDstMetadata->m_Suballocations.push_back(suballoc);
12966 
12967  VmaDefragmentationMove move = {
12968  srcOrigBlockIndex, dstOrigBlockIndex,
12969  srcAllocOffset, dstAllocOffset,
12970  srcAllocSize };
12971  moves.push_back(move);
12972  }
12973  }
12974  }
12975  }
12976 
12977  m_BlockInfos.clear();
12978 
12979  PostprocessMetadata();
12980 
12981  return VK_SUCCESS;
12982 }
12983 
12984 void VmaDefragmentationAlgorithm_Fast::PreprocessMetadata()
12985 {
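 // The fast algorithm works on suballocation lists that contain only live
 // allocations: all FREE entries and the by-size index are dropped here, and
 // PostprocessMetadata() re-derives them from the final offsets afterwards.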
12986  const size_t blockCount = m_pBlockVector->GetBlockCount();
12987  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
12988  {
12989  VmaBlockMetadata_Generic* const pMetadata =
12990  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
12991  pMetadata->m_FreeCount = 0;
12992  pMetadata->m_SumFreeSize = pMetadata->GetSize();
12993  pMetadata->m_FreeSuballocationsBySize.clear();
12994  for(VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
12995  it != pMetadata->m_Suballocations.end(); )
12996  {
12997  if(it->type == VMA_SUBALLOCATION_TYPE_FREE)
12998  {
12999  VmaSuballocationList::iterator nextIt = it;
13000  ++nextIt;
13001  pMetadata->m_Suballocations.erase(it);
13002  it = nextIt;
13003  }
13004  else
13005  {
13006  ++it;
13007  }
13008  }
13009  }
13010 }
13011 
13012 void VmaDefragmentationAlgorithm_Fast::PostprocessMetadata()
13013 {
13014  const size_t blockCount = m_pBlockVector->GetBlockCount();
13015  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
13016  {
13017  VmaBlockMetadata_Generic* const pMetadata =
13018  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
13019  const VkDeviceSize blockSize = pMetadata->GetSize();
13020 
13021  // No allocations in this block - entire area is free.
13022  if(pMetadata->m_Suballocations.empty())
13023  {
13024  pMetadata->m_FreeCount = 1;
13025  //pMetadata->m_SumFreeSize is already set to blockSize.
13026  VmaSuballocation suballoc = {
13027  0, // offset
13028  blockSize, // size
13029  VMA_NULL, // hAllocation
13030  VMA_SUBALLOCATION_TYPE_FREE };
13031  pMetadata->m_Suballocations.push_back(suballoc);
13032  pMetadata->RegisterFreeSuballocation(pMetadata->m_Suballocations.begin());
13033  }
13034  // There are some allocations in this block.
13035  else
13036  {
13037  VkDeviceSize offset = 0;
13038  VmaSuballocationList::iterator it;
13039  for(it = pMetadata->m_Suballocations.begin();
13040  it != pMetadata->m_Suballocations.end();
13041  ++it)
13042  {
13043  VMA_ASSERT(it->type != VMA_SUBALLOCATION_TYPE_FREE);
13044  VMA_ASSERT(it->offset >= offset);
13045 
13046  // Need to insert preceding free space.
13047  if(it->offset > offset)
13048  {
13049  ++pMetadata->m_FreeCount;
13050  const VkDeviceSize freeSize = it->offset - offset;
13051  VmaSuballocation suballoc = {
13052  offset, // offset
13053  freeSize, // size
13054  VMA_NULL, // hAllocation
13055  VMA_SUBALLOCATION_TYPE_FREE };
13056  VmaSuballocationList::iterator precedingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
13057  if(freeSize >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
13058  {
13059  pMetadata->m_FreeSuballocationsBySize.push_back(precedingFreeIt);
13060  }
13061  }
13062 
13063  pMetadata->m_SumFreeSize -= it->size;
13064  offset = it->offset + it->size;
13065  }
13066 
13067  // Need to insert trailing free space.
13068  if(offset < blockSize)
13069  {
13070  ++pMetadata->m_FreeCount;
13071  const VkDeviceSize freeSize = blockSize - offset;
13072  VmaSuballocation suballoc = {
13073  offset, // offset
13074  freeSize, // size
13075  VMA_NULL, // hAllocation
13076  VMA_SUBALLOCATION_TYPE_FREE };
13077  VMA_ASSERT(it == pMetadata->m_Suballocations.end());
13078  VmaSuballocationList::iterator trailingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
13079  if(freeSize >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
13080  {
13081  pMetadata->m_FreeSuballocationsBySize.push_back(trailingFreeIt);
13082  }
13083  }
13084 
13085  VMA_SORT(
13086  pMetadata->m_FreeSuballocationsBySize.begin(),
13087  pMetadata->m_FreeSuballocationsBySize.end(),
13088  VmaSuballocationItemSizeLess());
13089  }
13090 
13091  VMA_HEAVY_ASSERT(pMetadata->Validate());
13092  }
13093 }
13094 
13095 void VmaDefragmentationAlgorithm_Fast::InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc)
13096 {
13097  // TODO: Optimize somehow. Remember iterator instead of searching for it linearly.
13098  VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
13099  while(it != pMetadata->m_Suballocations.end() &&
13100  it->offset < suballoc.offset)
13101  {
13102  ++it;
13103  }
13106  pMetadata->m_Suballocations.insert(it, suballoc);
13107 }
13108 
13109 ////////////////////////////////////////////////////////////////////////////////
13110 // VmaBlockVectorDefragmentationContext
13111 
13112 VmaBlockVectorDefragmentationContext::VmaBlockVectorDefragmentationContext(
13113  VmaAllocator hAllocator,
13114  VmaPool hCustomPool,
13115  VmaBlockVector* pBlockVector,
13116  uint32_t currFrameIndex,
13117  uint32_t algorithmFlags) :
13118  res(VK_SUCCESS),
13119  mutexLocked(false),
13120  blockContexts(VmaStlAllocator<VmaBlockDefragmentationContext>(hAllocator->GetAllocationCallbacks())),
13121  m_hAllocator(hAllocator),
13122  m_hCustomPool(hCustomPool),
13123  m_pBlockVector(pBlockVector),
13124  m_CurrFrameIndex(currFrameIndex),
13125  m_AlgorithmFlags(algorithmFlags),
13126  m_pAlgorithm(VMA_NULL),
13127  m_Allocations(VmaStlAllocator<AllocInfo>(hAllocator->GetAllocationCallbacks())),
13128  m_AllAllocations(false)
13129 {
13130 }
13131 
13132 VmaBlockVectorDefragmentationContext::~VmaBlockVectorDefragmentationContext()
13133 {
13134  vma_delete(m_hAllocator, m_pAlgorithm);
13135 }
13136 
13137 void VmaBlockVectorDefragmentationContext::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
13138 {
13139  AllocInfo info = { hAlloc, pChanged };
13140  m_Allocations.push_back(info);
13141 }
13142 
13143 void VmaBlockVectorDefragmentationContext::Begin(bool overlappingMoveSupported)
13144 {
13145  const bool allAllocations = m_AllAllocations ||
13146  m_Allocations.size() == m_pBlockVector->CalcAllocationCount();
13147 
13148  /********************************
13149  HERE IS THE CHOICE OF DEFRAGMENTATION ALGORITHM.
13150  ********************************/
13151 
13152  /*
13153  Fast algorithm is supported only when certain criteria are met:
13154  - VMA_DEBUG_MARGIN is 0.
13155  - All allocations in this block vector are moveable.
13156  - There is no possibility of image/buffer granularity conflict.
13157  */
13158  if(VMA_DEBUG_MARGIN == 0 &&
13159  allAllocations &&
13160  !m_pBlockVector->IsBufferImageGranularityConflictPossible())
13161  {
13162  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Fast)(
13163  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
13164  }
13165  else
13166  {
13167  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Generic)(
13168  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
13169  }
13170 
13171  if(allAllocations)
13172  {
13173  m_pAlgorithm->AddAll();
13174  }
13175  else
13176  {
13177  for(size_t i = 0, count = m_Allocations.size(); i < count; ++i)
13178  {
13179  m_pAlgorithm->AddAllocation(m_Allocations[i].hAlloc, m_Allocations[i].pChanged);
13180  }
13181  }
13182 }
13183 
13184 ////////////////////////////////////////////////////////////////////////////////
13185 // VmaDefragmentationContext
13186 
13187 VmaDefragmentationContext_T::VmaDefragmentationContext_T(
13188  VmaAllocator hAllocator,
13189  uint32_t currFrameIndex,
13190  uint32_t flags,
13191  VmaDefragmentationStats* pStats) :
13192  m_hAllocator(hAllocator),
13193  m_CurrFrameIndex(currFrameIndex),
13194  m_Flags(flags),
13195  m_pStats(pStats),
13196  m_CustomPoolContexts(VmaStlAllocator<VmaBlockVectorDefragmentationContext*>(hAllocator->GetAllocationCallbacks()))
13197 {
13198  memset(m_DefaultPoolContexts, 0, sizeof(m_DefaultPoolContexts));
13199 }
13200 
13201 VmaDefragmentationContext_T::~VmaDefragmentationContext_T()
13202 {
13203  for(size_t i = m_CustomPoolContexts.size(); i--; )
13204  {
13205  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[i];
13206  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
13207  vma_delete(m_hAllocator, pBlockVectorCtx);
13208  }
13209  for(size_t i = m_hAllocator->m_MemProps.memoryTypeCount; i--; )
13210  {
13211  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[i];
13212  if(pBlockVectorCtx)
13213  {
13214  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
13215  vma_delete(m_hAllocator, pBlockVectorCtx);
13216  }
13217  }
13218 }
13219 
13220 void VmaDefragmentationContext_T::AddPools(uint32_t poolCount, VmaPool* pPools)
13221 {
13222  for(uint32_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
13223  {
13224  VmaPool pool = pPools[poolIndex];
13225  VMA_ASSERT(pool);
13226  // Pools with algorithm other than default are not defragmented.
13227  if(pool->m_BlockVector.GetAlgorithm() == 0)
13228  {
13229  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
13230 
13231  for(size_t i = m_CustomPoolContexts.size(); i--; )
13232  {
13233  if(m_CustomPoolContexts[i]->GetCustomPool() == pool)
13234  {
13235  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
13236  break;
13237  }
13238  }
13239 
13240  if(!pBlockVectorDefragCtx)
13241  {
13242  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
13243  m_hAllocator,
13244  pool,
13245  &pool->m_BlockVector,
13246  m_CurrFrameIndex,
13247  m_Flags);
13248  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
13249  }
13250 
13251  pBlockVectorDefragCtx->AddAll();
13252  }
13253  }
13254 }
13255 
13256 void VmaDefragmentationContext_T::AddAllocations(
13257  uint32_t allocationCount,
13258  VmaAllocation* pAllocations,
13259  VkBool32* pAllocationsChanged)
13260 {
13261  // Dispatch pAllocations among per-pool defragmentation contexts. Create them when necessary.
13262  for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
13263  {
13264  const VmaAllocation hAlloc = pAllocations[allocIndex];
13265  VMA_ASSERT(hAlloc);
13266  // DedicatedAlloc cannot be defragmented.
13267  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
13268  // Lost allocation cannot be defragmented.
13269  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
13270  {
13271  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
13272 
13273  const VmaPool hAllocPool = hAlloc->GetPool();
13274  // This allocation belongs to custom pool.
13275  if(hAllocPool != VK_NULL_HANDLE)
13276  {
13277  // Pools with algorithm other than default are not defragmented.
13278  if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
13279  {
13280  for(size_t i = m_CustomPoolContexts.size(); i--; )
13281  {
13282  if(m_CustomPoolContexts[i]->GetCustomPool() == hAllocPool)
13283  {
13284  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
13285  break;
13286  }
13287  }
13288  if(!pBlockVectorDefragCtx)
13289  {
13290  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
13291  m_hAllocator,
13292  hAllocPool,
13293  &hAllocPool->m_BlockVector,
13294  m_CurrFrameIndex,
13295  m_Flags);
13296  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
13297  }
13298  }
13299  }
13300  // This allocation belongs to default pool.
13301  else
13302  {
13303  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
13304  pBlockVectorDefragCtx = m_DefaultPoolContexts[memTypeIndex];
13305  if(!pBlockVectorDefragCtx)
13306  {
13307  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
13308  m_hAllocator,
13309  VMA_NULL, // hCustomPool
13310  m_hAllocator->m_pBlockVectors[memTypeIndex],
13311  m_CurrFrameIndex,
13312  m_Flags);
13313  m_DefaultPoolContexts[memTypeIndex] = pBlockVectorDefragCtx;
13314  }
13315  }
13316 
13317  if(pBlockVectorDefragCtx)
13318  {
13319  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
13320  &pAllocationsChanged[allocIndex] : VMA_NULL;
13321  pBlockVectorDefragCtx->AddAllocation(hAlloc, pChanged);
13322  }
13323  }
13324  }
13325 }
13326 
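// Executes the defragmentation. Without a command buffer the GPU limits are forced
// to zero, so only CPU-side moves are performed. A non-success result (such as
// VK_NOT_READY when GPU copy commands were recorded) is propagated to the caller.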
VkResult VmaDefragmentationContext_T::Defragment(
    VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
    VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
    VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats)
{
    if(pStats)
    {
        memset(pStats, 0, sizeof(VmaDefragmentationStats));
    }

    if(commandBuffer == VK_NULL_HANDLE)
    {
        maxGpuBytesToMove = 0;
        maxGpuAllocationsToMove = 0;
    }

    VkResult res = VK_SUCCESS;

    // Process default pools.
    for(uint32_t memTypeIndex = 0;
        memTypeIndex < m_hAllocator->GetMemoryTypeCount() && res >= VK_SUCCESS;
        ++memTypeIndex)
    {
        VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
        if(pBlockVectorCtx)
        {
            VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
            pBlockVectorCtx->GetBlockVector()->Defragment(
                pBlockVectorCtx,
                pStats,
                maxCpuBytesToMove, maxCpuAllocationsToMove,
                maxGpuBytesToMove, maxGpuAllocationsToMove,
                commandBuffer);
            if(pBlockVectorCtx->res != VK_SUCCESS)
            {
                res = pBlockVectorCtx->res;
            }
        }
    }

    // Process custom pools.
    for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
        customCtxIndex < customCtxCount && res >= VK_SUCCESS;
        ++customCtxIndex)
    {
        VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
        VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
        pBlockVectorCtx->GetBlockVector()->Defragment(
            pBlockVectorCtx,
            pStats,
            maxCpuBytesToMove, maxCpuAllocationsToMove,
            maxGpuBytesToMove, maxGpuAllocationsToMove,
            commandBuffer);
        if(pBlockVectorCtx->res != VK_SUCCESS)
        {
            res = pBlockVectorCtx->res;
        }
    }

    return res;
}

////////////////////////////////////////////////////////////////////////////////
// VmaRecorder

#if VMA_RECORDING_ENABLED

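// Each Record* method below appends one CSV line of the form
// "threadId,time,frameIndex,functionName,args..." under m_FileMutex and then calls
// Flush(), which honors VMA_RECORD_FLUSH_AFTER_CALL_BIT.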
VmaRecorder::VmaRecorder() :
    m_UseMutex(true),
    m_Flags(0),
    m_File(VMA_NULL),
    m_Freq(INT64_MAX),
    m_StartCounter(INT64_MAX)
{
}

VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
{
    m_UseMutex = useMutex;
    m_Flags = settings.flags;

    QueryPerformanceFrequency((LARGE_INTEGER*)&m_Freq);
    QueryPerformanceCounter((LARGE_INTEGER*)&m_StartCounter);

    // Open file for writing.
    errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
    if(err != 0)
    {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    // Write header.
    fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
    fprintf(m_File, "%s\n", "1,5");

    return VK_SUCCESS;
}

VmaRecorder::~VmaRecorder()
{
    if(m_File != VMA_NULL)
    {
        fclose(m_File);
    }
}

void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
    Flush();
}

void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
    Flush();
}

void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
        createInfo.memoryTypeIndex,
        createInfo.flags,
        createInfo.blockSize,
        (uint64_t)createInfo.minBlockCount,
        (uint64_t)createInfo.maxBlockCount,
        createInfo.frameInUseCount,
        pool);
    Flush();
}

void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
        pool);
    Flush();
}

void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
    const VkMemoryRequirements& vkMemReq,
    const VmaAllocationCreateInfo& createInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        vkMemReq.size,
        vkMemReq.alignment,
        vkMemReq.memoryTypeBits,
        createInfo.flags,
        createInfo.usage,
        createInfo.requiredFlags,
        createInfo.preferredFlags,
        createInfo.memoryTypeBits,
        createInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}

void VmaRecorder::RecordAllocateMemoryPages(uint32_t frameIndex,
    const VkMemoryRequirements& vkMemReq,
    const VmaAllocationCreateInfo& createInfo,
    uint64_t allocationCount,
    const VmaAllocation* pAllocations)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryPages,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,", callParams.threadId, callParams.time, frameIndex,
        vkMemReq.size,
        vkMemReq.alignment,
        vkMemReq.memoryTypeBits,
        createInfo.flags,
        createInfo.usage,
        createInfo.requiredFlags,
        createInfo.preferredFlags,
        createInfo.memoryTypeBits,
        createInfo.pool);
    PrintPointerList(allocationCount, pAllocations);
    fprintf(m_File, ",%s\n", userDataStr.GetString());
    Flush();
}

void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
    const VkMemoryRequirements& vkMemReq,
    bool requiresDedicatedAllocation,
    bool prefersDedicatedAllocation,
    const VmaAllocationCreateInfo& createInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        vkMemReq.size,
        vkMemReq.alignment,
        vkMemReq.memoryTypeBits,
        requiresDedicatedAllocation ? 1 : 0,
        prefersDedicatedAllocation ? 1 : 0,
        createInfo.flags,
        createInfo.usage,
        createInfo.requiredFlags,
        createInfo.preferredFlags,
        createInfo.memoryTypeBits,
        createInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}

void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
    const VkMemoryRequirements& vkMemReq,
    bool requiresDedicatedAllocation,
    bool prefersDedicatedAllocation,
    const VmaAllocationCreateInfo& createInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        vkMemReq.size,
        vkMemReq.alignment,
        vkMemReq.memoryTypeBits,
        requiresDedicatedAllocation ? 1 : 0,
        prefersDedicatedAllocation ? 1 : 0,
        createInfo.flags,
        createInfo.usage,
        createInfo.requiredFlags,
        createInfo.preferredFlags,
        createInfo.memoryTypeBits,
        createInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}

void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
        allocation);
    Flush();
}

void VmaRecorder::RecordFreeMemoryPages(uint32_t frameIndex,
    uint64_t allocationCount,
    const VmaAllocation* pAllocations)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaFreeMemoryPages,", callParams.threadId, callParams.time, frameIndex);
    PrintPointerList(allocationCount, pAllocations);
    fprintf(m_File, "\n");
    Flush();
}

void VmaRecorder::RecordResizeAllocation(
    uint32_t frameIndex,
    VmaAllocation allocation,
    VkDeviceSize newSize)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaResizeAllocation,%p,%llu\n", callParams.threadId, callParams.time, frameIndex,
        allocation, newSize);
    Flush();
}

void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
    VmaAllocation allocation,
    const void* pUserData)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(
        allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
        pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        allocation,
        userDataStr.GetString());
    Flush();
}

void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
        allocation);
    Flush();
}

void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
        allocation);
    Flush();
}

void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
        allocation);
    Flush();
}

void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
    VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
        allocation,
        offset,
        size);
    Flush();
}

void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
    VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
        allocation,
        offset,
        size);
    Flush();
}

void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
    const VkBufferCreateInfo& bufCreateInfo,
    const VmaAllocationCreateInfo& allocCreateInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        bufCreateInfo.flags,
        bufCreateInfo.size,
        bufCreateInfo.usage,
        bufCreateInfo.sharingMode,
        allocCreateInfo.flags,
        allocCreateInfo.usage,
        allocCreateInfo.requiredFlags,
        allocCreateInfo.preferredFlags,
        allocCreateInfo.memoryTypeBits,
        allocCreateInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}

void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
    const VkImageCreateInfo& imageCreateInfo,
    const VmaAllocationCreateInfo& allocCreateInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        imageCreateInfo.flags,
        imageCreateInfo.imageType,
        imageCreateInfo.format,
        imageCreateInfo.extent.width,
        imageCreateInfo.extent.height,
        imageCreateInfo.extent.depth,
        imageCreateInfo.mipLevels,
        imageCreateInfo.arrayLayers,
        imageCreateInfo.samples,
        imageCreateInfo.tiling,
        imageCreateInfo.usage,
        imageCreateInfo.sharingMode,
        imageCreateInfo.initialLayout,
        allocCreateInfo.flags,
        allocCreateInfo.usage,
        allocCreateInfo.requiredFlags,
        allocCreateInfo.preferredFlags,
        allocCreateInfo.memoryTypeBits,
        allocCreateInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}

void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
        allocation);
    Flush();
}

void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
        allocation);
    Flush();
}

void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
        allocation);
    Flush();
}

void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
        allocation);
    Flush();
}

void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
    VmaPool pool)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
        pool);
    Flush();
}

void VmaRecorder::RecordDefragmentationBegin(uint32_t frameIndex,
    const VmaDefragmentationInfo2& info,
    VmaDefragmentationContext ctx)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationBegin,%u,", callParams.threadId, callParams.time, frameIndex,
        info.flags);
    PrintPointerList(info.allocationCount, info.pAllocations);
    fprintf(m_File, ",");
    PrintPointerList(info.poolCount, info.pPools);
    fprintf(m_File, ",%llu,%u,%llu,%u,%p,%p\n",
        info.maxCpuBytesToMove,
        info.maxCpuAllocationsToMove,
        info.maxGpuBytesToMove,
        info.maxGpuAllocationsToMove,
        info.commandBuffer,
        ctx);
    Flush();
}

void VmaRecorder::RecordDefragmentationEnd(uint32_t frameIndex,
    VmaDefragmentationContext ctx)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationEnd,%p\n", callParams.threadId, callParams.time, frameIndex,
        ctx);
    Flush();
}

VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
{
    if(pUserData != VMA_NULL)
    {
        if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
        {
            m_Str = (const char*)pUserData;
        }
        else
        {
            sprintf_s(m_PtrStr, "%p", pUserData);
            m_Str = m_PtrStr;
        }
    }
    else
    {
        m_Str = "";
    }
}

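// WriteConfiguration dumps the physical device properties, the heap/type layout,
// and the relevant VMA_DEBUG_* macro values into the recording, so a replay can
// check that it runs in a compatible environment.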
void VmaRecorder::WriteConfiguration(
    const VkPhysicalDeviceProperties& devProps,
    const VkPhysicalDeviceMemoryProperties& memProps,
    bool dedicatedAllocationExtensionEnabled)
{
    fprintf(m_File, "Config,Begin\n");

    fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
    fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
    fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
    fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
    fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
    fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);

    fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
    fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
    fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);

    fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
    for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
    {
        fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
        fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
    }
    fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
    for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
    {
        fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
        fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
    }

    fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);

    fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
    fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
    fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
    fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
    fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);

    fprintf(m_File, "Config,End\n");
}

void VmaRecorder::GetBasicParams(CallParams& outParams)
{
    outParams.threadId = GetCurrentThreadId();

    LARGE_INTEGER counter;
    QueryPerformanceCounter(&counter);
    outParams.time = (double)(counter.QuadPart - m_StartCounter) / (double)m_Freq;
}

void VmaRecorder::PrintPointerList(uint64_t count, const VmaAllocation* pItems)
{
    if(count)
    {
        fprintf(m_File, "%p", pItems[0]);
        for(uint64_t i = 1; i < count; ++i)
        {
            fprintf(m_File, " %p", pItems[i]);
        }
    }
}

void VmaRecorder::Flush()
{
    if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
    {
        fflush(m_File);
    }
}

#endif // #if VMA_RECORDING_ENABLED

////////////////////////////////////////////////////////////////////////////////
// VmaAllocator_T

VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
    m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
    m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
    m_hDevice(pCreateInfo->device),
    m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
    m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
        *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
    m_PreferredLargeHeapBlockSize(0),
    m_PhysicalDevice(pCreateInfo->physicalDevice),
    m_CurrentFrameIndex(0),
    m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
    m_NextPoolId(0)
#if VMA_RECORDING_ENABLED
    ,m_pRecorder(VMA_NULL)
#endif
{
    if(VMA_DEBUG_DETECT_CORRUPTION)
    {
        // Needs to be a multiple of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
        VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
    }

    VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);

#if !(VMA_DEDICATED_ALLOCATION)
    if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0)
    {
        VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
    }
#endif

    memset(&m_DeviceMemoryCallbacks, 0, sizeof(m_DeviceMemoryCallbacks));
    memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
    memset(&m_MemProps, 0, sizeof(m_MemProps));

    memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
    memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));

    for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    {
        m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
    }

    if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
    {
        m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
        m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
    }

    ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);

    (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
    (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);

    VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
    VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
    VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
    VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));

    m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
        pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);

    if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
    {
        for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
        {
            const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
            if(limit != VK_WHOLE_SIZE)
            {
                m_HeapSizeLimit[heapIndex] = limit;
                if(limit < m_MemProps.memoryHeaps[heapIndex].size)
                {
                    m_MemProps.memoryHeaps[heapIndex].size = limit;
                }
            }
        }
    }

    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    {
        const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);

        m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
            this,
            memTypeIndex,
            preferredBlockSize,
            0,
            SIZE_MAX,
            GetBufferImageGranularity(),
            pCreateInfo->frameInUseCount,
            false, // isCustomPool
            false, // explicitBlockSize
            false); // linearAlgorithm
        // No need to call m_pBlockVectors[memTypeIndex]->CreateMinBlocks here,
        // because minBlockCount is 0.
        m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
    }
}

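// Init is separate from the constructor, presumably so that failure to open the
// recording file can be reported to the caller as a VkResult.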
VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
{
    VkResult res = VK_SUCCESS;

    if(pCreateInfo->pRecordSettings != VMA_NULL &&
        !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
    {
#if VMA_RECORDING_ENABLED
        m_pRecorder = vma_new(this, VmaRecorder)();
        res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
        if(res != VK_SUCCESS)
        {
            return res;
        }
        m_pRecorder->WriteConfiguration(
            m_PhysicalDeviceProperties,
            m_MemProps,
            m_UseKhrDedicatedAllocation);
        m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
#else
        VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
        return VK_ERROR_FEATURE_NOT_PRESENT;
#endif
    }

    return res;
}

VmaAllocator_T::~VmaAllocator_T()
{
#if VMA_RECORDING_ENABLED
    if(m_pRecorder != VMA_NULL)
    {
        m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
        vma_delete(this, m_pRecorder);
    }
#endif

    VMA_ASSERT(m_Pools.empty());

    for(size_t i = GetMemoryTypeCount(); i--; )
    {
        vma_delete(this, m_pDedicatedAllocations[i]);
        vma_delete(this, m_pBlockVectors[i]);
    }
}

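// Vulkan entry points are imported in two passes: statically linked functions when
// VMA_STATIC_VULKAN_FUNCTIONS == 1, then any non-null pointers provided by the user
// in VmaVulkanFunctions, which take precedence.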
void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
{
#if VMA_STATIC_VULKAN_FUNCTIONS == 1
    m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
    m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
    m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
    m_VulkanFunctions.vkMapMemory = &vkMapMemory;
    m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
    m_VulkanFunctions.vkFlushMappedMemoryRanges = &vkFlushMappedMemoryRanges;
    m_VulkanFunctions.vkInvalidateMappedMemoryRanges = &vkInvalidateMappedMemoryRanges;
    m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
    m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
    m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
    m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
    m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
    m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
    m_VulkanFunctions.vkCreateImage = &vkCreateImage;
    m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
    m_VulkanFunctions.vkCmdCopyBuffer = &vkCmdCopyBuffer;
#if VMA_DEDICATED_ALLOCATION
    if(m_UseKhrDedicatedAllocation)
    {
        m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
            (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
        m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
            (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
    }
#endif // #if VMA_DEDICATED_ALLOCATION
#endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1

#define VMA_COPY_IF_NOT_NULL(funcName) \
    if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;

    if(pVulkanFunctions != VMA_NULL)
    {
        VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
        VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
        VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
        VMA_COPY_IF_NOT_NULL(vkFreeMemory);
        VMA_COPY_IF_NOT_NULL(vkMapMemory);
        VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
        VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
        VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
        VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
        VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
        VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
        VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
        VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
        VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
        VMA_COPY_IF_NOT_NULL(vkCreateImage);
        VMA_COPY_IF_NOT_NULL(vkDestroyImage);
        VMA_COPY_IF_NOT_NULL(vkCmdCopyBuffer);
#if VMA_DEDICATED_ALLOCATION
        VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
        VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
#endif
    }

#undef VMA_COPY_IF_NOT_NULL

    // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
    // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
    VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCmdCopyBuffer != VMA_NULL);
#if VMA_DEDICATED_ALLOCATION
    if(m_UseKhrDedicatedAllocation)
    {
        VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
        VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
    }
#endif
}

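// Heuristic: "small" heaps (no larger than VMA_SMALL_HEAP_MAX_SIZE) get blocks of
// 1/8 of the heap size; larger heaps get m_PreferredLargeHeapBlockSize. With the
// default macro values defined earlier in this header, a 256 MB heap would get
// 32 MB blocks while a 4 GB heap would get 256 MB blocks.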
VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
{
    const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
    const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
    return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
}

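// Order of attempts below: MAPPED is dropped for non-HOST_VISIBLE types, large or
// explicitly dedicated requests go straight to a dedicated allocation, everything
// else is sub-allocated from the memory type's block vector with dedicated memory
// as the fallback.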
VkResult VmaAllocator_T::AllocateMemoryOfType(
    VkDeviceSize size,
    VkDeviceSize alignment,
    bool dedicatedAllocation,
    VkBuffer dedicatedBuffer,
    VkImage dedicatedImage,
    const VmaAllocationCreateInfo& createInfo,
    uint32_t memTypeIndex,
    VmaSuballocationType suballocType,
    size_t allocationCount,
    VmaAllocation* pAllocations)
{
    VMA_ASSERT(pAllocations != VMA_NULL);
    VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, AllocationCount=%zu, Size=%llu", memTypeIndex, allocationCount, size);

    VmaAllocationCreateInfo finalCreateInfo = createInfo;

    // If memory type is not HOST_VISIBLE, disable MAPPED.
    if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
        (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    {
        finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
    }

    VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
    VMA_ASSERT(blockVector);

    const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
    bool preferDedicatedMemory =
        VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
        dedicatedAllocation ||
        // Heuristics: Allocate dedicated memory if requested size is greater than half of preferred block size.
        size > preferredBlockSize / 2;

    if(preferDedicatedMemory &&
        (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
        finalCreateInfo.pool == VK_NULL_HANDLE)
    {
        finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
    }

    if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
    {
        if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
        {
            return VK_ERROR_OUT_OF_DEVICE_MEMORY;
        }
        else
        {
            return AllocateDedicatedMemory(
                size,
                suballocType,
                memTypeIndex,
                (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
                (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
                finalCreateInfo.pUserData,
                dedicatedBuffer,
                dedicatedImage,
                allocationCount,
                pAllocations);
        }
    }
    else
    {
        VkResult res = blockVector->Allocate(
            VK_NULL_HANDLE, // hCurrentPool
            m_CurrentFrameIndex.load(),
            size,
            alignment,
            finalCreateInfo,
            suballocType,
            allocationCount,
            pAllocations);
        if(res == VK_SUCCESS)
        {
            return res;
        }

        // Block-vector allocation failed: try dedicated memory.
        if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
        {
            return VK_ERROR_OUT_OF_DEVICE_MEMORY;
        }
        else
        {
            res = AllocateDedicatedMemory(
                size,
                suballocType,
                memTypeIndex,
                (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
                (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
                finalCreateInfo.pUserData,
                dedicatedBuffer,
                dedicatedImage,
                allocationCount,
                pAllocations);
            if(res == VK_SUCCESS)
            {
                // Succeeded: AllocateDedicatedMemory function already filled pAllocations, nothing more to do here.
                VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
                return VK_SUCCESS;
            }
            else
            {
                // Everything failed: Return error code.
                VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
                return res;
            }
        }
    }
}

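// When VK_KHR_dedicated_allocation is enabled, VkMemoryDedicatedAllocateInfoKHR is
// chained into VkMemoryAllocateInfo::pNext below so the driver can associate the
// memory with the specific buffer or image.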
VkResult VmaAllocator_T::AllocateDedicatedMemory(
    VkDeviceSize size,
    VmaSuballocationType suballocType,
    uint32_t memTypeIndex,
    bool map,
    bool isUserDataString,
    void* pUserData,
    VkBuffer dedicatedBuffer,
    VkImage dedicatedImage,
    size_t allocationCount,
    VmaAllocation* pAllocations)
{
    VMA_ASSERT(allocationCount > 0 && pAllocations);

    VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    allocInfo.memoryTypeIndex = memTypeIndex;
    allocInfo.allocationSize = size;

#if VMA_DEDICATED_ALLOCATION
    VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
    if(m_UseKhrDedicatedAllocation)
    {
        if(dedicatedBuffer != VK_NULL_HANDLE)
        {
            VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
            dedicatedAllocInfo.buffer = dedicatedBuffer;
            allocInfo.pNext = &dedicatedAllocInfo;
        }
        else if(dedicatedImage != VK_NULL_HANDLE)
        {
            dedicatedAllocInfo.image = dedicatedImage;
            allocInfo.pNext = &dedicatedAllocInfo;
        }
    }
#endif // #if VMA_DEDICATED_ALLOCATION

    size_t allocIndex;
    VkResult res;
    for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
    {
        res = AllocateDedicatedMemoryPage(
            size,
            suballocType,
            memTypeIndex,
            allocInfo,
            map,
            isUserDataString,
            pUserData,
            pAllocations + allocIndex);
        if(res != VK_SUCCESS)
        {
            break;
        }
    }

    if(res == VK_SUCCESS)
    {
        // Register them in m_pDedicatedAllocations.
        {
            VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
            AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
            VMA_ASSERT(pDedicatedAllocations);
            for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
            {
                VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, pAllocations[allocIndex]);
            }
        }

        VMA_DEBUG_LOG(" Allocated DedicatedMemory Count=%zu, MemoryTypeIndex=#%u", allocationCount, memTypeIndex);
    }
    else
    {
        // Free all already created allocations.
        while(allocIndex--)
        {
            VmaAllocation currAlloc = pAllocations[allocIndex];
            VkDeviceMemory hMemory = currAlloc->GetMemory();

            /*
            There is no need to call this, because Vulkan spec allows to skip vkUnmapMemory
            before vkFreeMemory.

            if(currAlloc->GetMappedData() != VMA_NULL)
            {
                (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
            }
            */

            FreeVulkanMemory(memTypeIndex, currAlloc->GetSize(), hMemory);

            currAlloc->SetUserData(this, VMA_NULL);
            vma_delete(this, currAlloc);
        }

        memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
    }

    return res;
}

VkResult VmaAllocator_T::AllocateDedicatedMemoryPage(
    VkDeviceSize size,
    VmaSuballocationType suballocType,
    uint32_t memTypeIndex,
    const VkMemoryAllocateInfo& allocInfo,
    bool map,
    bool isUserDataString,
    void* pUserData,
    VmaAllocation* pAllocation)
{
    VkDeviceMemory hMemory = VK_NULL_HANDLE;
    VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
    if(res < 0)
    {
        VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
        return res;
    }

    void* pMappedData = VMA_NULL;
    if(map)
    {
        res = (*m_VulkanFunctions.vkMapMemory)(
            m_hDevice,
            hMemory,
            0,
            VK_WHOLE_SIZE,
            0,
            &pMappedData);
        if(res < 0)
        {
            VMA_DEBUG_LOG(" vkMapMemory FAILED");
            FreeVulkanMemory(memTypeIndex, size, hMemory);
            return res;
        }
    }

    *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load(), isUserDataString);
    (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
    (*pAllocation)->SetUserData(this, pUserData);
    if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    {
        FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
    }

    return VK_SUCCESS;
}

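// The two GetXxxMemoryRequirements helpers below use the
// vkGet*MemoryRequirements2KHR entry points when available, which also report
// whether the resource requires or merely prefers a dedicated allocation.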
void VmaAllocator_T::GetBufferMemoryRequirements(
    VkBuffer hBuffer,
    VkMemoryRequirements& memReq,
    bool& requiresDedicatedAllocation,
    bool& prefersDedicatedAllocation) const
{
#if VMA_DEDICATED_ALLOCATION
    if(m_UseKhrDedicatedAllocation)
    {
        VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
        memReqInfo.buffer = hBuffer;

        VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };

        VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
        memReq2.pNext = &memDedicatedReq;

        (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);

        memReq = memReq2.memoryRequirements;
        requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
        prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    }
    else
#endif // #if VMA_DEDICATED_ALLOCATION
    {
        (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
        requiresDedicatedAllocation = false;
        prefersDedicatedAllocation = false;
    }
}

void VmaAllocator_T::GetImageMemoryRequirements(
    VkImage hImage,
    VkMemoryRequirements& memReq,
    bool& requiresDedicatedAllocation,
    bool& prefersDedicatedAllocation) const
{
#if VMA_DEDICATED_ALLOCATION
    if(m_UseKhrDedicatedAllocation)
    {
        VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
        memReqInfo.image = hImage;

        VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };

        VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
        memReq2.pNext = &memDedicatedReq;

        (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);

        memReq = memReq2.memoryRequirements;
        requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
        prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    }
    else
#endif // #if VMA_DEDICATED_ALLOCATION
    {
        (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
        requiresDedicatedAllocation = false;
        prefersDedicatedAllocation = false;
    }
}

VkResult VmaAllocator_T::AllocateMemory(
    const VkMemoryRequirements& vkMemReq,
    bool requiresDedicatedAllocation,
    bool prefersDedicatedAllocation,
    VkBuffer dedicatedBuffer,
    VkImage dedicatedImage,
    const VmaAllocationCreateInfo& createInfo,
    VmaSuballocationType suballocType,
    size_t allocationCount,
    VmaAllocation* pAllocations)
{
    memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);

    VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));

    if(vkMemReq.size == 0)
    {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
        (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    {
        VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
        return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    }
    if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
        (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0)
    {
        VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
        return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    }
    if(requiresDedicatedAllocation)
    {
        if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
        {
            VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
            return VK_ERROR_OUT_OF_DEVICE_MEMORY;
        }
        if(createInfo.pool != VK_NULL_HANDLE)
        {
            VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
            return VK_ERROR_OUT_OF_DEVICE_MEMORY;
        }
    }
    if((createInfo.pool != VK_NULL_HANDLE) &&
        ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
    {
        VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
        return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    }

    if(createInfo.pool != VK_NULL_HANDLE)
    {
        const VkDeviceSize alignmentForPool = VMA_MAX(
            vkMemReq.alignment,
            GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
        return createInfo.pool->m_BlockVector.Allocate(
            createInfo.pool,
            m_CurrentFrameIndex.load(),
            vkMemReq.size,
            alignmentForPool,
            createInfo,
            suballocType,
            allocationCount,
            pAllocations);
    }
    else
    {
        // Bit mask of Vulkan memory types acceptable for this allocation.
        uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
        uint32_t memTypeIndex = UINT32_MAX;
        VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
        if(res == VK_SUCCESS)
        {
            VkDeviceSize alignmentForMemType = VMA_MAX(
                vkMemReq.alignment,
                GetMemoryTypeMinAlignment(memTypeIndex));

            res = AllocateMemoryOfType(
                vkMemReq.size,
                alignmentForMemType,
                requiresDedicatedAllocation || prefersDedicatedAllocation,
                dedicatedBuffer,
                dedicatedImage,
                createInfo,
                memTypeIndex,
                suballocType,
                allocationCount,
                pAllocations);
            // Succeeded on first try.
            if(res == VK_SUCCESS)
            {
                return res;
            }
            // Allocation from this memory type failed. Try other compatible memory types.
            else
            {
                for(;;)
                {
                    // Remove old memTypeIndex from list of possibilities.
                    memoryTypeBits &= ~(1u << memTypeIndex);
                    // Find alternative memTypeIndex.
                    res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
                    if(res == VK_SUCCESS)
                    {
                        alignmentForMemType = VMA_MAX(
                            vkMemReq.alignment,
                            GetMemoryTypeMinAlignment(memTypeIndex));

                        res = AllocateMemoryOfType(
                            vkMemReq.size,
                            alignmentForMemType,
                            requiresDedicatedAllocation || prefersDedicatedAllocation,
                            dedicatedBuffer,
                            dedicatedImage,
                            createInfo,
                            memTypeIndex,
                            suballocType,
                            allocationCount,
                            pAllocations);
                        // Allocation from this alternative memory type succeeded.
                        if(res == VK_SUCCESS)
                        {
                            return res;
                        }
                        // else: Allocation from this memory type failed. Try next one - next loop iteration.
                    }
                    // No other matching memory type index could be found.
                    else
                    {
                        // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
                        return VK_ERROR_OUT_OF_DEVICE_MEMORY;
                    }
                }
            }
        }
        // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
        else
            return res;
    }
}

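// Allocations are freed in reverse order. TouchAllocation is used to detect
// allocations that have already become lost: those are only destroyed, not
// returned to their block or dedicated-allocation list.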
void VmaAllocator_T::FreeMemory(
    size_t allocationCount,
    const VmaAllocation* pAllocations)
{
    VMA_ASSERT(pAllocations);

    for(size_t allocIndex = allocationCount; allocIndex--; )
    {
        VmaAllocation allocation = pAllocations[allocIndex];

        if(allocation != VK_NULL_HANDLE)
        {
            if(TouchAllocation(allocation))
            {
                if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
                {
                    FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
                }

                switch(allocation->GetType())
                {
                case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
                    {
                        VmaBlockVector* pBlockVector = VMA_NULL;
                        VmaPool hPool = allocation->GetPool();
                        if(hPool != VK_NULL_HANDLE)
                        {
                            pBlockVector = &hPool->m_BlockVector;
                        }
                        else
                        {
                            const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
                            pBlockVector = m_pBlockVectors[memTypeIndex];
                        }
                        pBlockVector->Free(allocation);
                    }
                    break;
                case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
                    FreeDedicatedMemory(allocation);
                    break;
                default:
                    VMA_ASSERT(0);
                }
            }

            allocation->SetUserData(this, VMA_NULL);
            vma_delete(this, allocation);
        }
    }
}

VkResult VmaAllocator_T::ResizeAllocation(
    const VmaAllocation alloc,
    VkDeviceSize newSize)
{
    if(newSize == 0 || alloc->GetLastUseFrameIndex() == VMA_FRAME_INDEX_LOST)
    {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    if(newSize == alloc->GetSize())
    {
        return VK_SUCCESS;
    }

    switch(alloc->GetType())
    {
    case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
        return VK_ERROR_FEATURE_NOT_PRESENT;
    case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
        if(alloc->GetBlock()->m_pMetadata->ResizeAllocation(alloc, newSize))
        {
            alloc->ChangeSize(newSize);
            VMA_HEAVY_ASSERT(alloc->GetBlock()->m_pMetadata->Validate());
            return VK_SUCCESS;
        }
        else
        {
            return VK_ERROR_OUT_OF_POOL_MEMORY;
        }
    default:
        VMA_ASSERT(0);
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
}

void VmaAllocator_T::CalculateStats(VmaStats* pStats)
{
    // Initialize.
    InitStatInfo(pStats->total);
    for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
        InitStatInfo(pStats->memoryType[i]);
    for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
        InitStatInfo(pStats->memoryHeap[i]);

    // Process default pools.
    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    {
        VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
        VMA_ASSERT(pBlockVector);
        pBlockVector->AddStats(pStats);
    }

    // Process custom pools.
    {
        VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
        for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
        {
            m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
        }
    }

    // Process dedicated allocations.
    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    {
        const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
        VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocVector);
        for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
        {
            VmaStatInfo allocationStatInfo;
            (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
            VmaAddStatInfo(pStats->total, allocationStatInfo);
            VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
            VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
        }
    }

    // Postprocess.
    VmaPostprocessCalcStatInfo(pStats->total);
    for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
        VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
    for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
        VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
}

static const uint32_t VMA_VENDOR_ID_AMD = 4098;

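// 4098 = 0x1002, the PCI vendor ID of AMD as reported in
// VkPhysicalDeviceProperties::vendorID.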
VkResult VmaAllocator_T::DefragmentationBegin(
    const VmaDefragmentationInfo2& info,
    VmaDefragmentationStats* pStats,
    VmaDefragmentationContext* pContext)
{
    if(info.pAllocationsChanged != VMA_NULL)
    {
        memset(info.pAllocationsChanged, 0, info.allocationCount * sizeof(VkBool32));
    }

    *pContext = vma_new(this, VmaDefragmentationContext_T)(
        this, m_CurrentFrameIndex.load(), info.flags, pStats);

    (*pContext)->AddPools(info.poolCount, info.pPools);
    (*pContext)->AddAllocations(
        info.allocationCount, info.pAllocations, info.pAllocationsChanged);

    VkResult res = (*pContext)->Defragment(
        info.maxCpuBytesToMove, info.maxCpuAllocationsToMove,
        info.maxGpuBytesToMove, info.maxGpuAllocationsToMove,
        info.commandBuffer, pStats);

    if(res != VK_NOT_READY)
    {
        vma_delete(this, *pContext);
        *pContext = VMA_NULL;
    }

    return res;
}

VkResult VmaAllocator_T::DefragmentationEnd(
    VmaDefragmentationContext context)
{
    vma_delete(this, context);
    return VK_SUCCESS;
}

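// For allocations that can become lost, the functions below advance the last-use
// frame index with a compare-exchange loop instead of a lock, so concurrent
// callers converge on the current frame index without blocking.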
void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
{
    if(hAllocation->CanBecomeLost())
    {
        /*
        Warning: This is a carefully designed algorithm.
        Do not modify unless you really know what you're doing :)
        */
        const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
            {
                pAllocationInfo->memoryType = UINT32_MAX;
                pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
                pAllocationInfo->offset = 0;
                pAllocationInfo->size = hAllocation->GetSize();
                pAllocationInfo->pMappedData = VMA_NULL;
                pAllocationInfo->pUserData = hAllocation->GetUserData();
                return;
            }
            else if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
                pAllocationInfo->deviceMemory = hAllocation->GetMemory();
                pAllocationInfo->offset = hAllocation->GetOffset();
                pAllocationInfo->size = hAllocation->GetSize();
                pAllocationInfo->pMappedData = VMA_NULL;
                pAllocationInfo->pUserData = hAllocation->GetUserData();
                return;
            }
            else // Last use time earlier than current time.
            {
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
    }
    else
    {
#if VMA_STATS_STRING_ENABLED
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
            if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                break;
            }
            else // Last use time earlier than current time.
            {
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
#endif

        pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
        pAllocationInfo->deviceMemory = hAllocation->GetMemory();
        pAllocationInfo->offset = hAllocation->GetOffset();
        pAllocationInfo->size = hAllocation->GetSize();
        pAllocationInfo->pMappedData = hAllocation->GetMappedData();
        pAllocationInfo->pUserData = hAllocation->GetUserData();
    }
}

bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
{
    // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
    if(hAllocation->CanBecomeLost())
    {
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
            {
                return false;
            }
            else if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                return true;
            }
            else // Last use time earlier than current time.
            {
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
    }
    else
    {
#if VMA_STATS_STRING_ENABLED
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
14958  if(localLastUseFrameIndex == localCurrFrameIndex)
14959  {
14960  break;
14961  }
14962  else // Last use time earlier than current time.
14963  {
14964  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
14965  {
14966  localLastUseFrameIndex = localCurrFrameIndex;
14967  }
14968  }
14969  }
14970 #endif
14971 
14972  return true;
14973  }
14974 }
14975 
14976 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
14977 {
14978  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
14979 
14980  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
14981 
14982  if(newCreateInfo.maxBlockCount == 0)
14983  {
14984  newCreateInfo.maxBlockCount = SIZE_MAX;
14985  }
14986  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
14987  {
14988  return VK_ERROR_INITIALIZATION_FAILED;
14989  }
14990 
14991  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
14992 
14993  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
14994 
14995  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
14996  if(res != VK_SUCCESS)
14997  {
14998  vma_delete(this, *pPool);
14999  *pPool = VMA_NULL;
15000  return res;
15001  }
15002 
15003  // Add to m_Pools.
15004  {
15005  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
15006  (*pPool)->SetId(m_NextPoolId++);
15007  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
15008  }
15009 
15010  return VK_SUCCESS;
15011 }
15012 
15013 void VmaAllocator_T::DestroyPool(VmaPool pool)
15014 {
15015  // Remove from m_Pools.
15016  {
15017  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
15018  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
15019  VMA_ASSERT(success && "Pool not found in Allocator.");
15020  }
15021 
15022  vma_delete(this, pool);
15023 }
15024 
15025 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
15026 {
15027  pool->m_BlockVector.GetPoolStats(pPoolStats);
15028 }
15029 
15030 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
15031 {
15032  m_CurrentFrameIndex.store(frameIndex);
15033 }
15034 
15035 void VmaAllocator_T::MakePoolAllocationsLost(
15036  VmaPool hPool,
15037  size_t* pLostAllocationCount)
15038 {
15039  hPool->m_BlockVector.MakePoolAllocationsLost(
15040  m_CurrentFrameIndex.load(),
15041  pLostAllocationCount);
15042 }
15043 
15044 VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
15045 {
15046  return hPool->m_BlockVector.CheckCorruption();
15047 }
15048 
15049 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
15050 {
15051  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
15052 
15053  // Process default pools.
15054  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15055  {
15056  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
15057  {
15058  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
15059  VMA_ASSERT(pBlockVector);
15060  VkResult localRes = pBlockVector->CheckCorruption();
15061  switch(localRes)
15062  {
15063  case VK_ERROR_FEATURE_NOT_PRESENT:
15064  break;
15065  case VK_SUCCESS:
15066  finalRes = VK_SUCCESS;
15067  break;
15068  default:
15069  return localRes;
15070  }
15071  }
15072  }
15073 
15074  // Process custom pools.
15075  {
15076  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
15077  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
15078  {
15079  if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
15080  {
15081  VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
15082  switch(localRes)
15083  {
15084  case VK_ERROR_FEATURE_NOT_PRESENT:
15085  break;
15086  case VK_SUCCESS:
15087  finalRes = VK_SUCCESS;
15088  break;
15089  default:
15090  return localRes;
15091  }
15092  }
15093  }
15094  }
15095 
15096  return finalRes;
15097 }
15098 
15099 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
15100 {
15101  *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST, false);
15102  (*pAllocation)->InitLost();
15103 }
15104 
15105 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
15106 {
15107  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
15108 
15109  VkResult res;
15110  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
15111  {
15112  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
15113  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
15114  {
15115  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
15116  if(res == VK_SUCCESS)
15117  {
15118  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
15119  }
15120  }
15121  else
15122  {
15123  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
15124  }
15125  }
15126  else
15127  {
15128  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
15129  }
15130 
15131  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
15132  {
15133  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
15134  }
15135 
15136  return res;
15137 }
15138 
15139 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
15140 {
15141  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
15142  {
15143  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
15144  }
15145 
15146  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
15147 
15148  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
15149  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
15150  {
15151  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
15152  m_HeapSizeLimit[heapIndex] += size;
15153  }
15154 }
15155 
15156 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
15157 {
15158  if(hAllocation->CanBecomeLost())
15159  {
15160  return VK_ERROR_MEMORY_MAP_FAILED;
15161  }
15162 
15163  switch(hAllocation->GetType())
15164  {
15165  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15166  {
15167  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
15168  char *pBytes = VMA_NULL;
15169  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
15170  if(res == VK_SUCCESS)
15171  {
15172  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
15173  hAllocation->BlockAllocMap();
15174  }
15175  return res;
15176  }
15177  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15178  return hAllocation->DedicatedAllocMap(this, ppData);
15179  default:
15180  VMA_ASSERT(0);
15181  return VK_ERROR_MEMORY_MAP_FAILED;
15182  }
15183 }
15184 
15185 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
15186 {
15187  switch(hAllocation->GetType())
15188  {
15189  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15190  {
15191  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
15192  hAllocation->BlockAllocUnmap();
15193  pBlock->Unmap(this, 1);
15194  }
15195  break;
15196  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15197  hAllocation->DedicatedAllocUnmap(this);
15198  break;
15199  default:
15200  VMA_ASSERT(0);
15201  }
15202 }
15203 
15204 VkResult VmaAllocator_T::BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer)
15205 {
15206  VkResult res = VK_SUCCESS;
15207  switch(hAllocation->GetType())
15208  {
15209  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15210  res = GetVulkanFunctions().vkBindBufferMemory(
15211  m_hDevice,
15212  hBuffer,
15213  hAllocation->GetMemory(),
15214  0); //memoryOffset
15215  break;
15216  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15217  {
15218  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
15219  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
15220  res = pBlock->BindBufferMemory(this, hAllocation, hBuffer);
15221  break;
15222  }
15223  default:
15224  VMA_ASSERT(0);
15225  }
15226  return res;
15227 }
15228 
15229 VkResult VmaAllocator_T::BindImageMemory(VmaAllocation hAllocation, VkImage hImage)
15230 {
15231  VkResult res = VK_SUCCESS;
15232  switch(hAllocation->GetType())
15233  {
15234  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15235  res = GetVulkanFunctions().vkBindImageMemory(
15236  m_hDevice,
15237  hImage,
15238  hAllocation->GetMemory(),
15239  0); //memoryOffset
15240  break;
15241  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15242  {
15243  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
15244  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
15245  res = pBlock->BindImageMemory(this, hAllocation, hImage);
15246  break;
15247  }
15248  default:
15249  VMA_ASSERT(0);
15250  }
15251  return res;
15252 }
15253 
15254 void VmaAllocator_T::FlushOrInvalidateAllocation(
15255  VmaAllocation hAllocation,
15256  VkDeviceSize offset, VkDeviceSize size,
15257  VMA_CACHE_OPERATION op)
15258 {
15259  const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex();
15260  if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
15261  {
15262  const VkDeviceSize allocationSize = hAllocation->GetSize();
15263  VMA_ASSERT(offset <= allocationSize);
15264 
15265  const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
15266 
15267  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
15268  memRange.memory = hAllocation->GetMemory();
15269 
15270  switch(hAllocation->GetType())
15271  {
15272  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15273  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
15274  if(size == VK_WHOLE_SIZE)
15275  {
15276  memRange.size = allocationSize - memRange.offset;
15277  }
15278  else
15279  {
15280  VMA_ASSERT(offset + size <= allocationSize);
15281  memRange.size = VMA_MIN(
15282  VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize),
15283  allocationSize - memRange.offset);
15284  }
15285  break;
15286 
15287  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15288  {
15289  // 1. Still within this allocation.
15290  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
15291  if(size == VK_WHOLE_SIZE)
15292  {
15293  size = allocationSize - offset;
15294  }
15295  else
15296  {
15297  VMA_ASSERT(offset + size <= allocationSize);
15298  }
15299  memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize);
15300 
15301  // 2. Adjust to whole block.
15302  const VkDeviceSize allocationOffset = hAllocation->GetOffset();
15303  VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
15304  const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();
15305  memRange.offset += allocationOffset;
15306  memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);
15307 
15308  break;
15309  }
15310 
15311  default:
15312  VMA_ASSERT(0);
15313  }
15314 
15315  switch(op)
15316  {
15317  case VMA_CACHE_FLUSH:
15318  (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
15319  break;
15320  case VMA_CACHE_INVALIDATE:
15321  (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
15322  break;
15323  default:
15324  VMA_ASSERT(0);
15325  }
15326  }
15327  // else: Just ignore this call.
15328 }
15329 
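// [Editor's note] A worked example of the alignment math above (illustrative, with assumed
// values): for a dedicated allocation of size 4096 and nonCoherentAtomSize = 64, a call with
// offset = 100, size = 200 gives memRange.offset = VmaAlignDown(100, 64) = 64 and
// memRange.size = VmaAlignUp(200 + (100 - 64), 64) = VmaAlignUp(236, 64) = 256, clamped to
// allocationSize - memRange.offset. Both values end up multiples of nonCoherentAtomSize, as
// the Vulkan spec requires for vkFlushMappedMemoryRanges / vkInvalidateMappedMemoryRanges.
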
15330 void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
15331 {
15332  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
15333 
15334  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
15335  {
15336  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
15337  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
15338  VMA_ASSERT(pDedicatedAllocations);
15339  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
15340  VMA_ASSERT(success);
15341  }
15342 
15343  VkDeviceMemory hMemory = allocation->GetMemory();
15344 
15345  /*
15346  There is no need to call this, because the Vulkan spec allows skipping vkUnmapMemory
15347  before vkFreeMemory.
15348 
15349  if(allocation->GetMappedData() != VMA_NULL)
15350  {
15351  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
15352  }
15353  */
15354 
15355  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
15356 
15357  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
15358 }
15359 
15360 void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
15361 {
15362  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
15363  !hAllocation->CanBecomeLost() &&
15364  (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
15365  {
15366  void* pData = VMA_NULL;
15367  VkResult res = Map(hAllocation, &pData);
15368  if(res == VK_SUCCESS)
15369  {
15370  memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
15371  FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
15372  Unmap(hAllocation);
15373  }
15374  else
15375  {
15376  VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
15377  }
15378  }
15379 }
15380 
15381 #if VMA_STATS_STRING_ENABLED
15382 
15383 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
15384 {
15385  bool dedicatedAllocationsStarted = false;
15386  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15387  {
15388  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
15389  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
15390  VMA_ASSERT(pDedicatedAllocVector);
15391  if(pDedicatedAllocVector->empty() == false)
15392  {
15393  if(dedicatedAllocationsStarted == false)
15394  {
15395  dedicatedAllocationsStarted = true;
15396  json.WriteString("DedicatedAllocations");
15397  json.BeginObject();
15398  }
15399 
15400  json.BeginString("Type ");
15401  json.ContinueString(memTypeIndex);
15402  json.EndString();
15403 
15404  json.BeginArray();
15405 
15406  for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
15407  {
15408  json.BeginObject(true);
15409  const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
15410  hAlloc->PrintParameters(json);
15411  json.EndObject();
15412  }
15413 
15414  json.EndArray();
15415  }
15416  }
15417  if(dedicatedAllocationsStarted)
15418  {
15419  json.EndObject();
15420  }
15421 
15422  {
15423  bool allocationsStarted = false;
15424  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15425  {
15426  if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
15427  {
15428  if(allocationsStarted == false)
15429  {
15430  allocationsStarted = true;
15431  json.WriteString("DefaultPools");
15432  json.BeginObject();
15433  }
15434 
15435  json.BeginString("Type ");
15436  json.ContinueString(memTypeIndex);
15437  json.EndString();
15438 
15439  m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
15440  }
15441  }
15442  if(allocationsStarted)
15443  {
15444  json.EndObject();
15445  }
15446  }
15447 
15448  // Custom pools
15449  {
15450  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
15451  const size_t poolCount = m_Pools.size();
15452  if(poolCount > 0)
15453  {
15454  json.WriteString("Pools");
15455  json.BeginObject();
15456  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
15457  {
15458  json.BeginString();
15459  json.ContinueString(m_Pools[poolIndex]->GetId());
15460  json.EndString();
15461 
15462  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
15463  }
15464  json.EndObject();
15465  }
15466  }
15467 }
15468 
15469 #endif // #if VMA_STATS_STRING_ENABLED
15470 
15471 ////////////////////////////////////////////////////////////////////////////////
15472 // Public interface
15473 
15474 VkResult vmaCreateAllocator(
15475  const VmaAllocatorCreateInfo* pCreateInfo,
15476  VmaAllocator* pAllocator)
15477 {
15478  VMA_ASSERT(pCreateInfo && pAllocator);
15479  VMA_DEBUG_LOG("vmaCreateAllocator");
15480  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
15481  return (*pAllocator)->Init(pCreateInfo);
15482 }
15483 
15484 void vmaDestroyAllocator(
15485  VmaAllocator allocator)
15486 {
15487  if(allocator != VK_NULL_HANDLE)
15488  {
15489  VMA_DEBUG_LOG("vmaDestroyAllocator");
15490  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
15491  vma_delete(&allocationCallbacks, allocator);
15492  }
15493 }
15494 
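// [Editor's note] A minimal usage sketch for the allocator lifetime functions above
// (illustrative only, not part of vk_mem_alloc.h; `physicalDevice` and `device` are assumed
// to be valid handles created by the application):
//
//     VmaAllocatorCreateInfo allocatorInfo = {};
//     allocatorInfo.physicalDevice = physicalDevice;
//     allocatorInfo.device = device;
//
//     VmaAllocator allocator;
//     VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
//     // ... create buffers, images, allocations ...
//     vmaDestroyAllocator(allocator);
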
15495 void vmaGetPhysicalDeviceProperties(
15496  VmaAllocator allocator,
15497  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
15498 {
15499  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
15500  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
15501 }
15502 
15503 void vmaGetMemoryProperties(
15504  VmaAllocator allocator,
15505  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
15506 {
15507  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
15508  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
15509 }
15510 
15511 void vmaGetMemoryTypeProperties(
15512  VmaAllocator allocator,
15513  uint32_t memoryTypeIndex,
15514  VkMemoryPropertyFlags* pFlags)
15515 {
15516  VMA_ASSERT(allocator && pFlags);
15517  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
15518  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
15519 }
15520 
15521 void vmaSetCurrentFrameIndex(
15522  VmaAllocator allocator,
15523  uint32_t frameIndex)
15524 {
15525  VMA_ASSERT(allocator);
15526  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
15527 
15528  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15529 
15530  allocator->SetCurrentFrameIndex(frameIndex);
15531 }
15532 
15533 void vmaCalculateStats(
15534  VmaAllocator allocator,
15535  VmaStats* pStats)
15536 {
15537  VMA_ASSERT(allocator && pStats);
15538  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15539  allocator->CalculateStats(pStats);
15540 }
15541 
15542 #if VMA_STATS_STRING_ENABLED
15543 
15544 void vmaBuildStatsString(
15545  VmaAllocator allocator,
15546  char** ppStatsString,
15547  VkBool32 detailedMap)
15548 {
15549  VMA_ASSERT(allocator && ppStatsString);
15550  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15551 
15552  VmaStringBuilder sb(allocator);
15553  {
15554  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
15555  json.BeginObject();
15556 
15557  VmaStats stats;
15558  allocator->CalculateStats(&stats);
15559 
15560  json.WriteString("Total");
15561  VmaPrintStatInfo(json, stats.total);
15562 
15563  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
15564  {
15565  json.BeginString("Heap ");
15566  json.ContinueString(heapIndex);
15567  json.EndString();
15568  json.BeginObject();
15569 
15570  json.WriteString("Size");
15571  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
15572 
15573  json.WriteString("Flags");
15574  json.BeginArray(true);
15575  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
15576  {
15577  json.WriteString("DEVICE_LOCAL");
15578  }
15579  json.EndArray();
15580 
15581  if(stats.memoryHeap[heapIndex].blockCount > 0)
15582  {
15583  json.WriteString("Stats");
15584  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
15585  }
15586 
15587  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
15588  {
15589  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
15590  {
15591  json.BeginString("Type ");
15592  json.ContinueString(typeIndex);
15593  json.EndString();
15594 
15595  json.BeginObject();
15596 
15597  json.WriteString("Flags");
15598  json.BeginArray(true);
15599  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
15600  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
15601  {
15602  json.WriteString("DEVICE_LOCAL");
15603  }
15604  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
15605  {
15606  json.WriteString("HOST_VISIBLE");
15607  }
15608  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
15609  {
15610  json.WriteString("HOST_COHERENT");
15611  }
15612  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
15613  {
15614  json.WriteString("HOST_CACHED");
15615  }
15616  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
15617  {
15618  json.WriteString("LAZILY_ALLOCATED");
15619  }
15620  json.EndArray();
15621 
15622  if(stats.memoryType[typeIndex].blockCount > 0)
15623  {
15624  json.WriteString("Stats");
15625  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
15626  }
15627 
15628  json.EndObject();
15629  }
15630  }
15631 
15632  json.EndObject();
15633  }
15634  if(detailedMap == VK_TRUE)
15635  {
15636  allocator->PrintDetailedMap(json);
15637  }
15638 
15639  json.EndObject();
15640  }
15641 
15642  const size_t len = sb.GetLength();
15643  char* const pChars = vma_new_array(allocator, char, len + 1);
15644  if(len > 0)
15645  {
15646  memcpy(pChars, sb.GetData(), len);
15647  }
15648  pChars[len] = '\0';
15649  *ppStatsString = pChars;
15650 }
15651 
15652 void vmaFreeStatsString(
15653  VmaAllocator allocator,
15654  char* pStatsString)
15655 {
15656  if(pStatsString != VMA_NULL)
15657  {
15658  VMA_ASSERT(allocator);
15659  size_t len = strlen(pStatsString);
15660  vma_delete_array(allocator, pStatsString, len + 1);
15661  }
15662 }
15663 
15664 #endif // #if VMA_STATS_STRING_ENABLED
15665 
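// [Editor's note] A short usage sketch for the statistics API above (illustrative only;
// requires VMA_STATS_STRING_ENABLED, which defaults to 1):
//
//     char* statsString = VMA_NULL;
//     vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE: include detailed map
//     printf("%s\n", statsString);
//     vmaFreeStatsString(allocator, statsString);
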
15666 /*
15667 This function is not protected by any mutex because it just reads immutable data.
15668 */
15669 VkResult vmaFindMemoryTypeIndex(
15670  VmaAllocator allocator,
15671  uint32_t memoryTypeBits,
15672  const VmaAllocationCreateInfo* pAllocationCreateInfo,
15673  uint32_t* pMemoryTypeIndex)
15674 {
15675  VMA_ASSERT(allocator != VK_NULL_HANDLE);
15676  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
15677  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
15678 
15679  if(pAllocationCreateInfo->memoryTypeBits != 0)
15680  {
15681  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
15682  }
15683 
15684  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
15685  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
15686 
15687  const bool mapped = (pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
15688  if(mapped)
15689  {
15690  preferredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
15691  }
15692 
15693  // Convert usage to requiredFlags and preferredFlags.
15694  switch(pAllocationCreateInfo->usage)
15695  {
15696  case VMA_MEMORY_USAGE_UNKNOWN:
15697  break;
15698  case VMA_MEMORY_USAGE_GPU_ONLY:
15699  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
15700  {
15701  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
15702  }
15703  break;
15704  case VMA_MEMORY_USAGE_CPU_ONLY:
15705  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
15706  break;
15707  case VMA_MEMORY_USAGE_CPU_TO_GPU:
15708  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
15709  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
15710  {
15711  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
15712  }
15713  break;
15714  case VMA_MEMORY_USAGE_GPU_TO_CPU:
15715  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
15716  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
15717  break;
15718  default:
15719  break;
15720  }
15721 
15722  *pMemoryTypeIndex = UINT32_MAX;
15723  uint32_t minCost = UINT32_MAX;
15724  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
15725  memTypeIndex < allocator->GetMemoryTypeCount();
15726  ++memTypeIndex, memTypeBit <<= 1)
15727  {
15728  // This memory type is acceptable according to memoryTypeBits bitmask.
15729  if((memTypeBit & memoryTypeBits) != 0)
15730  {
15731  const VkMemoryPropertyFlags currFlags =
15732  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
15733  // This memory type contains requiredFlags.
15734  if((requiredFlags & ~currFlags) == 0)
15735  {
15736  // Calculate cost as number of bits from preferredFlags not present in this memory type.
15737  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
15738  // Remember memory type with lowest cost.
15739  if(currCost < minCost)
15740  {
15741  *pMemoryTypeIndex = memTypeIndex;
15742  if(currCost == 0)
15743  {
15744  return VK_SUCCESS;
15745  }
15746  minCost = currCost;
15747  }
15748  }
15749  }
15750  }
15751  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
15752 }
15753 
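// [Editor's note] A usage sketch for vmaFindMemoryTypeIndex (illustrative only): pick a
// host-visible memory type for a staging resource, accepting any memory type bit:
//
//     VmaAllocationCreateInfo allocCreateInfo = {};
//     allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
//
//     uint32_t memTypeIndex;
//     VkResult res = vmaFindMemoryTypeIndex(allocator, UINT32_MAX, &allocCreateInfo, &memTypeIndex);
//     // On VK_SUCCESS, memTypeIndex is the acceptable type with the fewest missing preferred flags.
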
15754 VkResult vmaFindMemoryTypeIndexForBufferInfo(
15755  VmaAllocator allocator,
15756  const VkBufferCreateInfo* pBufferCreateInfo,
15757  const VmaAllocationCreateInfo* pAllocationCreateInfo,
15758  uint32_t* pMemoryTypeIndex)
15759 {
15760  VMA_ASSERT(allocator != VK_NULL_HANDLE);
15761  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
15762  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
15763  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
15764 
15765  const VkDevice hDev = allocator->m_hDevice;
15766  VkBuffer hBuffer = VK_NULL_HANDLE;
15767  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
15768  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
15769  if(res == VK_SUCCESS)
15770  {
15771  VkMemoryRequirements memReq = {};
15772  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
15773  hDev, hBuffer, &memReq);
15774 
15775  res = vmaFindMemoryTypeIndex(
15776  allocator,
15777  memReq.memoryTypeBits,
15778  pAllocationCreateInfo,
15779  pMemoryTypeIndex);
15780 
15781  allocator->GetVulkanFunctions().vkDestroyBuffer(
15782  hDev, hBuffer, allocator->GetAllocationCallbacks());
15783  }
15784  return res;
15785 }
15786 
15787 VkResult vmaFindMemoryTypeIndexForImageInfo(
15788  VmaAllocator allocator,
15789  const VkImageCreateInfo* pImageCreateInfo,
15790  const VmaAllocationCreateInfo* pAllocationCreateInfo,
15791  uint32_t* pMemoryTypeIndex)
15792 {
15793  VMA_ASSERT(allocator != VK_NULL_HANDLE);
15794  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
15795  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
15796  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
15797 
15798  const VkDevice hDev = allocator->m_hDevice;
15799  VkImage hImage = VK_NULL_HANDLE;
15800  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
15801  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
15802  if(res == VK_SUCCESS)
15803  {
15804  VkMemoryRequirements memReq = {};
15805  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
15806  hDev, hImage, &memReq);
15807 
15808  res = vmaFindMemoryTypeIndex(
15809  allocator,
15810  memReq.memoryTypeBits,
15811  pAllocationCreateInfo,
15812  pMemoryTypeIndex);
15813 
15814  allocator->GetVulkanFunctions().vkDestroyImage(
15815  hDev, hImage, allocator->GetAllocationCallbacks());
15816  }
15817  return res;
15818 }
15819 
15820 VkResult vmaCreatePool(
15821  VmaAllocator allocator,
15822  const VmaPoolCreateInfo* pCreateInfo,
15823  VmaPool* pPool)
15824 {
15825  VMA_ASSERT(allocator && pCreateInfo && pPool);
15826 
15827  VMA_DEBUG_LOG("vmaCreatePool");
15828 
15829  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15830 
15831  VkResult res = allocator->CreatePool(pCreateInfo, pPool);
15832 
15833 #if VMA_RECORDING_ENABLED
15834  if(allocator->GetRecorder() != VMA_NULL)
15835  {
15836  allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
15837  }
15838 #endif
15839 
15840  return res;
15841 }
15842 
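// [Editor's note] A usage sketch for vmaCreatePool (illustrative only; block size and count
// are arbitrary example values, and memTypeIndex comes e.g. from vmaFindMemoryTypeIndex):
//
//     VmaPoolCreateInfo poolCreateInfo = {};
//     poolCreateInfo.memoryTypeIndex = memTypeIndex;
//     poolCreateInfo.blockSize = 128ull * 1024 * 1024;
//     poolCreateInfo.maxBlockCount = 2;
//
//     VmaPool pool;
//     VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
//     // ... allocate with VmaAllocationCreateInfo::pool = pool ...
//     vmaDestroyPool(allocator, pool); // defined just below
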
15843 void vmaDestroyPool(
15844  VmaAllocator allocator,
15845  VmaPool pool)
15846 {
15847  VMA_ASSERT(allocator);
15848 
15849  if(pool == VK_NULL_HANDLE)
15850  {
15851  return;
15852  }
15853 
15854  VMA_DEBUG_LOG("vmaDestroyPool");
15855 
15856  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15857 
15858 #if VMA_RECORDING_ENABLED
15859  if(allocator->GetRecorder() != VMA_NULL)
15860  {
15861  allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
15862  }
15863 #endif
15864 
15865  allocator->DestroyPool(pool);
15866 }
15867 
15868 void vmaGetPoolStats(
15869  VmaAllocator allocator,
15870  VmaPool pool,
15871  VmaPoolStats* pPoolStats)
15872 {
15873  VMA_ASSERT(allocator && pool && pPoolStats);
15874 
15875  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15876 
15877  allocator->GetPoolStats(pool, pPoolStats);
15878 }
15879 
15880 void vmaMakePoolAllocationsLost(
15881  VmaAllocator allocator,
15882  VmaPool pool,
15883  size_t* pLostAllocationCount)
15884 {
15885  VMA_ASSERT(allocator && pool);
15886 
15887  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15888 
15889 #if VMA_RECORDING_ENABLED
15890  if(allocator->GetRecorder() != VMA_NULL)
15891  {
15892  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
15893  }
15894 #endif
15895 
15896  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
15897 }
15898 
15899 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
15900 {
15901  VMA_ASSERT(allocator && pool);
15902 
15903  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15904 
15905  VMA_DEBUG_LOG("vmaCheckPoolCorruption");
15906 
15907  return allocator->CheckPoolCorruption(pool);
15908 }
15909 
15910 VkResult vmaAllocateMemory(
15911  VmaAllocator allocator,
15912  const VkMemoryRequirements* pVkMemoryRequirements,
15913  const VmaAllocationCreateInfo* pCreateInfo,
15914  VmaAllocation* pAllocation,
15915  VmaAllocationInfo* pAllocationInfo)
15916 {
15917  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
15918 
15919  VMA_DEBUG_LOG("vmaAllocateMemory");
15920 
15921  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15922 
15923  VkResult result = allocator->AllocateMemory(
15924  *pVkMemoryRequirements,
15925  false, // requiresDedicatedAllocation
15926  false, // prefersDedicatedAllocation
15927  VK_NULL_HANDLE, // dedicatedBuffer
15928  VK_NULL_HANDLE, // dedicatedImage
15929  *pCreateInfo,
15930  VMA_SUBALLOCATION_TYPE_UNKNOWN,
15931  1, // allocationCount
15932  pAllocation);
15933 
15934 #if VMA_RECORDING_ENABLED
15935  if(allocator->GetRecorder() != VMA_NULL)
15936  {
15937  allocator->GetRecorder()->RecordAllocateMemory(
15938  allocator->GetCurrentFrameIndex(),
15939  *pVkMemoryRequirements,
15940  *pCreateInfo,
15941  *pAllocation);
15942  }
15943 #endif
15944 
15945  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
15946  {
15947  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
15948  }
15949 
15950  return result;
15951 }
15952 
15953 VkResult vmaAllocateMemoryPages(
15954  VmaAllocator allocator,
15955  const VkMemoryRequirements* pVkMemoryRequirements,
15956  const VmaAllocationCreateInfo* pCreateInfo,
15957  size_t allocationCount,
15958  VmaAllocation* pAllocations,
15959  VmaAllocationInfo* pAllocationInfo)
15960 {
15961  if(allocationCount == 0)
15962  {
15963  return VK_SUCCESS;
15964  }
15965 
15966  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocations);
15967 
15968  VMA_DEBUG_LOG("vmaAllocateMemoryPages");
15969 
15970  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15971 
15972  VkResult result = allocator->AllocateMemory(
15973  *pVkMemoryRequirements,
15974  false, // requiresDedicatedAllocation
15975  false, // prefersDedicatedAllocation
15976  VK_NULL_HANDLE, // dedicatedBuffer
15977  VK_NULL_HANDLE, // dedicatedImage
15978  *pCreateInfo,
15979  VMA_SUBALLOCATION_TYPE_UNKNOWN,
15980  allocationCount,
15981  pAllocations);
15982 
15983 #if VMA_RECORDING_ENABLED
15984  if(allocator->GetRecorder() != VMA_NULL)
15985  {
15986  allocator->GetRecorder()->RecordAllocateMemoryPages(
15987  allocator->GetCurrentFrameIndex(),
15988  *pVkMemoryRequirements,
15989  *pCreateInfo,
15990  (uint64_t)allocationCount,
15991  pAllocations);
15992  }
15993 #endif
15994 
15995  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
15996  {
15997  for(size_t i = 0; i < allocationCount; ++i)
15998  {
15999  allocator->GetAllocationInfo(pAllocations[i], pAllocationInfo + i);
16000  }
16001  }
16002 
16003  return result;
16004 }
16005 
16006 VkResult vmaAllocateMemoryForBuffer(
16007  VmaAllocator allocator,
16008  VkBuffer buffer,
16009  const VmaAllocationCreateInfo* pCreateInfo,
16010  VmaAllocation* pAllocation,
16011  VmaAllocationInfo* pAllocationInfo)
16012 {
16013  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
16014 
16015  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
16016 
16017  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16018 
16019  VkMemoryRequirements vkMemReq = {};
16020  bool requiresDedicatedAllocation = false;
16021  bool prefersDedicatedAllocation = false;
16022  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
16023  requiresDedicatedAllocation,
16024  prefersDedicatedAllocation);
16025 
16026  VkResult result = allocator->AllocateMemory(
16027  vkMemReq,
16028  requiresDedicatedAllocation,
16029  prefersDedicatedAllocation,
16030  buffer, // dedicatedBuffer
16031  VK_NULL_HANDLE, // dedicatedImage
16032  *pCreateInfo,
16033  VMA_SUBALLOCATION_TYPE_BUFFER,
16034  1, // allocationCount
16035  pAllocation);
16036 
16037 #if VMA_RECORDING_ENABLED
16038  if(allocator->GetRecorder() != VMA_NULL)
16039  {
16040  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
16041  allocator->GetCurrentFrameIndex(),
16042  vkMemReq,
16043  requiresDedicatedAllocation,
16044  prefersDedicatedAllocation,
16045  *pCreateInfo,
16046  *pAllocation);
16047  }
16048 #endif
16049 
16050  if(pAllocationInfo && result == VK_SUCCESS)
16051  {
16052  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
16053  }
16054 
16055  return result;
16056 }
16057 
16058 VkResult vmaAllocateMemoryForImage(
16059  VmaAllocator allocator,
16060  VkImage image,
16061  const VmaAllocationCreateInfo* pCreateInfo,
16062  VmaAllocation* pAllocation,
16063  VmaAllocationInfo* pAllocationInfo)
16064 {
16065  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
16066 
16067  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
16068 
16069  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16070 
16071  VkMemoryRequirements vkMemReq = {};
16072  bool requiresDedicatedAllocation = false;
16073  bool prefersDedicatedAllocation = false;
16074  allocator->GetImageMemoryRequirements(image, vkMemReq,
16075  requiresDedicatedAllocation, prefersDedicatedAllocation);
16076 
16077  VkResult result = allocator->AllocateMemory(
16078  vkMemReq,
16079  requiresDedicatedAllocation,
16080  prefersDedicatedAllocation,
16081  VK_NULL_HANDLE, // dedicatedBuffer
16082  image, // dedicatedImage
16083  *pCreateInfo,
16084  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
16085  1, // allocationCount
16086  pAllocation);
16087 
16088 #if VMA_RECORDING_ENABLED
16089  if(allocator->GetRecorder() != VMA_NULL)
16090  {
16091  allocator->GetRecorder()->RecordAllocateMemoryForImage(
16092  allocator->GetCurrentFrameIndex(),
16093  vkMemReq,
16094  requiresDedicatedAllocation,
16095  prefersDedicatedAllocation,
16096  *pCreateInfo,
16097  *pAllocation);
16098  }
16099 #endif
16100 
16101  if(pAllocationInfo && result == VK_SUCCESS)
16102  {
16103  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
16104  }
16105 
16106  return result;
16107 }
16108 
16109 void vmaFreeMemory(
16110  VmaAllocator allocator,
16111  VmaAllocation allocation)
16112 {
16113  VMA_ASSERT(allocator);
16114 
16115  if(allocation == VK_NULL_HANDLE)
16116  {
16117  return;
16118  }
16119 
16120  VMA_DEBUG_LOG("vmaFreeMemory");
16121 
16122  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16123 
16124 #if VMA_RECORDING_ENABLED
16125  if(allocator->GetRecorder() != VMA_NULL)
16126  {
16127  allocator->GetRecorder()->RecordFreeMemory(
16128  allocator->GetCurrentFrameIndex(),
16129  allocation);
16130  }
16131 #endif
16132 
16133  allocator->FreeMemory(
16134  1, // allocationCount
16135  &allocation);
16136 }
16137 
16138 void vmaFreeMemoryPages(
16139  VmaAllocator allocator,
16140  size_t allocationCount,
16141  VmaAllocation* pAllocations)
16142 {
16143  if(allocationCount == 0)
16144  {
16145  return;
16146  }
16147 
16148  VMA_ASSERT(allocator);
16149 
16150  VMA_DEBUG_LOG("vmaFreeMemoryPages");
16151 
16152  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16153 
16154 #if VMA_RECORDING_ENABLED
16155  if(allocator->GetRecorder() != VMA_NULL)
16156  {
16157  allocator->GetRecorder()->RecordFreeMemoryPages(
16158  allocator->GetCurrentFrameIndex(),
16159  (uint64_t)allocationCount,
16160  pAllocations);
16161  }
16162 #endif
16163 
16164  allocator->FreeMemory(allocationCount, pAllocations);
16165 }
16166 
16167 VkResult vmaResizeAllocation(
16168  VmaAllocator allocator,
16169  VmaAllocation allocation,
16170  VkDeviceSize newSize)
16171 {
16172  VMA_ASSERT(allocator && allocation);
16173 
16174  VMA_DEBUG_LOG("vmaResizeAllocation");
16175 
16176  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16177 
16178 #if VMA_RECORDING_ENABLED
16179  if(allocator->GetRecorder() != VMA_NULL)
16180  {
16181  allocator->GetRecorder()->RecordResizeAllocation(
16182  allocator->GetCurrentFrameIndex(),
16183  allocation,
16184  newSize);
16185  }
16186 #endif
16187 
16188  return allocator->ResizeAllocation(allocation, newSize);
16189 }
16190 
16191 void vmaGetAllocationInfo(
16192  VmaAllocator allocator,
16193  VmaAllocation allocation,
16194  VmaAllocationInfo* pAllocationInfo)
16195 {
16196  VMA_ASSERT(allocator && allocation && pAllocationInfo);
16197 
16198  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16199 
16200 #if VMA_RECORDING_ENABLED
16201  if(allocator->GetRecorder() != VMA_NULL)
16202  {
16203  allocator->GetRecorder()->RecordGetAllocationInfo(
16204  allocator->GetCurrentFrameIndex(),
16205  allocation);
16206  }
16207 #endif
16208 
16209  allocator->GetAllocationInfo(allocation, pAllocationInfo);
16210 }
16211 
16212 VkBool32 vmaTouchAllocation(
16213  VmaAllocator allocator,
16214  VmaAllocation allocation)
16215 {
16216  VMA_ASSERT(allocator && allocation);
16217 
16218  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16219 
16220 #if VMA_RECORDING_ENABLED
16221  if(allocator->GetRecorder() != VMA_NULL)
16222  {
16223  allocator->GetRecorder()->RecordTouchAllocation(
16224  allocator->GetCurrentFrameIndex(),
16225  allocation);
16226  }
16227 #endif
16228 
16229  return allocator->TouchAllocation(allocation);
16230 }
16231 
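// [Editor's note] A sketch of the per-frame pattern that vmaTouchAllocation supports for
// allocations created with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT (illustrative only):
//
//     vmaSetCurrentFrameIndex(allocator, frameIndex);
//     if(vmaTouchAllocation(allocator, alloc) == VK_FALSE)
//     {
//         // Allocation became lost: free it and recreate the resource.
//     }
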
16232 void vmaSetAllocationUserData(
16233  VmaAllocator allocator,
16234  VmaAllocation allocation,
16235  void* pUserData)
16236 {
16237  VMA_ASSERT(allocator && allocation);
16238 
16239  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16240 
16241  allocation->SetUserData(allocator, pUserData);
16242 
16243 #if VMA_RECORDING_ENABLED
16244  if(allocator->GetRecorder() != VMA_NULL)
16245  {
16246  allocator->GetRecorder()->RecordSetAllocationUserData(
16247  allocator->GetCurrentFrameIndex(),
16248  allocation,
16249  pUserData);
16250  }
16251 #endif
16252 }
16253 
16254 void vmaCreateLostAllocation(
16255  VmaAllocator allocator,
16256  VmaAllocation* pAllocation)
16257 {
16258  VMA_ASSERT(allocator && pAllocation);
16259 
16260  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
16261 
16262  allocator->CreateLostAllocation(pAllocation);
16263 
16264 #if VMA_RECORDING_ENABLED
16265  if(allocator->GetRecorder() != VMA_NULL)
16266  {
16267  allocator->GetRecorder()->RecordCreateLostAllocation(
16268  allocator->GetCurrentFrameIndex(),
16269  *pAllocation);
16270  }
16271 #endif
16272 }
16273 
16274 VkResult vmaMapMemory(
16275  VmaAllocator allocator,
16276  VmaAllocation allocation,
16277  void** ppData)
16278 {
16279  VMA_ASSERT(allocator && allocation && ppData);
16280 
16281  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16282 
16283  VkResult res = allocator->Map(allocation, ppData);
16284 
16285 #if VMA_RECORDING_ENABLED
16286  if(allocator->GetRecorder() != VMA_NULL)
16287  {
16288  allocator->GetRecorder()->RecordMapMemory(
16289  allocator->GetCurrentFrameIndex(),
16290  allocation);
16291  }
16292 #endif
16293 
16294  return res;
16295 }
16296 
16297 void vmaUnmapMemory(
16298  VmaAllocator allocator,
16299  VmaAllocation allocation)
16300 {
16301  VMA_ASSERT(allocator && allocation);
16302 
16303  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16304 
16305 #if VMA_RECORDING_ENABLED
16306  if(allocator->GetRecorder() != VMA_NULL)
16307  {
16308  allocator->GetRecorder()->RecordUnmapMemory(
16309  allocator->GetCurrentFrameIndex(),
16310  allocation);
16311  }
16312 #endif
16313 
16314  allocator->Unmap(allocation);
16315 }
16316 
16317 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
16318 {
16319  VMA_ASSERT(allocator && allocation);
16320 
16321  VMA_DEBUG_LOG("vmaFlushAllocation");
16322 
16323  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16324 
16325  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
16326 
16327 #if VMA_RECORDING_ENABLED
16328  if(allocator->GetRecorder() != VMA_NULL)
16329  {
16330  allocator->GetRecorder()->RecordFlushAllocation(
16331  allocator->GetCurrentFrameIndex(),
16332  allocation, offset, size);
16333  }
16334 #endif
16335 }
16336 
16337 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
16338 {
16339  VMA_ASSERT(allocator && allocation);
16340 
16341  VMA_DEBUG_LOG("vmaInvalidateAllocation");
16342 
16343  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16344 
16345  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
16346 
16347 #if VMA_RECORDING_ENABLED
16348  if(allocator->GetRecorder() != VMA_NULL)
16349  {
16350  allocator->GetRecorder()->RecordInvalidateAllocation(
16351  allocator->GetCurrentFrameIndex(),
16352  allocation, offset, size);
16353  }
16354 #endif
16355 }
16356 
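// [Editor's note] The typical map-write-flush-unmap sequence using the three functions above
// (illustrative only; `srcData` and `dataSize` are assumed application data):
//
//     void* mapped = VMA_NULL;
//     if(vmaMapMemory(allocator, alloc, &mapped) == VK_SUCCESS)
//     {
//         memcpy(mapped, srcData, dataSize);
//         // Needed only if the memory type lacks VK_MEMORY_PROPERTY_HOST_COHERENT_BIT:
//         vmaFlushAllocation(allocator, alloc, 0, VK_WHOLE_SIZE);
//         vmaUnmapMemory(allocator, alloc);
//     }
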
16357 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
16358 {
16359  VMA_ASSERT(allocator);
16360 
16361  VMA_DEBUG_LOG("vmaCheckCorruption");
16362 
16363  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16364 
16365  return allocator->CheckCorruption(memoryTypeBits);
16366 }
16367 
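// [Editor's note] A hedged sketch for vmaCheckCorruption (illustrative only; the check is
// meaningful only when VMA_DEBUG_MARGIN > 0 and VMA_DEBUG_DETECT_CORRUPTION is 1 at compile
// time, otherwise it reports VK_ERROR_FEATURE_NOT_PRESENT):
//
//     VkResult res = vmaCheckCorruption(allocator, UINT32_MAX); // check all memory types
//     // VK_SUCCESS: margins validated; other error codes indicate detected corruption.
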
16368 VkResult vmaDefragment(
16369  VmaAllocator allocator,
16370  VmaAllocation* pAllocations,
16371  size_t allocationCount,
16372  VkBool32* pAllocationsChanged,
16373  const VmaDefragmentationInfo *pDefragmentationInfo,
16374  VmaDefragmentationStats* pDefragmentationStats)
16375 {
16376  // Deprecated interface, reimplemented using new one.
16377 
16378  VmaDefragmentationInfo2 info2 = {};
16379  info2.allocationCount = (uint32_t)allocationCount;
16380  info2.pAllocations = pAllocations;
16381  info2.pAllocationsChanged = pAllocationsChanged;
16382  if(pDefragmentationInfo != VMA_NULL)
16383  {
16384  info2.maxCpuAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
16385  info2.maxCpuBytesToMove = pDefragmentationInfo->maxBytesToMove;
16386  }
16387  else
16388  {
16389  info2.maxCpuAllocationsToMove = UINT32_MAX;
16390  info2.maxCpuBytesToMove = VK_WHOLE_SIZE;
16391  }
16392  // info2.flags, maxGpuAllocationsToMove, maxGpuBytesToMove, commandBuffer deliberately left zero.
16393 
16394  VmaDefragmentationContext ctx;
16395  VkResult res = vmaDefragmentationBegin(allocator, &info2, pDefragmentationStats, &ctx);
16396  if(res == VK_NOT_READY)
16397  {
16398  res = vmaDefragmentationEnd(allocator, ctx);
16399  }
16400  return res;
16401 }
16402 
16403 VkResult vmaDefragmentationBegin(
16404  VmaAllocator allocator,
16405  const VmaDefragmentationInfo2* pInfo,
16406  VmaDefragmentationStats* pStats,
16407  VmaDefragmentationContext *pContext)
16408 {
16409  VMA_ASSERT(allocator && pInfo && pContext);
16410 
16411  // Degenerate case: Nothing to defragment.
16412  if(pInfo->allocationCount == 0 && pInfo->poolCount == 0)
16413  {
16414  return VK_SUCCESS;
16415  }
16416 
16417  VMA_ASSERT(pInfo->allocationCount == 0 || pInfo->pAllocations != VMA_NULL);
16418  VMA_ASSERT(pInfo->poolCount == 0 || pInfo->pPools != VMA_NULL);
16419  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->allocationCount, pInfo->pAllocations));
16420  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->poolCount, pInfo->pPools));
16421 
16422  VMA_DEBUG_LOG("vmaDefragmentationBegin");
16423 
16424  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16425 
16426  VkResult res = allocator->DefragmentationBegin(*pInfo, pStats, pContext);
16427 
16428 #if VMA_RECORDING_ENABLED
16429  if(allocator->GetRecorder() != VMA_NULL)
16430  {
16431  allocator->GetRecorder()->RecordDefragmentationBegin(
16432  allocator->GetCurrentFrameIndex(), *pInfo, *pContext);
16433  }
16434 #endif
16435 
16436  return res;
16437 }
16438 
16439 VkResult vmaDefragmentationEnd(
16440  VmaAllocator allocator,
16441  VmaDefragmentationContext context)
16442 {
16443  VMA_ASSERT(allocator);
16444 
16445  VMA_DEBUG_LOG("vmaDefragmentationEnd");
16446 
16447  if(context != VK_NULL_HANDLE)
16448  {
16449  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16450 
16451 #if VMA_RECORDING_ENABLED
16452  if(allocator->GetRecorder() != VMA_NULL)
16453  {
16454  allocator->GetRecorder()->RecordDefragmentationEnd(
16455  allocator->GetCurrentFrameIndex(), context);
16456  }
16457 #endif
16458 
16459  return allocator->DefragmentationEnd(context);
16460  }
16461  else
16462  {
16463  return VK_SUCCESS;
16464  }
16465 }
16466 
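// [Editor's note] A CPU-only defragmentation sketch using the pair of functions above
// (illustrative only; `allocs` and `allocCount` are assumed application-side arrays):
//
//     VmaDefragmentationInfo2 defragInfo = {};
//     defragInfo.allocationCount = (uint32_t)allocCount;
//     defragInfo.pAllocations = allocs;
//     defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
//     defragInfo.maxCpuAllocationsToMove = UINT32_MAX;
//
//     VmaDefragmentationContext defragCtx;
//     vmaDefragmentationBegin(allocator, &defragInfo, VMA_NULL, &defragCtx);
//     // CPU-only moves complete inside Begin, so End can follow immediately:
//     vmaDefragmentationEnd(allocator, defragCtx);
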
16467 VkResult vmaBindBufferMemory(
16468  VmaAllocator allocator,
16469  VmaAllocation allocation,
16470  VkBuffer buffer)
16471 {
16472  VMA_ASSERT(allocator && allocation && buffer);
16473 
16474  VMA_DEBUG_LOG("vmaBindBufferMemory");
16475 
16476  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16477 
16478  return allocator->BindBufferMemory(allocation, buffer);
16479 }
16480 
16481 VkResult vmaBindImageMemory(
16482  VmaAllocator allocator,
16483  VmaAllocation allocation,
16484  VkImage image)
16485 {
16486  VMA_ASSERT(allocator && allocation && image);
16487 
16488  VMA_DEBUG_LOG("vmaBindImageMemory");
16489 
16490  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16491 
16492  return allocator->BindImageMemory(allocation, image);
16493 }
16494 
16495 VkResult vmaCreateBuffer(
16496  VmaAllocator allocator,
16497  const VkBufferCreateInfo* pBufferCreateInfo,
16498  const VmaAllocationCreateInfo* pAllocationCreateInfo,
16499  VkBuffer* pBuffer,
16500  VmaAllocation* pAllocation,
16501  VmaAllocationInfo* pAllocationInfo)
16502 {
16503  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
16504 
16505  if(pBufferCreateInfo->size == 0)
16506  {
16507  return VK_ERROR_VALIDATION_FAILED_EXT;
16508  }
16509 
16510  VMA_DEBUG_LOG("vmaCreateBuffer");
16511 
16512  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16513 
16514  *pBuffer = VK_NULL_HANDLE;
16515  *pAllocation = VK_NULL_HANDLE;
16516 
16517  // 1. Create VkBuffer.
16518  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
16519  allocator->m_hDevice,
16520  pBufferCreateInfo,
16521  allocator->GetAllocationCallbacks(),
16522  pBuffer);
16523  if(res >= 0)
16524  {
16525  // 2. vkGetBufferMemoryRequirements.
16526  VkMemoryRequirements vkMemReq = {};
16527  bool requiresDedicatedAllocation = false;
16528  bool prefersDedicatedAllocation = false;
16529  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
16530  requiresDedicatedAllocation, prefersDedicatedAllocation);
16531 
16532  // Make sure alignment requirements for specific buffer usages reported
16533  // in Physical Device Properties are included in alignment reported by memory requirements.
16534  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
16535  {
16536  VMA_ASSERT(vkMemReq.alignment %
16537  allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
16538  }
16539  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
16540  {
16541  VMA_ASSERT(vkMemReq.alignment %
16542  allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
16543  }
16544  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
16545  {
16546  VMA_ASSERT(vkMemReq.alignment %
16547  allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
16548  }
16549 
16550  // 3. Allocate memory using allocator.
16551  res = allocator->AllocateMemory(
16552  vkMemReq,
16553  requiresDedicatedAllocation,
16554  prefersDedicatedAllocation,
16555  *pBuffer, // dedicatedBuffer
16556  VK_NULL_HANDLE, // dedicatedImage
16557  *pAllocationCreateInfo,
16558  VMA_SUBALLOCATION_TYPE_BUFFER,
16559  1, // allocationCount
16560  pAllocation);
16561 
16562 #if VMA_RECORDING_ENABLED
16563  if(allocator->GetRecorder() != VMA_NULL)
16564  {
16565  allocator->GetRecorder()->RecordCreateBuffer(
16566  allocator->GetCurrentFrameIndex(),
16567  *pBufferCreateInfo,
16568  *pAllocationCreateInfo,
16569  *pAllocation);
16570  }
16571 #endif
16572 
16573  if(res >= 0)
16574  {
16575  // 4. Bind buffer with memory.
16576  res = allocator->BindBufferMemory(*pAllocation, *pBuffer);
16577  if(res >= 0)
16578  {
16579  // All steps succeeded.
16580  #if VMA_STATS_STRING_ENABLED
16581  (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
16582  #endif
16583  if(pAllocationInfo != VMA_NULL)
16584  {
16585  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
16586  }
16587 
16588  return VK_SUCCESS;
16589  }
16590  allocator->FreeMemory(
16591  1, // allocationCount
16592  pAllocation);
16593  *pAllocation = VK_NULL_HANDLE;
16594  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
16595  *pBuffer = VK_NULL_HANDLE;
16596  return res;
16597  }
16598  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
16599  *pBuffer = VK_NULL_HANDLE;
16600  return res;
16601  }
16602  return res;
16603 }
16604 
16605 void vmaDestroyBuffer(
16606  VmaAllocator allocator,
16607  VkBuffer buffer,
16608  VmaAllocation allocation)
16609 {
16610  VMA_ASSERT(allocator);
16611 
16612  if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
16613  {
16614  return;
16615  }
16616 
16617  VMA_DEBUG_LOG("vmaDestroyBuffer");
16618 
16619  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16620 
16621 #if VMA_RECORDING_ENABLED
16622  if(allocator->GetRecorder() != VMA_NULL)
16623  {
16624  allocator->GetRecorder()->RecordDestroyBuffer(
16625  allocator->GetCurrentFrameIndex(),
16626  allocation);
16627  }
16628 #endif
16629 
16630  if(buffer != VK_NULL_HANDLE)
16631  {
16632  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
16633  }
16634 
16635  if(allocation != VK_NULL_HANDLE)
16636  {
16637  allocator->FreeMemory(
16638  1, // allocationCount
16639  &allocation);
16640  }
16641 }
16642 
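// [Editor's note] The canonical create-use-destroy sketch for the buffer functions above
// (illustrative only; size and usage flags are example values):
//
//     VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
//     bufCreateInfo.size = 65536;
//     bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
//
//     VmaAllocationCreateInfo allocCreateInfo = {};
//     allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
//
//     VkBuffer buf;
//     VmaAllocation alloc;
//     VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, VMA_NULL);
//     // ...
//     vmaDestroyBuffer(allocator, buf, alloc);
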
16643 VkResult vmaCreateImage(
16644  VmaAllocator allocator,
16645  const VkImageCreateInfo* pImageCreateInfo,
16646  const VmaAllocationCreateInfo* pAllocationCreateInfo,
16647  VkImage* pImage,
16648  VmaAllocation* pAllocation,
16649  VmaAllocationInfo* pAllocationInfo)
16650 {
16651  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
16652 
16653  if(pImageCreateInfo->extent.width == 0 ||
16654  pImageCreateInfo->extent.height == 0 ||
16655  pImageCreateInfo->extent.depth == 0 ||
16656  pImageCreateInfo->mipLevels == 0 ||
16657  pImageCreateInfo->arrayLayers == 0)
16658  {
16659  return VK_ERROR_VALIDATION_FAILED_EXT;
16660  }
16661 
16662  VMA_DEBUG_LOG("vmaCreateImage");
16663 
16664  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16665 
16666  *pImage = VK_NULL_HANDLE;
16667  *pAllocation = VK_NULL_HANDLE;
16668 
16669  // 1. Create VkImage.
16670  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
16671  allocator->m_hDevice,
16672  pImageCreateInfo,
16673  allocator->GetAllocationCallbacks(),
16674  pImage);
16675  if(res >= 0)
16676  {
16677  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
16678  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
16679  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
16680 
16681  // 2. Allocate memory using allocator.
16682  VkMemoryRequirements vkMemReq = {};
16683  bool requiresDedicatedAllocation = false;
16684  bool prefersDedicatedAllocation = false;
16685  allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
16686  requiresDedicatedAllocation, prefersDedicatedAllocation);
16687 
16688  res = allocator->AllocateMemory(
16689  vkMemReq,
16690  requiresDedicatedAllocation,
16691  prefersDedicatedAllocation,
16692  VK_NULL_HANDLE, // dedicatedBuffer
16693  *pImage, // dedicatedImage
16694  *pAllocationCreateInfo,
16695  suballocType,
16696  1, // allocationCount
16697  pAllocation);
16698 
16699 #if VMA_RECORDING_ENABLED
16700  if(allocator->GetRecorder() != VMA_NULL)
16701  {
16702  allocator->GetRecorder()->RecordCreateImage(
16703  allocator->GetCurrentFrameIndex(),
16704  *pImageCreateInfo,
16705  *pAllocationCreateInfo,
16706  *pAllocation);
16707  }
16708 #endif
16709 
16710  if(res >= 0)
16711  {
16712  // 3. Bind image with memory.
16713  res = allocator->BindImageMemory(*pAllocation, *pImage);
16714  if(res >= 0)
16715  {
16716  // All steps succeeded.
16717  #if VMA_STATS_STRING_ENABLED
16718  (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
16719  #endif
16720  if(pAllocationInfo != VMA_NULL)
16721  {
16722  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
16723  }
16724 
16725  return VK_SUCCESS;
16726  }
16727  allocator->FreeMemory(
16728  1, // allocationCount
16729  pAllocation);
16730  *pAllocation = VK_NULL_HANDLE;
16731  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
16732  *pImage = VK_NULL_HANDLE;
16733  return res;
16734  }
16735  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
16736  *pImage = VK_NULL_HANDLE;
16737  return res;
16738  }
16739  return res;
16740 }
16741 
16742 void vmaDestroyImage(
16743  VmaAllocator allocator,
16744  VkImage image,
16745  VmaAllocation allocation)
16746 {
16747  VMA_ASSERT(allocator);
16748 
16749  if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
16750  {
16751  return;
16752  }
16753 
16754  VMA_DEBUG_LOG("vmaDestroyImage");
16755 
16756  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16757 
16758 #if VMA_RECORDING_ENABLED
16759  if(allocator->GetRecorder() != VMA_NULL)
16760  {
16761  allocator->GetRecorder()->RecordDestroyImage(
16762  allocator->GetCurrentFrameIndex(),
16763  allocation);
16764  }
16765 #endif
16766 
16767  if(image != VK_NULL_HANDLE)
16768  {
16769  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
16770  }
16771  if(allocation != VK_NULL_HANDLE)
16772  {
16773  allocator->FreeMemory(
16774  1, // allocationCount
16775  &allocation);
16776  }
16777 }
16778 
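// Example (illustrative sketch, not part of the library source): typical usage of
// vmaCreateImage / vmaDestroyImage for a sampled 2D texture. Assumes `allocator`
// is a valid VmaAllocator created earlier with vmaCreateAllocator:
//
//   VkImageCreateInfo imageInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
//   imageInfo.imageType = VK_IMAGE_TYPE_2D;
//   imageInfo.extent = { 1024, 1024, 1 };
//   imageInfo.mipLevels = 1;
//   imageInfo.arrayLayers = 1;
//   imageInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
//   imageInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
//   imageInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
//   imageInfo.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;
//   imageInfo.samples = VK_SAMPLE_COUNT_1_BIT;
//
//   VmaAllocationCreateInfo allocCreateInfo = {};
//   allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
//
//   VkImage image = VK_NULL_HANDLE;
//   VmaAllocation allocation = VK_NULL_HANDLE;
//   VkResult res = vmaCreateImage(allocator, &imageInfo, &allocCreateInfo,
//       &image, &allocation, VMA_NULL);
//   // ... use the image ...
//   vmaDestroyImage(allocator, image, allocation);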
16779 #endif // #ifdef VMA_IMPLEMENTATION
1 //
2 // Copyright (c) 2017-2019 Advanced Micro Devices, Inc. All rights reserved.
3 //
4 // Permission is hereby granted, free of charge, to any person obtaining a copy
5 // of this software and associated documentation files (the "Software"), to deal
6 // in the Software without restriction, including without limitation the rights
7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 // copies of the Software, and to permit persons to whom the Software is
9 // furnished to do so, subject to the following conditions:
10 //
11 // The above copyright notice and this permission notice shall be included in
12 // all copies or substantial portions of the Software.
13 //
14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
20 // THE SOFTWARE.
21 //
22 
23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
25 
26 #ifdef __cplusplus
27 extern "C" {
28 #endif
29 
1635 /*
1636 Define this macro to 0/1 to disable/enable support for recording functionality,
1637 available through VmaAllocatorCreateInfo::pRecordSettings.
1638 */
1639 #ifndef VMA_RECORDING_ENABLED
1640  #ifdef _WIN32
1641  #define VMA_RECORDING_ENABLED 1
1642  #else
1643  #define VMA_RECORDING_ENABLED 0
1644  #endif
1645 #endif
1646 
1647 #ifndef NOMINMAX
1648  #define NOMINMAX // For windows.h
1649 #endif
1650 
1651 #ifndef VULKAN_H_
1652  #include <vulkan/vulkan.h>
1653 #endif
1654 
1655 #if VMA_RECORDING_ENABLED
1656  #include <windows.h>
1657 #endif
1658 
1659 #if !defined(VMA_DEDICATED_ALLOCATION)
1660  #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
1661  #define VMA_DEDICATED_ALLOCATION 1
1662  #else
1663  #define VMA_DEDICATED_ALLOCATION 0
1664  #endif
1665 #endif
1666 
1676 VK_DEFINE_HANDLE(VmaAllocator)
1677 
1678 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
1680  VmaAllocator allocator,
1681  uint32_t memoryType,
1682  VkDeviceMemory memory,
1683  VkDeviceSize size);
1685 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
1686  VmaAllocator allocator,
1687  uint32_t memoryType,
1688  VkDeviceMemory memory,
1689  VkDeviceSize size);
1690 
1698 typedef struct VmaDeviceMemoryCallbacks {
1700  PFN_vmaAllocateDeviceMemoryFunction pfnAllocate;
1702  PFN_vmaFreeDeviceMemoryFunction pfnFree;
1704 } VmaDeviceMemoryCallbacks;
1707 typedef enum VmaAllocatorCreateFlagBits {
1712  VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT = 0x00000001,
1731  VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT = 0x00000002,
1733  VMA_ALLOCATOR_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
1734 } VmaAllocatorCreateFlagBits;
1737 typedef VkFlags VmaAllocatorCreateFlags;
1738 
1743 typedef struct VmaVulkanFunctions {
1744  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
1745  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
1746  PFN_vkAllocateMemory vkAllocateMemory;
1747  PFN_vkFreeMemory vkFreeMemory;
1748  PFN_vkMapMemory vkMapMemory;
1749  PFN_vkUnmapMemory vkUnmapMemory;
1750  PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
1751  PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
1752  PFN_vkBindBufferMemory vkBindBufferMemory;
1753  PFN_vkBindImageMemory vkBindImageMemory;
1754  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
1755  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
1756  PFN_vkCreateBuffer vkCreateBuffer;
1757  PFN_vkDestroyBuffer vkDestroyBuffer;
1758  PFN_vkCreateImage vkCreateImage;
1759  PFN_vkDestroyImage vkDestroyImage;
1760  PFN_vkCmdCopyBuffer vkCmdCopyBuffer;
1761 #if VMA_DEDICATED_ALLOCATION
1762  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
1763  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
1764 #endif
1765 } VmaVulkanFunctions;
1766 
1768 typedef enum VmaRecordFlagBits {
1773  VMA_RECORD_FLUSH_AFTER_CALL_BIT = 0x00000001,
1774  VMA_RECORD_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
1775 } VmaRecordFlagBits;
1778 typedef VkFlags VmaRecordFlags;
1779 
1781 typedef struct VmaRecordSettings
1782 {
1786  VmaRecordFlags flags;
1792  const char* pFilePath;
1793 } VmaRecordSettings;
1794 
1796 typedef struct VmaAllocatorCreateInfo
1797 {
1801 
1802  VkPhysicalDevice physicalDevice;
1804 
1805  VkDevice device;
1807 
1810 
1811  const VkAllocationCallbacks* pAllocationCallbacks;
1813 
1853  const VkDeviceSize* pHeapSizeLimit;
1873 } VmaAllocatorCreateInfo;
1874 
1876 VkResult vmaCreateAllocator(
1877  const VmaAllocatorCreateInfo* pCreateInfo,
1878  VmaAllocator* pAllocator);
1879 
1881 void vmaDestroyAllocator(
1882  VmaAllocator allocator);
1883 
1888 void vmaGetPhysicalDeviceProperties(
1889  VmaAllocator allocator,
1890  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
1891 
1896 void vmaGetMemoryProperties(
1897  VmaAllocator allocator,
1898  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
1899 
1906 void vmaGetMemoryTypeProperties(
1907  VmaAllocator allocator,
1908  uint32_t memoryTypeIndex,
1909  VkMemoryPropertyFlags* pFlags);
1910 
1919 void vmaSetCurrentFrameIndex(
1920  VmaAllocator allocator,
1921  uint32_t frameIndex);
1922 
1925 typedef struct VmaStatInfo
1926 {
1928  uint32_t blockCount;
1930  uint32_t allocationCount;
1932  uint32_t unusedRangeCount;
1934  VkDeviceSize usedBytes;
1936  VkDeviceSize unusedBytes;
1937  VkDeviceSize allocationSizeMin, allocationSizeAvg, allocationSizeMax;
1938  VkDeviceSize unusedRangeSizeMin, unusedRangeSizeAvg, unusedRangeSizeMax;
1939 } VmaStatInfo;
1940 
1942 typedef struct VmaStats
1943 {
1944  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
1945  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
1946  VmaStatInfo total;
1947 } VmaStats;
1948 
1950 void vmaCalculateStats(
1951  VmaAllocator allocator,
1952  VmaStats* pStats);
1953 
1954 #define VMA_STATS_STRING_ENABLED 1
1955 
1956 #if VMA_STATS_STRING_ENABLED
1957 
1959 
1961 void vmaBuildStatsString(
1962  VmaAllocator allocator,
1963  char** ppStatsString,
1964  VkBool32 detailedMap);
1965 
1966 void vmaFreeStatsString(
1967  VmaAllocator allocator,
1968  char* pStatsString);
1969 
1970 #endif // #if VMA_STATS_STRING_ENABLED
1971 
1980 VK_DEFINE_HANDLE(VmaPool)
1981 
1982 typedef enum VmaMemoryUsage
1983 {
2032 } VmaMemoryUsage;
2033 
2043 
2098 
2114 
2124 
2131 
2135 
2136 typedef struct VmaAllocationCreateInfo
2137 {
2141  VmaAllocationCreateFlags flags;
2145  VmaMemoryUsage usage;
2150  VkMemoryPropertyFlags requiredFlags;
2155  VkMemoryPropertyFlags preferredFlags;
2163  uint32_t memoryTypeBits;
2171  VmaPool pool;
2176  void* pUserData;
2177 } VmaAllocationCreateInfo;
2178 
2195 VkResult vmaFindMemoryTypeIndex(
2196  VmaAllocator allocator,
2197  uint32_t memoryTypeBits,
2198  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2199  uint32_t* pMemoryTypeIndex);
2200 
2213 VkResult vmaFindMemoryTypeIndexForBufferInfo(
2214  VmaAllocator allocator,
2215  const VkBufferCreateInfo* pBufferCreateInfo,
2216  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2217  uint32_t* pMemoryTypeIndex);
2218 
2231 VkResult vmaFindMemoryTypeIndexForImageInfo(
2232  VmaAllocator allocator,
2233  const VkImageCreateInfo* pImageCreateInfo,
2234  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2235  uint32_t* pMemoryTypeIndex);
2236 
2255 typedef enum VmaPoolCreateFlagBits {
2262  VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT = 0x00000002,
2274  VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT = 0x00000004,
2285  VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT = 0x00000008,
2288  VMA_POOL_CREATE_ALGORITHM_MASK =
2289  VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT |
2290  VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT,
2291  VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
2292 } VmaPoolCreateFlagBits;
2294 typedef VkFlags VmaPoolCreateFlags;
2295 
2298 typedef struct VmaPoolCreateInfo {
2301  uint32_t memoryTypeIndex;
2304  VmaPoolCreateFlags flags;
2313  VkDeviceSize blockSize;
2319  size_t minBlockCount;
2327  size_t maxBlockCount;
2340  uint32_t frameInUseCount;
2341 } VmaPoolCreateInfo;
2342 
2345 typedef struct VmaPoolStats {
2348  VkDeviceSize size;
2351  VkDeviceSize unusedSize;
2354  size_t allocationCount;
2357  size_t unusedRangeCount;
2364  VkDeviceSize unusedRangeSizeMax;
2367  size_t blockCount;
2368 } VmaPoolStats;
2369 
2376 VkResult vmaCreatePool(
2377  VmaAllocator allocator,
2378  const VmaPoolCreateInfo* pCreateInfo,
2379  VmaPool* pPool);
2380 
2383 void vmaDestroyPool(
2384  VmaAllocator allocator,
2385  VmaPool pool);
2386 
2393 void vmaGetPoolStats(
2394  VmaAllocator allocator,
2395  VmaPool pool,
2396  VmaPoolStats* pPoolStats);
2397 
2404 void vmaMakePoolAllocationsLost(
2405  VmaAllocator allocator,
2406  VmaPool pool,
2407  size_t* pLostAllocationCount);
2408 
2423 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool);
2424 
2449 VK_DEFINE_HANDLE(VmaAllocation)
2450 
2451 
2453 typedef struct VmaAllocationInfo {
2458  uint32_t memoryType;
2467  VkDeviceMemory deviceMemory;
2472  VkDeviceSize offset;
2477  VkDeviceSize size;
2484  void* pMappedData;
2491  void* pUserData;
2492 } VmaAllocationInfo;
2493 
2504 VkResult vmaAllocateMemory(
2505  VmaAllocator allocator,
2506  const VkMemoryRequirements* pVkMemoryRequirements,
2507  const VmaAllocationCreateInfo* pCreateInfo,
2508  VmaAllocation* pAllocation,
2509  VmaAllocationInfo* pAllocationInfo);
2510 
2530 VkResult vmaAllocateMemoryPages(
2531  VmaAllocator allocator,
2532  const VkMemoryRequirements* pVkMemoryRequirements,
2533  const VmaAllocationCreateInfo* pCreateInfo,
2534  size_t allocationCount,
2535  VmaAllocation* pAllocations,
2536  VmaAllocationInfo* pAllocationInfo);
2537 
2544 VkResult vmaAllocateMemoryForBuffer(
2545  VmaAllocator allocator,
2546  VkBuffer buffer,
2547  const VmaAllocationCreateInfo* pCreateInfo,
2548  VmaAllocation* pAllocation,
2549  VmaAllocationInfo* pAllocationInfo);
2550 
2552 VkResult vmaAllocateMemoryForImage(
2553  VmaAllocator allocator,
2554  VkImage image,
2555  const VmaAllocationCreateInfo* pCreateInfo,
2556  VmaAllocation* pAllocation,
2557  VmaAllocationInfo* pAllocationInfo);
2558 
2563 void vmaFreeMemory(
2564  VmaAllocator allocator,
2565  VmaAllocation allocation);
2566 
2577 void vmaFreeMemoryPages(
2578  VmaAllocator allocator,
2579  size_t allocationCount,
2580  VmaAllocation* pAllocations);
2581 
2602 VkResult vmaResizeAllocation(
2603  VmaAllocator allocator,
2604  VmaAllocation allocation,
2605  VkDeviceSize newSize);
2606 
2623 void vmaGetAllocationInfo(
2624  VmaAllocator allocator,
2625  VmaAllocation allocation,
2626  VmaAllocationInfo* pAllocationInfo);
2627 
2642 VkBool32 vmaTouchAllocation(
2643  VmaAllocator allocator,
2644  VmaAllocation allocation);
2645 
2659 void vmaSetAllocationUserData(
2660  VmaAllocator allocator,
2661  VmaAllocation allocation,
2662  void* pUserData);
2663 
2674 void vmaCreateLostAllocation(
2675  VmaAllocator allocator,
2676  VmaAllocation* pAllocation);
2677 
2712 VkResult vmaMapMemory(
2713  VmaAllocator allocator,
2714  VmaAllocation allocation,
2715  void** ppData);
2716 
2721 void vmaUnmapMemory(
2722  VmaAllocator allocator,
2723  VmaAllocation allocation);
2724 
2737 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
2738 
2751 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
2752 
2769 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits);
2770 
2777 VK_DEFINE_HANDLE(VmaDefragmentationContext)
2778 
2779 typedef enum VmaDefragmentationFlagBits {
2781  VMA_DEFRAGMENTATION_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
2782 } VmaDefragmentationFlagBits;
2783 typedef VkFlags VmaDefragmentationFlags;
2784 
2789 typedef struct VmaDefragmentationInfo2 {
2792  VmaDefragmentationFlags flags;
2798  uint32_t allocationCount;
2807  VmaAllocation* pAllocations;
2812  VkBool32* pAllocationsChanged;
2813  uint32_t poolCount;
2824  VmaPool* pPools;
2834  VkDeviceSize maxCpuBytesToMove;
2839  uint32_t maxCpuAllocationsToMove;
2844  VkDeviceSize maxGpuBytesToMove;
2849  uint32_t maxGpuAllocationsToMove;
2858  VkCommandBuffer commandBuffer;
2859 } VmaDefragmentationInfo2;
2860 
2865 typedef struct VmaDefragmentationInfo {
2870  VkDeviceSize maxBytesToMove;
2875  uint32_t maxAllocationsToMove;
2876 } VmaDefragmentationInfo;
2877 
2879 typedef struct VmaDefragmentationStats {
2881  VkDeviceSize bytesMoved;
2883  VkDeviceSize bytesFreed;
2885  uint32_t allocationsMoved;
2887  uint32_t deviceMemoryBlocksFreed;
2888 } VmaDefragmentationStats;
2889 
2916 VkResult vmaDefragmentationBegin(
2917  VmaAllocator allocator,
2918  const VmaDefragmentationInfo2* pInfo,
2919  VmaDefragmentationStats* pStats,
2920  VmaDefragmentationContext *pContext);
2921 
2927 VkResult vmaDefragmentationEnd(
2928  VmaAllocator allocator,
2929  VmaDefragmentationContext context);
2930 
2971 VkResult vmaDefragment(
2972  VmaAllocator allocator,
2973  VmaAllocation* pAllocations,
2974  size_t allocationCount,
2975  VkBool32* pAllocationsChanged,
2976  const VmaDefragmentationInfo *pDefragmentationInfo,
2977  VmaDefragmentationStats* pDefragmentationStats);
2978 
2991 VkResult vmaBindBufferMemory(
2992  VmaAllocator allocator,
2993  VmaAllocation allocation,
2994  VkBuffer buffer);
2995 
3008 VkResult vmaBindImageMemory(
3009  VmaAllocator allocator,
3010  VmaAllocation allocation,
3011  VkImage image);
3012 
3039 VkResult vmaCreateBuffer(
3040  VmaAllocator allocator,
3041  const VkBufferCreateInfo* pBufferCreateInfo,
3042  const VmaAllocationCreateInfo* pAllocationCreateInfo,
3043  VkBuffer* pBuffer,
3044  VmaAllocation* pAllocation,
3045  VmaAllocationInfo* pAllocationInfo);
3046 
3058 void vmaDestroyBuffer(
3059  VmaAllocator allocator,
3060  VkBuffer buffer,
3061  VmaAllocation allocation);
3062 
3064 VkResult vmaCreateImage(
3065  VmaAllocator allocator,
3066  const VkImageCreateInfo* pImageCreateInfo,
3067  const VmaAllocationCreateInfo* pAllocationCreateInfo,
3068  VkImage* pImage,
3069  VmaAllocation* pAllocation,
3070  VmaAllocationInfo* pAllocationInfo);
3071 
3083 void vmaDestroyImage(
3084  VmaAllocator allocator,
3085  VkImage image,
3086  VmaAllocation allocation);
3087 
3088 #ifdef __cplusplus
3089 }
3090 #endif
3091 
3092 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
3093 
3094 // For Visual Studio IntelliSense.
3095 #if defined(__cplusplus) && defined(__INTELLISENSE__)
3096 #define VMA_IMPLEMENTATION
3097 #endif
3098 
3099 #ifdef VMA_IMPLEMENTATION
3100 #undef VMA_IMPLEMENTATION
3101 
3102 #include <cstdint>
3103 #include <cstdlib>
3104 #include <cstring>
3105 
3106 /*******************************************************************************
3107 CONFIGURATION SECTION
3108 
3109 Define some of these macros before each #include of this header or change them
3110 here if you need other than the default behavior, depending on your environment.
3111 */
3112 
3113 /*
3114 Define this macro to 1 to make the library fetch pointers to Vulkan functions
3115 internally, like:
3116 
3117  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
3118 
3119 Define to 0 if you are going to provide your own pointers to Vulkan functions via
3120 VmaAllocatorCreateInfo::pVulkanFunctions.
3121 */
3122 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
3123 #define VMA_STATIC_VULKAN_FUNCTIONS 1
3124 #endif
3125 
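// Example (illustrative sketch): when VMA_STATIC_VULKAN_FUNCTIONS is defined to 0,
// the pointers can be supplied manually via VmaAllocatorCreateInfo::pVulkanFunctions,
// e.g. using function pointers fetched with vkGetDeviceProcAddr:
//
//   VmaVulkanFunctions vulkanFunctions = {};
//   vulkanFunctions.vkAllocateMemory =
//       (PFN_vkAllocateMemory)vkGetDeviceProcAddr(device, "vkAllocateMemory");
//   // ... fill every remaining member of VmaVulkanFunctions the same way ...
//
//   VmaAllocatorCreateInfo allocatorInfo = {};
//   allocatorInfo.physicalDevice = physicalDevice;
//   allocatorInfo.device = device;
//   allocatorInfo.pVulkanFunctions = &vulkanFunctions;
//
//   VmaAllocator allocator;
//   vmaCreateAllocator(&allocatorInfo, &allocator);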
3126 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
3127 //#define VMA_USE_STL_CONTAINERS 1
3128 
3129 /* Set this macro to 1 to make the library include and use STL containers:
3130 std::pair, std::vector, std::list, std::unordered_map.
3131 
3132 Set it to 0 or leave it undefined to make the library use its own implementation of
3133 the containers.
3134 */
3135 #if VMA_USE_STL_CONTAINERS
3136  #define VMA_USE_STL_VECTOR 1
3137  #define VMA_USE_STL_UNORDERED_MAP 1
3138  #define VMA_USE_STL_LIST 1
3139 #endif
3140 
3141 #ifndef VMA_USE_STL_SHARED_MUTEX
3142  // Compiler conforms to C++17.
3143  #if __cplusplus >= 201703L
3144  #define VMA_USE_STL_SHARED_MUTEX 1
3145  // Visual Studio defines __cplusplus properly only when passed the additional parameter /Zc:__cplusplus.
3146  // Otherwise it's always 199711L, despite shared_mutex being available since Visual Studio 2015 Update 2.
3147  // See: https://blogs.msdn.microsoft.com/vcblog/2018/04/09/msvc-now-correctly-reports-__cplusplus/
3148  #elif defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023918 && __cplusplus == 199711L && _MSVC_LANG >= 201703L
3149  #define VMA_USE_STL_SHARED_MUTEX 1
3150  #else
3151  #define VMA_USE_STL_SHARED_MUTEX 0
3152  #endif
3153 #endif
3154 
3155 #if VMA_USE_STL_VECTOR
3156  #include <vector>
3157 #endif
3158 
3159 #if VMA_USE_STL_UNORDERED_MAP
3160  #include <unordered_map>
3161 #endif
3162 
3163 #if VMA_USE_STL_LIST
3164  #include <list>
3165 #endif
3166 
3167 /*
3168 The following headers are used only in this CONFIGURATION section, so feel free to
3169 remove them if they are not needed.
3170 */
3171 #include <cassert> // for assert
3172 #include <algorithm> // for min, max
3173 #include <mutex>
3174 #include <atomic> // for std::atomic
3175 
3176 #ifndef VMA_NULL
3177  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
3178  #define VMA_NULL nullptr
3179 #endif
3180 
3181 #if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
3182 #include <cstdlib>
3183 void *aligned_alloc(size_t alignment, size_t size)
3184 {
3185  // alignment must be >= sizeof(void*)
3186  if(alignment < sizeof(void*))
3187  {
3188  alignment = sizeof(void*);
3189  }
3190 
3191  return memalign(alignment, size);
3192 }
3193 #elif defined(__APPLE__) || defined(__ANDROID__)
3194 #include <cstdlib>
3195 void *aligned_alloc(size_t alignment, size_t size)
3196 {
3197  // alignment must be >= sizeof(void*)
3198  if(alignment < sizeof(void*))
3199  {
3200  alignment = sizeof(void*);
3201  }
3202 
3203  void *pointer;
3204  if(posix_memalign(&pointer, alignment, size) == 0)
3205  return pointer;
3206  return VMA_NULL;
3207 }
3208 #endif
3209 
3210 // If your compiler is not compatible with C++11 and the definition of the
3211 // aligned_alloc() function is missing, uncommenting the following line may help:
3212 
3213 //#include <malloc.h>
3214 
3215 // Normal assert to check for programmer's errors, especially in Debug configuration.
3216 #ifndef VMA_ASSERT
3217  #ifdef _DEBUG
3218  #define VMA_ASSERT(expr) assert(expr)
3219  #else
3220  #define VMA_ASSERT(expr)
3221  #endif
3222 #endif
3223 
3224 // Assert that will be called very often, e.g. inside data structures such as operator[].
3225 // Making it non-empty can make the program slow.
3226 #ifndef VMA_HEAVY_ASSERT
3227  #ifdef _DEBUG
3228  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
3229  #else
3230  #define VMA_HEAVY_ASSERT(expr)
3231  #endif
3232 #endif
3233 
3234 #ifndef VMA_ALIGN_OF
3235  #define VMA_ALIGN_OF(type) (__alignof(type))
3236 #endif
3237 
3238 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
3239  #if defined(_WIN32)
3240  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
3241  #else
3242  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
3243  #endif
3244 #endif
3245 
3246 #ifndef VMA_SYSTEM_FREE
3247  #if defined(_WIN32)
3248  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
3249  #else
3250  #define VMA_SYSTEM_FREE(ptr) free(ptr)
3251  #endif
3252 #endif
3253 
3254 #ifndef VMA_MIN
3255  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
3256 #endif
3257 
3258 #ifndef VMA_MAX
3259  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
3260 #endif
3261 
3262 #ifndef VMA_SWAP
3263  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
3264 #endif
3265 
3266 #ifndef VMA_SORT
3267  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
3268 #endif
3269 
3270 #ifndef VMA_DEBUG_LOG
3271  #define VMA_DEBUG_LOG(format, ...)
3272  /*
3273  #define VMA_DEBUG_LOG(format, ...) do { \
3274  printf(format, __VA_ARGS__); \
3275  printf("\n"); \
3276  } while(false)
3277  */
3278 #endif
3279 
3280 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
3281 #if VMA_STATS_STRING_ENABLED
3282  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
3283  {
3284  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
3285  }
3286  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
3287  {
3288  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
3289  }
3290  static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
3291  {
3292  snprintf(outStr, strLen, "%p", ptr);
3293  }
3294 #endif
3295 
3296 #ifndef VMA_MUTEX
3297  class VmaMutex
3298  {
3299  public:
3300  void Lock() { m_Mutex.lock(); }
3301  void Unlock() { m_Mutex.unlock(); }
3302  private:
3303  std::mutex m_Mutex;
3304  };
3305  #define VMA_MUTEX VmaMutex
3306 #endif
3307 
3308 // Read-write mutex, where "read" is shared access, "write" is exclusive access.
3309 #ifndef VMA_RW_MUTEX
3310  #if VMA_USE_STL_SHARED_MUTEX
3311  // Use std::shared_mutex from C++17.
3312  #include <shared_mutex>
3313  class VmaRWMutex
3314  {
3315  public:
3316  void LockRead() { m_Mutex.lock_shared(); }
3317  void UnlockRead() { m_Mutex.unlock_shared(); }
3318  void LockWrite() { m_Mutex.lock(); }
3319  void UnlockWrite() { m_Mutex.unlock(); }
3320  private:
3321  std::shared_mutex m_Mutex;
3322  };
3323  #define VMA_RW_MUTEX VmaRWMutex
3324  #elif defined(_WIN32)
3325  // Use SRWLOCK from WinAPI.
3326  class VmaRWMutex
3327  {
3328  public:
3329  VmaRWMutex() { InitializeSRWLock(&m_Lock); }
3330  void LockRead() { AcquireSRWLockShared(&m_Lock); }
3331  void UnlockRead() { ReleaseSRWLockShared(&m_Lock); }
3332  void LockWrite() { AcquireSRWLockExclusive(&m_Lock); }
3333  void UnlockWrite() { ReleaseSRWLockExclusive(&m_Lock); }
3334  private:
3335  SRWLOCK m_Lock;
3336  };
3337  #define VMA_RW_MUTEX VmaRWMutex
3338  #else
3339  // Less efficient fallback: Use normal mutex.
3340  class VmaRWMutex
3341  {
3342  public:
3343  void LockRead() { m_Mutex.Lock(); }
3344  void UnlockRead() { m_Mutex.Unlock(); }
3345  void LockWrite() { m_Mutex.Lock(); }
3346  void UnlockWrite() { m_Mutex.Unlock(); }
3347  private:
3348  VMA_MUTEX m_Mutex;
3349  };
3350  #define VMA_RW_MUTEX VmaRWMutex
3351  #endif // #if VMA_USE_STL_SHARED_MUTEX
3352 #endif // #ifndef VMA_RW_MUTEX
3353 
3354 /*
3355 If providing your own implementation, you need to implement a subset of std::atomic:
3356 
3357 - Constructor(uint32_t desired)
3358 - uint32_t load() const
3359 - void store(uint32_t desired)
3360 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
3361 */
3362 #ifndef VMA_ATOMIC_UINT32
3363  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
3364 #endif
3365 
3366 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
3367 
3371  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
3372 #endif
3373 
3374 #ifndef VMA_DEBUG_ALIGNMENT
3375 
3379  #define VMA_DEBUG_ALIGNMENT (1)
3380 #endif
3381 
3382 #ifndef VMA_DEBUG_MARGIN
3383 
3387  #define VMA_DEBUG_MARGIN (0)
3388 #endif
3389 
3390 #ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
3391 
3395  #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
3396 #endif
3397 
3398 #ifndef VMA_DEBUG_DETECT_CORRUPTION
3399 
3404  #define VMA_DEBUG_DETECT_CORRUPTION (0)
3405 #endif
3406 
3407 #ifndef VMA_DEBUG_GLOBAL_MUTEX
3408 
3412  #define VMA_DEBUG_GLOBAL_MUTEX (0)
3413 #endif
3414 
3415 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
3416 
3420  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
3421 #endif
3422 
3423 #ifndef VMA_SMALL_HEAP_MAX_SIZE
3424  #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
3426 #endif
3427 
3428 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
3429  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
3431 #endif
3432 
3433 #ifndef VMA_CLASS_NO_COPY
3434  #define VMA_CLASS_NO_COPY(className) \
3435  private: \
3436  className(const className&) = delete; \
3437  className& operator=(const className&) = delete;
3438 #endif
3439 
3440 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
3441 
3442 // Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
3443 static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;
3444 
3445 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
3446 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
3447 
3448 /*******************************************************************************
3449 END OF CONFIGURATION
3450 */
3451 
3452 static const uint32_t VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET = 0x10000000u;
3453 
3454 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
3455  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
3456 
3457 // Returns number of bits set to 1 in (v).
3458 static inline uint32_t VmaCountBitsSet(uint32_t v)
3459 {
3460  uint32_t c = v - ((v >> 1) & 0x55555555);
3461  c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
3462  c = ((c >> 4) + c) & 0x0F0F0F0F;
3463  c = ((c >> 8) + c) & 0x00FF00FF;
3464  c = ((c >> 16) + c) & 0x0000FFFF;
3465  return c;
3466 }
3467 
3468 // Aligns given value up to the nearest multiple of align value. For example: VmaAlignUp(11, 8) = 16.
3469 // Use types like uint32_t, uint64_t as T.
3470 template <typename T>
3471 static inline T VmaAlignUp(T val, T align)
3472 {
3473  return (val + align - 1) / align * align;
3474 }
3475 // Aligns given value down to the nearest multiple of align value. For example: VmaAlignDown(11, 8) = 8.
3476 // Use types like uint32_t, uint64_t as T.
3477 template <typename T>
3478 static inline T VmaAlignDown(T val, T align)
3479 {
3480  return val / align * align;
3481 }
3482 
3483 // Division with mathematical rounding to the nearest integer.
3484 template <typename T>
3485 static inline T VmaRoundDiv(T x, T y)
3486 {
3487  return (x + (y / (T)2)) / y;
3488 }
3489 
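// Worked examples for the helpers above (illustrative; align = 256 chosen as a
// typical nonCoherentAtomSize):
//   VmaAlignUp<VkDeviceSize>(1000, 256)   == 1024
//   VmaAlignDown<VkDeviceSize>(1000, 256) == 768
//   VmaRoundDiv<VkDeviceSize>(7, 2)       == 4   // computed as (7 + 1) / 2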
3490 /*
3491 Returns true if given number is a power of two.
3492 T must be an unsigned integer, or a signed integer that is always nonnegative.
3493 Returns true for 0.
3494 */
3495 template <typename T>
3496 inline bool VmaIsPow2(T x)
3497 {
3498  return (x & (x-1)) == 0;
3499 }
3500 
3501 // Returns the smallest power of 2 greater than or equal to v.
3502 static inline uint32_t VmaNextPow2(uint32_t v)
3503 {
3504  v--;
3505  v |= v >> 1;
3506  v |= v >> 2;
3507  v |= v >> 4;
3508  v |= v >> 8;
3509  v |= v >> 16;
3510  v++;
3511  return v;
3512 }
3513 static inline uint64_t VmaNextPow2(uint64_t v)
3514 {
3515  v--;
3516  v |= v >> 1;
3517  v |= v >> 2;
3518  v |= v >> 4;
3519  v |= v >> 8;
3520  v |= v >> 16;
3521  v |= v >> 32;
3522  v++;
3523  return v;
3524 }
3525 
3526 // Returns the largest power of 2 less than or equal to v.
3527 static inline uint32_t VmaPrevPow2(uint32_t v)
3528 {
3529  v |= v >> 1;
3530  v |= v >> 2;
3531  v |= v >> 4;
3532  v |= v >> 8;
3533  v |= v >> 16;
3534  v = v ^ (v >> 1);
3535  return v;
3536 }
3537 static inline uint64_t VmaPrevPow2(uint64_t v)
3538 {
3539  v |= v >> 1;
3540  v |= v >> 2;
3541  v |= v >> 4;
3542  v |= v >> 8;
3543  v |= v >> 16;
3544  v |= v >> 32;
3545  v = v ^ (v >> 1);
3546  return v;
3547 }
3548 
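// Worked examples (illustrative): VmaIsPow2(64u) == true, VmaIsPow2(65u) == false;
// VmaNextPow2(17u) == 32 and VmaPrevPow2(17u) == 16; both functions are identities
// for values that are already powers of 2, e.g. VmaNextPow2(32u) == 32.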
3549 static inline bool VmaStrIsEmpty(const char* pStr)
3550 {
3551  return pStr == VMA_NULL || *pStr == '\0';
3552 }
3553 
3554 static const char* VmaAlgorithmToStr(uint32_t algorithm)
3555 {
3556  switch(algorithm)
3557  {
3558  case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
3559  return "Linear";
3560  case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
3561  return "Buddy";
3562  case 0:
3563  return "Default";
3564  default:
3565  VMA_ASSERT(0);
3566  return "";
3567  }
3568 }
3569 
3570 #ifndef VMA_SORT
3571 
3572 template<typename Iterator, typename Compare>
3573 Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
3574 {
3575  Iterator centerValue = end; --centerValue;
3576  Iterator insertIndex = beg;
3577  for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
3578  {
3579  if(cmp(*memTypeIndex, *centerValue))
3580  {
3581  if(insertIndex != memTypeIndex)
3582  {
3583  VMA_SWAP(*memTypeIndex, *insertIndex);
3584  }
3585  ++insertIndex;
3586  }
3587  }
3588  if(insertIndex != centerValue)
3589  {
3590  VMA_SWAP(*insertIndex, *centerValue);
3591  }
3592  return insertIndex;
3593 }
3594 
3595 template<typename Iterator, typename Compare>
3596 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
3597 {
3598  if(beg < end)
3599  {
3600  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
3601  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
3602  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
3603  }
3604 }
3605 
3606 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
3607 
3608 #endif // #ifndef VMA_SORT
3609 
3610 /*
3611 Returns true if two memory blocks occupy overlapping pages.
3612 ResourceA must be at a lower memory offset than ResourceB.
3613 
3614 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
3615 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
3616 */
3617 static inline bool VmaBlocksOnSamePage(
3618  VkDeviceSize resourceAOffset,
3619  VkDeviceSize resourceASize,
3620  VkDeviceSize resourceBOffset,
3621  VkDeviceSize pageSize)
3622 {
3623  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
3624  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
3625  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
3626  VkDeviceSize resourceBStart = resourceBOffset;
3627  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
3628  return resourceAEndPage == resourceBStartPage;
3629 }
3630 
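// Worked example (illustrative), with pageSize = 65536 as a typical
// bufferImageGranularity:
//   VmaBlocksOnSamePage(0, 65536, 65536, 65536) == false // A ends on page 0, B starts on page 1
//   VmaBlocksOnSamePage(0, 65537, 65600, 65536) == true  // both resources touch page 1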
3631 enum VmaSuballocationType
3632 {
3633  VMA_SUBALLOCATION_TYPE_FREE = 0,
3634  VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
3635  VMA_SUBALLOCATION_TYPE_BUFFER = 2,
3636  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
3637  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
3638  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
3639  VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
3640 };
3641 
3642 /*
3643 Returns true if given suballocation types could conflict and must respect
3644 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
3645 or linear image and another one is optimal image. If type is unknown, behave
3646 conservatively.
3647 */
3648 static inline bool VmaIsBufferImageGranularityConflict(
3649  VmaSuballocationType suballocType1,
3650  VmaSuballocationType suballocType2)
3651 {
3652  if(suballocType1 > suballocType2)
3653  {
3654  VMA_SWAP(suballocType1, suballocType2);
3655  }
3656 
3657  switch(suballocType1)
3658  {
3659  case VMA_SUBALLOCATION_TYPE_FREE:
3660  return false;
3661  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
3662  return true;
3663  case VMA_SUBALLOCATION_TYPE_BUFFER:
3664  return
3665  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
3666  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3667  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
3668  return
3669  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
3670  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
3671  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3672  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
3673  return
3674  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3675  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
3676  return false;
3677  default:
3678  VMA_ASSERT(0);
3679  return true;
3680  }
3681 }
3682 
3683 static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
3684 {
3685  uint32_t* pDst = (uint32_t*)((char*)pData + offset);
3686  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
3687  for(size_t i = 0; i < numberCount; ++i, ++pDst)
3688  {
3689  *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
3690  }
3691 }
3692 
3693 static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
3694 {
3695  const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
3696  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
3697  for(size_t i = 0; i < numberCount; ++i, ++pSrc)
3698  {
3699  if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
3700  {
3701  return false;
3702  }
3703  }
3704  return true;
3705 }
3706 
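// Illustrative note: with VMA_DEBUG_MARGIN defined to e.g. 16, VmaWriteMagicValue
// fills the 16-byte margin placed after an allocation with four uint32_t copies of
// VMA_CORRUPTION_DETECTION_MAGIC_VALUE; VmaValidateMagicValue later re-reads those
// words (see vmaCheckCorruption) to detect overruns of the preceding allocation.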
3707 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
3708 struct VmaMutexLock
3709 {
3710  VMA_CLASS_NO_COPY(VmaMutexLock)
3711 public:
3712  VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
3713  m_pMutex(useMutex ? &mutex : VMA_NULL)
3714  { if(m_pMutex) { m_pMutex->Lock(); } }
3715  ~VmaMutexLock()
3716  { if(m_pMutex) { m_pMutex->Unlock(); } }
3717 private:
3718  VMA_MUTEX* m_pMutex;
3719 };
3720 
3721 // Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for reading.
3722 struct VmaMutexLockRead
3723 {
3724  VMA_CLASS_NO_COPY(VmaMutexLockRead)
3725 public:
3726  VmaMutexLockRead(VMA_RW_MUTEX& mutex, bool useMutex) :
3727  m_pMutex(useMutex ? &mutex : VMA_NULL)
3728  { if(m_pMutex) { m_pMutex->LockRead(); } }
3729  ~VmaMutexLockRead() { if(m_pMutex) { m_pMutex->UnlockRead(); } }
3730 private:
3731  VMA_RW_MUTEX* m_pMutex;
3732 };
3733 
3734 // Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for writing.
3735 struct VmaMutexLockWrite
3736 {
3737  VMA_CLASS_NO_COPY(VmaMutexLockWrite)
3738 public:
3739  VmaMutexLockWrite(VMA_RW_MUTEX& mutex, bool useMutex) :
3740  m_pMutex(useMutex ? &mutex : VMA_NULL)
3741  { if(m_pMutex) { m_pMutex->LockWrite(); } }
3742  ~VmaMutexLockWrite() { if(m_pMutex) { m_pMutex->UnlockWrite(); } }
3743 private:
3744  VMA_RW_MUTEX* m_pMutex;
3745 };
3746 
3747 #if VMA_DEBUG_GLOBAL_MUTEX
3748  static VMA_MUTEX gDebugGlobalMutex;
3749  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
3750 #else
3751  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
3752 #endif
3753 
3754 // Minimum size of a free suballocation to register it in the free suballocation collection.
3755 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
3756 
3757 /*
3758 Performs binary search and returns iterator to first element that is greater or
3759 equal to (key), according to comparison (cmp).
3760 
3761 Cmp should return true if first argument is less than second argument.
3762 
3763 The returned iterator points to the found element if it is present in the collection,
3764 or to the place where a new element with value (key) should be inserted.
3765 */
3766 template <typename CmpLess, typename IterT, typename KeyT>
3767 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpLess cmp)
3768 {
3769  size_t down = 0, up = (end - beg);
3770  while(down < up)
3771  {
3772  const size_t mid = (down + up) / 2;
3773  if(cmp(*(beg+mid), key))
3774  {
3775  down = mid + 1;
3776  }
3777  else
3778  {
3779  up = mid;
3780  }
3781  }
3782  return beg + down;
3783 }
3784 
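// Example (illustrative sketch): VmaBinaryFindFirstNotLess behaves like
// std::lower_bound over a sorted range:
//
//   uint32_t arr[] = { 1, 3, 3, 7 };
//   struct Less { bool operator()(uint32_t a, uint32_t b) const { return a < b; } };
//   uint32_t* it = VmaBinaryFindFirstNotLess(arr, arr + 4, 3u, Less());
//   // it == arr + 1 (the first 3); searching for 4u would yield arr + 3 (the 7).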
3785 /*
3786 Returns true if all pointers in the array are non-null and unique.
3787 Warning! O(n^2) complexity. Use only inside VMA_HEAVY_ASSERT.
3788 T must be pointer type, e.g. VmaAllocation, VmaPool.
3789 */
3790 template<typename T>
3791 static bool VmaValidatePointerArray(uint32_t count, const T* arr)
3792 {
3793  for(uint32_t i = 0; i < count; ++i)
3794  {
3795  const T iPtr = arr[i];
3796  if(iPtr == VMA_NULL)
3797  {
3798  return false;
3799  }
3800  for(uint32_t j = i + 1; j < count; ++j)
3801  {
3802  if(iPtr == arr[j])
3803  {
3804  return false;
3805  }
3806  }
3807  }
3808  return true;
3809 }
3810 
3812 // Memory allocation
3813 
3814 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
3815 {
3816  if((pAllocationCallbacks != VMA_NULL) &&
3817  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
3818  {
3819  return (*pAllocationCallbacks->pfnAllocation)(
3820  pAllocationCallbacks->pUserData,
3821  size,
3822  alignment,
3823  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
3824  }
3825  else
3826  {
3827  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
3828  }
3829 }
3830 
3831 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
3832 {
3833  if((pAllocationCallbacks != VMA_NULL) &&
3834  (pAllocationCallbacks->pfnFree != VMA_NULL))
3835  {
3836  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
3837  }
3838  else
3839  {
3840  VMA_SYSTEM_FREE(ptr);
3841  }
3842 }
3843 
3844 template<typename T>
3845 static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
3846 {
3847  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
3848 }
3849 
3850 template<typename T>
3851 static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
3852 {
3853  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
3854 }
3855 
3856 #define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
3857 
3858 #define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
3859 
3860 template<typename T>
3861 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
3862 {
3863  ptr->~T();
3864  VmaFree(pAllocationCallbacks, ptr);
3865 }
3866 
3867 template<typename T>
3868 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
3869 {
3870  if(ptr != VMA_NULL)
3871  {
3872  for(size_t i = count; i--; )
3873  {
3874  ptr[i].~T();
3875  }
3876  VmaFree(pAllocationCallbacks, ptr);
3877  }
3878 }
3879 
3880 // STL-compatible allocator.
3881 template<typename T>
3882 class VmaStlAllocator
3883 {
3884 public:
3885  const VkAllocationCallbacks* const m_pCallbacks;
3886  typedef T value_type;
3887 
3888  VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
3889  template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }
3890 
3891  T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
3892  void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
3893 
3894  template<typename U>
3895  bool operator==(const VmaStlAllocator<U>& rhs) const
3896  {
3897  return m_pCallbacks == rhs.m_pCallbacks;
3898  }
3899  template<typename U>
3900  bool operator!=(const VmaStlAllocator<U>& rhs) const
3901  {
3902  return m_pCallbacks != rhs.m_pCallbacks;
3903  }
3904 
3905  VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
3906 };
3907 
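// Example (illustrative sketch): VmaStlAllocator adapts VkAllocationCallbacks to the
// STL allocator interface, so it can back the containers defined below (or std::vector
// when VMA_USE_STL_VECTOR is enabled). Assumes `pCallbacks` is a
// const VkAllocationCallbacks* (may be null to fall back to aligned malloc/free):
//
//   VmaVector< uint32_t, VmaStlAllocator<uint32_t> > v(VmaStlAllocator<uint32_t>(pCallbacks));
//   v.push_back(42u);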
3908 #if VMA_USE_STL_VECTOR
3909 
3910 #define VmaVector std::vector
3911 
3912 template<typename T, typename allocatorT>
3913 static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
3914 {
3915  vec.insert(vec.begin() + index, item);
3916 }
3917 
3918 template<typename T, typename allocatorT>
3919 static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
3920 {
3921  vec.erase(vec.begin() + index);
3922 }
3923 
3924 #else // #if VMA_USE_STL_VECTOR
3925 
3926 /* Class with an interface compatible with a subset of std::vector.
3927 T must be a POD type because constructors and destructors are not called and
3928 memcpy is used to move these objects. */
3929 template<typename T, typename AllocatorT>
3930 class VmaVector
3931 {
3932 public:
3933  typedef T value_type;
3934 
3935  VmaVector(const AllocatorT& allocator) :
3936  m_Allocator(allocator),
3937  m_pArray(VMA_NULL),
3938  m_Count(0),
3939  m_Capacity(0)
3940  {
3941  }
3942 
3943  VmaVector(size_t count, const AllocatorT& allocator) :
3944  m_Allocator(allocator),
3945  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
3946  m_Count(count),
3947  m_Capacity(count)
3948  {
3949  }
3950 
3951  VmaVector(const VmaVector<T, AllocatorT>& src) :
3952  m_Allocator(src.m_Allocator),
3953  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
3954  m_Count(src.m_Count),
3955  m_Capacity(src.m_Count)
3956  {
3957  if(m_Count != 0)
3958  {
3959  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
3960  }
3961  }
3962 
3963  ~VmaVector()
3964  {
3965  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
3966  }
3967 
3968  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
3969  {
3970  if(&rhs != this)
3971  {
3972  resize(rhs.m_Count);
3973  if(m_Count != 0)
3974  {
3975  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
3976  }
3977  }
3978  return *this;
3979  }
3980 
3981  bool empty() const { return m_Count == 0; }
3982  size_t size() const { return m_Count; }
3983  T* data() { return m_pArray; }
3984  const T* data() const { return m_pArray; }
3985 
3986  T& operator[](size_t index)
3987  {
3988  VMA_HEAVY_ASSERT(index < m_Count);
3989  return m_pArray[index];
3990  }
3991  const T& operator[](size_t index) const
3992  {
3993  VMA_HEAVY_ASSERT(index < m_Count);
3994  return m_pArray[index];
3995  }
3996 
3997  T& front()
3998  {
3999  VMA_HEAVY_ASSERT(m_Count > 0);
4000  return m_pArray[0];
4001  }
4002  const T& front() const
4003  {
4004  VMA_HEAVY_ASSERT(m_Count > 0);
4005  return m_pArray[0];
4006  }
4007  T& back()
4008  {
4009  VMA_HEAVY_ASSERT(m_Count > 0);
4010  return m_pArray[m_Count - 1];
4011  }
4012  const T& back() const
4013  {
4014  VMA_HEAVY_ASSERT(m_Count > 0);
4015  return m_pArray[m_Count - 1];
4016  }
4017 
4018  void reserve(size_t newCapacity, bool freeMemory = false)
4019  {
4020  newCapacity = VMA_MAX(newCapacity, m_Count);
4021 
4022  if((newCapacity < m_Capacity) && !freeMemory)
4023  {
4024  newCapacity = m_Capacity;
4025  }
4026 
4027  if(newCapacity != m_Capacity)
4028  {
4029  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
4030  if(m_Count != 0)
4031  {
4032  memcpy(newArray, m_pArray, m_Count * sizeof(T));
4033  }
4034  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
4035  m_Capacity = newCapacity;
4036  m_pArray = newArray;
4037  }
4038  }
4039 
4040  void resize(size_t newCount, bool freeMemory = false)
4041  {
4042  size_t newCapacity = m_Capacity;
4043  if(newCount > m_Capacity)
4044  {
4045  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
4046  }
4047  else if(freeMemory)
4048  {
4049  newCapacity = newCount;
4050  }
4051 
4052  if(newCapacity != m_Capacity)
4053  {
4054  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
4055  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
4056  if(elementsToCopy != 0)
4057  {
4058  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
4059  }
4060  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
4061  m_Capacity = newCapacity;
4062  m_pArray = newArray;
4063  }
4064 
4065  m_Count = newCount;
4066  }
4067 
4068  void clear(bool freeMemory = false)
4069  {
4070  resize(0, freeMemory);
4071  }
4072 
4073  void insert(size_t index, const T& src)
4074  {
4075  VMA_HEAVY_ASSERT(index <= m_Count);
4076  const size_t oldCount = size();
4077  resize(oldCount + 1);
4078  if(index < oldCount)
4079  {
4080  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
4081  }
4082  m_pArray[index] = src;
4083  }
4084 
4085  void remove(size_t index)
4086  {
4087  VMA_HEAVY_ASSERT(index < m_Count);
4088  const size_t oldCount = size();
4089  if(index < oldCount - 1)
4090  {
4091  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
4092  }
4093  resize(oldCount - 1);
4094  }
4095 
4096  void push_back(const T& src)
4097  {
4098  const size_t newIndex = size();
4099  resize(newIndex + 1);
4100  m_pArray[newIndex] = src;
4101  }
4102 
4103  void pop_back()
4104  {
4105  VMA_HEAVY_ASSERT(m_Count > 0);
4106  resize(size() - 1);
4107  }
4108 
4109  void push_front(const T& src)
4110  {
4111  insert(0, src);
4112  }
4113 
4114  void pop_front()
4115  {
4116  VMA_HEAVY_ASSERT(m_Count > 0);
4117  remove(0);
4118  }
4119 
4120  typedef T* iterator;
4121 
4122  iterator begin() { return m_pArray; }
4123  iterator end() { return m_pArray + m_Count; }
4124 
4125 private:
4126  AllocatorT m_Allocator;
4127  T* m_pArray;
4128  size_t m_Count;
4129  size_t m_Capacity;
4130 };
4131 
4132 template<typename T, typename allocatorT>
4133 static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
4134 {
4135  vec.insert(index, item);
4136 }
4137 
4138 template<typename T, typename allocatorT>
4139 static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
4140 {
4141  vec.remove(index);
4142 }
4143 
4144 #endif // #if VMA_USE_STL_VECTOR
4145 
4146 template<typename CmpLess, typename VectorT>
4147 size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
4148 {
4149  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
4150  vector.data(),
4151  vector.data() + vector.size(),
4152  value,
4153  CmpLess()) - vector.data();
4154  VmaVectorInsert(vector, indexToInsert, value);
4155  return indexToInsert;
4156 }
4157 
4158 template<typename CmpLess, typename VectorT>
4159 bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
4160 {
4161  CmpLess comparator;
4162  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
4163  vector.begin(),
4164  vector.end(),
4165  value,
4166  comparator);
4167  if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
4168  {
4169  size_t indexToRemove = it - vector.begin();
4170  VmaVectorRemove(vector, indexToRemove);
4171  return true;
4172  }
4173  return false;
4174 }
4175 
4176 template<typename CmpLess, typename IterT, typename KeyT>
4177 IterT VmaVectorFindSorted(const IterT& beg, const IterT& end, const KeyT& value)
4178 {
4179  CmpLess comparator;
4180  IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
4181  beg, end, value, comparator);
4182  if(it == end ||
4183  (!comparator(*it, value) && !comparator(value, *it)))
4184  {
4185  return it;
4186  }
4187  return end;
4188 }
4189 
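// Example (illustrative sketch): keeping a vector sorted with the helpers above,
// using a plain less-than comparator. Assumes `pCallbacks` as before:
//
//   struct SizeLess { bool operator()(VkDeviceSize a, VkDeviceSize b) const { return a < b; } };
//   VmaVector< VkDeviceSize, VmaStlAllocator<VkDeviceSize> > sizes(
//       VmaStlAllocator<VkDeviceSize>(pCallbacks));
//   VmaVectorInsertSorted<SizeLess>(sizes, (VkDeviceSize)256);
//   VmaVectorInsertSorted<SizeLess>(sizes, (VkDeviceSize)64);  // lands before 256
//   bool removed = VmaVectorRemoveSorted<SizeLess>(sizes, (VkDeviceSize)256); // true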
4191 // class VmaPoolAllocator
4192 
4193 /*
4194 Allocator for objects of type T, using a list of arrays (pools) to speed up
4195 allocation. The number of elements that can be allocated is not bounded, because
4196 the allocator can create multiple blocks.
4197 */
4198 template<typename T>
4199 class VmaPoolAllocator
4200 {
4201  VMA_CLASS_NO_COPY(VmaPoolAllocator)
4202 public:
4203  VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
4204  ~VmaPoolAllocator();
4205  void Clear();
4206  T* Alloc();
4207  void Free(T* ptr);
4208 
4209 private:
4210  union Item
4211  {
4212  uint32_t NextFreeIndex;
4213  T Value;
4214  };
4215 
4216  struct ItemBlock
4217  {
4218  Item* pItems;
4219  uint32_t FirstFreeIndex;
4220  };
4221 
4222  const VkAllocationCallbacks* m_pAllocationCallbacks;
4223  size_t m_ItemsPerBlock;
4224  VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;
4225 
4226  ItemBlock& CreateNewBlock();
4227 };
4228 
4229 template<typename T>
4230 VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
4231  m_pAllocationCallbacks(pAllocationCallbacks),
4232  m_ItemsPerBlock(itemsPerBlock),
4233  m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
4234 {
4235  VMA_ASSERT(itemsPerBlock > 0);
4236 }
4237 
4238 template<typename T>
4239 VmaPoolAllocator<T>::~VmaPoolAllocator()
4240 {
4241  Clear();
4242 }
4243 
4244 template<typename T>
4245 void VmaPoolAllocator<T>::Clear()
4246 {
4247  for(size_t i = m_ItemBlocks.size(); i--; )
4248  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
4249  m_ItemBlocks.clear();
4250 }
4251 
4252 template<typename T>
4253 T* VmaPoolAllocator<T>::Alloc()
4254 {
4255  for(size_t i = m_ItemBlocks.size(); i--; )
4256  {
4257  ItemBlock& block = m_ItemBlocks[i];
4258  // This block has some free items: use the first one.
4259  if(block.FirstFreeIndex != UINT32_MAX)
4260  {
4261  Item* const pItem = &block.pItems[block.FirstFreeIndex];
4262  block.FirstFreeIndex = pItem->NextFreeIndex;
4263  return &pItem->Value;
4264  }
4265  }
4266 
4267  // No block has a free item: create a new one and use it.
4268  ItemBlock& newBlock = CreateNewBlock();
4269  Item* const pItem = &newBlock.pItems[0];
4270  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
4271  return &pItem->Value;
4272 }
4273 
4274 template<typename T>
4275 void VmaPoolAllocator<T>::Free(T* ptr)
4276 {
4277  // Search all memory blocks to find ptr.
4278  for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
4279  {
4280  ItemBlock& block = m_ItemBlocks[i];
4281 
4282  // Casting to union.
4283  Item* pItemPtr;
4284  memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
4285 
4286  // Check if pItemPtr is in address range of this block.
4287  if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
4288  {
4289  const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
4290  pItemPtr->NextFreeIndex = block.FirstFreeIndex;
4291  block.FirstFreeIndex = index;
4292  return;
4293  }
4294  }
4295  VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
4296 }
4297 
4298 template<typename T>
4299 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
4300 {
4301  ItemBlock newBlock = {
4302  vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };
4303 
4304  m_ItemBlocks.push_back(newBlock);
4305 
4306  // Setup singly-linked list of all free items in this block.
4307  for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
4308  newBlock.pItems[i].NextFreeIndex = i + 1;
4309  newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
4310  return m_ItemBlocks.back();
4311 }
4312 
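// Example (illustrative sketch): allocating pooled items. Alloc() scans existing
// blocks for a free slot and creates a new block (of itemsPerBlock items) when none
// has one; Free() locates the owning block by address range. Neither runs T's
// constructor or destructor, so T should be POD-like:
//
//   struct Node { uint32_t prev; uint32_t next; };
//   VmaPoolAllocator<Node> nodeAllocator(pCallbacks, 128); // pCallbacks: const VkAllocationCallbacks*
//   Node* n = nodeAllocator.Alloc();
//   n->prev = 0; n->next = 1;
//   nodeAllocator.Free(n);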
4314 // class VmaRawList, VmaList
4315 
4316 #if VMA_USE_STL_LIST
4317 
4318 #define VmaList std::list
4319 
4320 #else // #if VMA_USE_STL_LIST
4321 
4322 template<typename T>
4323 struct VmaListItem
4324 {
4325  VmaListItem* pPrev;
4326  VmaListItem* pNext;
4327  T Value;
4328 };
4329 
4330 // Doubly linked list.
4331 template<typename T>
4332 class VmaRawList
4333 {
4334  VMA_CLASS_NO_COPY(VmaRawList)
4335 public:
4336  typedef VmaListItem<T> ItemType;
4337 
4338  VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
4339  ~VmaRawList();
4340  void Clear();
4341 
4342  size_t GetCount() const { return m_Count; }
4343  bool IsEmpty() const { return m_Count == 0; }
4344 
4345  ItemType* Front() { return m_pFront; }
4346  const ItemType* Front() const { return m_pFront; }
4347  ItemType* Back() { return m_pBack; }
4348  const ItemType* Back() const { return m_pBack; }
4349 
4350  ItemType* PushBack();
4351  ItemType* PushFront();
4352  ItemType* PushBack(const T& value);
4353  ItemType* PushFront(const T& value);
4354  void PopBack();
4355  void PopFront();
4356 
4357  // pItem can be null, which means PushBack.
4358  ItemType* InsertBefore(ItemType* pItem);
4359  // pItem can be null, which means PushFront.
4360  ItemType* InsertAfter(ItemType* pItem);
4361 
4362  ItemType* InsertBefore(ItemType* pItem, const T& value);
4363  ItemType* InsertAfter(ItemType* pItem, const T& value);
4364 
4365  void Remove(ItemType* pItem);
4366 
4367 private:
4368  const VkAllocationCallbacks* const m_pAllocationCallbacks;
4369  VmaPoolAllocator<ItemType> m_ItemAllocator;
4370  ItemType* m_pFront;
4371  ItemType* m_pBack;
4372  size_t m_Count;
4373 };
4374 
4375 template<typename T>
4376 VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
4377  m_pAllocationCallbacks(pAllocationCallbacks),
4378  m_ItemAllocator(pAllocationCallbacks, 128),
4379  m_pFront(VMA_NULL),
4380  m_pBack(VMA_NULL),
4381  m_Count(0)
4382 {
4383 }
4384 
4385 template<typename T>
4386 VmaRawList<T>::~VmaRawList()
4387 {
4388  // Intentionally not calling Clear, because that would require unnecessary
4389  // computation to return all items to m_ItemAllocator as free.
4390 }
4391 
4392 template<typename T>
4393 void VmaRawList<T>::Clear()
4394 {
4395  if(IsEmpty() == false)
4396  {
4397  ItemType* pItem = m_pBack;
4398  while(pItem != VMA_NULL)
4399  {
4400  ItemType* const pPrevItem = pItem->pPrev;
4401  m_ItemAllocator.Free(pItem);
4402  pItem = pPrevItem;
4403  }
4404  m_pFront = VMA_NULL;
4405  m_pBack = VMA_NULL;
4406  m_Count = 0;
4407  }
4408 }
4409 
4410 template<typename T>
4411 VmaListItem<T>* VmaRawList<T>::PushBack()
4412 {
4413  ItemType* const pNewItem = m_ItemAllocator.Alloc();
4414  pNewItem->pNext = VMA_NULL;
4415  if(IsEmpty())
4416  {
4417  pNewItem->pPrev = VMA_NULL;
4418  m_pFront = pNewItem;
4419  m_pBack = pNewItem;
4420  m_Count = 1;
4421  }
4422  else
4423  {
4424  pNewItem->pPrev = m_pBack;
4425  m_pBack->pNext = pNewItem;
4426  m_pBack = pNewItem;
4427  ++m_Count;
4428  }
4429  return pNewItem;
4430 }
4431 
4432 template<typename T>
4433 VmaListItem<T>* VmaRawList<T>::PushFront()
4434 {
4435  ItemType* const pNewItem = m_ItemAllocator.Alloc();
4436  pNewItem->pPrev = VMA_NULL;
4437  if(IsEmpty())
4438  {
4439  pNewItem->pNext = VMA_NULL;
4440  m_pFront = pNewItem;
4441  m_pBack = pNewItem;
4442  m_Count = 1;
4443  }
4444  else
4445  {
4446  pNewItem->pNext = m_pFront;
4447  m_pFront->pPrev = pNewItem;
4448  m_pFront = pNewItem;
4449  ++m_Count;
4450  }
4451  return pNewItem;
4452 }
4453 
4454 template<typename T>
4455 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
4456 {
4457  ItemType* const pNewItem = PushBack();
4458  pNewItem->Value = value;
4459  return pNewItem;
4460 }
4461 
4462 template<typename T>
4463 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
4464 {
4465  ItemType* const pNewItem = PushFront();
4466  pNewItem->Value = value;
4467  return pNewItem;
4468 }
4469 
4470 template<typename T>
4471 void VmaRawList<T>::PopBack()
4472 {
4473  VMA_HEAVY_ASSERT(m_Count > 0);
4474  ItemType* const pBackItem = m_pBack;
4475  ItemType* const pPrevItem = pBackItem->pPrev;
4476  if(pPrevItem != VMA_NULL)
4477  {
4478  pPrevItem->pNext = VMA_NULL;
4479  }
4480  m_pBack = pPrevItem;
4481  m_ItemAllocator.Free(pBackItem);
4482  --m_Count;
4483 }
4484 
4485 template<typename T>
4486 void VmaRawList<T>::PopFront()
4487 {
4488  VMA_HEAVY_ASSERT(m_Count > 0);
4489  ItemType* const pFrontItem = m_pFront;
4490  ItemType* const pNextItem = pFrontItem->pNext;
4491  if(pNextItem != VMA_NULL)
4492  {
4493  pNextItem->pPrev = VMA_NULL;
4494  }
4495  m_pFront = pNextItem;
4496  m_ItemAllocator.Free(pFrontItem);
4497  --m_Count;
4498 }
4499 
4500 template<typename T>
4501 void VmaRawList<T>::Remove(ItemType* pItem)
4502 {
4503  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
4504  VMA_HEAVY_ASSERT(m_Count > 0);
4505 
4506  if(pItem->pPrev != VMA_NULL)
4507  {
4508  pItem->pPrev->pNext = pItem->pNext;
4509  }
4510  else
4511  {
4512  VMA_HEAVY_ASSERT(m_pFront == pItem);
4513  m_pFront = pItem->pNext;
4514  }
4515 
4516  if(pItem->pNext != VMA_NULL)
4517  {
4518  pItem->pNext->pPrev = pItem->pPrev;
4519  }
4520  else
4521  {
4522  VMA_HEAVY_ASSERT(m_pBack == pItem);
4523  m_pBack = pItem->pPrev;
4524  }
4525 
4526  m_ItemAllocator.Free(pItem);
4527  --m_Count;
4528 }
4529 
4530 template<typename T>
4531 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
4532 {
4533  if(pItem != VMA_NULL)
4534  {
4535  ItemType* const prevItem = pItem->pPrev;
4536  ItemType* const newItem = m_ItemAllocator.Alloc();
4537  newItem->pPrev = prevItem;
4538  newItem->pNext = pItem;
4539  pItem->pPrev = newItem;
4540  if(prevItem != VMA_NULL)
4541  {
4542  prevItem->pNext = newItem;
4543  }
4544  else
4545  {
4546  VMA_HEAVY_ASSERT(m_pFront == pItem);
4547  m_pFront = newItem;
4548  }
4549  ++m_Count;
4550  return newItem;
4551  }
4552  else
4553  return PushBack();
4554 }
4555 
4556 template<typename T>
4557 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
4558 {
4559  if(pItem != VMA_NULL)
4560  {
4561  ItemType* const nextItem = pItem->pNext;
4562  ItemType* const newItem = m_ItemAllocator.Alloc();
4563  newItem->pNext = nextItem;
4564  newItem->pPrev = pItem;
4565  pItem->pNext = newItem;
4566  if(nextItem != VMA_NULL)
4567  {
4568  nextItem->pPrev = newItem;
4569  }
4570  else
4571  {
4572  VMA_HEAVY_ASSERT(m_pBack == pItem);
4573  m_pBack = newItem;
4574  }
4575  ++m_Count;
4576  return newItem;
4577  }
4578  else
4579  return PushFront();
4580 }
4581 
4582 template<typename T>
4583 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
4584 {
4585  ItemType* const newItem = InsertBefore(pItem);
4586  newItem->Value = value;
4587  return newItem;
4588 }
4589 
4590 template<typename T>
4591 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
4592 {
4593  ItemType* const newItem = InsertAfter(pItem);
4594  newItem->Value = value;
4595  return newItem;
4596 }
4597 
4598 template<typename T, typename AllocatorT>
4599 class VmaList
4600 {
4601  VMA_CLASS_NO_COPY(VmaList)
4602 public:
4603  class iterator
4604  {
4605  public:
4606  iterator() :
4607  m_pList(VMA_NULL),
4608  m_pItem(VMA_NULL)
4609  {
4610  }
4611 
4612  T& operator*() const
4613  {
4614  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4615  return m_pItem->Value;
4616  }
4617  T* operator->() const
4618  {
4619  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4620  return &m_pItem->Value;
4621  }
4622 
4623  iterator& operator++()
4624  {
4625  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4626  m_pItem = m_pItem->pNext;
4627  return *this;
4628  }
4629  iterator& operator--()
4630  {
4631  if(m_pItem != VMA_NULL)
4632  {
4633  m_pItem = m_pItem->pPrev;
4634  }
4635  else
4636  {
4637  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
4638  m_pItem = m_pList->Back();
4639  }
4640  return *this;
4641  }
4642 
4643  iterator operator++(int)
4644  {
4645  iterator result = *this;
4646  ++*this;
4647  return result;
4648  }
4649  iterator operator--(int)
4650  {
4651  iterator result = *this;
4652  --*this;
4653  return result;
4654  }
4655 
4656  bool operator==(const iterator& rhs) const
4657  {
4658  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4659  return m_pItem == rhs.m_pItem;
4660  }
4661  bool operator!=(const iterator& rhs) const
4662  {
4663  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4664  return m_pItem != rhs.m_pItem;
4665  }
4666 
4667  private:
4668  VmaRawList<T>* m_pList;
4669  VmaListItem<T>* m_pItem;
4670 
4671  iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
4672  m_pList(pList),
4673  m_pItem(pItem)
4674  {
4675  }
4676 
4677  friend class VmaList<T, AllocatorT>;
4678  };
4679 
4680  class const_iterator
4681  {
4682  public:
4683  const_iterator() :
4684  m_pList(VMA_NULL),
4685  m_pItem(VMA_NULL)
4686  {
4687  }
4688 
4689  const_iterator(const iterator& src) :
4690  m_pList(src.m_pList),
4691  m_pItem(src.m_pItem)
4692  {
4693  }
4694 
4695  const T& operator*() const
4696  {
4697  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4698  return m_pItem->Value;
4699  }
4700  const T* operator->() const
4701  {
4702  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4703  return &m_pItem->Value;
4704  }
4705 
4706  const_iterator& operator++()
4707  {
4708  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4709  m_pItem = m_pItem->pNext;
4710  return *this;
4711  }
4712  const_iterator& operator--()
4713  {
4714  if(m_pItem != VMA_NULL)
4715  {
4716  m_pItem = m_pItem->pPrev;
4717  }
4718  else
4719  {
4720  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
4721  m_pItem = m_pList->Back();
4722  }
4723  return *this;
4724  }
4725 
4726  const_iterator operator++(int)
4727  {
4728  const_iterator result = *this;
4729  ++*this;
4730  return result;
4731  }
4732  const_iterator operator--(int)
4733  {
4734  const_iterator result = *this;
4735  --*this;
4736  return result;
4737  }
4738 
4739  bool operator==(const const_iterator& rhs) const
4740  {
4741  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4742  return m_pItem == rhs.m_pItem;
4743  }
4744  bool operator!=(const const_iterator& rhs) const
4745  {
4746  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4747  return m_pItem != rhs.m_pItem;
4748  }
4749 
4750  private:
4751  const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
4752  m_pList(pList),
4753  m_pItem(pItem)
4754  {
4755  }
4756 
4757  const VmaRawList<T>* m_pList;
4758  const VmaListItem<T>* m_pItem;
4759 
4760  friend class VmaList<T, AllocatorT>;
4761  };
4762 
4763  VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }
4764 
4765  bool empty() const { return m_RawList.IsEmpty(); }
4766  size_t size() const { return m_RawList.GetCount(); }
4767 
4768  iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
4769  iterator end() { return iterator(&m_RawList, VMA_NULL); }
4770 
4771  const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
4772  const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
4773 
4774  void clear() { m_RawList.Clear(); }
4775  void push_back(const T& value) { m_RawList.PushBack(value); }
4776  void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
4777  iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
4778 
4779 private:
4780  VmaRawList<T> m_RawList;
4781 };
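/*
Editorial usage sketch (illustration only, not part of the library; assumes a valid
VkAllocationCallbacks pointer pCallbacks): VmaList wraps VmaRawList behind the subset
of the std::list interface used internally.

    typedef VmaList< uint32_t, VmaStlAllocator<uint32_t> > MyList;
    VmaStlAllocator<uint32_t> alloc(pCallbacks);
    MyList list(alloc);
    list.push_back(10);
    list.push_back(30);
    MyList::iterator it = list.begin();
    ++it;                // points at 30
    list.insert(it, 20); // list: 10, 20, 30
*/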
4782 
4783 #endif // #if VMA_USE_STL_LIST
4784 
4785 ////////////////////////////////////////////////////////////////////////////////
4786 // class VmaMap
4787 
4788 // Unused in this version.
4789 #if 0
4790 
4791 #if VMA_USE_STL_UNORDERED_MAP
4792 
4793 #define VmaPair std::pair
4794 
4795 #define VMA_MAP_TYPE(KeyT, ValueT) \
4796  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
4797 
4798 #else // #if VMA_USE_STL_UNORDERED_MAP
4799 
4800 template<typename T1, typename T2>
4801 struct VmaPair
4802 {
4803  T1 first;
4804  T2 second;
4805 
4806  VmaPair() : first(), second() { }
4807  VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
4808 };
4809 
4810 /* Class compatible with a subset of the interface of std::unordered_map.
4811 KeyT and ValueT must be POD because they are stored in VmaVector.
4812 */
4813 template<typename KeyT, typename ValueT>
4814 class VmaMap
4815 {
4816 public:
4817  typedef VmaPair<KeyT, ValueT> PairType;
4818  typedef PairType* iterator;
4819 
4820  VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }
4821 
4822  iterator begin() { return m_Vector.begin(); }
4823  iterator end() { return m_Vector.end(); }
4824 
4825  void insert(const PairType& pair);
4826  iterator find(const KeyT& key);
4827  void erase(iterator it);
4828 
4829 private:
4830  VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
4831 };
4832 
4833 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
4834 
4835 template<typename FirstT, typename SecondT>
4836 struct VmaPairFirstLess
4837 {
4838  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
4839  {
4840  return lhs.first < rhs.first;
4841  }
4842  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
4843  {
4844  return lhs.first < rhsFirst;
4845  }
4846 };
4847 
4848 template<typename KeyT, typename ValueT>
4849 void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
4850 {
4851  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
4852  m_Vector.data(),
4853  m_Vector.data() + m_Vector.size(),
4854  pair,
4855  VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
4856  VmaVectorInsert(m_Vector, indexToInsert, pair);
4857 }
4858 
4859 template<typename KeyT, typename ValueT>
4860 VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
4861 {
4862  PairType* it = VmaBinaryFindFirstNotLess(
4863  m_Vector.data(),
4864  m_Vector.data() + m_Vector.size(),
4865  key,
4866  VmaPairFirstLess<KeyT, ValueT>());
4867  if((it != m_Vector.end()) && (it->first == key))
4868  {
4869  return it;
4870  }
4871  else
4872  {
4873  return m_Vector.end();
4874  }
4875 }
4876 
4877 template<typename KeyT, typename ValueT>
4878 void VmaMap<KeyT, ValueT>::erase(iterator it)
4879 {
4880  VmaVectorRemove(m_Vector, it - m_Vector.begin());
4881 }
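/*
Editorial note: because insert() places each pair at the position found by
VmaBinaryFindFirstNotLess, m_Vector stays sorted by key at all times, which is what
lets find() run the same binary search. Worked illustration with integer keys:

    insert {5, a} -> [5]
    insert {2, b} -> [2, 5]
    insert {7, c} -> [2, 5, 7]
    find(5)       -> binary search, O(log n), hits the middle element.
*/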
4882 
4883 #endif // #if VMA_USE_STL_UNORDERED_MAP
4884 
4885 #endif // #if 0
4886 
4887 ////////////////////////////////////////////////////////////////////////////////
4888 
4889 class VmaDeviceMemoryBlock;
4890 
4891 enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
4892 
4893 struct VmaAllocation_T
4894 {
4895  VMA_CLASS_NO_COPY(VmaAllocation_T)
4896 private:
4897  static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;
4898 
4899  enum FLAGS
4900  {
4901  FLAG_USER_DATA_STRING = 0x01,
4902  };
4903 
4904 public:
4905  enum ALLOCATION_TYPE
4906  {
4907  ALLOCATION_TYPE_NONE,
4908  ALLOCATION_TYPE_BLOCK,
4909  ALLOCATION_TYPE_DEDICATED,
4910  };
4911 
4912  VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
4913  m_Alignment(1),
4914  m_Size(0),
4915  m_pUserData(VMA_NULL),
4916  m_LastUseFrameIndex(currentFrameIndex),
4917  m_Type((uint8_t)ALLOCATION_TYPE_NONE),
4918  m_SuballocationType((uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN),
4919  m_MapCount(0),
4920  m_Flags(userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0)
4921  {
4922 #if VMA_STATS_STRING_ENABLED
4923  m_CreationFrameIndex = currentFrameIndex;
4924  m_BufferImageUsage = 0;
4925 #endif
4926  }
4927 
4928  ~VmaAllocation_T()
4929  {
4930  VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");
4931 
4932  // Check if owned string was freed.
4933  VMA_ASSERT(m_pUserData == VMA_NULL);
4934  }
4935 
4936  void InitBlockAllocation(
4937  VmaPool hPool,
4938  VmaDeviceMemoryBlock* block,
4939  VkDeviceSize offset,
4940  VkDeviceSize alignment,
4941  VkDeviceSize size,
4942  VmaSuballocationType suballocationType,
4943  bool mapped,
4944  bool canBecomeLost)
4945  {
4946  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
4947  VMA_ASSERT(block != VMA_NULL);
4948  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
4949  m_Alignment = alignment;
4950  m_Size = size;
4951  m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
4952  m_SuballocationType = (uint8_t)suballocationType;
4953  m_BlockAllocation.m_hPool = hPool;
4954  m_BlockAllocation.m_Block = block;
4955  m_BlockAllocation.m_Offset = offset;
4956  m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
4957  }
4958 
4959  void InitLost()
4960  {
4961  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
4962  VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
4963  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
4964  m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
4965  m_BlockAllocation.m_Block = VMA_NULL;
4966  m_BlockAllocation.m_Offset = 0;
4967  m_BlockAllocation.m_CanBecomeLost = true;
4968  }
4969 
4970  void ChangeBlockAllocation(
4971  VmaAllocator hAllocator,
4972  VmaDeviceMemoryBlock* block,
4973  VkDeviceSize offset);
4974 
4975  void ChangeSize(VkDeviceSize newSize);
4976  void ChangeOffset(VkDeviceSize newOffset);
4977 
4978  // A non-null pMappedData means the allocation was created with the MAPPED flag.
4979  void InitDedicatedAllocation(
4980  uint32_t memoryTypeIndex,
4981  VkDeviceMemory hMemory,
4982  VmaSuballocationType suballocationType,
4983  void* pMappedData,
4984  VkDeviceSize size)
4985  {
4986  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
4987  VMA_ASSERT(hMemory != VK_NULL_HANDLE);
4988  m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
4989  m_Alignment = 0;
4990  m_Size = size;
4991  m_SuballocationType = (uint8_t)suballocationType;
4992  m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
4993  m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
4994  m_DedicatedAllocation.m_hMemory = hMemory;
4995  m_DedicatedAllocation.m_pMappedData = pMappedData;
4996  }
4997 
4998  ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
4999  VkDeviceSize GetAlignment() const { return m_Alignment; }
5000  VkDeviceSize GetSize() const { return m_Size; }
5001  bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
5002  void* GetUserData() const { return m_pUserData; }
5003  void SetUserData(VmaAllocator hAllocator, void* pUserData);
5004  VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }
5005 
5006  VmaDeviceMemoryBlock* GetBlock() const
5007  {
5008  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
5009  return m_BlockAllocation.m_Block;
5010  }
5011  VkDeviceSize GetOffset() const;
5012  VkDeviceMemory GetMemory() const;
5013  uint32_t GetMemoryTypeIndex() const;
5014  bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
5015  void* GetMappedData() const;
5016  bool CanBecomeLost() const;
5017  VmaPool GetPool() const;
5018 
5019  uint32_t GetLastUseFrameIndex() const
5020  {
5021  return m_LastUseFrameIndex.load();
5022  }
5023  bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
5024  {
5025  return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
5026  }
5027  /*
5028  - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
5029  makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
5030  - Otherwise, returns false.
5031 
5032  Asserts if hAllocation is already lost - you should not call it then.
5033  Also asserts if hAllocation was not created with CAN_BECOME_LOST_BIT.
5034  */
5035  bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
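  // Editorial illustration of the rule above: with frameInUseCount = 2 and
  // allocator.CurrentFrameIndex = 10, MakeLost() succeeds only for allocations
  // whose LastUseFrameIndex <= 7, because 7 + 2 < 10, while 8 + 2 == 10 fails
  // the strict comparison.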
5036 
5037  void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
5038  {
5039  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
5040  outInfo.blockCount = 1;
5041  outInfo.allocationCount = 1;
5042  outInfo.unusedRangeCount = 0;
5043  outInfo.usedBytes = m_Size;
5044  outInfo.unusedBytes = 0;
5045  outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
5046  outInfo.unusedRangeSizeMin = UINT64_MAX;
5047  outInfo.unusedRangeSizeMax = 0;
5048  }
5049 
5050  void BlockAllocMap();
5051  void BlockAllocUnmap();
5052  VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
5053  void DedicatedAllocUnmap(VmaAllocator hAllocator);
5054 
5055 #if VMA_STATS_STRING_ENABLED
5056  uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
5057  uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }
5058 
5059  void InitBufferImageUsage(uint32_t bufferImageUsage)
5060  {
5061  VMA_ASSERT(m_BufferImageUsage == 0);
5062  m_BufferImageUsage = bufferImageUsage;
5063  }
5064 
5065  void PrintParameters(class VmaJsonWriter& json) const;
5066 #endif
5067 
5068 private:
5069  VkDeviceSize m_Alignment;
5070  VkDeviceSize m_Size;
5071  void* m_pUserData;
5072  VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
5073  uint8_t m_Type; // ALLOCATION_TYPE
5074  uint8_t m_SuballocationType; // VmaSuballocationType
5075  // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
5076  // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
5077  uint8_t m_MapCount;
5078  uint8_t m_Flags; // enum FLAGS
5079 
5080  // Allocation out of VmaDeviceMemoryBlock.
5081  struct BlockAllocation
5082  {
5083  VmaPool m_hPool; // Null if belongs to general memory.
5084  VmaDeviceMemoryBlock* m_Block;
5085  VkDeviceSize m_Offset;
5086  bool m_CanBecomeLost;
5087  };
5088 
5089  // Allocation for an object that has its own private VkDeviceMemory.
5090  struct DedicatedAllocation
5091  {
5092  uint32_t m_MemoryTypeIndex;
5093  VkDeviceMemory m_hMemory;
5094  void* m_pMappedData; // Not null means memory is mapped.
5095  };
5096 
5097  union
5098  {
5099  // Allocation out of VmaDeviceMemoryBlock.
5100  BlockAllocation m_BlockAllocation;
5101  // Allocation for an object that has its own private VkDeviceMemory.
5102  DedicatedAllocation m_DedicatedAllocation;
5103  };
5104 
5105 #if VMA_STATS_STRING_ENABLED
5106  uint32_t m_CreationFrameIndex;
5107  uint32_t m_BufferImageUsage; // 0 if unknown.
5108 #endif
5109 
5110  void FreeUserDataString(VmaAllocator hAllocator);
5111 };
5112 
5113 /*
5114 Represents a region of VmaDeviceMemoryBlock that is either assigned and returned as
5115 an allocated memory block, or free.
5116 */
5117 struct VmaSuballocation
5118 {
5119  VkDeviceSize offset;
5120  VkDeviceSize size;
5121  VmaAllocation hAllocation;
5122  VmaSuballocationType type;
5123 };
5124 
5125 // Comparator for offsets.
5126 struct VmaSuballocationOffsetLess
5127 {
5128  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
5129  {
5130  return lhs.offset < rhs.offset;
5131  }
5132 };
5133 struct VmaSuballocationOffsetGreater
5134 {
5135  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
5136  {
5137  return lhs.offset > rhs.offset;
5138  }
5139 };
5140 
5141 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
5142 
5143 // Cost of making one additional allocation lost, expressed in bytes.
5144 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
5145 
5146 /*
5147 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
5148 
5149 If canMakeOtherLost was false:
5150 - item points to a FREE suballocation.
5151 - itemsToMakeLostCount is 0.
5152 
5153 If canMakeOtherLost was true:
5154 - item points to first of sequence of suballocations, which are either FREE,
5155  or point to VmaAllocations that can become lost.
5156 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
5157  the requested allocation to succeed.
5158 */
5159 struct VmaAllocationRequest
5160 {
5161  VkDeviceSize offset;
5162  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
5163  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
5164  VmaSuballocationList::iterator item;
5165  size_t itemsToMakeLostCount;
5166  void* customData;
5167 
5168  VkDeviceSize CalcCost() const
5169  {
5170  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
5171  }
5172 };
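/*
Editorial worked example for CalcCost(): with sumItemSize = 262144 (256 KiB of live
allocations overlapping the proposed range) and itemsToMakeLostCount = 2, the cost is
262144 + 2 * 1048576 = 2359296 bytes, so a candidate that sacrifices fewer allocations
wins even if it overlaps somewhat more used space.
*/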
5173 
5174 /*
5175 Data structure used for bookkeeping of allocations and unused ranges of memory
5176 in a single VkDeviceMemory block.
5177 */
5178 class VmaBlockMetadata
5179 {
5180 public:
5181  VmaBlockMetadata(VmaAllocator hAllocator);
5182  virtual ~VmaBlockMetadata() { }
5183  virtual void Init(VkDeviceSize size) { m_Size = size; }
5184 
5185  // Validates all data structures inside this object. If not valid, returns false.
5186  virtual bool Validate() const = 0;
5187  VkDeviceSize GetSize() const { return m_Size; }
5188  virtual size_t GetAllocationCount() const = 0;
5189  virtual VkDeviceSize GetSumFreeSize() const = 0;
5190  virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
5191  // Returns true if this block is empty - contains only a single free suballocation.
5192  virtual bool IsEmpty() const = 0;
5193 
5194  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
5195  // Adds stats of this metadata to inoutStats. Must not modify inoutStats.blockCount.
5196  virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;
5197 
5198 #if VMA_STATS_STRING_ENABLED
5199  virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
5200 #endif
5201 
5202  // Tries to find a place for suballocation with given parameters inside this block.
5203  // If succeeded, fills pAllocationRequest and returns true.
5204  // If failed, returns false.
5205  virtual bool CreateAllocationRequest(
5206  uint32_t currentFrameIndex,
5207  uint32_t frameInUseCount,
5208  VkDeviceSize bufferImageGranularity,
5209  VkDeviceSize allocSize,
5210  VkDeviceSize allocAlignment,
5211  bool upperAddress,
5212  VmaSuballocationType allocType,
5213  bool canMakeOtherLost,
5214  // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* or VMA_ALLOCATION_INTERNAL_STRATEGY_* flags.
5215  uint32_t strategy,
5216  VmaAllocationRequest* pAllocationRequest) = 0;
5217 
5218  virtual bool MakeRequestedAllocationsLost(
5219  uint32_t currentFrameIndex,
5220  uint32_t frameInUseCount,
5221  VmaAllocationRequest* pAllocationRequest) = 0;
5222 
5223  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;
5224 
5225  virtual VkResult CheckCorruption(const void* pBlockData) = 0;
5226 
5227  // Makes actual allocation based on request. Request must already be checked and valid.
5228  virtual void Alloc(
5229  const VmaAllocationRequest& request,
5230  VmaSuballocationType type,
5231  VkDeviceSize allocSize,
5232  bool upperAddress,
5233  VmaAllocation hAllocation) = 0;
5234 
5235  // Frees suballocation assigned to given memory region.
5236  virtual void Free(const VmaAllocation allocation) = 0;
5237  virtual void FreeAtOffset(VkDeviceSize offset) = 0;
5238 
5239  // Tries to resize (grow or shrink) space for given allocation, in place.
5240  virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize) { return false; }
5241 
5242 protected:
5243  const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }
5244 
5245 #if VMA_STATS_STRING_ENABLED
5246  void PrintDetailedMap_Begin(class VmaJsonWriter& json,
5247  VkDeviceSize unusedBytes,
5248  size_t allocationCount,
5249  size_t unusedRangeCount) const;
5250  void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
5251  VkDeviceSize offset,
5252  VmaAllocation hAllocation) const;
5253  void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
5254  VkDeviceSize offset,
5255  VkDeviceSize size) const;
5256  void PrintDetailedMap_End(class VmaJsonWriter& json) const;
5257 #endif
5258 
5259 private:
5260  VkDeviceSize m_Size;
5261  const VkAllocationCallbacks* m_pAllocationCallbacks;
5262 };
5263 
5264 #define VMA_VALIDATE(cond) do { if(!(cond)) { \
5265  VMA_ASSERT(0 && "Validation failed: " #cond); \
5266  return false; \
5267  } } while(false)
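// Editorial usage example: inside a Validate() implementation,
//     VMA_VALIDATE(m_SumFreeSize == calculatedSumFreeSize);
// asserts in debug configurations and makes the enclosing function return false
// when the condition does not hold.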
5268 
5269 class VmaBlockMetadata_Generic : public VmaBlockMetadata
5270 {
5271  VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
5272 public:
5273  VmaBlockMetadata_Generic(VmaAllocator hAllocator);
5274  virtual ~VmaBlockMetadata_Generic();
5275  virtual void Init(VkDeviceSize size);
5276 
5277  virtual bool Validate() const;
5278  virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
5279  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
5280  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
5281  virtual bool IsEmpty() const;
5282 
5283  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
5284  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
5285 
5286 #if VMA_STATS_STRING_ENABLED
5287  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
5288 #endif
5289 
5290  virtual bool CreateAllocationRequest(
5291  uint32_t currentFrameIndex,
5292  uint32_t frameInUseCount,
5293  VkDeviceSize bufferImageGranularity,
5294  VkDeviceSize allocSize,
5295  VkDeviceSize allocAlignment,
5296  bool upperAddress,
5297  VmaSuballocationType allocType,
5298  bool canMakeOtherLost,
5299  uint32_t strategy,
5300  VmaAllocationRequest* pAllocationRequest);
5301 
5302  virtual bool MakeRequestedAllocationsLost(
5303  uint32_t currentFrameIndex,
5304  uint32_t frameInUseCount,
5305  VmaAllocationRequest* pAllocationRequest);
5306 
5307  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5308 
5309  virtual VkResult CheckCorruption(const void* pBlockData);
5310 
5311  virtual void Alloc(
5312  const VmaAllocationRequest& request,
5313  VmaSuballocationType type,
5314  VkDeviceSize allocSize,
5315  bool upperAddress,
5316  VmaAllocation hAllocation);
5317 
5318  virtual void Free(const VmaAllocation allocation);
5319  virtual void FreeAtOffset(VkDeviceSize offset);
5320 
5321  virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize);
5322 
5323  ////////////////////////////////////////////////////////////////////////////////
5324  // For defragmentation
5325 
5326  bool IsBufferImageGranularityConflictPossible(
5327  VkDeviceSize bufferImageGranularity,
5328  VmaSuballocationType& inOutPrevSuballocType) const;
5329 
5330 private:
5331  friend class VmaDefragmentationAlgorithm_Generic;
5332  friend class VmaDefragmentationAlgorithm_Fast;
5333 
5334  uint32_t m_FreeCount;
5335  VkDeviceSize m_SumFreeSize;
5336  VmaSuballocationList m_Suballocations;
5337  // Suballocations that are free and have size greater than certain threshold.
5338  // Sorted by size, ascending.
5339  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
5340 
5341  bool ValidateFreeSuballocationList() const;
5342 
5343  // Checks if requested suballocation with given parameters can be placed in given suballocItem.
5344  // If yes, fills pOffset and returns true. If no, returns false.
5345  bool CheckAllocation(
5346  uint32_t currentFrameIndex,
5347  uint32_t frameInUseCount,
5348  VkDeviceSize bufferImageGranularity,
5349  VkDeviceSize allocSize,
5350  VkDeviceSize allocAlignment,
5351  VmaSuballocationType allocType,
5352  VmaSuballocationList::const_iterator suballocItem,
5353  bool canMakeOtherLost,
5354  VkDeviceSize* pOffset,
5355  size_t* itemsToMakeLostCount,
5356  VkDeviceSize* pSumFreeSize,
5357  VkDeviceSize* pSumItemSize) const;
5358  // Given a free suballocation, merges it with the following one, which must also be free.
5359  void MergeFreeWithNext(VmaSuballocationList::iterator item);
5360  // Releases given suballocation, making it free.
5361  // Merges it with adjacent free suballocations if applicable.
5362  // Returns iterator to new free suballocation at this place.
5363  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
5364  // Given a free suballocation, inserts it into the sorted list of
5365  // m_FreeSuballocationsBySize if it's suitable.
5366  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
5367  // Given a free suballocation, removes it from the sorted list of
5368  // m_FreeSuballocationsBySize if it's suitable.
5369  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
5370 };
5371 
5372 /*
5373 Allocations and their references in internal data structure look like this:
5374 
5375 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
5376 
5377  0 +-------+
5378  | |
5379  | |
5380  | |
5381  +-------+
5382  | Alloc | 1st[m_1stNullItemsBeginCount]
5383  +-------+
5384  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
5385  +-------+
5386  | ... |
5387  +-------+
5388  | Alloc | 1st[1st.size() - 1]
5389  +-------+
5390  | |
5391  | |
5392  | |
5393 GetSize() +-------+
5394 
5395 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
5396 
5397  0 +-------+
5398  | Alloc | 2nd[0]
5399  +-------+
5400  | Alloc | 2nd[1]
5401  +-------+
5402  | ... |
5403  +-------+
5404  | Alloc | 2nd[2nd.size() - 1]
5405  +-------+
5406  | |
5407  | |
5408  | |
5409  +-------+
5410  | Alloc | 1st[m_1stNullItemsBeginCount]
5411  +-------+
5412  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
5413  +-------+
5414  | ... |
5415  +-------+
5416  | Alloc | 1st[1st.size() - 1]
5417  +-------+
5418  | |
5419 GetSize() +-------+
5420 
5421 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
5422 
5423  0 +-------+
5424  | |
5425  | |
5426  | |
5427  +-------+
5428  | Alloc | 1st[m_1stNullItemsBeginCount]
5429  +-------+
5430  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
5431  +-------+
5432  | ... |
5433  +-------+
5434  | Alloc | 1st[1st.size() - 1]
5435  +-------+
5436  | |
5437  | |
5438  | |
5439  +-------+
5440  | Alloc | 2nd[2nd.size() - 1]
5441  +-------+
5442  | ... |
5443  +-------+
5444  | Alloc | 2nd[1]
5445  +-------+
5446  | Alloc | 2nd[0]
5447 GetSize() +-------+
5448 
5449 */
5450 class VmaBlockMetadata_Linear : public VmaBlockMetadata
5451 {
5452  VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
5453 public:
5454  VmaBlockMetadata_Linear(VmaAllocator hAllocator);
5455  virtual ~VmaBlockMetadata_Linear();
5456  virtual void Init(VkDeviceSize size);
5457 
5458  virtual bool Validate() const;
5459  virtual size_t GetAllocationCount() const;
5460  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
5461  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
5462  virtual bool IsEmpty() const { return GetAllocationCount() == 0; }
5463 
5464  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
5465  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
5466 
5467 #if VMA_STATS_STRING_ENABLED
5468  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
5469 #endif
5470 
5471  virtual bool CreateAllocationRequest(
5472  uint32_t currentFrameIndex,
5473  uint32_t frameInUseCount,
5474  VkDeviceSize bufferImageGranularity,
5475  VkDeviceSize allocSize,
5476  VkDeviceSize allocAlignment,
5477  bool upperAddress,
5478  VmaSuballocationType allocType,
5479  bool canMakeOtherLost,
5480  uint32_t strategy,
5481  VmaAllocationRequest* pAllocationRequest);
5482 
5483  virtual bool MakeRequestedAllocationsLost(
5484  uint32_t currentFrameIndex,
5485  uint32_t frameInUseCount,
5486  VmaAllocationRequest* pAllocationRequest);
5487 
5488  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5489 
5490  virtual VkResult CheckCorruption(const void* pBlockData);
5491 
5492  virtual void Alloc(
5493  const VmaAllocationRequest& request,
5494  VmaSuballocationType type,
5495  VkDeviceSize allocSize,
5496  bool upperAddress,
5497  VmaAllocation hAllocation);
5498 
5499  virtual void Free(const VmaAllocation allocation);
5500  virtual void FreeAtOffset(VkDeviceSize offset);
5501 
5502 private:
5503  /*
5504  There are two suballocation vectors, used in ping-pong way.
5505  The one with index m_1stVectorIndex is called 1st.
5506  The one with index (m_1stVectorIndex ^ 1) is called 2nd.
5507  2nd can be non-empty only when 1st is not empty.
5508  When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
5509  */
5510  typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;
5511 
5512  enum SECOND_VECTOR_MODE
5513  {
5514  SECOND_VECTOR_EMPTY,
5515  /*
5516  Suballocations in 2nd vector are created later than the ones in 1st, but they
5517  all have smaller offsets.
5518  */
5519  SECOND_VECTOR_RING_BUFFER,
5520  /*
5521  Suballocations in 2nd vector are upper side of double stack.
5522  They all have offsets higher than those in 1st vector.
5523  Top of this stack means smaller offsets, but higher indices in this vector.
5524  */
5525  SECOND_VECTOR_DOUBLE_STACK,
5526  };
5527 
5528  VkDeviceSize m_SumFreeSize;
5529  SuballocationVectorType m_Suballocations0, m_Suballocations1;
5530  uint32_t m_1stVectorIndex;
5531  SECOND_VECTOR_MODE m_2ndVectorMode;
5532 
5533  SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
5534  SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
5535  const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
5536  const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
5537 
5538  // Number of items in 1st vector with hAllocation = null at the beginning.
5539  size_t m_1stNullItemsBeginCount;
5540  // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
5541  size_t m_1stNullItemsMiddleCount;
5542  // Number of items in 2nd vector with hAllocation = null.
5543  size_t m_2ndNullItemsCount;
5544 
5545  bool ShouldCompact1st() const;
5546  void CleanupAfterFree();
5547 };
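/*
Editorial illustration of the ring-buffer mode pictured above (simplified timeline,
ignoring null-item compaction thresholds):

    alloc A, B, C     -> 1st = [A, B, C], 2nd empty
    free A            -> 1st = [null, B, C], m_1stNullItemsBeginCount = 1
    alloc D (wraps)   -> 2nd = [D], m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER
    free B, C         -> 1st empty, so the vectors swap roles (ping-pong via
                         m_1stVectorIndex) instead of copying elements.
*/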
5548 
5549 /*
5550 - GetSize() is the original size of allocated memory block.
5551 - m_UsableSize is this size aligned down to a power of two.
5552  All allocations and calculations happen relative to m_UsableSize.
5553 - GetUnusableSize() is the difference between them.
5554  It is reported as a separate, unused range, not available for allocations.
5555 
5556 Node at level 0 has size = m_UsableSize.
5557 Each next level contains nodes with half the size of the previous level.
5558 m_LevelCount is the maximum number of levels to use in the current object.
5559 */
5560 class VmaBlockMetadata_Buddy : public VmaBlockMetadata
5561 {
5562  VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
5563 public:
5564  VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
5565  virtual ~VmaBlockMetadata_Buddy();
5566  virtual void Init(VkDeviceSize size);
5567 
5568  virtual bool Validate() const;
5569  virtual size_t GetAllocationCount() const { return m_AllocationCount; }
5570  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
5571  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
5572  virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }
5573 
5574  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
5575  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
5576 
5577 #if VMA_STATS_STRING_ENABLED
5578  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
5579 #endif
5580 
5581  virtual bool CreateAllocationRequest(
5582  uint32_t currentFrameIndex,
5583  uint32_t frameInUseCount,
5584  VkDeviceSize bufferImageGranularity,
5585  VkDeviceSize allocSize,
5586  VkDeviceSize allocAlignment,
5587  bool upperAddress,
5588  VmaSuballocationType allocType,
5589  bool canMakeOtherLost,
5590  uint32_t strategy,
5591  VmaAllocationRequest* pAllocationRequest);
5592 
5593  virtual bool MakeRequestedAllocationsLost(
5594  uint32_t currentFrameIndex,
5595  uint32_t frameInUseCount,
5596  VmaAllocationRequest* pAllocationRequest);
5597 
5598  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5599 
5600  virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }
5601 
5602  virtual void Alloc(
5603  const VmaAllocationRequest& request,
5604  VmaSuballocationType type,
5605  VkDeviceSize allocSize,
5606  bool upperAddress,
5607  VmaAllocation hAllocation);
5608 
5609  virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
5610  virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }
5611 
5612 private:
5613  static const VkDeviceSize MIN_NODE_SIZE = 32;
5614  static const size_t MAX_LEVELS = 30;
5615 
5616  struct ValidationContext
5617  {
5618  size_t calculatedAllocationCount;
5619  size_t calculatedFreeCount;
5620  VkDeviceSize calculatedSumFreeSize;
5621 
5622  ValidationContext() :
5623  calculatedAllocationCount(0),
5624  calculatedFreeCount(0),
5625  calculatedSumFreeSize(0) { }
5626  };
5627 
5628  struct Node
5629  {
5630  VkDeviceSize offset;
5631  enum TYPE
5632  {
5633  TYPE_FREE,
5634  TYPE_ALLOCATION,
5635  TYPE_SPLIT,
5636  TYPE_COUNT
5637  } type;
5638  Node* parent;
5639  Node* buddy;
5640 
5641  union
5642  {
5643  struct
5644  {
5645  Node* prev;
5646  Node* next;
5647  } free;
5648  struct
5649  {
5650  VmaAllocation alloc;
5651  } allocation;
5652  struct
5653  {
5654  Node* leftChild;
5655  } split;
5656  };
5657  };
5658 
5659  // Size of the memory block aligned down to a power of two.
5660  VkDeviceSize m_UsableSize;
5661  uint32_t m_LevelCount;
5662 
5663  Node* m_Root;
5664  struct {
5665  Node* front;
5666  Node* back;
5667  } m_FreeList[MAX_LEVELS];
5668  // Number of nodes in the tree with type == TYPE_ALLOCATION.
5669  size_t m_AllocationCount;
5670  // Number of nodes in the tree with type == TYPE_FREE.
5671  size_t m_FreeCount;
5672  // This includes space wasted due to internal fragmentation. Doesn't include unusable size.
5673  VkDeviceSize m_SumFreeSize;
5674 
5675  VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
5676  void DeleteNode(Node* node);
5677  bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
5678  uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
5679  inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
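  // Editorial example: with m_UsableSize = 256 MiB, LevelToNodeSize(0) = 256 MiB,
  // LevelToNodeSize(1) = 128 MiB, LevelToNodeSize(3) = 32 MiB - each level halves
  // the node size, as described in the class comment above.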
5680  // Alloc passed just for validation. Can be null.
5681  void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
5682  void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
5683  // Adds node to the front of FreeList at given level.
5684  // node->type must be FREE.
5685  // node->free.prev, next can be undefined.
5686  void AddToFreeListFront(uint32_t level, Node* node);
5687  // Removes node from FreeList at given level.
5688  // node->type must be FREE.
5689  // node->free.prev, next stay untouched.
5690  void RemoveFromFreeList(uint32_t level, Node* node);
5691 
5692 #if VMA_STATS_STRING_ENABLED
5693  void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
5694 #endif
5695 };
5696 
5697 /*
5698 Represents a single block of device memory (`VkDeviceMemory`) with all the
5699 data about its regions (aka suballocations, #VmaAllocation), both assigned and free.
5700 
5701 Thread-safety: This class must be externally synchronized.
5702 */
5703 class VmaDeviceMemoryBlock
5704 {
5705  VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
5706 public:
5707  VmaBlockMetadata* m_pMetadata;
5708 
5709  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
5710 
5711  ~VmaDeviceMemoryBlock()
5712  {
5713  VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
5714  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
5715  }
5716 
5717  // Always call after construction.
5718  void Init(
5719  VmaAllocator hAllocator,
5720  uint32_t newMemoryTypeIndex,
5721  VkDeviceMemory newMemory,
5722  VkDeviceSize newSize,
5723  uint32_t id,
5724  uint32_t algorithm);
5725  // Always call before destruction.
5726  void Destroy(VmaAllocator allocator);
5727 
5728  VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
5729  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
5730  uint32_t GetId() const { return m_Id; }
5731  void* GetMappedData() const { return m_pMappedData; }
5732 
5733  // Validates all data structures inside this object. If not valid, returns false.
5734  bool Validate() const;
5735 
5736  VkResult CheckCorruption(VmaAllocator hAllocator);
5737 
5738  // ppData can be null.
5739  VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
5740  void Unmap(VmaAllocator hAllocator, uint32_t count);
5741 
5742  VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
5743  VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
5744 
5745  VkResult BindBufferMemory(
5746  const VmaAllocator hAllocator,
5747  const VmaAllocation hAllocation,
5748  VkBuffer hBuffer);
5749  VkResult BindImageMemory(
5750  const VmaAllocator hAllocator,
5751  const VmaAllocation hAllocation,
5752  VkImage hImage);
5753 
5754 private:
5755  uint32_t m_MemoryTypeIndex;
5756  uint32_t m_Id;
5757  VkDeviceMemory m_hMemory;
5758 
5759  /*
5760  Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
5761  Also protects m_MapCount, m_pMappedData.
5762  Allocations, deallocations, and any change in m_pMetadata are protected by the parent's VmaBlockVector::m_Mutex.
5763  */
5764  VMA_MUTEX m_Mutex;
5765  uint32_t m_MapCount;
5766  void* m_pMappedData;
5767 };
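/*
Editorial usage sketch (illustration only): Map() and Unmap() are reference-counted
through m_MapCount, so only the first Map() calls vkMapMemory and only the matching
last Unmap() calls vkUnmapMemory.

    void* pData = VMA_NULL;
    block.Map(hAllocator, 1, &pData);   // actually maps the VkDeviceMemory
    block.Map(hAllocator, 1, VMA_NULL); // nested: refcount only
    block.Unmap(hAllocator, 1);
    block.Unmap(hAllocator, 1);         // refcount reaches 0: actually unmaps
*/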
5768 
5769 struct VmaPointerLess
5770 {
5771  bool operator()(const void* lhs, const void* rhs) const
5772  {
5773  return lhs < rhs;
5774  }
5775 };
5776 
5777 struct VmaDefragmentationMove
5778 {
5779  size_t srcBlockIndex;
5780  size_t dstBlockIndex;
5781  VkDeviceSize srcOffset;
5782  VkDeviceSize dstOffset;
5783  VkDeviceSize size;
5784 };
5785 
5786 class VmaDefragmentationAlgorithm;
5787 
5788 /*
5789 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
5790 Vulkan memory type.
5791 
5792 Synchronized internally with a mutex.
5793 */
5794 struct VmaBlockVector
5795 {
5796  VMA_CLASS_NO_COPY(VmaBlockVector)
5797 public:
5798  VmaBlockVector(
5799  VmaAllocator hAllocator,
5800  uint32_t memoryTypeIndex,
5801  VkDeviceSize preferredBlockSize,
5802  size_t minBlockCount,
5803  size_t maxBlockCount,
5804  VkDeviceSize bufferImageGranularity,
5805  uint32_t frameInUseCount,
5806  bool isCustomPool,
5807  bool explicitBlockSize,
5808  uint32_t algorithm);
5809  ~VmaBlockVector();
5810 
5811  VkResult CreateMinBlocks();
5812 
5813  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
5814  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
5815  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
5816  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
5817  uint32_t GetAlgorithm() const { return m_Algorithm; }
5818 
5819  void GetPoolStats(VmaPoolStats* pStats);
5820 
5821  bool IsEmpty() const { return m_Blocks.empty(); }
5822  bool IsCorruptionDetectionEnabled() const;
5823 
5824  VkResult Allocate(
5825  VmaPool hCurrentPool,
5826  uint32_t currentFrameIndex,
5827  VkDeviceSize size,
5828  VkDeviceSize alignment,
5829  const VmaAllocationCreateInfo& createInfo,
5830  VmaSuballocationType suballocType,
5831  size_t allocationCount,
5832  VmaAllocation* pAllocations);
5833 
5834  void Free(
5835  VmaAllocation hAllocation);
5836 
5837  // Adds statistics of this BlockVector to pStats.
5838  void AddStats(VmaStats* pStats);
5839 
5840 #if VMA_STATS_STRING_ENABLED
5841  void PrintDetailedMap(class VmaJsonWriter& json);
5842 #endif
5843 
5844  void MakePoolAllocationsLost(
5845  uint32_t currentFrameIndex,
5846  size_t* pLostAllocationCount);
5847  VkResult CheckCorruption();
5848 
5849  // Saves results in pCtx->res.
5850  void Defragment(
5851  class VmaBlockVectorDefragmentationContext* pCtx,
5852  VmaDefragmentationStats* pStats,
5853  VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
5854  VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
5855  VkCommandBuffer commandBuffer);
5856  void DefragmentationEnd(
5857  class VmaBlockVectorDefragmentationContext* pCtx,
5858  VmaDefragmentationStats* pStats);
5859 
5860  ////////////////////////////////////////////////////////////////////////////////
5861  // To be used only while the m_Mutex is locked. Used during defragmentation.
5862 
5863  size_t GetBlockCount() const { return m_Blocks.size(); }
5864  VmaDeviceMemoryBlock* GetBlock(size_t index) const { return m_Blocks[index]; }
5865  size_t CalcAllocationCount() const;
5866  bool IsBufferImageGranularityConflictPossible() const;
5867 
5868 private:
5869  friend class VmaDefragmentationAlgorithm_Generic;
5870 
5871  const VmaAllocator m_hAllocator;
5872  const uint32_t m_MemoryTypeIndex;
5873  const VkDeviceSize m_PreferredBlockSize;
5874  const size_t m_MinBlockCount;
5875  const size_t m_MaxBlockCount;
5876  const VkDeviceSize m_BufferImageGranularity;
5877  const uint32_t m_FrameInUseCount;
5878  const bool m_IsCustomPool;
5879  const bool m_ExplicitBlockSize;
5880  const uint32_t m_Algorithm;
5881  /* There can be at most one block that is completely empty - a
5882  hysteresis to avoid the pessimistic case of alternately creating and destroying
5883  a VkDeviceMemory. */
5884  bool m_HasEmptyBlock;
5885  VMA_RW_MUTEX m_Mutex;
5886  // Incrementally sorted by sumFreeSize, ascending.
5887  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
5888  uint32_t m_NextBlockId;
5889 
5890  VkDeviceSize CalcMaxBlockSize() const;
5891 
5892  // Finds and removes given block from vector.
5893  void Remove(VmaDeviceMemoryBlock* pBlock);
5894 
5895  // Performs single step in sorting m_Blocks. They may not be fully sorted
5896  // after this call.
5897  void IncrementallySortBlocks();
5898 
5899  VkResult AllocatePage(
5900  VmaPool hCurrentPool,
5901  uint32_t currentFrameIndex,
5902  VkDeviceSize size,
5903  VkDeviceSize alignment,
5904  const VmaAllocationCreateInfo& createInfo,
5905  VmaSuballocationType suballocType,
5906  VmaAllocation* pAllocation);
5907 
5908  // To be used only without CAN_MAKE_OTHER_LOST flag.
5909  VkResult AllocateFromBlock(
5910  VmaDeviceMemoryBlock* pBlock,
5911  VmaPool hCurrentPool,
5912  uint32_t currentFrameIndex,
5913  VkDeviceSize size,
5914  VkDeviceSize alignment,
5915  VmaAllocationCreateFlags allocFlags,
5916  void* pUserData,
5917  VmaSuballocationType suballocType,
5918  uint32_t strategy,
5919  VmaAllocation* pAllocation);
5920 
5921  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
5922 
5923  // Saves result to pCtx->res.
5924  void ApplyDefragmentationMovesCpu(
5925  class VmaBlockVectorDefragmentationContext* pDefragCtx,
5926  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves);
5927  // Saves result to pCtx->res.
5928  void ApplyDefragmentationMovesGpu(
5929  class VmaBlockVectorDefragmentationContext* pDefragCtx,
5930  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
5931  VkCommandBuffer commandBuffer);
5932 
5933  /*
5934  Used during defragmentation. pDefragmentationStats is optional. It's in/out
5935  - updated with new data.
5936  */
5937  void FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats);
5938 };
5939 
5940 struct VmaPool_T
5941 {
5942  VMA_CLASS_NO_COPY(VmaPool_T)
5943 public:
5944  VmaBlockVector m_BlockVector;
5945 
5946  VmaPool_T(
5947  VmaAllocator hAllocator,
5948  const VmaPoolCreateInfo& createInfo,
5949  VkDeviceSize preferredBlockSize);
5950  ~VmaPool_T();
5951 
5952  uint32_t GetId() const { return m_Id; }
5953  void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }
5954 
5955 #if VMA_STATS_STRING_ENABLED
5956  //void PrintDetailedMap(class VmaStringBuilder& sb);
5957 #endif
5958 
5959 private:
5960  uint32_t m_Id;
5961 };
5962 
5963 /*
5964 Performs defragmentation:
5965 
5966 - Updates `pBlockVector->m_pMetadata`.
5967 - Updates allocations by calling ChangeBlockAllocation() or ChangeOffset().
5968 - Does not move actual data, only returns requested moves as `moves`.
5969 */
5970 class VmaDefragmentationAlgorithm
5971 {
5972  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm)
5973 public:
5974  VmaDefragmentationAlgorithm(
5975  VmaAllocator hAllocator,
5976  VmaBlockVector* pBlockVector,
5977  uint32_t currentFrameIndex) :
5978  m_hAllocator(hAllocator),
5979  m_pBlockVector(pBlockVector),
5980  m_CurrentFrameIndex(currentFrameIndex)
5981  {
5982  }
5983  virtual ~VmaDefragmentationAlgorithm()
5984  {
5985  }
5986 
5987  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) = 0;
5988  virtual void AddAll() = 0;
5989 
5990  virtual VkResult Defragment(
5991  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
5992  VkDeviceSize maxBytesToMove,
5993  uint32_t maxAllocationsToMove) = 0;
5994 
5995  virtual VkDeviceSize GetBytesMoved() const = 0;
5996  virtual uint32_t GetAllocationsMoved() const = 0;
5997 
5998 protected:
5999  VmaAllocator const m_hAllocator;
6000  VmaBlockVector* const m_pBlockVector;
6001  const uint32_t m_CurrentFrameIndex;
6002 
6003  struct AllocationInfo
6004  {
6005  VmaAllocation m_hAllocation;
6006  VkBool32* m_pChanged;
6007 
6008  AllocationInfo() :
6009  m_hAllocation(VK_NULL_HANDLE),
6010  m_pChanged(VMA_NULL)
6011  {
6012  }
6013  AllocationInfo(VmaAllocation hAlloc, VkBool32* pChanged) :
6014  m_hAllocation(hAlloc),
6015  m_pChanged(pChanged)
6016  {
6017  }
6018  };
6019 };
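/*
Editorial sketch (not the library's actual driver - that lives in
VmaBlockVector::ApplyDefragmentationMovesCpu/Gpu): Defragment() only updates metadata
and reports the requested moves, so a caller must copy the bytes itself.

    VmaStlAllocator<VmaDefragmentationMove> moveAlloc(pCallbacks);
    VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> > moves(moveAlloc);
    VkResult res = pAlgorithm->Defragment(moves, maxBytesToMove, maxAllocationsToMove);
    for(size_t i = 0; res == VK_SUCCESS && i < moves.size(); ++i)
    {
        // Copy moves[i].size bytes from block[moves[i].srcBlockIndex] at
        // moves[i].srcOffset to block[moves[i].dstBlockIndex] at moves[i].dstOffset,
        // e.g. memmove on mapped pointers (CPU) or vkCmdCopyBuffer (GPU).
    }
*/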
6020 
6021 class VmaDefragmentationAlgorithm_Generic : public VmaDefragmentationAlgorithm
6022 {
6023  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Generic)
6024 public:
6025  VmaDefragmentationAlgorithm_Generic(
6026  VmaAllocator hAllocator,
6027  VmaBlockVector* pBlockVector,
6028  uint32_t currentFrameIndex,
6029  bool overlappingMoveSupported);
6030  virtual ~VmaDefragmentationAlgorithm_Generic();
6031 
6032  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
6033  virtual void AddAll() { m_AllAllocations = true; }
6034 
6035  virtual VkResult Defragment(
6036  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6037  VkDeviceSize maxBytesToMove,
6038  uint32_t maxAllocationsToMove);
6039 
6040  virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
6041  virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
6042 
6043 private:
6044  uint32_t m_AllocationCount;
6045  bool m_AllAllocations;
6046 
6047  VkDeviceSize m_BytesMoved;
6048  uint32_t m_AllocationsMoved;
6049 
6050  struct AllocationInfoSizeGreater
6051  {
6052  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
6053  {
6054  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
6055  }
6056  };
6057 
6058  struct AllocationInfoOffsetGreater
6059  {
6060  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
6061  {
6062  return lhs.m_hAllocation->GetOffset() > rhs.m_hAllocation->GetOffset();
6063  }
6064  };
6065 
6066  struct BlockInfo
6067  {
6068  size_t m_OriginalBlockIndex;
6069  VmaDeviceMemoryBlock* m_pBlock;
6070  bool m_HasNonMovableAllocations;
6071  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
6072 
6073  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
6074  m_OriginalBlockIndex(SIZE_MAX),
6075  m_pBlock(VMA_NULL),
6076  m_HasNonMovableAllocations(true),
6077  m_Allocations(pAllocationCallbacks)
6078  {
6079  }
6080 
6081  void CalcHasNonMovableAllocations()
6082  {
6083  const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
6084  const size_t defragmentAllocCount = m_Allocations.size();
6085  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
6086  }
6087 
6088  void SortAllocationsBySizeDescending()
6089  {
6090  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
6091  }
6092 
6093  void SortAllocationsByOffsetDescending()
6094  {
6095  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoOffsetGreater());
6096  }
6097  };
6098 
6099  struct BlockPointerLess
6100  {
6101  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
6102  {
6103  return pLhsBlockInfo->m_pBlock < pRhsBlock;
6104  }
6105  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
6106  {
6107  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
6108  }
6109  };
6110 
6111  // 1. Blocks with some non-movable allocations go first.
6112  // 2. Blocks with smaller sumFreeSize go first.
6113  struct BlockInfoCompareMoveDestination
6114  {
6115  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
6116  {
6117  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
6118  {
6119  return true;
6120  }
6121  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
6122  {
6123  return false;
6124  }
6125  if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
6126  {
6127  return true;
6128  }
6129  return false;
6130  }
6131  };
6132 
6133  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
6134  BlockInfoVector m_Blocks;
6135 
6136  VkResult DefragmentRound(
6137  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6138  VkDeviceSize maxBytesToMove,
6139  uint32_t maxAllocationsToMove);
6140 
6141  size_t CalcBlocksWithNonMovableCount() const;
6142 
6143  static bool MoveMakesSense(
6144  size_t dstBlockIndex, VkDeviceSize dstOffset,
6145  size_t srcBlockIndex, VkDeviceSize srcOffset);
6146 };
6147 
6148 class VmaDefragmentationAlgorithm_Fast : public VmaDefragmentationAlgorithm
6149 {
6150  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Fast)
6151 public:
6152  VmaDefragmentationAlgorithm_Fast(
6153  VmaAllocator hAllocator,
6154  VmaBlockVector* pBlockVector,
6155  uint32_t currentFrameIndex,
6156  bool overlappingMoveSupported);
6157  virtual ~VmaDefragmentationAlgorithm_Fast();
6158 
6159  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) { ++m_AllocationCount; }
6160  virtual void AddAll() { m_AllAllocations = true; }
6161 
6162  virtual VkResult Defragment(
6163  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6164  VkDeviceSize maxBytesToMove,
6165  uint32_t maxAllocationsToMove);
6166 
6167  virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
6168  virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
6169 
6170 private:
6171  struct BlockInfo
6172  {
6173  size_t origBlockIndex;
6174  };
6175 
6176  class FreeSpaceDatabase
6177  {
6178  public:
6179  FreeSpaceDatabase()
6180  {
6181  FreeSpace s = {};
6182  s.blockInfoIndex = SIZE_MAX;
6183  for(size_t i = 0; i < MAX_COUNT; ++i)
6184  {
6185  m_FreeSpaces[i] = s;
6186  }
6187  }
6188 
6189  void Register(size_t blockInfoIndex, VkDeviceSize offset, VkDeviceSize size)
6190  {
6191  if(size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
6192  {
6193  return;
6194  }
6195 
6196  // Find the first invalid (empty) structure, or the smallest one.
6197  size_t bestIndex = SIZE_MAX;
6198  for(size_t i = 0; i < MAX_COUNT; ++i)
6199  {
6200  // Empty structure.
6201  if(m_FreeSpaces[i].blockInfoIndex == SIZE_MAX)
6202  {
6203  bestIndex = i;
6204  break;
6205  }
6206  if(m_FreeSpaces[i].size < size &&
6207  (bestIndex == SIZE_MAX || m_FreeSpaces[bestIndex].size > m_FreeSpaces[i].size))
6208  {
6209  bestIndex = i;
6210  }
6211  }
6212 
6213  if(bestIndex != SIZE_MAX)
6214  {
6215  m_FreeSpaces[bestIndex].blockInfoIndex = blockInfoIndex;
6216  m_FreeSpaces[bestIndex].offset = offset;
6217  m_FreeSpaces[bestIndex].size = size;
6218  }
6219  }
6220 
6221  bool Fetch(VkDeviceSize alignment, VkDeviceSize size,
6222  size_t& outBlockInfoIndex, VkDeviceSize& outDstOffset)
6223  {
6224  size_t bestIndex = SIZE_MAX;
6225  VkDeviceSize bestFreeSpaceAfter = 0;
6226  for(size_t i = 0; i < MAX_COUNT; ++i)
6227  {
6228  // Structure is valid.
6229  if(m_FreeSpaces[i].blockInfoIndex != SIZE_MAX)
6230  {
6231  const VkDeviceSize dstOffset = VmaAlignUp(m_FreeSpaces[i].offset, alignment);
6232  // Allocation fits into this structure.
6233  if(dstOffset + size <= m_FreeSpaces[i].offset + m_FreeSpaces[i].size)
6234  {
6235  const VkDeviceSize freeSpaceAfter = (m_FreeSpaces[i].offset + m_FreeSpaces[i].size) -
6236  (dstOffset + size);
6237  if(bestIndex == SIZE_MAX || freeSpaceAfter > bestFreeSpaceAfter)
6238  {
6239  bestIndex = i;
6240  bestFreeSpaceAfter = freeSpaceAfter;
6241  }
6242  }
6243  }
6244  }
6245 
6246  if(bestIndex != SIZE_MAX)
6247  {
6248  outBlockInfoIndex = m_FreeSpaces[bestIndex].blockInfoIndex;
6249  outDstOffset = VmaAlignUp(m_FreeSpaces[bestIndex].offset, alignment);
6250 
6251  if(bestFreeSpaceAfter >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
6252  {
6253  // Leave this structure for remaining empty space.
6254  const VkDeviceSize alignmentPlusSize = (outDstOffset - m_FreeSpaces[bestIndex].offset) + size;
6255  m_FreeSpaces[bestIndex].offset += alignmentPlusSize;
6256  m_FreeSpaces[bestIndex].size -= alignmentPlusSize;
6257  }
6258  else
6259  {
6260  // This structure becomes invalid.
6261  m_FreeSpaces[bestIndex].blockInfoIndex = SIZE_MAX;
6262  }
6263 
6264  return true;
6265  }
6266 
6267  return false;
6268  }
6269 
6270  private:
6271  static const size_t MAX_COUNT = 4;
6272 
6273  struct FreeSpace
6274  {
6275  size_t blockInfoIndex; // SIZE_MAX means this structure is invalid.
6276  VkDeviceSize offset;
6277  VkDeviceSize size;
6278  } m_FreeSpaces[MAX_COUNT];
6279  };
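  // Editorial illustration: Fetch() prefers the candidate range that leaves the
  // most space after the aligned allocation. E.g. fetching size = 64 with
  // alignment = 16 from {offset = 8, size = 100} yields outDstOffset = 16 and,
  // since the 28 remaining bytes exceed VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER,
  // shrinks the entry to {offset = 80, size = 28} instead of invalidating it.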
6280 
6281  const bool m_OverlappingMoveSupported;
6282 
6283  uint32_t m_AllocationCount;
6284  bool m_AllAllocations;
6285 
6286  VkDeviceSize m_BytesMoved;
6287  uint32_t m_AllocationsMoved;
6288 
6289  VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> > m_BlockInfos;
6290 
6291  void PreprocessMetadata();
6292  void PostprocessMetadata();
6293  void InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc);
6294 };
6295 
6296 struct VmaBlockDefragmentationContext
6297 {
6298  enum BLOCK_FLAG
6299  {
6300  BLOCK_FLAG_USED = 0x00000001,
6301  };
6302  uint32_t flags;
6303  VkBuffer hBuffer;
6304 
6305  VmaBlockDefragmentationContext() :
6306  flags(0),
6307  hBuffer(VK_NULL_HANDLE)
6308  {
6309  }
6310 };
6311 
6312 class VmaBlockVectorDefragmentationContext
6313 {
6314  VMA_CLASS_NO_COPY(VmaBlockVectorDefragmentationContext)
6315 public:
6316  VkResult res;
6317  bool mutexLocked;
6318  VmaVector< VmaBlockDefragmentationContext, VmaStlAllocator<VmaBlockDefragmentationContext> > blockContexts;
6319 
6320  VmaBlockVectorDefragmentationContext(
6321  VmaAllocator hAllocator,
6322  VmaPool hCustomPool, // Optional.
6323  VmaBlockVector* pBlockVector,
6324  uint32_t currFrameIndex,
6325  uint32_t flags);
6326  ~VmaBlockVectorDefragmentationContext();
6327 
6328  VmaPool GetCustomPool() const { return m_hCustomPool; }
6329  VmaBlockVector* GetBlockVector() const { return m_pBlockVector; }
6330  VmaDefragmentationAlgorithm* GetAlgorithm() const { return m_pAlgorithm; }
6331 
6332  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
6333  void AddAll() { m_AllAllocations = true; }
6334 
6335  void Begin(bool overlappingMoveSupported);
6336 
6337 private:
6338  const VmaAllocator m_hAllocator;
6339  // Null if not from custom pool.
6340  const VmaPool m_hCustomPool;
6341  // Redundant, stored for convenience so it doesn't have to be fetched from m_hCustomPool->m_BlockVector or m_hAllocator->m_pBlockVectors.
6342  VmaBlockVector* const m_pBlockVector;
6343  const uint32_t m_CurrFrameIndex;
6344  const uint32_t m_AlgorithmFlags;
6345  // Owner of this object.
6346  VmaDefragmentationAlgorithm* m_pAlgorithm;
6347 
6348  struct AllocInfo
6349  {
6350  VmaAllocation hAlloc;
6351  VkBool32* pChanged;
6352  };
6353  // Used between constructor and Begin.
6354  VmaVector< AllocInfo, VmaStlAllocator<AllocInfo> > m_Allocations;
6355  bool m_AllAllocations;
6356 };
6357 
6358 struct VmaDefragmentationContext_T
6359 {
6360 private:
6361  VMA_CLASS_NO_COPY(VmaDefragmentationContext_T)
6362 public:
6363  VmaDefragmentationContext_T(
6364  VmaAllocator hAllocator,
6365  uint32_t currFrameIndex,
6366  uint32_t flags,
6367  VmaDefragmentationStats* pStats);
6368  ~VmaDefragmentationContext_T();
6369 
6370  void AddPools(uint32_t poolCount, VmaPool* pPools);
6371  void AddAllocations(
6372  uint32_t allocationCount,
6373  VmaAllocation* pAllocations,
6374  VkBool32* pAllocationsChanged);
6375 
6376  /*
6377  Returns:
6378  - `VK_SUCCESS` if succeeded and the object can be destroyed immediately.
6379  - `VK_NOT_READY` if succeeded but the object must remain alive until vmaDefragmentationEnd().
6380  - Negative value if an error occurred and the object can be destroyed immediately.
6381  */
6382  VkResult Defragment(
6383  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
6384  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
6385  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats);
6386 
6387 private:
6388  const VmaAllocator m_hAllocator;
6389  const uint32_t m_CurrFrameIndex;
6390  const uint32_t m_Flags;
6391  VmaDefragmentationStats* const m_pStats;
6392  // Owner of these objects.
6393  VmaBlockVectorDefragmentationContext* m_DefaultPoolContexts[VK_MAX_MEMORY_TYPES];
6394  // Owner of these objects.
6395  VmaVector< VmaBlockVectorDefragmentationContext*, VmaStlAllocator<VmaBlockVectorDefragmentationContext*> > m_CustomPoolContexts;
6396 };
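// A minimal usage sketch of the public API that drives this context. The
// `allocator` and `allocations` handles below are hypothetical and error
// handling is omitted:
//
//     std::vector<VmaAllocation> allocations = /* allocations to defragment */;
//     VmaDefragmentationInfo2 defragInfo = {};
//     defragInfo.allocationCount = (uint32_t)allocations.size();
//     defragInfo.pAllocations = allocations.data();
//     defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
//     defragInfo.maxCpuAllocationsToMove = UINT32_MAX;
//     VmaDefragmentationContext defragCtx = VK_NULL_HANDLE;
//     VkResult res = vmaDefragmentationBegin(allocator, &defragInfo, VMA_NULL, &defragCtx);
//     // Per the contract documented in Defragment() above, VK_NOT_READY means
//     // the context must stay alive until vmaDefragmentationEnd().
//     vmaDefragmentationEnd(allocator, defragCtx);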
6397 
6398 #if VMA_RECORDING_ENABLED
6399 
6400 class VmaRecorder
6401 {
6402 public:
6403  VmaRecorder();
6404  VkResult Init(const VmaRecordSettings& settings, bool useMutex);
6405  void WriteConfiguration(
6406  const VkPhysicalDeviceProperties& devProps,
6407  const VkPhysicalDeviceMemoryProperties& memProps,
6408  bool dedicatedAllocationExtensionEnabled);
6409  ~VmaRecorder();
6410 
6411  void RecordCreateAllocator(uint32_t frameIndex);
6412  void RecordDestroyAllocator(uint32_t frameIndex);
6413  void RecordCreatePool(uint32_t frameIndex,
6414  const VmaPoolCreateInfo& createInfo,
6415  VmaPool pool);
6416  void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
6417  void RecordAllocateMemory(uint32_t frameIndex,
6418  const VkMemoryRequirements& vkMemReq,
6419  const VmaAllocationCreateInfo& createInfo,
6420  VmaAllocation allocation);
6421  void RecordAllocateMemoryPages(uint32_t frameIndex,
6422  const VkMemoryRequirements& vkMemReq,
6423  const VmaAllocationCreateInfo& createInfo,
6424  uint64_t allocationCount,
6425  const VmaAllocation* pAllocations);
6426  void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
6427  const VkMemoryRequirements& vkMemReq,
6428  bool requiresDedicatedAllocation,
6429  bool prefersDedicatedAllocation,
6430  const VmaAllocationCreateInfo& createInfo,
6431  VmaAllocation allocation);
6432  void RecordAllocateMemoryForImage(uint32_t frameIndex,
6433  const VkMemoryRequirements& vkMemReq,
6434  bool requiresDedicatedAllocation,
6435  bool prefersDedicatedAllocation,
6436  const VmaAllocationCreateInfo& createInfo,
6437  VmaAllocation allocation);
6438  void RecordFreeMemory(uint32_t frameIndex,
6439  VmaAllocation allocation);
6440  void RecordFreeMemoryPages(uint32_t frameIndex,
6441  uint64_t allocationCount,
6442  const VmaAllocation* pAllocations);
6443  void RecordResizeAllocation(
6444  uint32_t frameIndex,
6445  VmaAllocation allocation,
6446  VkDeviceSize newSize);
6447  void RecordSetAllocationUserData(uint32_t frameIndex,
6448  VmaAllocation allocation,
6449  const void* pUserData);
6450  void RecordCreateLostAllocation(uint32_t frameIndex,
6451  VmaAllocation allocation);
6452  void RecordMapMemory(uint32_t frameIndex,
6453  VmaAllocation allocation);
6454  void RecordUnmapMemory(uint32_t frameIndex,
6455  VmaAllocation allocation);
6456  void RecordFlushAllocation(uint32_t frameIndex,
6457  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
6458  void RecordInvalidateAllocation(uint32_t frameIndex,
6459  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
6460  void RecordCreateBuffer(uint32_t frameIndex,
6461  const VkBufferCreateInfo& bufCreateInfo,
6462  const VmaAllocationCreateInfo& allocCreateInfo,
6463  VmaAllocation allocation);
6464  void RecordCreateImage(uint32_t frameIndex,
6465  const VkImageCreateInfo& imageCreateInfo,
6466  const VmaAllocationCreateInfo& allocCreateInfo,
6467  VmaAllocation allocation);
6468  void RecordDestroyBuffer(uint32_t frameIndex,
6469  VmaAllocation allocation);
6470  void RecordDestroyImage(uint32_t frameIndex,
6471  VmaAllocation allocation);
6472  void RecordTouchAllocation(uint32_t frameIndex,
6473  VmaAllocation allocation);
6474  void RecordGetAllocationInfo(uint32_t frameIndex,
6475  VmaAllocation allocation);
6476  void RecordMakePoolAllocationsLost(uint32_t frameIndex,
6477  VmaPool pool);
6478  void RecordDefragmentationBegin(uint32_t frameIndex,
6479  const VmaDefragmentationInfo2& info,
 6480  VmaDefragmentationContext ctx);
 6481  void RecordDefragmentationEnd(uint32_t frameIndex,
 6482  VmaDefragmentationContext ctx);
 6483 
6484 private:
6485  struct CallParams
6486  {
6487  uint32_t threadId;
6488  double time;
6489  };
6490 
6491  class UserDataString
6492  {
6493  public:
6494  UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
6495  const char* GetString() const { return m_Str; }
6496 
6497  private:
6498  char m_PtrStr[17];
6499  const char* m_Str;
6500  };
6501 
6502  bool m_UseMutex;
6503  VmaRecordFlags m_Flags;
6504  FILE* m_File;
6505  VMA_MUTEX m_FileMutex;
6506  int64_t m_Freq;
6507  int64_t m_StartCounter;
6508 
6509  void GetBasicParams(CallParams& outParams);
6510 
6511  // T must be a pointer type, e.g. VmaAllocation, VmaPool.
6512  template<typename T>
6513  void PrintPointerList(uint64_t count, const T* pItems)
6514  {
6515  if(count)
6516  {
6517  fprintf(m_File, "%p", pItems[0]);
6518  for(uint64_t i = 1; i < count; ++i)
6519  {
6520  fprintf(m_File, " %p", pItems[i]);
6521  }
6522  }
6523  }
6524 
6525  void PrintPointerList(uint64_t count, const VmaAllocation* pItems);
6526  void Flush();
6527 };
6528 
6529 #endif // #if VMA_RECORDING_ENABLED
6530 
6531 // Main allocator object.
6532 struct VmaAllocator_T
6533 {
6534  VMA_CLASS_NO_COPY(VmaAllocator_T)
6535 public:
6536  bool m_UseMutex;
6537  bool m_UseKhrDedicatedAllocation;
6538  VkDevice m_hDevice;
6539  bool m_AllocationCallbacksSpecified;
6540  VkAllocationCallbacks m_AllocationCallbacks;
6541  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
6542 
6543  // Number of bytes still free out of the heap size limit, or VK_WHOLE_SIZE if that heap has no limit.
6544  VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
6545  VMA_MUTEX m_HeapSizeLimitMutex;
6546 
6547  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
6548  VkPhysicalDeviceMemoryProperties m_MemProps;
6549 
6550  // Default pools.
6551  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
6552 
6553  // Each vector is sorted by memory (handle value).
6554  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
6555  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
6556  VMA_RW_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
6557 
6558  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
6559  VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
6560  ~VmaAllocator_T();
6561 
6562  const VkAllocationCallbacks* GetAllocationCallbacks() const
6563  {
6564  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
6565  }
6566  const VmaVulkanFunctions& GetVulkanFunctions() const
6567  {
6568  return m_VulkanFunctions;
6569  }
6570 
6571  VkDeviceSize GetBufferImageGranularity() const
6572  {
6573  return VMA_MAX(
6574  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
6575  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
6576  }
6577 
6578  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
6579  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
6580 
6581  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
6582  {
6583  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
6584  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
6585  }
6586  // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
6587  bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
6588  {
6589  return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
6590  VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
6591  }
6592  // Minimum alignment for all allocations in specific memory type.
6593  VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
6594  {
6595  return IsMemoryTypeNonCoherent(memTypeIndex) ?
6596  VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
6597  (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
6598  }
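// Rationale: the Vulkan spec requires ranges passed to
// vkFlushMappedMemoryRanges / vkInvalidateMappedMemoryRanges on
// non-HOST_COHERENT memory to be aligned to
// VkPhysicalDeviceLimits::nonCoherentAtomSize, so allocations in such memory
// types are over-aligned up front, which keeps per-allocation flush and
// invalidate (see FlushOrInvalidateAllocation below) valid.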
6599 
6600  bool IsIntegratedGpu() const
6601  {
6602  return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
6603  }
6604 
6605 #if VMA_RECORDING_ENABLED
6606  VmaRecorder* GetRecorder() const { return m_pRecorder; }
6607 #endif
6608 
6609  void GetBufferMemoryRequirements(
6610  VkBuffer hBuffer,
6611  VkMemoryRequirements& memReq,
6612  bool& requiresDedicatedAllocation,
6613  bool& prefersDedicatedAllocation) const;
6614  void GetImageMemoryRequirements(
6615  VkImage hImage,
6616  VkMemoryRequirements& memReq,
6617  bool& requiresDedicatedAllocation,
6618  bool& prefersDedicatedAllocation) const;
6619 
6620  // Main allocation function.
6621  VkResult AllocateMemory(
6622  const VkMemoryRequirements& vkMemReq,
6623  bool requiresDedicatedAllocation,
6624  bool prefersDedicatedAllocation,
6625  VkBuffer dedicatedBuffer,
6626  VkImage dedicatedImage,
6627  const VmaAllocationCreateInfo& createInfo,
6628  VmaSuballocationType suballocType,
6629  size_t allocationCount,
6630  VmaAllocation* pAllocations);
6631 
6632  // Main deallocation function.
6633  void FreeMemory(
6634  size_t allocationCount,
6635  const VmaAllocation* pAllocations);
6636 
6637  VkResult ResizeAllocation(
6638  const VmaAllocation alloc,
6639  VkDeviceSize newSize);
6640 
6641  void CalculateStats(VmaStats* pStats);
6642 
6643 #if VMA_STATS_STRING_ENABLED
6644  void PrintDetailedMap(class VmaJsonWriter& json);
6645 #endif
6646 
6647  VkResult DefragmentationBegin(
6648  const VmaDefragmentationInfo2& info,
6649  VmaDefragmentationStats* pStats,
6650  VmaDefragmentationContext* pContext);
6651  VkResult DefragmentationEnd(
6652  VmaDefragmentationContext context);
6653 
6654  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
6655  bool TouchAllocation(VmaAllocation hAllocation);
6656 
6657  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
6658  void DestroyPool(VmaPool pool);
6659  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
6660 
6661  void SetCurrentFrameIndex(uint32_t frameIndex);
6662  uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }
6663 
6664  void MakePoolAllocationsLost(
6665  VmaPool hPool,
6666  size_t* pLostAllocationCount);
6667  VkResult CheckPoolCorruption(VmaPool hPool);
6668  VkResult CheckCorruption(uint32_t memoryTypeBits);
6669 
6670  void CreateLostAllocation(VmaAllocation* pAllocation);
6671 
6672  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
6673  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
6674 
6675  VkResult Map(VmaAllocation hAllocation, void** ppData);
6676  void Unmap(VmaAllocation hAllocation);
6677 
6678  VkResult BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer);
6679  VkResult BindImageMemory(VmaAllocation hAllocation, VkImage hImage);
6680 
6681  void FlushOrInvalidateAllocation(
6682  VmaAllocation hAllocation,
6683  VkDeviceSize offset, VkDeviceSize size,
6684  VMA_CACHE_OPERATION op);
6685 
6686  void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);
6687 
6688 private:
6689  VkDeviceSize m_PreferredLargeHeapBlockSize;
6690 
6691  VkPhysicalDevice m_PhysicalDevice;
6692  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
6693 
6694  VMA_RW_MUTEX m_PoolsMutex;
6695  // Protected by m_PoolsMutex. Sorted by pointer value.
6696  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
6697  uint32_t m_NextPoolId;
6698 
6699  VmaVulkanFunctions m_VulkanFunctions;
6700 
6701 #if VMA_RECORDING_ENABLED
6702  VmaRecorder* m_pRecorder;
6703 #endif
6704 
6705  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
6706 
6707  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
6708 
6709  VkResult AllocateMemoryOfType(
6710  VkDeviceSize size,
6711  VkDeviceSize alignment,
6712  bool dedicatedAllocation,
6713  VkBuffer dedicatedBuffer,
6714  VkImage dedicatedImage,
6715  const VmaAllocationCreateInfo& createInfo,
6716  uint32_t memTypeIndex,
6717  VmaSuballocationType suballocType,
6718  size_t allocationCount,
6719  VmaAllocation* pAllocations);
6720 
6721  // Helper function only to be used inside AllocateDedicatedMemory.
6722  VkResult AllocateDedicatedMemoryPage(
6723  VkDeviceSize size,
6724  VmaSuballocationType suballocType,
6725  uint32_t memTypeIndex,
6726  const VkMemoryAllocateInfo& allocInfo,
6727  bool map,
6728  bool isUserDataString,
6729  void* pUserData,
6730  VmaAllocation* pAllocation);
6731 
6732  // Allocates and registers new VkDeviceMemory specifically for dedicated allocations.
6733  VkResult AllocateDedicatedMemory(
6734  VkDeviceSize size,
6735  VmaSuballocationType suballocType,
6736  uint32_t memTypeIndex,
6737  bool map,
6738  bool isUserDataString,
6739  void* pUserData,
6740  VkBuffer dedicatedBuffer,
6741  VkImage dedicatedImage,
6742  size_t allocationCount,
6743  VmaAllocation* pAllocations);
6744 
6745  // Frees allocation made as dedicated memory: unregisters it and frees its VkDeviceMemory.
6746  void FreeDedicatedMemory(VmaAllocation allocation);
6747 };
6748 
6749 ////////////////////////////////////////////////////////////////////////////////
6750 // Memory allocation #2 after VmaAllocator_T definition
6751 
6752 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
6753 {
6754  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
6755 }
6756 
6757 static void VmaFree(VmaAllocator hAllocator, void* ptr)
6758 {
6759  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
6760 }
6761 
6762 template<typename T>
6763 static T* VmaAllocate(VmaAllocator hAllocator)
6764 {
6765  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
6766 }
6767 
6768 template<typename T>
6769 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
6770 {
6771  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
6772 }
6773 
6774 template<typename T>
6775 static void vma_delete(VmaAllocator hAllocator, T* ptr)
6776 {
6777  if(ptr != VMA_NULL)
6778  {
6779  ptr->~T();
6780  VmaFree(hAllocator, ptr);
6781  }
6782 }
6783 
6784 template<typename T>
6785 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
6786 {
6787  if(ptr != VMA_NULL)
6788  {
6789  for(size_t i = count; i--; )
6790  ptr[i].~T();
6791  VmaFree(hAllocator, ptr);
6792  }
6793 }
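// These are the freeing counterparts of the vma_new / vma_new_array helpers
// used elsewhere in this file: an explicit destructor call (in reverse order
// of construction for arrays, mirroring built-in delete[]) followed by a raw
// free through the allocator's VkAllocationCallbacks.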
6794 
6795 ////////////////////////////////////////////////////////////////////////////////
6796 // VmaStringBuilder
6797 
6798 #if VMA_STATS_STRING_ENABLED
6799 
6800 class VmaStringBuilder
6801 {
6802 public:
6803  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
6804  size_t GetLength() const { return m_Data.size(); }
6805  const char* GetData() const { return m_Data.data(); }
6806 
6807  void Add(char ch) { m_Data.push_back(ch); }
6808  void Add(const char* pStr);
6809  void AddNewLine() { Add('\n'); }
6810  void AddNumber(uint32_t num);
6811  void AddNumber(uint64_t num);
6812  void AddPointer(const void* ptr);
6813 
6814 private:
6815  VmaVector< char, VmaStlAllocator<char> > m_Data;
6816 };
6817 
6818 void VmaStringBuilder::Add(const char* pStr)
6819 {
6820  const size_t strLen = strlen(pStr);
6821  if(strLen > 0)
6822  {
6823  const size_t oldCount = m_Data.size();
6824  m_Data.resize(oldCount + strLen);
6825  memcpy(m_Data.data() + oldCount, pStr, strLen);
6826  }
6827 }
6828 
6829 void VmaStringBuilder::AddNumber(uint32_t num)
6830 {
6831  char buf[11];
6832  VmaUint32ToStr(buf, sizeof(buf), num);
6833  Add(buf);
6834 }
6835 
6836 void VmaStringBuilder::AddNumber(uint64_t num)
6837 {
6838  char buf[21];
6839  VmaUint64ToStr(buf, sizeof(buf), num);
6840  Add(buf);
6841 }
6842 
6843 void VmaStringBuilder::AddPointer(const void* ptr)
6844 {
6845  char buf[21];
6846  VmaPtrToStr(buf, sizeof(buf), ptr);
6847  Add(buf);
6848 }
6849 
6850 #endif // #if VMA_STATS_STRING_ENABLED
6851 
6852 ////////////////////////////////////////////////////////////////////////////////
6853 // VmaJsonWriter
6854 
6855 #if VMA_STATS_STRING_ENABLED
6856 
6857 class VmaJsonWriter
6858 {
6859  VMA_CLASS_NO_COPY(VmaJsonWriter)
6860 public:
6861  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
6862  ~VmaJsonWriter();
6863 
6864  void BeginObject(bool singleLine = false);
6865  void EndObject();
6866 
6867  void BeginArray(bool singleLine = false);
6868  void EndArray();
6869 
6870  void WriteString(const char* pStr);
6871  void BeginString(const char* pStr = VMA_NULL);
6872  void ContinueString(const char* pStr);
6873  void ContinueString(uint32_t n);
6874  void ContinueString(uint64_t n);
6875  void ContinueString_Pointer(const void* ptr);
6876  void EndString(const char* pStr = VMA_NULL);
6877 
6878  void WriteNumber(uint32_t n);
6879  void WriteNumber(uint64_t n);
6880  void WriteBool(bool b);
6881  void WriteNull();
6882 
6883 private:
6884  static const char* const INDENT;
6885 
6886  enum COLLECTION_TYPE
6887  {
6888  COLLECTION_TYPE_OBJECT,
6889  COLLECTION_TYPE_ARRAY,
6890  };
6891  struct StackItem
6892  {
6893  COLLECTION_TYPE type;
6894  uint32_t valueCount;
6895  bool singleLineMode;
6896  };
6897 
6898  VmaStringBuilder& m_SB;
6899  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
6900  bool m_InsideString;
6901 
6902  void BeginValue(bool isString);
6903  void WriteIndent(bool oneLess = false);
6904 };
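// A short sketch of the writer in action, assuming an existing VmaAllocator
// `allocator` (hypothetical handle). Inside an object, alternating calls
// produce key/value pairs:
//
//     VmaStringBuilder sb(allocator);
//     VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
//     json.BeginObject();
//     json.WriteString("TotalBytes");      // key
//     json.WriteNumber((uint64_t)65536);   // value
//     json.EndObject();
//     // sb.GetData() now holds: {\n "TotalBytes": 65536\n}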
6905 
6906 const char* const VmaJsonWriter::INDENT = " ";
6907 
6908 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
6909  m_SB(sb),
6910  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
6911  m_InsideString(false)
6912 {
6913 }
6914 
6915 VmaJsonWriter::~VmaJsonWriter()
6916 {
6917  VMA_ASSERT(!m_InsideString);
6918  VMA_ASSERT(m_Stack.empty());
6919 }
6920 
6921 void VmaJsonWriter::BeginObject(bool singleLine)
6922 {
6923  VMA_ASSERT(!m_InsideString);
6924 
6925  BeginValue(false);
6926  m_SB.Add('{');
6927 
6928  StackItem item;
6929  item.type = COLLECTION_TYPE_OBJECT;
6930  item.valueCount = 0;
6931  item.singleLineMode = singleLine;
6932  m_Stack.push_back(item);
6933 }
6934 
6935 void VmaJsonWriter::EndObject()
6936 {
6937  VMA_ASSERT(!m_InsideString);
6938 
6939  WriteIndent(true);
6940  m_SB.Add('}');
6941 
6942  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
6943  m_Stack.pop_back();
6944 }
6945 
6946 void VmaJsonWriter::BeginArray(bool singleLine)
6947 {
6948  VMA_ASSERT(!m_InsideString);
6949 
6950  BeginValue(false);
6951  m_SB.Add('[');
6952 
6953  StackItem item;
6954  item.type = COLLECTION_TYPE_ARRAY;
6955  item.valueCount = 0;
6956  item.singleLineMode = singleLine;
6957  m_Stack.push_back(item);
6958 }
6959 
6960 void VmaJsonWriter::EndArray()
6961 {
6962  VMA_ASSERT(!m_InsideString);
6963 
6964  WriteIndent(true);
6965  m_SB.Add(']');
6966 
6967  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
6968  m_Stack.pop_back();
6969 }
6970 
6971 void VmaJsonWriter::WriteString(const char* pStr)
6972 {
6973  BeginString(pStr);
6974  EndString();
6975 }
6976 
6977 void VmaJsonWriter::BeginString(const char* pStr)
6978 {
6979  VMA_ASSERT(!m_InsideString);
6980 
6981  BeginValue(true);
6982  m_SB.Add('"');
6983  m_InsideString = true;
6984  if(pStr != VMA_NULL && pStr[0] != '\0')
6985  {
6986  ContinueString(pStr);
6987  }
6988 }
6989 
6990 void VmaJsonWriter::ContinueString(const char* pStr)
6991 {
6992  VMA_ASSERT(m_InsideString);
6993 
6994  const size_t strLen = strlen(pStr);
6995  for(size_t i = 0; i < strLen; ++i)
6996  {
6997  char ch = pStr[i];
6998  if(ch == '\\')
6999  {
7000  m_SB.Add("\\\\");
7001  }
7002  else if(ch == '"')
7003  {
7004  m_SB.Add("\\\"");
7005  }
7006  else if(ch >= 32)
7007  {
7008  m_SB.Add(ch);
7009  }
7010  else switch(ch)
7011  {
7012  case '\b':
7013  m_SB.Add("\\b");
7014  break;
7015  case '\f':
7016  m_SB.Add("\\f");
7017  break;
7018  case '\n':
7019  m_SB.Add("\\n");
7020  break;
7021  case '\r':
7022  m_SB.Add("\\r");
7023  break;
7024  case '\t':
7025  m_SB.Add("\\t");
7026  break;
7027  default:
7028  VMA_ASSERT(0 && "Character not currently supported.");
7029  break;
7030  }
7031  }
7032 }
7033 
7034 void VmaJsonWriter::ContinueString(uint32_t n)
7035 {
7036  VMA_ASSERT(m_InsideString);
7037  m_SB.AddNumber(n);
7038 }
7039 
7040 void VmaJsonWriter::ContinueString(uint64_t n)
7041 {
7042  VMA_ASSERT(m_InsideString);
7043  m_SB.AddNumber(n);
7044 }
7045 
7046 void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
7047 {
7048  VMA_ASSERT(m_InsideString);
7049  m_SB.AddPointer(ptr);
7050 }
7051 
7052 void VmaJsonWriter::EndString(const char* pStr)
7053 {
7054  VMA_ASSERT(m_InsideString);
7055  if(pStr != VMA_NULL && pStr[0] != '\0')
7056  {
7057  ContinueString(pStr);
7058  }
7059  m_SB.Add('"');
7060  m_InsideString = false;
7061 }
7062 
7063 void VmaJsonWriter::WriteNumber(uint32_t n)
7064 {
7065  VMA_ASSERT(!m_InsideString);
7066  BeginValue(false);
7067  m_SB.AddNumber(n);
7068 }
7069 
7070 void VmaJsonWriter::WriteNumber(uint64_t n)
7071 {
7072  VMA_ASSERT(!m_InsideString);
7073  BeginValue(false);
7074  m_SB.AddNumber(n);
7075 }
7076 
7077 void VmaJsonWriter::WriteBool(bool b)
7078 {
7079  VMA_ASSERT(!m_InsideString);
7080  BeginValue(false);
7081  m_SB.Add(b ? "true" : "false");
7082 }
7083 
7084 void VmaJsonWriter::WriteNull()
7085 {
7086  VMA_ASSERT(!m_InsideString);
7087  BeginValue(false);
7088  m_SB.Add("null");
7089 }
7090 
7091 void VmaJsonWriter::BeginValue(bool isString)
7092 {
7093  if(!m_Stack.empty())
7094  {
7095  StackItem& currItem = m_Stack.back();
7096  if(currItem.type == COLLECTION_TYPE_OBJECT &&
7097  currItem.valueCount % 2 == 0)
7098  {
7099  VMA_ASSERT(isString);
7100  }
7101 
7102  if(currItem.type == COLLECTION_TYPE_OBJECT &&
7103  currItem.valueCount % 2 != 0)
7104  {
7105  m_SB.Add(": ");
7106  }
7107  else if(currItem.valueCount > 0)
7108  {
7109  m_SB.Add(", ");
7110  WriteIndent();
7111  }
7112  else
7113  {
7114  WriteIndent();
7115  }
7116  ++currItem.valueCount;
7117  }
7118 }
7119 
7120 void VmaJsonWriter::WriteIndent(bool oneLess)
7121 {
7122  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
7123  {
7124  m_SB.AddNewLine();
7125 
7126  size_t count = m_Stack.size();
7127  if(count > 0 && oneLess)
7128  {
7129  --count;
7130  }
7131  for(size_t i = 0; i < count; ++i)
7132  {
7133  m_SB.Add(INDENT);
7134  }
7135  }
7136 }
7137 
7138 #endif // #if VMA_STATS_STRING_ENABLED
7139 
7140 ////////////////////////////////////////////////////////////////////////////////
7141 
7142 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
7143 {
7144  if(IsUserDataString())
7145  {
7146  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
7147 
7148  FreeUserDataString(hAllocator);
7149 
7150  if(pUserData != VMA_NULL)
7151  {
7152  const char* const newStrSrc = (char*)pUserData;
7153  const size_t newStrLen = strlen(newStrSrc);
7154  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
7155  memcpy(newStrDst, newStrSrc, newStrLen + 1);
7156  m_pUserData = newStrDst;
7157  }
7158  }
7159  else
7160  {
7161  m_pUserData = pUserData;
7162  }
7163 }
7164 
7165 void VmaAllocation_T::ChangeBlockAllocation(
7166  VmaAllocator hAllocator,
7167  VmaDeviceMemoryBlock* block,
7168  VkDeviceSize offset)
7169 {
7170  VMA_ASSERT(block != VMA_NULL);
7171  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
7172 
7173  // Move mapping reference counter from old block to new block.
7174  if(block != m_BlockAllocation.m_Block)
7175  {
7176  uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
7177  if(IsPersistentMap())
7178  ++mapRefCount;
7179  m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
7180  block->Map(hAllocator, mapRefCount, VMA_NULL);
7181  }
7182 
7183  m_BlockAllocation.m_Block = block;
7184  m_BlockAllocation.m_Offset = offset;
7185 }
7186 
7187 void VmaAllocation_T::ChangeSize(VkDeviceSize newSize)
7188 {
7189  VMA_ASSERT(newSize > 0);
7190  m_Size = newSize;
7191 }
7192 
7193 void VmaAllocation_T::ChangeOffset(VkDeviceSize newOffset)
7194 {
7195  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
7196  m_BlockAllocation.m_Offset = newOffset;
7197 }
7198 
7199 VkDeviceSize VmaAllocation_T::GetOffset() const
7200 {
7201  switch(m_Type)
7202  {
7203  case ALLOCATION_TYPE_BLOCK:
7204  return m_BlockAllocation.m_Offset;
7205  case ALLOCATION_TYPE_DEDICATED:
7206  return 0;
7207  default:
7208  VMA_ASSERT(0);
7209  return 0;
7210  }
7211 }
7212 
7213 VkDeviceMemory VmaAllocation_T::GetMemory() const
7214 {
7215  switch(m_Type)
7216  {
7217  case ALLOCATION_TYPE_BLOCK:
7218  return m_BlockAllocation.m_Block->GetDeviceMemory();
7219  case ALLOCATION_TYPE_DEDICATED:
7220  return m_DedicatedAllocation.m_hMemory;
7221  default:
7222  VMA_ASSERT(0);
7223  return VK_NULL_HANDLE;
7224  }
7225 }
7226 
7227 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
7228 {
7229  switch(m_Type)
7230  {
7231  case ALLOCATION_TYPE_BLOCK:
7232  return m_BlockAllocation.m_Block->GetMemoryTypeIndex();
7233  case ALLOCATION_TYPE_DEDICATED:
7234  return m_DedicatedAllocation.m_MemoryTypeIndex;
7235  default:
7236  VMA_ASSERT(0);
7237  return UINT32_MAX;
7238  }
7239 }
7240 
7241 void* VmaAllocation_T::GetMappedData() const
7242 {
7243  switch(m_Type)
7244  {
7245  case ALLOCATION_TYPE_BLOCK:
7246  if(m_MapCount != 0)
7247  {
7248  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
7249  VMA_ASSERT(pBlockData != VMA_NULL);
7250  return (char*)pBlockData + m_BlockAllocation.m_Offset;
7251  }
7252  else
7253  {
7254  return VMA_NULL;
7255  }
7256  break;
7257  case ALLOCATION_TYPE_DEDICATED:
7258  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
7259  return m_DedicatedAllocation.m_pMappedData;
7260  default:
7261  VMA_ASSERT(0);
7262  return VMA_NULL;
7263  }
7264 }
7265 
7266 bool VmaAllocation_T::CanBecomeLost() const
7267 {
7268  switch(m_Type)
7269  {
7270  case ALLOCATION_TYPE_BLOCK:
7271  return m_BlockAllocation.m_CanBecomeLost;
7272  case ALLOCATION_TYPE_DEDICATED:
7273  return false;
7274  default:
7275  VMA_ASSERT(0);
7276  return false;
7277  }
7278 }
7279 
7280 VmaPool VmaAllocation_T::GetPool() const
7281 {
7282  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
7283  return m_BlockAllocation.m_hPool;
7284 }
7285 
7286 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
7287 {
7288  VMA_ASSERT(CanBecomeLost());
7289 
7290  /*
7291  Warning: This is a carefully designed algorithm.
7292  Do not modify unless you really know what you're doing :)
7293  */
7294  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
7295  for(;;)
7296  {
7297  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
7298  {
7299  VMA_ASSERT(0);
7300  return false;
7301  }
7302  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
7303  {
7304  return false;
7305  }
7306  else // Last use time earlier than current time.
7307  {
7308  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
7309  {
7310  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
7311  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
7312  return true;
7313  }
7314  }
7315  }
7316 }
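// The loop above is a compare-and-swap retry. Assuming
// CompareExchangeLastUseFrameIndex follows the usual compare_exchange
// contract and reloads localLastUseFrameIndex on failure, a concurrent
// TouchAllocation() that bumps the frame index is re-examined on the next
// iteration rather than being overwritten with VMA_FRAME_INDEX_LOST.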
7317 
7318 #if VMA_STATS_STRING_ENABLED
7319 
7320 // Correspond to values of enum VmaSuballocationType.
7321 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
7322  "FREE",
7323  "UNKNOWN",
7324  "BUFFER",
7325  "IMAGE_UNKNOWN",
7326  "IMAGE_LINEAR",
7327  "IMAGE_OPTIMAL",
7328 };
7329 
7330 void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
7331 {
7332  json.WriteString("Type");
7333  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);
7334 
7335  json.WriteString("Size");
7336  json.WriteNumber(m_Size);
7337 
7338  if(m_pUserData != VMA_NULL)
7339  {
7340  json.WriteString("UserData");
7341  if(IsUserDataString())
7342  {
7343  json.WriteString((const char*)m_pUserData);
7344  }
7345  else
7346  {
7347  json.BeginString();
7348  json.ContinueString_Pointer(m_pUserData);
7349  json.EndString();
7350  }
7351  }
7352 
7353  json.WriteString("CreationFrameIndex");
7354  json.WriteNumber(m_CreationFrameIndex);
7355 
7356  json.WriteString("LastUseFrameIndex");
7357  json.WriteNumber(GetLastUseFrameIndex());
7358 
7359  if(m_BufferImageUsage != 0)
7360  {
7361  json.WriteString("Usage");
7362  json.WriteNumber(m_BufferImageUsage);
7363  }
7364 }
7365 
7366 #endif
7367 
7368 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
7369 {
7370  VMA_ASSERT(IsUserDataString());
7371  if(m_pUserData != VMA_NULL)
7372  {
7373  char* const oldStr = (char*)m_pUserData;
7374  const size_t oldStrLen = strlen(oldStr);
7375  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
7376  m_pUserData = VMA_NULL;
7377  }
7378 }
7379 
7380 void VmaAllocation_T::BlockAllocMap()
7381 {
7382  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
7383 
7384  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
7385  {
7386  ++m_MapCount;
7387  }
7388  else
7389  {
7390  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
7391  }
7392 }
7393 
7394 void VmaAllocation_T::BlockAllocUnmap()
7395 {
7396  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
7397 
7398  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
7399  {
7400  --m_MapCount;
7401  }
7402  else
7403  {
7404  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
7405  }
7406 }
7407 
7408 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
7409 {
7410  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
7411 
7412  if(m_MapCount != 0)
7413  {
7414  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
7415  {
7416  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
7417  *ppData = m_DedicatedAllocation.m_pMappedData;
7418  ++m_MapCount;
7419  return VK_SUCCESS;
7420  }
7421  else
7422  {
7423  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
7424  return VK_ERROR_MEMORY_MAP_FAILED;
7425  }
7426  }
7427  else
7428  {
7429  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
7430  hAllocator->m_hDevice,
7431  m_DedicatedAllocation.m_hMemory,
7432  0, // offset
7433  VK_WHOLE_SIZE,
7434  0, // flags
7435  ppData);
7436  if(result == VK_SUCCESS)
7437  {
7438  m_DedicatedAllocation.m_pMappedData = *ppData;
7439  m_MapCount = 1;
7440  }
7441  return result;
7442  }
7443 }
7444 
7445 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
7446 {
7447  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
7448 
7449  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
7450  {
7451  --m_MapCount;
7452  if(m_MapCount == 0)
7453  {
7454  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
7455  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
7456  hAllocator->m_hDevice,
7457  m_DedicatedAllocation.m_hMemory);
7458  }
7459  }
7460  else
7461  {
7462  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
7463  }
7464 }
7465 
7466 #if VMA_STATS_STRING_ENABLED
7467 
7468 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
7469 {
7470  json.BeginObject();
7471 
7472  json.WriteString("Blocks");
7473  json.WriteNumber(stat.blockCount);
7474 
7475  json.WriteString("Allocations");
7476  json.WriteNumber(stat.allocationCount);
7477 
7478  json.WriteString("UnusedRanges");
7479  json.WriteNumber(stat.unusedRangeCount);
7480 
7481  json.WriteString("UsedBytes");
7482  json.WriteNumber(stat.usedBytes);
7483 
7484  json.WriteString("UnusedBytes");
7485  json.WriteNumber(stat.unusedBytes);
7486 
7487  if(stat.allocationCount > 1)
7488  {
7489  json.WriteString("AllocationSize");
7490  json.BeginObject(true);
7491  json.WriteString("Min");
7492  json.WriteNumber(stat.allocationSizeMin);
7493  json.WriteString("Avg");
7494  json.WriteNumber(stat.allocationSizeAvg);
7495  json.WriteString("Max");
7496  json.WriteNumber(stat.allocationSizeMax);
7497  json.EndObject();
7498  }
7499 
7500  if(stat.unusedRangeCount > 1)
7501  {
7502  json.WriteString("UnusedRangeSize");
7503  json.BeginObject(true);
7504  json.WriteString("Min");
7505  json.WriteNumber(stat.unusedRangeSizeMin);
7506  json.WriteString("Avg");
7507  json.WriteNumber(stat.unusedRangeSizeAvg);
7508  json.WriteString("Max");
7509  json.WriteNumber(stat.unusedRangeSizeMax);
7510  json.EndObject();
7511  }
7512 
7513  json.EndObject();
7514 }
7515 
7516 #endif // #if VMA_STATS_STRING_ENABLED
7517 
7518 struct VmaSuballocationItemSizeLess
7519 {
7520  bool operator()(
7521  const VmaSuballocationList::iterator lhs,
7522  const VmaSuballocationList::iterator rhs) const
7523  {
7524  return lhs->size < rhs->size;
7525  }
7526  bool operator()(
7527  const VmaSuballocationList::iterator lhs,
7528  VkDeviceSize rhsSize) const
7529  {
7530  return lhs->size < rhsSize;
7531  }
7532 };
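// The two overloads make this predicate usable both for keeping
// m_FreeSuballocationsBySize sorted (iterator vs. iterator) and for binary
// search against a plain VkDeviceSize key (iterator vs. size), in the spirit
// of a transparent comparator; see VmaBinaryFindFirstNotLess in
// VmaBlockMetadata_Generic::CreateAllocationRequest below.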
7533 
7534 
7535 ////////////////////////////////////////////////////////////////////////////////
7536 // class VmaBlockMetadata
7537 
7538 VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
7539  m_Size(0),
7540  m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
7541 {
7542 }
7543 
7544 #if VMA_STATS_STRING_ENABLED
7545 
7546 void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
7547  VkDeviceSize unusedBytes,
7548  size_t allocationCount,
7549  size_t unusedRangeCount) const
7550 {
7551  json.BeginObject();
7552 
7553  json.WriteString("TotalBytes");
7554  json.WriteNumber(GetSize());
7555 
7556  json.WriteString("UnusedBytes");
7557  json.WriteNumber(unusedBytes);
7558 
7559  json.WriteString("Allocations");
7560  json.WriteNumber((uint64_t)allocationCount);
7561 
7562  json.WriteString("UnusedRanges");
7563  json.WriteNumber((uint64_t)unusedRangeCount);
7564 
7565  json.WriteString("Suballocations");
7566  json.BeginArray();
7567 }
7568 
7569 void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
7570  VkDeviceSize offset,
7571  VmaAllocation hAllocation) const
7572 {
7573  json.BeginObject(true);
7574 
7575  json.WriteString("Offset");
7576  json.WriteNumber(offset);
7577 
7578  hAllocation->PrintParameters(json);
7579 
7580  json.EndObject();
7581 }
7582 
7583 void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
7584  VkDeviceSize offset,
7585  VkDeviceSize size) const
7586 {
7587  json.BeginObject(true);
7588 
7589  json.WriteString("Offset");
7590  json.WriteNumber(offset);
7591 
7592  json.WriteString("Type");
7593  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);
7594 
7595  json.WriteString("Size");
7596  json.WriteNumber(size);
7597 
7598  json.EndObject();
7599 }
7600 
7601 void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
7602 {
7603  json.EndArray();
7604  json.EndObject();
7605 }
7606 
7607 #endif // #if VMA_STATS_STRING_ENABLED
7608 
7609 ////////////////////////////////////////////////////////////////////////////////
7610 // class VmaBlockMetadata_Generic
7611 
7612 VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
7613  VmaBlockMetadata(hAllocator),
7614  m_FreeCount(0),
7615  m_SumFreeSize(0),
7616  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
7617  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
7618 {
7619 }
7620 
7621 VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
7622 {
7623 }
7624 
7625 void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
7626 {
7627  VmaBlockMetadata::Init(size);
7628 
7629  m_FreeCount = 1;
7630  m_SumFreeSize = size;
7631 
7632  VmaSuballocation suballoc = {};
7633  suballoc.offset = 0;
7634  suballoc.size = size;
7635  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
7636  suballoc.hAllocation = VK_NULL_HANDLE;
7637 
7638  VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
7639  m_Suballocations.push_back(suballoc);
7640  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
7641  --suballocItem;
7642  m_FreeSuballocationsBySize.push_back(suballocItem);
7643 }
7644 
7645 bool VmaBlockMetadata_Generic::Validate() const
7646 {
7647  VMA_VALIDATE(!m_Suballocations.empty());
7648 
7649  // Expected offset of new suballocation as calculated from previous ones.
7650  VkDeviceSize calculatedOffset = 0;
7651  // Expected number of free suballocations as calculated from traversing their list.
7652  uint32_t calculatedFreeCount = 0;
7653  // Expected sum size of free suballocations as calculated from traversing their list.
7654  VkDeviceSize calculatedSumFreeSize = 0;
7655  // Expected number of free suballocations that should be registered in
7656  // m_FreeSuballocationsBySize calculated from traversing their list.
7657  size_t freeSuballocationsToRegister = 0;
7658  // True if previous visited suballocation was free.
7659  bool prevFree = false;
7660 
7661  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
7662  suballocItem != m_Suballocations.cend();
7663  ++suballocItem)
7664  {
7665  const VmaSuballocation& subAlloc = *suballocItem;
7666 
7667  // Actual offset of this suballocation doesn't match expected one.
7668  VMA_VALIDATE(subAlloc.offset == calculatedOffset);
7669 
7670  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
7671  // Two adjacent free suballocations are invalid. They should be merged.
7672  VMA_VALIDATE(!prevFree || !currFree);
7673 
7674  VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));
7675 
7676  if(currFree)
7677  {
7678  calculatedSumFreeSize += subAlloc.size;
7679  ++calculatedFreeCount;
7680  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
7681  {
7682  ++freeSuballocationsToRegister;
7683  }
7684 
7685  // Margin required between allocations - every free space must be at least that large.
7686  VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
7687  }
7688  else
7689  {
7690  VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
7691  VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);
7692 
7693  // Margin required between allocations - previous allocation must be free.
7694  VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
7695  }
7696 
7697  calculatedOffset += subAlloc.size;
7698  prevFree = currFree;
7699  }
7700 
7701  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
7702  // match expected one.
7703  VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);
7704 
7705  VkDeviceSize lastSize = 0;
7706  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
7707  {
7708  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
7709 
7710  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
7711  VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
7712  // They must be sorted by size ascending.
7713  VMA_VALIDATE(suballocItem->size >= lastSize);
7714 
7715  lastSize = suballocItem->size;
7716  }
7717 
7718  // Check if totals match calculated values.
7719  VMA_VALIDATE(ValidateFreeSuballocationList());
7720  VMA_VALIDATE(calculatedOffset == GetSize());
7721  VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
7722  VMA_VALIDATE(calculatedFreeCount == m_FreeCount);
7723 
7724  return true;
7725 }
7726 
7727 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
7728 {
7729  if(!m_FreeSuballocationsBySize.empty())
7730  {
7731  return m_FreeSuballocationsBySize.back()->size;
7732  }
7733  else
7734  {
7735  return 0;
7736  }
7737 }
7738 
7739 bool VmaBlockMetadata_Generic::IsEmpty() const
7740 {
7741  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
7742 }
7743 
7744 void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
7745 {
7746  outInfo.blockCount = 1;
7747 
7748  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
7749  outInfo.allocationCount = rangeCount - m_FreeCount;
7750  outInfo.unusedRangeCount = m_FreeCount;
7751 
7752  outInfo.unusedBytes = m_SumFreeSize;
7753  outInfo.usedBytes = GetSize() - outInfo.unusedBytes;
7754 
7755  outInfo.allocationSizeMin = UINT64_MAX;
7756  outInfo.allocationSizeMax = 0;
7757  outInfo.unusedRangeSizeMin = UINT64_MAX;
7758  outInfo.unusedRangeSizeMax = 0;
7759 
7760  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
7761  suballocItem != m_Suballocations.cend();
7762  ++suballocItem)
7763  {
7764  const VmaSuballocation& suballoc = *suballocItem;
7765  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
7766  {
7767  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
7768  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
7769  }
7770  else
7771  {
7772  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
7773  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
7774  }
7775  }
7776 }
7777 
7778 void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
7779 {
7780  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
7781 
7782  inoutStats.size += GetSize();
7783  inoutStats.unusedSize += m_SumFreeSize;
7784  inoutStats.allocationCount += rangeCount - m_FreeCount;
7785  inoutStats.unusedRangeCount += m_FreeCount;
7786  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
7787 }
7788 
7789 #if VMA_STATS_STRING_ENABLED
7790 
7791 void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
7792 {
7793  PrintDetailedMap_Begin(json,
7794  m_SumFreeSize, // unusedBytes
7795  m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
7796  m_FreeCount); // unusedRangeCount
7797 
7798  size_t i = 0;
7799  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
7800  suballocItem != m_Suballocations.cend();
7801  ++suballocItem, ++i)
7802  {
7803  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
7804  {
7805  PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
7806  }
7807  else
7808  {
7809  PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
7810  }
7811  }
7812 
7813  PrintDetailedMap_End(json);
7814 }
7815 
7816 #endif // #if VMA_STATS_STRING_ENABLED
7817 
7818 bool VmaBlockMetadata_Generic::CreateAllocationRequest(
7819  uint32_t currentFrameIndex,
7820  uint32_t frameInUseCount,
7821  VkDeviceSize bufferImageGranularity,
7822  VkDeviceSize allocSize,
7823  VkDeviceSize allocAlignment,
7824  bool upperAddress,
7825  VmaSuballocationType allocType,
7826  bool canMakeOtherLost,
7827  uint32_t strategy,
7828  VmaAllocationRequest* pAllocationRequest)
7829 {
7830  VMA_ASSERT(allocSize > 0);
7831  VMA_ASSERT(!upperAddress);
7832  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
7833  VMA_ASSERT(pAllocationRequest != VMA_NULL);
7834  VMA_HEAVY_ASSERT(Validate());
7835 
7836  // There is not enough total free space in this block to fulfill the request: Early return.
7837  if(canMakeOtherLost == false &&
7838  m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
7839  {
7840  return false;
7841  }
7842 
7843  // Efficient search through m_FreeSuballocationsBySize, kept sorted by size ascending.
7844  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
7845  if(freeSuballocCount > 0)
7846  {
 7847  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
 7848  {
7849  // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
7850  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
7851  m_FreeSuballocationsBySize.data(),
7852  m_FreeSuballocationsBySize.data() + freeSuballocCount,
7853  allocSize + 2 * VMA_DEBUG_MARGIN,
7854  VmaSuballocationItemSizeLess());
7855  size_t index = it - m_FreeSuballocationsBySize.data();
7856  for(; index < freeSuballocCount; ++index)
7857  {
7858  if(CheckAllocation(
7859  currentFrameIndex,
7860  frameInUseCount,
7861  bufferImageGranularity,
7862  allocSize,
7863  allocAlignment,
7864  allocType,
7865  m_FreeSuballocationsBySize[index],
7866  false, // canMakeOtherLost
7867  &pAllocationRequest->offset,
7868  &pAllocationRequest->itemsToMakeLostCount,
7869  &pAllocationRequest->sumFreeSize,
7870  &pAllocationRequest->sumItemSize))
7871  {
7872  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
7873  return true;
7874  }
7875  }
7876  }
7877  else if(strategy == VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET)
7878  {
7879  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
7880  it != m_Suballocations.end();
7881  ++it)
7882  {
7883  if(it->type == VMA_SUBALLOCATION_TYPE_FREE && CheckAllocation(
7884  currentFrameIndex,
7885  frameInUseCount,
7886  bufferImageGranularity,
7887  allocSize,
7888  allocAlignment,
7889  allocType,
7890  it,
7891  false, // canMakeOtherLost
7892  &pAllocationRequest->offset,
7893  &pAllocationRequest->itemsToMakeLostCount,
7894  &pAllocationRequest->sumFreeSize,
7895  &pAllocationRequest->sumItemSize))
7896  {
7897  pAllocationRequest->item = it;
7898  return true;
7899  }
7900  }
7901  }
7902  else // WORST_FIT, FIRST_FIT
7903  {
7904  // Search starting from the biggest suballocations.
7905  for(size_t index = freeSuballocCount; index--; )
7906  {
7907  if(CheckAllocation(
7908  currentFrameIndex,
7909  frameInUseCount,
7910  bufferImageGranularity,
7911  allocSize,
7912  allocAlignment,
7913  allocType,
7914  m_FreeSuballocationsBySize[index],
7915  false, // canMakeOtherLost
7916  &pAllocationRequest->offset,
7917  &pAllocationRequest->itemsToMakeLostCount,
7918  &pAllocationRequest->sumFreeSize,
7919  &pAllocationRequest->sumItemSize))
7920  {
7921  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
7922  return true;
7923  }
7924  }
7925  }
7926  }
7927 
7928  if(canMakeOtherLost)
7929  {
7930  // Brute-force algorithm. TODO: Come up with something better.
7931 
7932  pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
7933  pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;
7934 
7935  VmaAllocationRequest tmpAllocRequest = {};
7936  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
7937  suballocIt != m_Suballocations.end();
7938  ++suballocIt)
7939  {
7940  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
7941  suballocIt->hAllocation->CanBecomeLost())
7942  {
7943  if(CheckAllocation(
7944  currentFrameIndex,
7945  frameInUseCount,
7946  bufferImageGranularity,
7947  allocSize,
7948  allocAlignment,
7949  allocType,
7950  suballocIt,
7951  canMakeOtherLost,
7952  &tmpAllocRequest.offset,
7953  &tmpAllocRequest.itemsToMakeLostCount,
7954  &tmpAllocRequest.sumFreeSize,
7955  &tmpAllocRequest.sumItemSize))
7956  {
7957  tmpAllocRequest.item = suballocIt;
7958 
7959  if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost() ||
 7960  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
 7961  {
7962  *pAllocationRequest = tmpAllocRequest;
7963  }
7964  }
7965  }
7966  }
7967 
7968  if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
7969  {
7970  return true;
7971  }
7972  }
7973 
7974  return false;
7975 }
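// In short, the strategy dispatch above behaves as follows:
// - BEST_FIT: binary-search the size-sorted m_FreeSuballocationsBySize for the
//   first free range with size >= allocSize + 2 * VMA_DEBUG_MARGIN, then scan
//   forward until the alignment/granularity checks in CheckAllocation() pass.
// - MIN_OFFSET (internal, used by defragmentation): walk m_Suballocations in
//   address order and take the lowest acceptable offset.
// - WORST_FIT / FIRST_FIT: walk the size-sorted array from the largest free
//   range downwards.
// The canMakeOtherLost fallback then brute-forces every suballocation and
// keeps the candidate with the lowest CalcCost().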
7976 
7977 bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
7978  uint32_t currentFrameIndex,
7979  uint32_t frameInUseCount,
7980  VmaAllocationRequest* pAllocationRequest)
7981 {
7982  while(pAllocationRequest->itemsToMakeLostCount > 0)
7983  {
7984  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
7985  {
7986  ++pAllocationRequest->item;
7987  }
7988  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
7989  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
7990  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
7991  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
7992  {
7993  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
7994  --pAllocationRequest->itemsToMakeLostCount;
7995  }
7996  else
7997  {
7998  return false;
7999  }
8000  }
8001 
8002  VMA_HEAVY_ASSERT(Validate());
8003  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
8004  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
8005 
8006  return true;
8007 }
8008 
8009 uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
8010 {
8011  uint32_t lostAllocationCount = 0;
8012  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
8013  it != m_Suballocations.end();
8014  ++it)
8015  {
8016  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
8017  it->hAllocation->CanBecomeLost() &&
8018  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
8019  {
8020  it = FreeSuballocation(it);
8021  ++lostAllocationCount;
8022  }
8023  }
8024  return lostAllocationCount;
8025 }
8026 
8027 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
8028 {
8029  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
8030  it != m_Suballocations.end();
8031  ++it)
8032  {
8033  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
8034  {
8035  if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
8036  {
8037  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
8038  return VK_ERROR_VALIDATION_FAILED_EXT;
8039  }
8040  if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
8041  {
8042  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
8043  return VK_ERROR_VALIDATION_FAILED_EXT;
8044  }
8045  }
8046  }
8047 
8048  return VK_SUCCESS;
8049 }
8050 
8051 void VmaBlockMetadata_Generic::Alloc(
8052  const VmaAllocationRequest& request,
8053  VmaSuballocationType type,
8054  VkDeviceSize allocSize,
8055  bool upperAddress,
8056  VmaAllocation hAllocation)
8057 {
8058  VMA_ASSERT(!upperAddress);
8059  VMA_ASSERT(request.item != m_Suballocations.end());
8060  VmaSuballocation& suballoc = *request.item;
8061  // Given suballocation is a free block.
8062  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8063  // Given offset is inside this suballocation.
8064  VMA_ASSERT(request.offset >= suballoc.offset);
8065  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
8066  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
8067  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
8068 
8069  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
8070  // it to become used.
8071  UnregisterFreeSuballocation(request.item);
8072 
8073  suballoc.offset = request.offset;
8074  suballoc.size = allocSize;
8075  suballoc.type = type;
8076  suballoc.hAllocation = hAllocation;
8077 
8078  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
8079  if(paddingEnd)
8080  {
8081  VmaSuballocation paddingSuballoc = {};
8082  paddingSuballoc.offset = request.offset + allocSize;
8083  paddingSuballoc.size = paddingEnd;
8084  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8085  VmaSuballocationList::iterator next = request.item;
8086  ++next;
8087  const VmaSuballocationList::iterator paddingEndItem =
8088  m_Suballocations.insert(next, paddingSuballoc);
8089  RegisterFreeSuballocation(paddingEndItem);
8090  }
8091 
8092  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
8093  if(paddingBegin)
8094  {
8095  VmaSuballocation paddingSuballoc = {};
8096  paddingSuballoc.offset = request.offset - paddingBegin;
8097  paddingSuballoc.size = paddingBegin;
8098  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8099  const VmaSuballocationList::iterator paddingBeginItem =
8100  m_Suballocations.insert(request.item, paddingSuballoc);
8101  RegisterFreeSuballocation(paddingBeginItem);
8102  }
8103 
8104  // Update totals.
8105  m_FreeCount = m_FreeCount - 1;
8106  if(paddingBegin > 0)
8107  {
8108  ++m_FreeCount;
8109  }
8110  if(paddingEnd > 0)
8111  {
8112  ++m_FreeCount;
8113  }
8114  m_SumFreeSize -= allocSize;
8115 }
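// Worked example of the split above: carving 64 bytes at offset 192 out of a
// free range [128, 512) gives paddingBegin = 64 and paddingEnd = 256. The free
// range is unregistered, rewritten as the used suballocation [192, 256), and
// new FREE suballocations [128, 192) and [256, 512) are inserted around it,
// so m_FreeCount nets +1 and m_SumFreeSize drops by exactly allocSize.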
8116 
8117 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
8118 {
8119  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
8120  suballocItem != m_Suballocations.end();
8121  ++suballocItem)
8122  {
8123  VmaSuballocation& suballoc = *suballocItem;
8124  if(suballoc.hAllocation == allocation)
8125  {
8126  FreeSuballocation(suballocItem);
8127  VMA_HEAVY_ASSERT(Validate());
8128  return;
8129  }
8130  }
8131  VMA_ASSERT(0 && "Not found!");
8132 }
8133 
8134 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
8135 {
8136  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
8137  suballocItem != m_Suballocations.end();
8138  ++suballocItem)
8139  {
8140  VmaSuballocation& suballoc = *suballocItem;
8141  if(suballoc.offset == offset)
8142  {
8143  FreeSuballocation(suballocItem);
8144  return;
8145  }
8146  }
8147  VMA_ASSERT(0 && "Not found!");
8148 }
8149 
8150 bool VmaBlockMetadata_Generic::ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize)
8151 {
8152  typedef VmaSuballocationList::iterator iter_type;
8153  for(iter_type suballocItem = m_Suballocations.begin();
8154  suballocItem != m_Suballocations.end();
8155  ++suballocItem)
8156  {
8157  VmaSuballocation& suballoc = *suballocItem;
8158  if(suballoc.hAllocation == alloc)
8159  {
8160  iter_type nextItem = suballocItem;
8161  ++nextItem;
8162 
8163  // Should have been ensured on higher level.
8164  VMA_ASSERT(newSize != alloc->GetSize() && newSize > 0);
8165 
8166  // Shrinking.
8167  if(newSize < alloc->GetSize())
8168  {
8169  const VkDeviceSize sizeDiff = suballoc.size - newSize;
8170 
8171  // There is next item.
8172  if(nextItem != m_Suballocations.end())
8173  {
8174  // Next item is free.
8175  if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8176  {
8177  // Grow this next item backward.
8178  UnregisterFreeSuballocation(nextItem);
8179  nextItem->offset -= sizeDiff;
8180  nextItem->size += sizeDiff;
8181  RegisterFreeSuballocation(nextItem);
8182  }
8183  // Next item is not free.
8184  else
8185  {
8186  // Create free item after current one.
8187  VmaSuballocation newFreeSuballoc;
8188  newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
8189  newFreeSuballoc.offset = suballoc.offset + newSize;
8190  newFreeSuballoc.size = sizeDiff;
8191  newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8192  iter_type newFreeSuballocIt = m_Suballocations.insert(nextItem, newFreeSuballoc);
8193  RegisterFreeSuballocation(newFreeSuballocIt);
8194 
8195  ++m_FreeCount;
8196  }
8197  }
8198  // This is the last item.
8199  else
8200  {
8201  // Create free item at the end.
8202  VmaSuballocation newFreeSuballoc;
8203  newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
8204  newFreeSuballoc.offset = suballoc.offset + newSize;
8205  newFreeSuballoc.size = sizeDiff;
8206  newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8207  m_Suballocations.push_back(newFreeSuballoc);
8208 
8209  iter_type newFreeSuballocIt = m_Suballocations.end();
8210  RegisterFreeSuballocation(--newFreeSuballocIt);
8211 
8212  ++m_FreeCount;
8213  }
8214 
8215  suballoc.size = newSize;
8216  m_SumFreeSize += sizeDiff;
8217  }
8218  // Growing.
8219  else
8220  {
8221  const VkDeviceSize sizeDiff = newSize - suballoc.size;
8222 
8223  // There is next item.
8224  if(nextItem != m_Suballocations.end())
8225  {
8226  // Next item is free.
8227  if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8228  {
8229  // There is not enough free space, including margin.
8230  if(nextItem->size < sizeDiff + VMA_DEBUG_MARGIN)
8231  {
8232  return false;
8233  }
8234 
8235  // There is more free space than required.
8236  if(nextItem->size > sizeDiff)
8237  {
8238  // Move and shrink this next item.
8239  UnregisterFreeSuballocation(nextItem);
8240  nextItem->offset += sizeDiff;
8241  nextItem->size -= sizeDiff;
8242  RegisterFreeSuballocation(nextItem);
8243  }
8244  // There is exactly the amount of free space required.
8245  else
8246  {
8247  // Remove this next free item.
8248  UnregisterFreeSuballocation(nextItem);
8249  m_Suballocations.erase(nextItem);
8250  --m_FreeCount;
8251  }
8252  }
8253  // Next item is not free - there is no space to grow.
8254  else
8255  {
8256  return false;
8257  }
8258  }
8259  // This is the last item - there is no space to grow.
8260  else
8261  {
8262  return false;
8263  }
8264 
8265  suballoc.size = newSize;
8266  m_SumFreeSize -= sizeDiff;
8267  }
8268 
8269  // We cannot call Validate() here because alloc object is updated to new size outside of this call.
8270  return true;
8271  }
8272  }
8273  VMA_ASSERT(0 && "Not found!");
8274  return false;
8275 }
8276 
8277 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
8278 {
8279  VkDeviceSize lastSize = 0;
8280  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
8281  {
8282  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
8283 
8284  VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
8285  VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
8286  VMA_VALIDATE(it->size >= lastSize);
8287  lastSize = it->size;
8288  }
8289  return true;
8290 }
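// m_FreeSuballocationsBySize, checked above, holds iterators to every free
// suballocation of at least VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER bytes,
// kept sorted by size ascending so that best-fit lookups can binary-search it.
// An equivalent check with the standard library would be (illustrative only):
//
//   std::is_sorted(m_FreeSuballocationsBySize.begin(), m_FreeSuballocationsBySize.end(),
//       [](VmaSuballocationList::iterator lhs, VmaSuballocationList::iterator rhs)
//           { return lhs->size < rhs->size; });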
8291 
8292 bool VmaBlockMetadata_Generic::CheckAllocation(
8293  uint32_t currentFrameIndex,
8294  uint32_t frameInUseCount,
8295  VkDeviceSize bufferImageGranularity,
8296  VkDeviceSize allocSize,
8297  VkDeviceSize allocAlignment,
8298  VmaSuballocationType allocType,
8299  VmaSuballocationList::const_iterator suballocItem,
8300  bool canMakeOtherLost,
8301  VkDeviceSize* pOffset,
8302  size_t* itemsToMakeLostCount,
8303  VkDeviceSize* pSumFreeSize,
8304  VkDeviceSize* pSumItemSize) const
8305 {
8306  VMA_ASSERT(allocSize > 0);
8307  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
8308  VMA_ASSERT(suballocItem != m_Suballocations.cend());
8309  VMA_ASSERT(pOffset != VMA_NULL);
8310 
8311  *itemsToMakeLostCount = 0;
8312  *pSumFreeSize = 0;
8313  *pSumItemSize = 0;
8314 
8315  if(canMakeOtherLost)
8316  {
8317  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8318  {
8319  *pSumFreeSize = suballocItem->size;
8320  }
8321  else
8322  {
8323  if(suballocItem->hAllocation->CanBecomeLost() &&
8324  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
8325  {
8326  ++*itemsToMakeLostCount;
8327  *pSumItemSize = suballocItem->size;
8328  }
8329  else
8330  {
8331  return false;
8332  }
8333  }
8334 
8335  // Remaining size is too small for this request: Early return.
8336  if(GetSize() - suballocItem->offset < allocSize)
8337  {
8338  return false;
8339  }
8340 
8341  // Start from offset equal to beginning of this suballocation.
8342  *pOffset = suballocItem->offset;
8343 
8344  // Apply VMA_DEBUG_MARGIN at the beginning.
8345  if(VMA_DEBUG_MARGIN > 0)
8346  {
8347  *pOffset += VMA_DEBUG_MARGIN;
8348  }
8349 
8350  // Apply alignment.
8351  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
8352 
8353  // Check previous suballocations for BufferImageGranularity conflicts.
8354  // Make bigger alignment if necessary.
8355  if(bufferImageGranularity > 1)
8356  {
8357  bool bufferImageGranularityConflict = false;
8358  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
8359  while(prevSuballocItem != m_Suballocations.cbegin())
8360  {
8361  --prevSuballocItem;
8362  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
8363  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
8364  {
8365  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
8366  {
8367  bufferImageGranularityConflict = true;
8368  break;
8369  }
8370  }
8371  else
8372  // Already on previous page.
8373  break;
8374  }
8375  if(bufferImageGranularityConflict)
8376  {
8377  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
8378  }
8379  }
8380 
8381  // Now that we have final *pOffset, check if we are past suballocItem.
8382  // If yes, return false - this function should be called for another suballocItem as starting point.
8383  if(*pOffset >= suballocItem->offset + suballocItem->size)
8384  {
8385  return false;
8386  }
8387 
8388  // Calculate padding at the beginning based on current offset.
8389  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
8390 
8391  // Calculate required margin at the end.
8392  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
8393 
8394  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
8395  // Another early return check.
8396  if(suballocItem->offset + totalSize > GetSize())
8397  {
8398  return false;
8399  }
8400 
8401  // Advance lastSuballocItem until desired size is reached.
8402  // Update itemsToMakeLostCount.
8403  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
8404  if(totalSize > suballocItem->size)
8405  {
8406  VkDeviceSize remainingSize = totalSize - suballocItem->size;
8407  while(remainingSize > 0)
8408  {
8409  ++lastSuballocItem;
8410  if(lastSuballocItem == m_Suballocations.cend())
8411  {
8412  return false;
8413  }
8414  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8415  {
8416  *pSumFreeSize += lastSuballocItem->size;
8417  }
8418  else
8419  {
8420  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
8421  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
8422  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
8423  {
8424  ++*itemsToMakeLostCount;
8425  *pSumItemSize += lastSuballocItem->size;
8426  }
8427  else
8428  {
8429  return false;
8430  }
8431  }
8432  remainingSize = (lastSuballocItem->size < remainingSize) ?
8433  remainingSize - lastSuballocItem->size : 0;
8434  }
8435  }
8436 
8437  // Check next suballocations for BufferImageGranularity conflicts.
8438  // If conflict exists, we must mark more allocations lost or fail.
8439  if(bufferImageGranularity > 1)
8440  {
8441  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
8442  ++nextSuballocItem;
8443  while(nextSuballocItem != m_Suballocations.cend())
8444  {
8445  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
8446  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
8447  {
8448  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
8449  {
8450  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
8451  if(nextSuballoc.hAllocation->CanBecomeLost() &&
8452  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
8453  {
8454  ++*itemsToMakeLostCount;
8455  }
8456  else
8457  {
8458  return false;
8459  }
8460  }
8461  }
8462  else
8463  {
8464  // Already on next page.
8465  break;
8466  }
8467  ++nextSuballocItem;
8468  }
8469  }
8470  }
8471  else
8472  {
8473  const VmaSuballocation& suballoc = *suballocItem;
8474  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8475 
8476  *pSumFreeSize = suballoc.size;
8477 
8478  // Size of this suballocation is too small for this request: Early return.
8479  if(suballoc.size < allocSize)
8480  {
8481  return false;
8482  }
8483 
8484  // Start from offset equal to beginning of this suballocation.
8485  *pOffset = suballoc.offset;
8486 
8487  // Apply VMA_DEBUG_MARGIN at the beginning.
8488  if(VMA_DEBUG_MARGIN > 0)
8489  {
8490  *pOffset += VMA_DEBUG_MARGIN;
8491  }
8492 
8493  // Apply alignment.
8494  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
8495 
8496  // Check previous suballocations for BufferImageGranularity conflicts.
8497  // Make bigger alignment if necessary.
8498  if(bufferImageGranularity > 1)
8499  {
8500  bool bufferImageGranularityConflict = false;
8501  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
8502  while(prevSuballocItem != m_Suballocations.cbegin())
8503  {
8504  --prevSuballocItem;
8505  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
8506  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
8507  {
8508  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
8509  {
8510  bufferImageGranularityConflict = true;
8511  break;
8512  }
8513  }
8514  else
8515  // Already on previous page.
8516  break;
8517  }
8518  if(bufferImageGranularityConflict)
8519  {
8520  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
8521  }
8522  }
8523 
8524  // Calculate padding at the beginning based on current offset.
8525  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
8526 
8527  // Calculate required margin at the end.
8528  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
8529 
8530  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
8531  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
8532  {
8533  return false;
8534  }
8535 
8536  // Check next suballocations for BufferImageGranularity conflicts.
8537  // If conflict exists, allocation cannot be made here.
8538  if(bufferImageGranularity > 1)
8539  {
8540  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
8541  ++nextSuballocItem;
8542  while(nextSuballocItem != m_Suballocations.cend())
8543  {
8544  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
8545  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
8546  {
8547  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
8548  {
8549  return false;
8550  }
8551  }
8552  else
8553  {
8554  // Already on next page.
8555  break;
8556  }
8557  ++nextSuballocItem;
8558  }
8559  }
8560  }
8561 
8562  // All tests passed: Success. pOffset is already filled.
8563  return true;
8564 }
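// Editorial sketch of the offset pipeline CheckAllocation applies in both
// branches above, with a hypothetical name: debug margin first, then the
// requested alignment, then a coarser bump to bufferImageGranularity when a
// preceding resource of a conflicting type shares the same granularity page.
static VkDeviceSize VmaPlaceOffsetSketch(
    VkDeviceSize suballocOffset,
    VkDeviceSize allocAlignment,
    VkDeviceSize bufferImageGranularity,
    bool granularityConflict)
{
    VkDeviceSize offset = suballocOffset + VMA_DEBUG_MARGIN; // Debug margin at the beginning.
    offset = VmaAlignUp(offset, allocAlignment);             // Requested alignment.
    if(granularityConflict)
    {
        offset = VmaAlignUp(offset, bufferImageGranularity); // Coarser alignment if needed.
    }
    return offset;
}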
8565 
8566 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
8567 {
8568  VMA_ASSERT(item != m_Suballocations.end());
8569  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
8570 
8571  VmaSuballocationList::iterator nextItem = item;
8572  ++nextItem;
8573  VMA_ASSERT(nextItem != m_Suballocations.end());
8574  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
8575 
8576  item->size += nextItem->size;
8577  --m_FreeCount;
8578  m_Suballocations.erase(nextItem);
8579 }
8580 
8581 VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
8582 {
8583  // Change this suballocation to be marked as free.
8584  VmaSuballocation& suballoc = *suballocItem;
8585  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8586  suballoc.hAllocation = VK_NULL_HANDLE;
8587 
8588  // Update totals.
8589  ++m_FreeCount;
8590  m_SumFreeSize += suballoc.size;
8591 
8592  // Merge with previous and/or next suballocation if it's also free.
8593  bool mergeWithNext = false;
8594  bool mergeWithPrev = false;
8595 
8596  VmaSuballocationList::iterator nextItem = suballocItem;
8597  ++nextItem;
8598  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
8599  {
8600  mergeWithNext = true;
8601  }
8602 
8603  VmaSuballocationList::iterator prevItem = suballocItem;
8604  if(suballocItem != m_Suballocations.begin())
8605  {
8606  --prevItem;
8607  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8608  {
8609  mergeWithPrev = true;
8610  }
8611  }
8612 
8613  if(mergeWithNext)
8614  {
8615  UnregisterFreeSuballocation(nextItem);
8616  MergeFreeWithNext(suballocItem);
8617  }
8618 
8619  if(mergeWithPrev)
8620  {
8621  UnregisterFreeSuballocation(prevItem);
8622  MergeFreeWithNext(prevItem);
8623  RegisterFreeSuballocation(prevItem);
8624  return prevItem;
8625  }
8626  else
8627  {
8628  RegisterFreeSuballocation(suballocItem);
8629  return suballocItem;
8630  }
8631 }
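// FreeSuballocation above keeps the list canonical: no two free suballocations
// are ever adjacent. The two flags cover four neighbor patterns:
//   used | FREE | used  -> no merge, the item is registered as-is;
//   used | FREE | free  -> merged with the next item;
//   free | FREE | used  -> absorbed into the previous item;
//   free | FREE | free  -> both merges, the previous item absorbs everything.
// Neighbors are unregistered from m_FreeSuballocationsBySize before their size
// changes and re-registered afterwards, because that vector is sorted by size.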
8632 
8633 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
8634 {
8635  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
8636  VMA_ASSERT(item->size > 0);
8637 
8638  // You may want to enable this validation at the beginning or at the end of
8639  // this function, depending on what you want to check.
8640  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
8641 
8642  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
8643  {
8644  if(m_FreeSuballocationsBySize.empty())
8645  {
8646  m_FreeSuballocationsBySize.push_back(item);
8647  }
8648  else
8649  {
8650  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
8651  }
8652  }
8653 
8654  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
8655 }
8656 
8657 
8658 void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
8659 {
8660  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
8661  VMA_ASSERT(item->size > 0);
8662 
8663  // You may want to enable this validation at the beginning or at the end of
8664  // this function, depending on what you want to check.
8665  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
8666 
8667  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
8668  {
8669  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
8670  m_FreeSuballocationsBySize.data(),
8671  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
8672  item,
8673  VmaSuballocationItemSizeLess());
8674  for(size_t index = it - m_FreeSuballocationsBySize.data();
8675  index < m_FreeSuballocationsBySize.size();
8676  ++index)
8677  {
8678  if(m_FreeSuballocationsBySize[index] == item)
8679  {
8680  VmaVectorRemove(m_FreeSuballocationsBySize, index);
8681  return;
8682  }
8683  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
8684  }
8685  VMA_ASSERT(0 && "Not found.");
8686  }
8687 
8688  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
8689 }
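// m_FreeSuballocationsBySize is sorted by size only, so many entries may share
// one size and the iterator being removed is not uniquely located by the binary
// search alone: VmaBinaryFindFirstNotLess lands on the first entry of equal
// size and the loop above scans forward through that run. The same idea with
// the standard library (illustrative only, bySizeLess being the comparator):
//
//   auto range = std::equal_range(vec.begin(), vec.end(), item, bySizeLess);
//   auto found = std::find(range.first, range.second, item);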
8690 
8691 bool VmaBlockMetadata_Generic::IsBufferImageGranularityConflictPossible(
8692  VkDeviceSize bufferImageGranularity,
8693  VmaSuballocationType& inOutPrevSuballocType) const
8694 {
8695  if(bufferImageGranularity == 1 || IsEmpty())
8696  {
8697  return false;
8698  }
8699 
8700  VkDeviceSize minAlignment = VK_WHOLE_SIZE;
8701  bool typeConflictFound = false;
8702  for(VmaSuballocationList::const_iterator it = m_Suballocations.cbegin();
8703  it != m_Suballocations.cend();
8704  ++it)
8705  {
8706  const VmaSuballocationType suballocType = it->type;
8707  if(suballocType != VMA_SUBALLOCATION_TYPE_FREE)
8708  {
8709  minAlignment = VMA_MIN(minAlignment, it->hAllocation->GetAlignment());
8710  if(VmaIsBufferImageGranularityConflict(inOutPrevSuballocType, suballocType))
8711  {
8712  typeConflictFound = true;
8713  }
8714  inOutPrevSuballocType = suballocType;
8715  }
8716  }
8717 
8718  return typeConflictFound || minAlignment >= bufferImageGranularity;
8719 }
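// Note what the scan above actually computes: minAlignment is the smallest
// alignment among all live allocations in the block, and typeConflictFound is
// set when two consecutive non-free suballocations have types that conflict
// under the given granularity. Blocks with bufferImageGranularity == 1, or
// with no allocations at all, can never conflict and return early.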
8720 
8721 ////////////////////////////////////////////////////////////////////////////////
8722 // class VmaBlockMetadata_Linear
8723 
8724 VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
8725  VmaBlockMetadata(hAllocator),
8726  m_SumFreeSize(0),
8727  m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
8728  m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
8729  m_1stVectorIndex(0),
8730  m_2ndVectorMode(SECOND_VECTOR_EMPTY),
8731  m_1stNullItemsBeginCount(0),
8732  m_1stNullItemsMiddleCount(0),
8733  m_2ndNullItemsCount(0)
8734 {
8735 }
8736 
8737 VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
8738 {
8739 }
8740 
8741 void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
8742 {
8743  VmaBlockMetadata::Init(size);
8744  m_SumFreeSize = size;
8745 }
8746 
8747 bool VmaBlockMetadata_Linear::Validate() const
8748 {
8749  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8750  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8751 
8752  VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
8753  VMA_VALIDATE(!suballocations1st.empty() ||
8754  suballocations2nd.empty() ||
8755  m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);
8756 
8757  if(!suballocations1st.empty())
8758  {
8759  // Null items at the beginning should be accounted for in m_1stNullItemsBeginCount.
8760  VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
8761  // Null item at the end should be just pop_back().
8762  VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
8763  }
8764  if(!suballocations2nd.empty())
8765  {
8766  // Null item at the end should be just pop_back().
8767  VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
8768  }
8769 
8770  VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
8771  VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());
8772 
8773  VkDeviceSize sumUsedSize = 0;
8774  const size_t suballoc1stCount = suballocations1st.size();
8775  VkDeviceSize offset = VMA_DEBUG_MARGIN;
8776 
8777  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8778  {
8779  const size_t suballoc2ndCount = suballocations2nd.size();
8780  size_t nullItem2ndCount = 0;
8781  for(size_t i = 0; i < suballoc2ndCount; ++i)
8782  {
8783  const VmaSuballocation& suballoc = suballocations2nd[i];
8784  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8785 
8786  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
8787  VMA_VALIDATE(suballoc.offset >= offset);
8788 
8789  if(!currFree)
8790  {
8791  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
8792  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
8793  sumUsedSize += suballoc.size;
8794  }
8795  else
8796  {
8797  ++nullItem2ndCount;
8798  }
8799 
8800  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
8801  }
8802 
8803  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
8804  }
8805 
8806  for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
8807  {
8808  const VmaSuballocation& suballoc = suballocations1st[i];
8809  VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
8810  suballoc.hAllocation == VK_NULL_HANDLE);
8811  }
8812 
8813  size_t nullItem1stCount = m_1stNullItemsBeginCount;
8814 
8815  for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
8816  {
8817  const VmaSuballocation& suballoc = suballocations1st[i];
8818  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8819 
8820  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
8821  VMA_VALIDATE(suballoc.offset >= offset);
8822  VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);
8823 
8824  if(!currFree)
8825  {
8826  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
8827  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
8828  sumUsedSize += suballoc.size;
8829  }
8830  else
8831  {
8832  ++nullItem1stCount;
8833  }
8834 
8835  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
8836  }
8837  VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);
8838 
8839  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8840  {
8841  const size_t suballoc2ndCount = suballocations2nd.size();
8842  size_t nullItem2ndCount = 0;
8843  for(size_t i = suballoc2ndCount; i--; )
8844  {
8845  const VmaSuballocation& suballoc = suballocations2nd[i];
8846  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8847 
8848  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
8849  VMA_VALIDATE(suballoc.offset >= offset);
8850 
8851  if(!currFree)
8852  {
8853  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
8854  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
8855  sumUsedSize += suballoc.size;
8856  }
8857  else
8858  {
8859  ++nullItem2ndCount;
8860  }
8861 
8862  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
8863  }
8864 
8865  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
8866  }
8867 
8868  VMA_VALIDATE(offset <= GetSize());
8869  VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);
8870 
8871  return true;
8872 }
8873 
8874 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
8875 {
8876  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
8877  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
8878 }
8879 
8880 VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
8881 {
8882  const VkDeviceSize size = GetSize();
8883 
8884  /*
8885  We don't consider gaps inside allocation vectors with freed allocations because
8886  they are not suitable for reuse in a linear allocator. We consider only space that
8887  is available for new allocations.
8888  */
8889  if(IsEmpty())
8890  {
8891  return size;
8892  }
8893 
8894  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8895 
8896  switch(m_2ndVectorMode)
8897  {
8898  case SECOND_VECTOR_EMPTY:
8899  /*
8900  Available space is after end of 1st, as well as before beginning of 1st (which
8901  would make it a ring buffer).
8902  */
8903  {
8904  const size_t suballocations1stCount = suballocations1st.size();
8905  VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
8906  const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
8907  const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
8908  return VMA_MAX(
8909  firstSuballoc.offset,
8910  size - (lastSuballoc.offset + lastSuballoc.size));
8911  }
8912  break;
8913 
8914  case SECOND_VECTOR_RING_BUFFER:
8915  /*
8916  Available space is only between end of 2nd and beginning of 1st.
8917  */
8918  {
8919  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8920  const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
8921  const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
8922  return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
8923  }
8924  break;
8925 
8926  case SECOND_VECTOR_DOUBLE_STACK:
8927  /*
8928  Available space is only between end of 1st and top of 2nd.
8929  */
8930  {
8931  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8932  const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
8933  const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
8934  return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
8935  }
8936  break;
8937 
8938  default:
8939  VMA_ASSERT(0);
8940  return 0;
8941  }
8942 }
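// Layout of the three modes handled above (editorial diagram; offsets grow
// left to right across the whole block, and only the gaps marked "free" are
// usable for new allocations - holes left by freed items inside the vectors
// are compacted by CleanupAfterFree(), not reused directly):
//
//   SECOND_VECTOR_EMPTY:         |  free  |---- 1st ----|   free   |
//   SECOND_VECTOR_RING_BUFFER:   |-- 2nd --|   free   |---- 1st ----|
//   SECOND_VECTOR_DOUBLE_STACK:  |---- 1st ----|   free   |-- 2nd --|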
8943 
8944 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
8945 {
8946  const VkDeviceSize size = GetSize();
8947  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8948  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8949  const size_t suballoc1stCount = suballocations1st.size();
8950  const size_t suballoc2ndCount = suballocations2nd.size();
8951 
8952  outInfo.blockCount = 1;
8953  outInfo.allocationCount = (uint32_t)GetAllocationCount();
8954  outInfo.unusedRangeCount = 0;
8955  outInfo.usedBytes = 0;
8956  outInfo.allocationSizeMin = UINT64_MAX;
8957  outInfo.allocationSizeMax = 0;
8958  outInfo.unusedRangeSizeMin = UINT64_MAX;
8959  outInfo.unusedRangeSizeMax = 0;
8960 
8961  VkDeviceSize lastOffset = 0;
8962 
8963  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8964  {
8965  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
8966  size_t nextAlloc2ndIndex = 0;
8967  while(lastOffset < freeSpace2ndTo1stEnd)
8968  {
8969  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8970  while(nextAlloc2ndIndex < suballoc2ndCount &&
8971  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8972  {
8973  ++nextAlloc2ndIndex;
8974  }
8975 
8976  // Found non-null allocation.
8977  if(nextAlloc2ndIndex < suballoc2ndCount)
8978  {
8979  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8980 
8981  // 1. Process free space before this allocation.
8982  if(lastOffset < suballoc.offset)
8983  {
8984  // There is free space from lastOffset to suballoc.offset.
8985  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8986  ++outInfo.unusedRangeCount;
8987  outInfo.unusedBytes += unusedRangeSize;
8988  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
8989  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
8990  }
8991 
8992  // 2. Process this allocation.
8993  // There is allocation with suballoc.offset, suballoc.size.
8994  outInfo.usedBytes += suballoc.size;
8995  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
8996  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
8997 
8998  // 3. Prepare for next iteration.
8999  lastOffset = suballoc.offset + suballoc.size;
9000  ++nextAlloc2ndIndex;
9001  }
9002  // We are at the end.
9003  else
9004  {
9005  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9006  if(lastOffset < freeSpace2ndTo1stEnd)
9007  {
9008  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
9009  ++outInfo.unusedRangeCount;
9010  outInfo.unusedBytes += unusedRangeSize;
9011  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9012  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9013  }
9014 
9015  // End of loop.
9016  lastOffset = freeSpace2ndTo1stEnd;
9017  }
9018  }
9019  }
9020 
9021  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
9022  const VkDeviceSize freeSpace1stTo2ndEnd =
9023  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
9024  while(lastOffset < freeSpace1stTo2ndEnd)
9025  {
9026  // Find next non-null allocation or move nextAlloc1stIndex to the end.
9027  while(nextAlloc1stIndex < suballoc1stCount &&
9028  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9029  {
9030  ++nextAlloc1stIndex;
9031  }
9032 
9033  // Found non-null allocation.
9034  if(nextAlloc1stIndex < suballoc1stCount)
9035  {
9036  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9037 
9038  // 1. Process free space before this allocation.
9039  if(lastOffset < suballoc.offset)
9040  {
9041  // There is free space from lastOffset to suballoc.offset.
9042  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9043  ++outInfo.unusedRangeCount;
9044  outInfo.unusedBytes += unusedRangeSize;
9045  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9046  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9047  }
9048 
9049  // 2. Process this allocation.
9050  // There is allocation with suballoc.offset, suballoc.size.
9051  outInfo.usedBytes += suballoc.size;
9052  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
9053  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
9054 
9055  // 3. Prepare for next iteration.
9056  lastOffset = suballoc.offset + suballoc.size;
9057  ++nextAlloc1stIndex;
9058  }
9059  // We are at the end.
9060  else
9061  {
9062  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9063  if(lastOffset < freeSpace1stTo2ndEnd)
9064  {
9065  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
9066  ++outInfo.unusedRangeCount;
9067  outInfo.unusedBytes += unusedRangeSize;
9068  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9069  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9070  }
9071 
9072  // End of loop.
9073  lastOffset = freeSpace1stTo2ndEnd;
9074  }
9075  }
9076 
9077  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9078  {
9079  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9080  while(lastOffset < size)
9081  {
9082  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9083  while(nextAlloc2ndIndex != SIZE_MAX &&
9084  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9085  {
9086  --nextAlloc2ndIndex;
9087  }
9088 
9089  // Found non-null allocation.
9090  if(nextAlloc2ndIndex != SIZE_MAX)
9091  {
9092  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9093 
9094  // 1. Process free space before this allocation.
9095  if(lastOffset < suballoc.offset)
9096  {
9097  // There is free space from lastOffset to suballoc.offset.
9098  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9099  ++outInfo.unusedRangeCount;
9100  outInfo.unusedBytes += unusedRangeSize;
9101  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9102  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9103  }
9104 
9105  // 2. Process this allocation.
9106  // There is allocation with suballoc.offset, suballoc.size.
9107  outInfo.usedBytes += suballoc.size;
9108  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
9109  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
9110 
9111  // 3. Prepare for next iteration.
9112  lastOffset = suballoc.offset + suballoc.size;
9113  --nextAlloc2ndIndex;
9114  }
9115  // We are at the end.
9116  else
9117  {
9118  // There is free space from lastOffset to size.
9119  if(lastOffset < size)
9120  {
9121  const VkDeviceSize unusedRangeSize = size - lastOffset;
9122  ++outInfo.unusedRangeCount;
9123  outInfo.unusedBytes += unusedRangeSize;
9124  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9125  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9126  }
9127 
9128  // End of loop.
9129  lastOffset = size;
9130  }
9131  }
9132  }
9133 
9134  outInfo.unusedBytes = size - outInfo.usedBytes;
9135 }
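// The statistics above follow the usual fold pattern: minima start at
// UINT64_MAX, maxima at 0, and every item clamps both. A minimal sketch of the
// same pattern (illustrative only):
//
//   VkDeviceSize sizeMin = UINT64_MAX, sizeMax = 0;
//   for(VkDeviceSize s : sizes)
//   {
//       sizeMin = VMA_MIN(sizeMin, s);
//       sizeMax = VMA_MAX(sizeMax, s);
//   }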
9136 
9137 void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
9138 {
9139  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9140  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9141  const VkDeviceSize size = GetSize();
9142  const size_t suballoc1stCount = suballocations1st.size();
9143  const size_t suballoc2ndCount = suballocations2nd.size();
9144 
9145  inoutStats.size += size;
9146 
9147  VkDeviceSize lastOffset = 0;
9148 
9149  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9150  {
9151  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9152  size_t nextAlloc2ndIndex = 0; // Indexes suballocations2nd, so it starts at 0, not at m_1stNullItemsBeginCount.
9153  while(lastOffset < freeSpace2ndTo1stEnd)
9154  {
9155  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9156  while(nextAlloc2ndIndex < suballoc2ndCount &&
9157  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9158  {
9159  ++nextAlloc2ndIndex;
9160  }
9161 
9162  // Found non-null allocation.
9163  if(nextAlloc2ndIndex < suballoc2ndCount)
9164  {
9165  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9166 
9167  // 1. Process free space before this allocation.
9168  if(lastOffset < suballoc.offset)
9169  {
9170  // There is free space from lastOffset to suballoc.offset.
9171  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9172  inoutStats.unusedSize += unusedRangeSize;
9173  ++inoutStats.unusedRangeCount;
9174  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9175  }
9176 
9177  // 2. Process this allocation.
9178  // There is allocation with suballoc.offset, suballoc.size.
9179  ++inoutStats.allocationCount;
9180 
9181  // 3. Prepare for next iteration.
9182  lastOffset = suballoc.offset + suballoc.size;
9183  ++nextAlloc2ndIndex;
9184  }
9185  // We are at the end.
9186  else
9187  {
9188  if(lastOffset < freeSpace2ndTo1stEnd)
9189  {
9190  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9191  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
9192  inoutStats.unusedSize += unusedRangeSize;
9193  ++inoutStats.unusedRangeCount;
9194  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9195  }
9196 
9197  // End of loop.
9198  lastOffset = freeSpace2ndTo1stEnd;
9199  }
9200  }
9201  }
9202 
9203  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
9204  const VkDeviceSize freeSpace1stTo2ndEnd =
9205  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
9206  while(lastOffset < freeSpace1stTo2ndEnd)
9207  {
9208  // Find next non-null allocation or move nextAlloc1stIndex to the end.
9209  while(nextAlloc1stIndex < suballoc1stCount &&
9210  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9211  {
9212  ++nextAlloc1stIndex;
9213  }
9214 
9215  // Found non-null allocation.
9216  if(nextAlloc1stIndex < suballoc1stCount)
9217  {
9218  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9219 
9220  // 1. Process free space before this allocation.
9221  if(lastOffset < suballoc.offset)
9222  {
9223  // There is free space from lastOffset to suballoc.offset.
9224  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9225  inoutStats.unusedSize += unusedRangeSize;
9226  ++inoutStats.unusedRangeCount;
9227  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9228  }
9229 
9230  // 2. Process this allocation.
9231  // There is allocation with suballoc.offset, suballoc.size.
9232  ++inoutStats.allocationCount;
9233 
9234  // 3. Prepare for next iteration.
9235  lastOffset = suballoc.offset + suballoc.size;
9236  ++nextAlloc1stIndex;
9237  }
9238  // We are at the end.
9239  else
9240  {
9241  if(lastOffset < freeSpace1stTo2ndEnd)
9242  {
9243  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9244  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
9245  inoutStats.unusedSize += unusedRangeSize;
9246  ++inoutStats.unusedRangeCount;
9247  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9248  }
9249 
9250  // End of loop.
9251  lastOffset = freeSpace1stTo2ndEnd;
9252  }
9253  }
9254 
9255  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9256  {
9257  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9258  while(lastOffset < size)
9259  {
9260  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9261  while(nextAlloc2ndIndex != SIZE_MAX &&
9262  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9263  {
9264  --nextAlloc2ndIndex;
9265  }
9266 
9267  // Found non-null allocation.
9268  if(nextAlloc2ndIndex != SIZE_MAX)
9269  {
9270  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9271 
9272  // 1. Process free space before this allocation.
9273  if(lastOffset < suballoc.offset)
9274  {
9275  // There is free space from lastOffset to suballoc.offset.
9276  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9277  inoutStats.unusedSize += unusedRangeSize;
9278  ++inoutStats.unusedRangeCount;
9279  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9280  }
9281 
9282  // 2. Process this allocation.
9283  // There is allocation with suballoc.offset, suballoc.size.
9284  ++inoutStats.allocationCount;
9285 
9286  // 3. Prepare for next iteration.
9287  lastOffset = suballoc.offset + suballoc.size;
9288  --nextAlloc2ndIndex;
9289  }
9290  // We are at the end.
9291  else
9292  {
9293  if(lastOffset < size)
9294  {
9295  // There is free space from lastOffset to size.
9296  const VkDeviceSize unusedRangeSize = size - lastOffset;
9297  inoutStats.unusedSize += unusedRangeSize;
9298  ++inoutStats.unusedRangeCount;
9299  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9300  }
9301 
9302  // End of loop.
9303  lastOffset = size;
9304  }
9305  }
9306  }
9307 }
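// CalcAllocationStatInfo, AddPoolStats and PrintDetailedMap all walk the block
// with the same three-loop skeleton: (1) the 2nd vector forward when it acts as
// a ring buffer (lowest offsets), (2) the 1st vector forward, (3) the 2nd
// vector backward when it acts as a double stack (highest offsets). Each loop
// skips null items, accounts the gap before every live allocation, then the
// allocation itself; only the per-item action differs between the three.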
9308 
9309 #if VMA_STATS_STRING_ENABLED
9310 void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
9311 {
9312  const VkDeviceSize size = GetSize();
9313  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9314  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9315  const size_t suballoc1stCount = suballocations1st.size();
9316  const size_t suballoc2ndCount = suballocations2nd.size();
9317 
9318  // FIRST PASS
9319 
9320  size_t unusedRangeCount = 0;
9321  VkDeviceSize usedBytes = 0;
9322 
9323  VkDeviceSize lastOffset = 0;
9324 
9325  size_t alloc2ndCount = 0;
9326  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9327  {
9328  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9329  size_t nextAlloc2ndIndex = 0;
9330  while(lastOffset < freeSpace2ndTo1stEnd)
9331  {
9332  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9333  while(nextAlloc2ndIndex < suballoc2ndCount &&
9334  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9335  {
9336  ++nextAlloc2ndIndex;
9337  }
9338 
9339  // Found non-null allocation.
9340  if(nextAlloc2ndIndex < suballoc2ndCount)
9341  {
9342  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9343 
9344  // 1. Process free space before this allocation.
9345  if(lastOffset < suballoc.offset)
9346  {
9347  // There is free space from lastOffset to suballoc.offset.
9348  ++unusedRangeCount;
9349  }
9350 
9351  // 2. Process this allocation.
9352  // There is allocation with suballoc.offset, suballoc.size.
9353  ++alloc2ndCount;
9354  usedBytes += suballoc.size;
9355 
9356  // 3. Prepare for next iteration.
9357  lastOffset = suballoc.offset + suballoc.size;
9358  ++nextAlloc2ndIndex;
9359  }
9360  // We are at the end.
9361  else
9362  {
9363  if(lastOffset < freeSpace2ndTo1stEnd)
9364  {
9365  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9366  ++unusedRangeCount;
9367  }
9368 
9369  // End of loop.
9370  lastOffset = freeSpace2ndTo1stEnd;
9371  }
9372  }
9373  }
9374 
9375  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
9376  size_t alloc1stCount = 0;
9377  const VkDeviceSize freeSpace1stTo2ndEnd =
9378  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
9379  while(lastOffset < freeSpace1stTo2ndEnd)
9380  {
9381  // Find next non-null allocation or move nextAlloc1stIndex to the end.
9382  while(nextAlloc1stIndex < suballoc1stCount &&
9383  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9384  {
9385  ++nextAlloc1stIndex;
9386  }
9387 
9388  // Found non-null allocation.
9389  if(nextAlloc1stIndex < suballoc1stCount)
9390  {
9391  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9392 
9393  // 1. Process free space before this allocation.
9394  if(lastOffset < suballoc.offset)
9395  {
9396  // There is free space from lastOffset to suballoc.offset.
9397  ++unusedRangeCount;
9398  }
9399 
9400  // 2. Process this allocation.
9401  // There is allocation with suballoc.offset, suballoc.size.
9402  ++alloc1stCount;
9403  usedBytes += suballoc.size;
9404 
9405  // 3. Prepare for next iteration.
9406  lastOffset = suballoc.offset + suballoc.size;
9407  ++nextAlloc1stIndex;
9408  }
9409  // We are at the end.
9410  else
9411  {
9412  if(lastOffset < freeSpace1stTo2ndEnd)
9413  {
9414  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9415  ++unusedRangeCount;
9416  }
9417 
9418  // End of loop.
9419  lastOffset = freeSpace1stTo2ndEnd;
9420  }
9421  }
9422 
9423  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9424  {
9425  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9426  while(lastOffset < size)
9427  {
9428  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9429  while(nextAlloc2ndIndex != SIZE_MAX &&
9430  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9431  {
9432  --nextAlloc2ndIndex;
9433  }
9434 
9435  // Found non-null allocation.
9436  if(nextAlloc2ndIndex != SIZE_MAX)
9437  {
9438  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9439 
9440  // 1. Process free space before this allocation.
9441  if(lastOffset < suballoc.offset)
9442  {
9443  // There is free space from lastOffset to suballoc.offset.
9444  ++unusedRangeCount;
9445  }
9446 
9447  // 2. Process this allocation.
9448  // There is allocation with suballoc.offset, suballoc.size.
9449  ++alloc2ndCount;
9450  usedBytes += suballoc.size;
9451 
9452  // 3. Prepare for next iteration.
9453  lastOffset = suballoc.offset + suballoc.size;
9454  --nextAlloc2ndIndex;
9455  }
9456  // We are at the end.
9457  else
9458  {
9459  if(lastOffset < size)
9460  {
9461  // There is free space from lastOffset to size.
9462  ++unusedRangeCount;
9463  }
9464 
9465  // End of loop.
9466  lastOffset = size;
9467  }
9468  }
9469  }
9470 
9471  const VkDeviceSize unusedBytes = size - usedBytes;
9472  PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);
9473 
9474  // SECOND PASS
9475  lastOffset = 0;
9476 
9477  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9478  {
9479  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9480  size_t nextAlloc2ndIndex = 0;
9481  while(lastOffset < freeSpace2ndTo1stEnd)
9482  {
9483  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9484  while(nextAlloc2ndIndex < suballoc2ndCount &&
9485  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9486  {
9487  ++nextAlloc2ndIndex;
9488  }
9489 
9490  // Found non-null allocation.
9491  if(nextAlloc2ndIndex < suballoc2ndCount)
9492  {
9493  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9494 
9495  // 1. Process free space before this allocation.
9496  if(lastOffset < suballoc.offset)
9497  {
9498  // There is free space from lastOffset to suballoc.offset.
9499  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9500  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9501  }
9502 
9503  // 2. Process this allocation.
9504  // There is allocation with suballoc.offset, suballoc.size.
9505  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
9506 
9507  // 3. Prepare for next iteration.
9508  lastOffset = suballoc.offset + suballoc.size;
9509  ++nextAlloc2ndIndex;
9510  }
9511  // We are at the end.
9512  else
9513  {
9514  if(lastOffset < freeSpace2ndTo1stEnd)
9515  {
9516  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9517  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
9518  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9519  }
9520 
9521  // End of loop.
9522  lastOffset = freeSpace2ndTo1stEnd;
9523  }
9524  }
9525  }
9526 
9527  nextAlloc1stIndex = m_1stNullItemsBeginCount;
9528  while(lastOffset < freeSpace1stTo2ndEnd)
9529  {
9530  // Find next non-null allocation or move nextAlloc1stIndex to the end.
9531  while(nextAlloc1stIndex < suballoc1stCount &&
9532  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9533  {
9534  ++nextAlloc1stIndex;
9535  }
9536 
9537  // Found non-null allocation.
9538  if(nextAlloc1stIndex < suballoc1stCount)
9539  {
9540  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9541 
9542  // 1. Process free space before this allocation.
9543  if(lastOffset < suballoc.offset)
9544  {
9545  // There is free space from lastOffset to suballoc.offset.
9546  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9547  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9548  }
9549 
9550  // 2. Process this allocation.
9551  // There is allocation with suballoc.offset, suballoc.size.
9552  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
9553 
9554  // 3. Prepare for next iteration.
9555  lastOffset = suballoc.offset + suballoc.size;
9556  ++nextAlloc1stIndex;
9557  }
9558  // We are at the end.
9559  else
9560  {
9561  if(lastOffset < freeSpace1stTo2ndEnd)
9562  {
9563  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9564  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
9565  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9566  }
9567 
9568  // End of loop.
9569  lastOffset = freeSpace1stTo2ndEnd;
9570  }
9571  }
9572 
9573  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9574  {
9575  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9576  while(lastOffset < size)
9577  {
9578  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9579  while(nextAlloc2ndIndex != SIZE_MAX &&
9580  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9581  {
9582  --nextAlloc2ndIndex;
9583  }
9584 
9585  // Found non-null allocation.
9586  if(nextAlloc2ndIndex != SIZE_MAX)
9587  {
9588  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9589 
9590  // 1. Process free space before this allocation.
9591  if(lastOffset < suballoc.offset)
9592  {
9593  // There is free space from lastOffset to suballoc.offset.
9594  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9595  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9596  }
9597 
9598  // 2. Process this allocation.
9599  // There is allocation with suballoc.offset, suballoc.size.
9600  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
9601 
9602  // 3. Prepare for next iteration.
9603  lastOffset = suballoc.offset + suballoc.size;
9604  --nextAlloc2ndIndex;
9605  }
9606  // We are at the end.
9607  else
9608  {
9609  if(lastOffset < size)
9610  {
9611  // There is free space from lastOffset to size.
9612  const VkDeviceSize unusedRangeSize = size - lastOffset;
9613  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9614  }
9615 
9616  // End of loop.
9617  lastOffset = size;
9618  }
9619  }
9620  }
9621 
9622  PrintDetailedMap_End(json);
9623 }
9624 #endif // #if VMA_STATS_STRING_ENABLED
9625 
9626 bool VmaBlockMetadata_Linear::CreateAllocationRequest(
9627  uint32_t currentFrameIndex,
9628  uint32_t frameInUseCount,
9629  VkDeviceSize bufferImageGranularity,
9630  VkDeviceSize allocSize,
9631  VkDeviceSize allocAlignment,
9632  bool upperAddress,
9633  VmaSuballocationType allocType,
9634  bool canMakeOtherLost,
9635  uint32_t strategy,
9636  VmaAllocationRequest* pAllocationRequest)
9637 {
9638  VMA_ASSERT(allocSize > 0);
9639  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
9640  VMA_ASSERT(pAllocationRequest != VMA_NULL);
9641  VMA_HEAVY_ASSERT(Validate());
9642 
9643  const VkDeviceSize size = GetSize();
9644  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9645  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9646 
9647  if(upperAddress)
9648  {
9649  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9650  {
9651  VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
9652  return false;
9653  }
9654 
9655  // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
9656  if(allocSize > size)
9657  {
9658  return false;
9659  }
9660  VkDeviceSize resultBaseOffset = size - allocSize;
9661  if(!suballocations2nd.empty())
9662  {
9663  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
9664  resultBaseOffset = lastSuballoc.offset - allocSize;
9665  if(allocSize > lastSuballoc.offset)
9666  {
9667  return false;
9668  }
9669  }
9670 
9671  // Start from offset equal to end of free space.
9672  VkDeviceSize resultOffset = resultBaseOffset;
9673 
9674  // Apply VMA_DEBUG_MARGIN at the end.
9675  if(VMA_DEBUG_MARGIN > 0)
9676  {
9677  if(resultOffset < VMA_DEBUG_MARGIN)
9678  {
9679  return false;
9680  }
9681  resultOffset -= VMA_DEBUG_MARGIN;
9682  }
9683 
9684  // Apply alignment.
9685  resultOffset = VmaAlignDown(resultOffset, allocAlignment);
9686 
9687  // Check next suballocations from 2nd for BufferImageGranularity conflicts.
9688  // Make bigger alignment if necessary.
9689  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
9690  {
9691  bool bufferImageGranularityConflict = false;
9692  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
9693  {
9694  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
9695  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
9696  {
9697  if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
9698  {
9699  bufferImageGranularityConflict = true;
9700  break;
9701  }
9702  }
9703  else
9704  // Already on previous page.
9705  break;
9706  }
9707  if(bufferImageGranularityConflict)
9708  {
9709  resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
9710  }
9711  }
9712 
9713  // There is enough free space.
9714  const VkDeviceSize endOf1st = !suballocations1st.empty() ?
9715  suballocations1st.back().offset + suballocations1st.back().size :
9716  0;
9717  if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
9718  {
9719  // Check previous suballocations for BufferImageGranularity conflicts.
9720  // If conflict exists, allocation cannot be made here.
9721  if(bufferImageGranularity > 1)
9722  {
9723  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
9724  {
9725  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
9726  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
9727  {
9728  if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
9729  {
9730  return false;
9731  }
9732  }
9733  else
9734  {
9735  // Already on next page.
9736  break;
9737  }
9738  }
9739  }
9740 
9741  // All tests passed: Success.
9742  pAllocationRequest->offset = resultOffset;
9743  pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
9744  pAllocationRequest->sumItemSize = 0;
9745  // pAllocationRequest->item unused.
9746  pAllocationRequest->itemsToMakeLostCount = 0;
9747  return true;
9748  }
9749  }
9750  else // !upperAddress
9751  {
9752  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9753  {
9754  // Try to allocate at the end of 1st vector.
9755 
9756  VkDeviceSize resultBaseOffset = 0;
9757  if(!suballocations1st.empty())
9758  {
9759  const VmaSuballocation& lastSuballoc = suballocations1st.back();
9760  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
9761  }
9762 
9763  // Start from offset equal to beginning of free space.
9764  VkDeviceSize resultOffset = resultBaseOffset;
9765 
9766  // Apply VMA_DEBUG_MARGIN at the beginning.
9767  if(VMA_DEBUG_MARGIN > 0)
9768  {
9769  resultOffset += VMA_DEBUG_MARGIN;
9770  }
9771 
9772  // Apply alignment.
9773  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
9774 
9775  // Check previous suballocations for BufferImageGranularity conflicts.
9776  // Make bigger alignment if necessary.
9777  if(bufferImageGranularity > 1 && !suballocations1st.empty())
9778  {
9779  bool bufferImageGranularityConflict = false;
9780  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
9781  {
9782  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
9783  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
9784  {
9785  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
9786  {
9787  bufferImageGranularityConflict = true;
9788  break;
9789  }
9790  }
9791  else
9792  // Already on previous page.
9793  break;
9794  }
9795  if(bufferImageGranularityConflict)
9796  {
9797  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
9798  }
9799  }
9800 
9801  const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
9802  suballocations2nd.back().offset : size;
9803 
9804  // There is enough free space at the end after alignment.
9805  if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
9806  {
9807  // Check next suballocations for BufferImageGranularity conflicts.
9808  // If conflict exists, allocation cannot be made here.
9809  if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9810  {
9811  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
9812  {
9813  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
9814  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
9815  {
9816  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
9817  {
9818  return false;
9819  }
9820  }
9821  else
9822  {
9823  // Already on previous page.
9824  break;
9825  }
9826  }
9827  }
9828 
9829  // All tests passed: Success.
9830  pAllocationRequest->offset = resultOffset;
9831  pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
9832  pAllocationRequest->sumItemSize = 0;
9833  // pAllocationRequest->item unused.
9834  pAllocationRequest->itemsToMakeLostCount = 0;
9835  return true;
9836  }
9837  }
9838 
9839  // Wrap-around to end of 2nd vector. Try to allocate there, watching for the
9840  // beginning of 1st vector as the end of free space.
9841  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9842  {
9843  VMA_ASSERT(!suballocations1st.empty());
9844 
9845  VkDeviceSize resultBaseOffset = 0;
9846  if(!suballocations2nd.empty())
9847  {
9848  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
9849  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
9850  }
9851 
9852  // Start from offset equal to beginning of free space.
9853  VkDeviceSize resultOffset = resultBaseOffset;
9854 
9855  // Apply VMA_DEBUG_MARGIN at the beginning.
9856  if(VMA_DEBUG_MARGIN > 0)
9857  {
9858  resultOffset += VMA_DEBUG_MARGIN;
9859  }
9860 
9861  // Apply alignment.
9862  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
9863 
9864  // Check previous suballocations for BufferImageGranularity conflicts.
9865  // Make bigger alignment if necessary.
9866  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
9867  {
9868  bool bufferImageGranularityConflict = false;
9869  for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
9870  {
9871  const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
9872  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
9873  {
9874  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
9875  {
9876  bufferImageGranularityConflict = true;
9877  break;
9878  }
9879  }
9880  else
9881  // Already on previous page.
9882  break;
9883  }
9884  if(bufferImageGranularityConflict)
9885  {
9886  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
9887  }
9888  }
9889 
9890  pAllocationRequest->itemsToMakeLostCount = 0;
9891  pAllocationRequest->sumItemSize = 0;
9892  size_t index1st = m_1stNullItemsBeginCount;
9893 
9894  if(canMakeOtherLost)
9895  {
9896  while(index1st < suballocations1st.size() &&
9897  resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
9898  {
9899  // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
9900  const VmaSuballocation& suballoc = suballocations1st[index1st];
9901  if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
9902  {
9903  // No problem.
9904  }
9905  else
9906  {
9907  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
9908  if(suballoc.hAllocation->CanBecomeLost() &&
9909  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
9910  {
9911  ++pAllocationRequest->itemsToMakeLostCount;
9912  pAllocationRequest->sumItemSize += suballoc.size;
9913  }
9914  else
9915  {
9916  return false;
9917  }
9918  }
9919  ++index1st;
9920  }
9921 
9922  // Check next suballocations for BufferImageGranularity conflicts.
9923  // If conflict exists, we must mark more allocations lost or fail.
9924  if(bufferImageGranularity > 1)
9925  {
9926  while(index1st < suballocations1st.size())
9927  {
9928  const VmaSuballocation& suballoc = suballocations1st[index1st];
9929  if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
9930  {
9931  if(suballoc.hAllocation != VK_NULL_HANDLE)
9932  {
9933  // Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
9934  if(suballoc.hAllocation->CanBecomeLost() &&
9935  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
9936  {
9937  ++pAllocationRequest->itemsToMakeLostCount;
9938  pAllocationRequest->sumItemSize += suballoc.size;
9939  }
9940  else
9941  {
9942  return false;
9943  }
9944  }
9945  }
9946  else
9947  {
9948  // Already on next page.
9949  break;
9950  }
9951  ++index1st;
9952  }
9953  }
9954  }
9955 
9956  // There is enough free space at the end after alignment.
9957  if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN < size) ||
9958  (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
9959  {
9960  // Check next suballocations for BufferImageGranularity conflicts.
9961  // If conflict exists, allocation cannot be made here.
9962  if(bufferImageGranularity > 1)
9963  {
9964  for(size_t nextSuballocIndex = index1st;
9965  nextSuballocIndex < suballocations1st.size();
9966  nextSuballocIndex++)
9967  {
9968  const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
9969  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
9970  {
9971  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
9972  {
9973  return false;
9974  }
9975  }
9976  else
9977  {
9978  // Already on next page.
9979  break;
9980  }
9981  }
9982  }
9983 
9984  // All tests passed: Success.
9985  pAllocationRequest->offset = resultOffset;
9986  pAllocationRequest->sumFreeSize =
9987  (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
9988  - resultBaseOffset
9989  - pAllocationRequest->sumItemSize;
9990  // pAllocationRequest->item unused.
9991  return true;
9992  }
9993  }
9994  }
9995 
9996  return false;
9997 }
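/*
Worked example of the offset computation in CreateAllocationRequest above
(a sketch; the numbers are illustrative, not from the library):

    resultBaseOffset = 100, VMA_DEBUG_MARGIN = 4,
    allocAlignment = 16, bufferImageGranularity = 64.

    resultOffset = 100 + 4;                 // 104: debug margin applied.
    resultOffset = VmaAlignUp(104, 16);     // 112: alignment applied.
    // If a suballocation in 2nd vector shares the same 64-byte page and has a
    // conflicting type, the alignment is raised to the granularity:
    resultOffset = VmaAlignUp(112, 64);     // 128.
*/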
9998 
9999 bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
10000  uint32_t currentFrameIndex,
10001  uint32_t frameInUseCount,
10002  VmaAllocationRequest* pAllocationRequest)
10003 {
10004  if(pAllocationRequest->itemsToMakeLostCount == 0)
10005  {
10006  return true;
10007  }
10008 
10009  VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);
10010 
10011  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10012  size_t index1st = m_1stNullItemsBeginCount;
10013  size_t madeLostCount = 0;
10014  while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
10015  {
10016  VMA_ASSERT(index1st < suballocations1st.size());
10017  VmaSuballocation& suballoc = suballocations1st[index1st];
10018  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
10019  {
10020  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
10021  VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
10022  if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
10023  {
10024  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10025  suballoc.hAllocation = VK_NULL_HANDLE;
10026  m_SumFreeSize += suballoc.size;
10027  ++m_1stNullItemsMiddleCount;
10028  ++madeLostCount;
10029  }
10030  else
10031  {
10032  return false;
10033  }
10034  }
10035  ++index1st;
10036  }
10037 
10038  CleanupAfterFree();
10039  //VMA_HEAVY_ASSERT(Validate()); // Already called by CleanupAfterFree().
10040 
10041  return true;
10042 }
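/*
Usage sketch for the lost-allocations mechanism that MakeRequestedAllocationsLost
serves (uses the public API declared earlier in this header; error handling omitted):

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT;
    // ... allocate with vmaCreateBuffer / vmaAllocateMemory ...

    // Every frame, before using the resource:
    vmaSetCurrentFrameIndex(allocator, frameIndex);
    if(!vmaTouchAllocation(allocator, alloc))
    {
        // The allocation was made lost to satisfy another request - recreate it.
    }
*/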
10043 
10044 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
10045 {
10046  uint32_t lostAllocationCount = 0;
10047 
10048  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10049  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
10050  {
10051  VmaSuballocation& suballoc = suballocations1st[i];
10052  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
10053  suballoc.hAllocation->CanBecomeLost() &&
10054  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
10055  {
10056  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10057  suballoc.hAllocation = VK_NULL_HANDLE;
10058  ++m_1stNullItemsMiddleCount;
10059  m_SumFreeSize += suballoc.size;
10060  ++lostAllocationCount;
10061  }
10062  }
10063 
10064  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10065  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
10066  {
10067  VmaSuballocation& suballoc = suballocations2nd[i];
10068  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
10069  suballoc.hAllocation->CanBecomeLost() &&
10070  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
10071  {
10072  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10073  suballoc.hAllocation = VK_NULL_HANDLE;
10074  ++m_2ndNullItemsCount;
10075  ++lostAllocationCount;
10076  }
10077  }
10078 
10079  if(lostAllocationCount)
10080  {
10081  CleanupAfterFree();
10082  }
10083 
10084  return lostAllocationCount;
10085 }
10086 
10087 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
10088 {
10089  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10090  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
10091  {
10092  const VmaSuballocation& suballoc = suballocations1st[i];
10093  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
10094  {
10095  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
10096  {
10097  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
10098  return VK_ERROR_VALIDATION_FAILED_EXT;
10099  }
10100  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
10101  {
10102  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
10103  return VK_ERROR_VALIDATION_FAILED_EXT;
10104  }
10105  }
10106  }
10107 
10108  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10109  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
10110  {
10111  const VmaSuballocation& suballoc = suballocations2nd[i];
10112  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
10113  {
10114  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
10115  {
10116  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
10117  return VK_ERROR_VALIDATION_FAILED_EXT;
10118  }
10119  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
10120  {
10121  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
10122  return VK_ERROR_VALIDATION_FAILED_EXT;
10123  }
10124  }
10125  }
10126 
10127  return VK_SUCCESS;
10128 }
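/*
Configuration sketch for the corruption checks above. Both macros can be
overridden before including this header:

    #define VMA_DEBUG_MARGIN 16
    #define VMA_DEBUG_DETECT_CORRUPTION 1
    #include "vk_mem_alloc.h"

With these set, a magic value is written into the margin around each allocation,
and vmaCheckCorruption() / vmaCheckPoolCorruption() return
VK_ERROR_VALIDATION_FAILED_EXT if any of them has been overwritten.
*/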
10129 
10130 void VmaBlockMetadata_Linear::Alloc(
10131  const VmaAllocationRequest& request,
10132  VmaSuballocationType type,
10133  VkDeviceSize allocSize,
10134  bool upperAddress,
10135  VmaAllocation hAllocation)
10136 {
10137  const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };
10138 
10139  if(upperAddress)
10140  {
10141  VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
10142  "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
10143  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10144  suballocations2nd.push_back(newSuballoc);
10145  m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
10146  }
10147  else
10148  {
10149  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10150 
10151  // First allocation.
10152  if(suballocations1st.empty())
10153  {
10154  suballocations1st.push_back(newSuballoc);
10155  }
10156  else
10157  {
10158  // New allocation at the end of 1st vector.
10159  if(request.offset >= suballocations1st.back().offset + suballocations1st.back().size)
10160  {
10161  // Check if it fits before the end of the block.
10162  VMA_ASSERT(request.offset + allocSize <= GetSize());
10163  suballocations1st.push_back(newSuballoc);
10164  }
10165  // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
10166  else if(request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset)
10167  {
10168  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10169 
10170  switch(m_2ndVectorMode)
10171  {
10172  case SECOND_VECTOR_EMPTY:
10173  // First allocation from second part ring buffer.
10174  VMA_ASSERT(suballocations2nd.empty());
10175  m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
10176  break;
10177  case SECOND_VECTOR_RING_BUFFER:
10178  // 2-part ring buffer is already started.
10179  VMA_ASSERT(!suballocations2nd.empty());
10180  break;
10181  case SECOND_VECTOR_DOUBLE_STACK:
10182  VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
10183  break;
10184  default:
10185  VMA_ASSERT(0);
10186  }
10187 
10188  suballocations2nd.push_back(newSuballoc);
10189  }
10190  else
10191  {
10192  VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
10193  }
10194  }
10195  }
10196 
10197  m_SumFreeSize -= newSuballoc.size;
10198 }
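// Placement summary for Alloc above: upperAddress pushes onto 2nd vector used as
// a double stack; otherwise the request either appends at the end of 1st vector
// (plain linear growth) or, when it fits before the first live item of 1st
// vector, starts or extends the 2-part ring buffer stored in 2nd vector.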
10199 
10200 void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
10201 {
10202  FreeAtOffset(allocation->GetOffset());
10203 }
10204 
10205 void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
10206 {
10207  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10208  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10209 
10210  if(!suballocations1st.empty())
10211  {
10212  // If it is the first live allocation in 1st vector, mark it as free at the beginning.
10213  VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
10214  if(firstSuballoc.offset == offset)
10215  {
10216  firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10217  firstSuballoc.hAllocation = VK_NULL_HANDLE;
10218  m_SumFreeSize += firstSuballoc.size;
10219  ++m_1stNullItemsBeginCount;
10220  CleanupAfterFree();
10221  return;
10222  }
10223  }
10224 
10225  // Last allocation in 2-part ring buffer or top of upper stack (same logic).
10226  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
10227  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
10228  {
10229  VmaSuballocation& lastSuballoc = suballocations2nd.back();
10230  if(lastSuballoc.offset == offset)
10231  {
10232  m_SumFreeSize += lastSuballoc.size;
10233  suballocations2nd.pop_back();
10234  CleanupAfterFree();
10235  return;
10236  }
10237  }
10238  // Last allocation in 1st vector.
10239  else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
10240  {
10241  VmaSuballocation& lastSuballoc = suballocations1st.back();
10242  if(lastSuballoc.offset == offset)
10243  {
10244  m_SumFreeSize += lastSuballoc.size;
10245  suballocations1st.pop_back();
10246  CleanupAfterFree();
10247  return;
10248  }
10249  }
10250 
10251  // Item from the middle of 1st vector.
10252  {
10253  VmaSuballocation refSuballoc;
10254  refSuballoc.offset = offset;
10255  // The rest of the members intentionally stay uninitialized for better performance.
10256  SuballocationVectorType::iterator it = VmaVectorFindSorted<VmaSuballocationOffsetLess>(
10257  suballocations1st.begin() + m_1stNullItemsBeginCount,
10258  suballocations1st.end(),
10259  refSuballoc);
10260  if(it != suballocations1st.end())
10261  {
10262  it->type = VMA_SUBALLOCATION_TYPE_FREE;
10263  it->hAllocation = VK_NULL_HANDLE;
10264  ++m_1stNullItemsMiddleCount;
10265  m_SumFreeSize += it->size;
10266  CleanupAfterFree();
10267  return;
10268  }
10269  }
10270 
10271  if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
10272  {
10273  // Item from the middle of 2nd vector.
10274  VmaSuballocation refSuballoc;
10275  refSuballoc.offset = offset;
10276  // The rest of the members intentionally stay uninitialized for better performance.
10277  SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
10278  VmaVectorFindSorted<VmaSuballocationOffsetLess>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc) :
10279  VmaVectorFindSorted<VmaSuballocationOffsetGreater>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc);
10280  if(it != suballocations2nd.end())
10281  {
10282  it->type = VMA_SUBALLOCATION_TYPE_FREE;
10283  it->hAllocation = VK_NULL_HANDLE;
10284  ++m_2ndNullItemsCount;
10285  m_SumFreeSize += it->size;
10286  CleanupAfterFree();
10287  return;
10288  }
10289  }
10290 
10291  VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
10292 }
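// Lookup order in FreeAtOffset above: O(1) fast paths first (first live item of
// 1st vector, then the back of 2nd or 1st vector), then binary search with
// VmaVectorFindSorted in the middle. 2nd vector is sorted by ascending offset in
// ring-buffer mode but by descending offset in double-stack mode, hence the two
// different comparators.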
10293 
10294 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
10295 {
10296  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
10297  const size_t suballocCount = AccessSuballocations1st().size();
10298  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
10299 }
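// The heuristic above triggers compaction only when 1st vector holds more than
// 32 items and null (freed) items are at least 1.5x as numerous as live ones.
// Example: 100 items, 60 of them null: 60*2 = 120 >= (100-60)*3 = 120, so it compacts.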
10300 
10301 void VmaBlockMetadata_Linear::CleanupAfterFree()
10302 {
10303  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10304  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10305 
10306  if(IsEmpty())
10307  {
10308  suballocations1st.clear();
10309  suballocations2nd.clear();
10310  m_1stNullItemsBeginCount = 0;
10311  m_1stNullItemsMiddleCount = 0;
10312  m_2ndNullItemsCount = 0;
10313  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
10314  }
10315  else
10316  {
10317  const size_t suballoc1stCount = suballocations1st.size();
10318  const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
10319  VMA_ASSERT(nullItem1stCount <= suballoc1stCount);
10320 
10321  // Find more null items at the beginning of 1st vector.
10322  while(m_1stNullItemsBeginCount < suballoc1stCount &&
10323  suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
10324  {
10325  ++m_1stNullItemsBeginCount;
10326  --m_1stNullItemsMiddleCount;
10327  }
10328 
10329  // Find more null items at the end of 1st vector.
10330  while(m_1stNullItemsMiddleCount > 0 &&
10331  suballocations1st.back().hAllocation == VK_NULL_HANDLE)
10332  {
10333  --m_1stNullItemsMiddleCount;
10334  suballocations1st.pop_back();
10335  }
10336 
10337  // Find more null items at the end of 2nd vector.
10338  while(m_2ndNullItemsCount > 0 &&
10339  suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
10340  {
10341  --m_2ndNullItemsCount;
10342  suballocations2nd.pop_back();
10343  }
10344 
10345  if(ShouldCompact1st())
10346  {
10347  const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
10348  size_t srcIndex = m_1stNullItemsBeginCount;
10349  for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
10350  {
10351  while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
10352  {
10353  ++srcIndex;
10354  }
10355  if(dstIndex != srcIndex)
10356  {
10357  suballocations1st[dstIndex] = suballocations1st[srcIndex];
10358  }
10359  ++srcIndex;
10360  }
10361  suballocations1st.resize(nonNullItemCount);
10362  m_1stNullItemsBeginCount = 0;
10363  m_1stNullItemsMiddleCount = 0;
10364  }
10365 
10366  // 2nd vector became empty.
10367  if(suballocations2nd.empty())
10368  {
10369  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
10370  }
10371 
10372  // 1st vector became empty.
10373  if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
10374  {
10375  suballocations1st.clear();
10376  m_1stNullItemsBeginCount = 0;
10377 
10378  if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10379  {
10380  // Swap 1st with 2nd. Now 2nd is empty.
10381  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
10382  m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
10383  while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
10384  suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
10385  {
10386  ++m_1stNullItemsBeginCount;
10387  --m_1stNullItemsMiddleCount;
10388  }
10389  m_2ndNullItemsCount = 0;
10390  m_1stVectorIndex ^= 1;
10391  }
10392  }
10393  }
10394 
10395  VMA_HEAVY_ASSERT(Validate());
10396 }
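// Note on the swap at the end of CleanupAfterFree: when 1st vector drains while
// 2nd vector (in ring-buffer mode) still holds allocations, the two vectors trade
// roles by flipping m_1stVectorIndex, so the ring buffer keeps wrapping without
// copying any suballocations.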
10397 
10398 
10399 ////////////////////////////////////////////////////////////////////////////////
10400 // class VmaBlockMetadata_Buddy
10401 
10402 VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
10403  VmaBlockMetadata(hAllocator),
10404  m_Root(VMA_NULL),
10405  m_AllocationCount(0),
10406  m_FreeCount(1),
10407  m_SumFreeSize(0)
10408 {
10409  memset(m_FreeList, 0, sizeof(m_FreeList));
10410 }
10411 
10412 VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
10413 {
10414  DeleteNode(m_Root);
10415 }
10416 
10417 void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
10418 {
10419  VmaBlockMetadata::Init(size);
10420 
10421  m_UsableSize = VmaPrevPow2(size);
10422  m_SumFreeSize = m_UsableSize;
10423 
10424  // Calculate m_LevelCount.
10425  m_LevelCount = 1;
10426  while(m_LevelCount < MAX_LEVELS &&
10427  LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
10428  {
10429  ++m_LevelCount;
10430  }
10431 
10432  Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
10433  rootNode->offset = 0;
10434  rootNode->type = Node::TYPE_FREE;
10435  rootNode->parent = VMA_NULL;
10436  rootNode->buddy = VMA_NULL;
10437 
10438  m_Root = rootNode;
10439  AddToFreeListFront(0, rootNode);
10440 }
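/*
Example of the level layout created by Init above (a sketch; MIN_NODE_SIZE shown
as 32 for illustration): for size = 1000, m_UsableSize = VmaPrevPow2(1000) = 512,
and the levels are:

    level 0: the root node of 512 bytes,
    level 1: nodes of 256 bytes,
    level 2: nodes of 128 bytes,
    ... down to the last level whose node size is still >= MIN_NODE_SIZE.

The remaining 1000 - 512 = 488 bytes are the block's unusable space, reported in
statistics but never allocated from.
*/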
10441 
10442 bool VmaBlockMetadata_Buddy::Validate() const
10443 {
10444  // Validate tree.
10445  ValidationContext ctx;
10446  if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
10447  {
10448  VMA_VALIDATE(false && "ValidateNode failed.");
10449  }
10450  VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
10451  VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);
10452 
10453  // Validate free node lists.
10454  for(uint32_t level = 0; level < m_LevelCount; ++level)
10455  {
10456  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
10457  m_FreeList[level].front->free.prev == VMA_NULL);
10458 
10459  for(Node* node = m_FreeList[level].front;
10460  node != VMA_NULL;
10461  node = node->free.next)
10462  {
10463  VMA_VALIDATE(node->type == Node::TYPE_FREE);
10464 
10465  if(node->free.next == VMA_NULL)
10466  {
10467  VMA_VALIDATE(m_FreeList[level].back == node);
10468  }
10469  else
10470  {
10471  VMA_VALIDATE(node->free.next->free.prev == node);
10472  }
10473  }
10474  }
10475 
10476  // Validate that free lists at higher levels are empty.
10477  for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
10478  {
10479  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
10480  }
10481 
10482  return true;
10483 }
10484 
10485 VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
10486 {
10487  for(uint32_t level = 0; level < m_LevelCount; ++level)
10488  {
10489  if(m_FreeList[level].front != VMA_NULL)
10490  {
10491  return LevelToNodeSize(level);
10492  }
10493  }
10494  return 0;
10495 }
10496 
10497 void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
10498 {
10499  const VkDeviceSize unusableSize = GetUnusableSize();
10500 
10501  outInfo.blockCount = 1;
10502 
10503  outInfo.allocationCount = outInfo.unusedRangeCount = 0;
10504  outInfo.usedBytes = outInfo.unusedBytes = 0;
10505 
10506  outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
10507  outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
10508  outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.
10509 
10510  CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));
10511 
10512  if(unusableSize > 0)
10513  {
10514  ++outInfo.unusedRangeCount;
10515  outInfo.unusedBytes += unusableSize;
10516  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
10517  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
10518  }
10519 }
10520 
10521 void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
10522 {
10523  const VkDeviceSize unusableSize = GetUnusableSize();
10524 
10525  inoutStats.size += GetSize();
10526  inoutStats.unusedSize += m_SumFreeSize + unusableSize;
10527  inoutStats.allocationCount += m_AllocationCount;
10528  inoutStats.unusedRangeCount += m_FreeCount;
10529  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
10530 
10531  if(unusableSize > 0)
10532  {
10533  ++inoutStats.unusedRangeCount;
10534  // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations.
10535  }
10536 }
10537 
10538 #if VMA_STATS_STRING_ENABLED
10539 
10540 void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
10541 {
10542  // TODO optimize
10543  VmaStatInfo stat;
10544  CalcAllocationStatInfo(stat);
10545 
10546  PrintDetailedMap_Begin(
10547  json,
10548  stat.unusedBytes,
10549  stat.allocationCount,
10550  stat.unusedRangeCount);
10551 
10552  PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));
10553 
10554  const VkDeviceSize unusableSize = GetUnusableSize();
10555  if(unusableSize > 0)
10556  {
10557  PrintDetailedMap_UnusedRange(json,
10558  m_UsableSize, // offset
10559  unusableSize); // size
10560  }
10561 
10562  PrintDetailedMap_End(json);
10563 }
10564 
10565 #endif // #if VMA_STATS_STRING_ENABLED
10566 
10567 bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
10568  uint32_t currentFrameIndex,
10569  uint32_t frameInUseCount,
10570  VkDeviceSize bufferImageGranularity,
10571  VkDeviceSize allocSize,
10572  VkDeviceSize allocAlignment,
10573  bool upperAddress,
10574  VmaSuballocationType allocType,
10575  bool canMakeOtherLost,
10576  uint32_t strategy,
10577  VmaAllocationRequest* pAllocationRequest)
10578 {
10579  VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
10580 
10581  // Simple way to respect bufferImageGranularity. May be optimized some day.
10582  // Whenever the allocation might be an OPTIMAL image, inflate its alignment and size to the granularity:
10583  if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
10584  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
10585  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
10586  {
10587  allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
10588  allocSize = VMA_MAX(allocSize, bufferImageGranularity);
10589  }
10590 
10591  if(allocSize > m_UsableSize)
10592  {
10593  return false;
10594  }
10595 
10596  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
10597  for(uint32_t level = targetLevel + 1; level--; )
10598  {
10599  for(Node* freeNode = m_FreeList[level].front;
10600  freeNode != VMA_NULL;
10601  freeNode = freeNode->free.next)
10602  {
10603  if(freeNode->offset % allocAlignment == 0)
10604  {
10605  pAllocationRequest->offset = freeNode->offset;
10606  pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
10607  pAllocationRequest->sumItemSize = 0;
10608  pAllocationRequest->itemsToMakeLostCount = 0;
10609  pAllocationRequest->customData = (void*)(uintptr_t)level;
10610  return true;
10611  }
10612  }
10613  }
10614 
10615  return false;
10616 }
10617 
10618 bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
10619  uint32_t currentFrameIndex,
10620  uint32_t frameInUseCount,
10621  VmaAllocationRequest* pAllocationRequest)
10622 {
10623  /*
10624  Lost allocations are not supported in buddy allocator at the moment.
10625  Support might be added in the future.
10626  */
10627  return pAllocationRequest->itemsToMakeLostCount == 0;
10628 }
10629 
10630 uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
10631 {
10632  /*
10633  Lost allocations are not supported in buddy allocator at the moment.
10634  Support might be added in the future.
10635  */
10636  return 0;
10637 }
10638 
10639 void VmaBlockMetadata_Buddy::Alloc(
10640  const VmaAllocationRequest& request,
10641  VmaSuballocationType type,
10642  VkDeviceSize allocSize,
10643  bool upperAddress,
10644  VmaAllocation hAllocation)
10645 {
10646  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
10647  uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;
10648 
10649  Node* currNode = m_FreeList[currLevel].front;
10650  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
10651  while(currNode->offset != request.offset)
10652  {
10653  currNode = currNode->free.next;
10654  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
10655  }
10656 
10657  // Go down, splitting free nodes.
10658  while(currLevel < targetLevel)
10659  {
10660  // currNode is already first free node at currLevel.
10661  // Remove it from list of free nodes at this currLevel.
10662  RemoveFromFreeList(currLevel, currNode);
10663 
10664  const uint32_t childrenLevel = currLevel + 1;
10665 
10666  // Create two free sub-nodes.
10667  Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
10668  Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();
10669 
10670  leftChild->offset = currNode->offset;
10671  leftChild->type = Node::TYPE_FREE;
10672  leftChild->parent = currNode;
10673  leftChild->buddy = rightChild;
10674 
10675  rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
10676  rightChild->type = Node::TYPE_FREE;
10677  rightChild->parent = currNode;
10678  rightChild->buddy = leftChild;
10679 
10680  // Convert current currNode to split type.
10681  currNode->type = Node::TYPE_SPLIT;
10682  currNode->split.leftChild = leftChild;
10683 
10684  // Add child nodes to free list. Order is important!
10685  AddToFreeListFront(childrenLevel, rightChild);
10686  AddToFreeListFront(childrenLevel, leftChild);
10687 
10688  ++m_FreeCount;
10689  //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2.
10690  ++currLevel;
10691  currNode = m_FreeList[currLevel].front;
10692 
10693  /*
10694  We can be sure that currNode, as the left child of the node previously split,
10695  also fulfills the alignment requirement.
10696  */
10697  }
10698 
10699  // Remove from free list.
10700  VMA_ASSERT(currLevel == targetLevel &&
10701  currNode != VMA_NULL &&
10702  currNode->type == Node::TYPE_FREE);
10703  RemoveFromFreeList(currLevel, currNode);
10704 
10705  // Convert to allocation node.
10706  currNode->type = Node::TYPE_ALLOCATION;
10707  currNode->allocation.alloc = hAllocation;
10708 
10709  ++m_AllocationCount;
10710  --m_FreeCount;
10711  m_SumFreeSize -= allocSize;
10712 }
10713 
10714 void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
10715 {
10716  if(node->type == Node::TYPE_SPLIT)
10717  {
10718  DeleteNode(node->split.leftChild->buddy);
10719  DeleteNode(node->split.leftChild);
10720  }
10721 
10722  vma_delete(GetAllocationCallbacks(), node);
10723 }
10724 
10725 bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
10726 {
10727  VMA_VALIDATE(level < m_LevelCount);
10728  VMA_VALIDATE(curr->parent == parent);
10729  VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
10730  VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
10731  switch(curr->type)
10732  {
10733  case Node::TYPE_FREE:
10734  // curr->free.prev, next are validated separately.
10735  ctx.calculatedSumFreeSize += levelNodeSize;
10736  ++ctx.calculatedFreeCount;
10737  break;
10738  case Node::TYPE_ALLOCATION:
10739  VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE); // Validate the handle before dereferencing it below.
10740  ++ctx.calculatedAllocationCount;
10741  ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
10742  break;
10743  case Node::TYPE_SPLIT:
10744  {
10745  const uint32_t childrenLevel = level + 1;
10746  const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
10747  const Node* const leftChild = curr->split.leftChild;
10748  VMA_VALIDATE(leftChild != VMA_NULL);
10749  VMA_VALIDATE(leftChild->offset == curr->offset);
10750  if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
10751  {
10752  VMA_VALIDATE(false && "ValidateNode for left child failed.");
10753  }
10754  const Node* const rightChild = leftChild->buddy;
10755  VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
10756  if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
10757  {
10758  VMA_VALIDATE(false && "ValidateNode for right child failed.");
10759  }
10760  }
10761  break;
10762  default:
10763  return false;
10764  }
10765 
10766  return true;
10767 }
10768 
10769 uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
10770 {
10771  // I know this could be optimized somehow e.g. by using std::log2p1 from C++20.
10772  uint32_t level = 0;
10773  VkDeviceSize currLevelNodeSize = m_UsableSize;
10774  VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
10775  while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
10776  {
10777  ++level;
10778  currLevelNodeSize = nextLevelNodeSize;
10779  nextLevelNodeSize = currLevelNodeSize >> 1;
10780  }
10781  return level;
10782 }
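// Worked example for AllocSizeToLevel: with m_UsableSize = 512, allocSize = 100
// maps to level 2 (node sizes 512 -> 256 -> 128; 100 fits in 128 but not in 64),
// i.e. the deepest level whose node is still large enough.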
10783 
10784 void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
10785 {
10786  // Find node and level.
10787  Node* node = m_Root;
10788  VkDeviceSize nodeOffset = 0;
10789  uint32_t level = 0;
10790  VkDeviceSize levelNodeSize = LevelToNodeSize(0);
10791  while(node->type == Node::TYPE_SPLIT)
10792  {
10793  const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
10794  if(offset < nodeOffset + nextLevelSize)
10795  {
10796  node = node->split.leftChild;
10797  }
10798  else
10799  {
10800  node = node->split.leftChild->buddy;
10801  nodeOffset += nextLevelSize;
10802  }
10803  ++level;
10804  levelNodeSize = nextLevelSize;
10805  }
10806 
10807  VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
10808  VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);
10809 
10810  ++m_FreeCount;
10811  --m_AllocationCount;
10812  m_SumFreeSize += alloc->GetSize();
10813 
10814  node->type = Node::TYPE_FREE;
10815 
10816  // Join free nodes if possible.
10817  while(level > 0 && node->buddy->type == Node::TYPE_FREE)
10818  {
10819  RemoveFromFreeList(level, node->buddy);
10820  Node* const parent = node->parent;
10821 
10822  vma_delete(GetAllocationCallbacks(), node->buddy);
10823  vma_delete(GetAllocationCallbacks(), node);
10824  parent->type = Node::TYPE_FREE;
10825 
10826  node = parent;
10827  --level;
10828  //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
10829  --m_FreeCount;
10830  }
10831 
10832  AddToFreeListFront(level, node);
10833 }
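// The loop above is the classic buddy merge: while the freed node's buddy is also
// free, both children are deleted and their parent becomes a single free node one
// level up, restoring the largest possible contiguous free range.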
10834 
10835 void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
10836 {
10837  switch(node->type)
10838  {
10839  case Node::TYPE_FREE:
10840  ++outInfo.unusedRangeCount;
10841  outInfo.unusedBytes += levelNodeSize;
10842  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
10843  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, levelNodeSize); // Min must use VMA_MIN, not VMA_MAX.
10844  break;
10845  case Node::TYPE_ALLOCATION:
10846  {
10847  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
10848  ++outInfo.allocationCount;
10849  outInfo.usedBytes += allocSize;
10850  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
10851  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, allocSize); // Min must use VMA_MIN, not VMA_MAX.
10852 
10853  const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
10854  if(unusedRangeSize > 0)
10855  {
10856  ++outInfo.unusedRangeCount;
10857  outInfo.unusedBytes += unusedRangeSize;
10858  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
10859  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize); // Min must use VMA_MIN, not VMA_MAX.
10860  }
10861  }
10862  break;
10863  case Node::TYPE_SPLIT:
10864  {
10865  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
10866  const Node* const leftChild = node->split.leftChild;
10867  CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
10868  const Node* const rightChild = leftChild->buddy;
10869  CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
10870  }
10871  break;
10872  default:
10873  VMA_ASSERT(0);
10874  }
10875 }
10876 
10877 void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
10878 {
10879  VMA_ASSERT(node->type == Node::TYPE_FREE);
10880 
10881  // List is empty.
10882  Node* const frontNode = m_FreeList[level].front;
10883  if(frontNode == VMA_NULL)
10884  {
10885  VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
10886  node->free.prev = node->free.next = VMA_NULL;
10887  m_FreeList[level].front = m_FreeList[level].back = node;
10888  }
10889  else
10890  {
10891  VMA_ASSERT(frontNode->free.prev == VMA_NULL);
10892  node->free.prev = VMA_NULL;
10893  node->free.next = frontNode;
10894  frontNode->free.prev = node;
10895  m_FreeList[level].front = node;
10896  }
10897 }
10898 
10899 void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
10900 {
10901  VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
10902 
10903  // It is at the front.
10904  if(node->free.prev == VMA_NULL)
10905  {
10906  VMA_ASSERT(m_FreeList[level].front == node);
10907  m_FreeList[level].front = node->free.next;
10908  }
10909  else
10910  {
10911  Node* const prevFreeNode = node->free.prev;
10912  VMA_ASSERT(prevFreeNode->free.next == node);
10913  prevFreeNode->free.next = node->free.next;
10914  }
10915 
10916  // It is at the back.
10917  if(node->free.next == VMA_NULL)
10918  {
10919  VMA_ASSERT(m_FreeList[level].back == node);
10920  m_FreeList[level].back = node->free.prev;
10921  }
10922  else
10923  {
10924  Node* const nextFreeNode = node->free.next;
10925  VMA_ASSERT(nextFreeNode->free.prev == node);
10926  nextFreeNode->free.prev = node->free.prev;
10927  }
10928 }
10929 
10930 #if VMA_STATS_STRING_ENABLED
10931 void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
10932 {
10933  switch(node->type)
10934  {
10935  case Node::TYPE_FREE:
10936  PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
10937  break;
10938  case Node::TYPE_ALLOCATION:
10939  {
10940  PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
10941  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
10942  if(allocSize < levelNodeSize)
10943  {
10944  PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize);
10945  }
10946  }
10947  break;
10948  case Node::TYPE_SPLIT:
10949  {
10950  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
10951  const Node* const leftChild = node->split.leftChild;
10952  PrintDetailedMapNode(json, leftChild, childrenNodeSize);
10953  const Node* const rightChild = leftChild->buddy;
10954  PrintDetailedMapNode(json, rightChild, childrenNodeSize);
10955  }
10956  break;
10957  default:
10958  VMA_ASSERT(0);
10959  }
10960 }
10961 #endif // #if VMA_STATS_STRING_ENABLED
10962 
10963 
10964 ////////////////////////////////////////////////////////////////////////////////
10965 // class VmaDeviceMemoryBlock
10966 
10967 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
10968  m_pMetadata(VMA_NULL),
10969  m_MemoryTypeIndex(UINT32_MAX),
10970  m_Id(0),
10971  m_hMemory(VK_NULL_HANDLE),
10972  m_MapCount(0),
10973  m_pMappedData(VMA_NULL)
10974 {
10975 }
10976 
10977 void VmaDeviceMemoryBlock::Init(
10978  VmaAllocator hAllocator,
10979  uint32_t newMemoryTypeIndex,
10980  VkDeviceMemory newMemory,
10981  VkDeviceSize newSize,
10982  uint32_t id,
10983  uint32_t algorithm)
10984 {
10985  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
10986 
10987  m_MemoryTypeIndex = newMemoryTypeIndex;
10988  m_Id = id;
10989  m_hMemory = newMemory;
10990 
10991  switch(algorithm)
10992  {
10993  case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
10994  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
10995  break;
10996  case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
10997  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
10998  break;
10999  default:
11000  VMA_ASSERT(0);
11001  // Fall-through.
11002  case 0:
11003  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
11004  }
11005  m_pMetadata->Init(newSize);
11006 }
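/*
Usage sketch for selecting one of the metadata algorithms above through the
public API (vmaCreatePool and the pool flags are declared earlier in this header):

    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memTypeIndex; // E.g. from vmaFindMemoryTypeIndex.
    poolCreateInfo.flags = VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT; // Or VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT; 0 selects the generic algorithm.
    VmaPool pool;
    VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
*/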
11007 
11008 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
11009 {
11010  // This is the most important assert in the entire library.
11011  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
11012  VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
11013 
11014  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
11015  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
11016  m_hMemory = VK_NULL_HANDLE;
11017 
11018  vma_delete(allocator, m_pMetadata);
11019  m_pMetadata = VMA_NULL;
11020 }
11021 
11022 bool VmaDeviceMemoryBlock::Validate() const
11023 {
11024  VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
11025  (m_pMetadata->GetSize() != 0));
11026 
11027  return m_pMetadata->Validate();
11028 }
11029 
11030 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
11031 {
11032  void* pData = nullptr;
11033  VkResult res = Map(hAllocator, 1, &pData);
11034  if(res != VK_SUCCESS)
11035  {
11036  return res;
11037  }
11038 
11039  res = m_pMetadata->CheckCorruption(pData);
11040 
11041  Unmap(hAllocator, 1);
11042 
11043  return res;
11044 }
11045 
11046 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
11047 {
11048  if(count == 0)
11049  {
11050  return VK_SUCCESS;
11051  }
11052 
11053  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11054  if(m_MapCount != 0)
11055  {
11056  m_MapCount += count;
11057  VMA_ASSERT(m_pMappedData != VMA_NULL);
11058  if(ppData != VMA_NULL)
11059  {
11060  *ppData = m_pMappedData;
11061  }
11062  return VK_SUCCESS;
11063  }
11064  else
11065  {
11066  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
11067  hAllocator->m_hDevice,
11068  m_hMemory,
11069  0, // offset
11070  VK_WHOLE_SIZE,
11071  0, // flags
11072  &m_pMappedData);
11073  if(result == VK_SUCCESS)
11074  {
11075  if(ppData != VMA_NULL)
11076  {
11077  *ppData = m_pMappedData;
11078  }
11079  m_MapCount = count;
11080  }
11081  return result;
11082  }
11083 }
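// Mapping is reference-counted: vkMapMemory is called only on the 0 -> 1
// transition of m_MapCount and vkUnmapMemory only on the 1 -> 0 transition in
// Unmap below, so nested vmaMapMemory / vmaUnmapMemory calls on allocations from
// the same block are cheap and thread-safe under m_Mutex.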
11084 
11085 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
11086 {
11087  if(count == 0)
11088  {
11089  return;
11090  }
11091 
11092  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11093  if(m_MapCount >= count)
11094  {
11095  m_MapCount -= count;
11096  if(m_MapCount == 0)
11097  {
11098  m_pMappedData = VMA_NULL;
11099  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
11100  }
11101  }
11102  else
11103  {
11104  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
11105  }
11106 }
11107 
11108 VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
11109 {
11110  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
11111  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
11112 
11113  void* pData;
11114  VkResult res = Map(hAllocator, 1, &pData);
11115  if(res != VK_SUCCESS)
11116  {
11117  return res;
11118  }
11119 
11120  VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
11121  VmaWriteMagicValue(pData, allocOffset + allocSize);
11122 
11123  Unmap(hAllocator, 1);
11124 
11125  return VK_SUCCESS;
11126 }
11127 
11128 VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
11129 {
11130  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
11131  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
11132 
11133  void* pData;
11134  VkResult res = Map(hAllocator, 1, &pData);
11135  if(res != VK_SUCCESS)
11136  {
11137  return res;
11138  }
11139 
11140  if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
11141  {
11142  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
11143  }
11144  else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
11145  {
11146  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
11147  }
11148 
11149  Unmap(hAllocator, 1);
11150 
11151  return VK_SUCCESS;
11152 }
11153 
11154 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
11155  const VmaAllocator hAllocator,
11156  const VmaAllocation hAllocation,
11157  VkBuffer hBuffer)
11158 {
11159  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
11160  hAllocation->GetBlock() == this);
11161  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
11162  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11163  return hAllocator->GetVulkanFunctions().vkBindBufferMemory(
11164  hAllocator->m_hDevice,
11165  hBuffer,
11166  m_hMemory,
11167  hAllocation->GetOffset());
11168 }
11169 
11170 VkResult VmaDeviceMemoryBlock::BindImageMemory(
11171  const VmaAllocator hAllocator,
11172  const VmaAllocation hAllocation,
11173  VkImage hImage)
11174 {
11175  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
11176  hAllocation->GetBlock() == this);
11177  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
11178  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11179  return hAllocator->GetVulkanFunctions().vkBindImageMemory(
11180  hAllocator->m_hDevice,
11181  hImage,
11182  m_hMemory,
11183  hAllocation->GetOffset());
11184 }
11185 
11186 static void InitStatInfo(VmaStatInfo& outInfo)
11187 {
11188  memset(&outInfo, 0, sizeof(outInfo));
11189  outInfo.allocationSizeMin = UINT64_MAX;
11190  outInfo.unusedRangeSizeMin = UINT64_MAX;
11191 }
11192 
11193 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
11194 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
11195 {
11196  inoutInfo.blockCount += srcInfo.blockCount;
11197  inoutInfo.allocationCount += srcInfo.allocationCount;
11198  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
11199  inoutInfo.usedBytes += srcInfo.usedBytes;
11200  inoutInfo.unusedBytes += srcInfo.unusedBytes;
11201  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
11202  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
11203  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
11204  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
11205 }
11206 
11207 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
11208 {
11209  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
11210  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
11211  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
11212  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
11213 }
11214 
11215 VmaPool_T::VmaPool_T(
11216  VmaAllocator hAllocator,
11217  const VmaPoolCreateInfo& createInfo,
11218  VkDeviceSize preferredBlockSize) :
11219  m_BlockVector(
11220  hAllocator,
11221  createInfo.memoryTypeIndex,
11222  createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
11223  createInfo.minBlockCount,
11224  createInfo.maxBlockCount,
11225  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
11226  createInfo.frameInUseCount,
11227  true, // isCustomPool
11228  createInfo.blockSize != 0, // explicitBlockSize
11229  createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm
11230  m_Id(0)
11231 {
11232 }
11233 
11234 VmaPool_T::~VmaPool_T()
11235 {
11236 }
11237 
11238 #if VMA_STATS_STRING_ENABLED
11239 
11240 #endif // #if VMA_STATS_STRING_ENABLED
11241 
11242 VmaBlockVector::VmaBlockVector(
11243  VmaAllocator hAllocator,
11244  uint32_t memoryTypeIndex,
11245  VkDeviceSize preferredBlockSize,
11246  size_t minBlockCount,
11247  size_t maxBlockCount,
11248  VkDeviceSize bufferImageGranularity,
11249  uint32_t frameInUseCount,
11250  bool isCustomPool,
11251  bool explicitBlockSize,
11252  uint32_t algorithm) :
11253  m_hAllocator(hAllocator),
11254  m_MemoryTypeIndex(memoryTypeIndex),
11255  m_PreferredBlockSize(preferredBlockSize),
11256  m_MinBlockCount(minBlockCount),
11257  m_MaxBlockCount(maxBlockCount),
11258  m_BufferImageGranularity(bufferImageGranularity),
11259  m_FrameInUseCount(frameInUseCount),
11260  m_IsCustomPool(isCustomPool),
11261  m_ExplicitBlockSize(explicitBlockSize),
11262  m_Algorithm(algorithm),
11263  m_HasEmptyBlock(false),
11264  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
11265  m_NextBlockId(0)
11266 {
11267 }
11268 
11269 VmaBlockVector::~VmaBlockVector()
11270 {
11271  for(size_t i = m_Blocks.size(); i--; )
11272  {
11273  m_Blocks[i]->Destroy(m_hAllocator);
11274  vma_delete(m_hAllocator, m_Blocks[i]);
11275  }
11276 }
11277 
11278 VkResult VmaBlockVector::CreateMinBlocks()
11279 {
11280  for(size_t i = 0; i < m_MinBlockCount; ++i)
11281  {
11282  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
11283  if(res != VK_SUCCESS)
11284  {
11285  return res;
11286  }
11287  }
11288  return VK_SUCCESS;
11289 }
11290 
11291 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
11292 {
11293  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
11294 
11295  const size_t blockCount = m_Blocks.size();
11296 
11297  pStats->size = 0;
11298  pStats->unusedSize = 0;
11299  pStats->allocationCount = 0;
11300  pStats->unusedRangeCount = 0;
11301  pStats->unusedRangeSizeMax = 0;
11302  pStats->blockCount = blockCount;
11303 
11304  for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
11305  {
11306  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
11307  VMA_ASSERT(pBlock);
11308  VMA_HEAVY_ASSERT(pBlock->Validate());
11309  pBlock->m_pMetadata->AddPoolStats(*pStats);
11310  }
11311 }
11312 
11313 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
11314 {
11315  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
11316  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
11317  (VMA_DEBUG_MARGIN > 0) &&
11318  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
11319 }
11320 
11321 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
11322 
11323 VkResult VmaBlockVector::Allocate(
11324  VmaPool hCurrentPool,
11325  uint32_t currentFrameIndex,
11326  VkDeviceSize size,
11327  VkDeviceSize alignment,
11328  const VmaAllocationCreateInfo& createInfo,
11329  VmaSuballocationType suballocType,
11330  size_t allocationCount,
11331  VmaAllocation* pAllocations)
11332 {
11333  size_t allocIndex;
11334  VkResult res = VK_SUCCESS;
11335 
11336  {
11337  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
11338  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
11339  {
11340  res = AllocatePage(
11341  hCurrentPool,
11342  currentFrameIndex,
11343  size,
11344  alignment,
11345  createInfo,
11346  suballocType,
11347  pAllocations + allocIndex);
11348  if(res != VK_SUCCESS)
11349  {
11350  break;
11351  }
11352  }
11353  }
11354 
11355  if(res != VK_SUCCESS)
11356  {
11357  // Free all already created allocations.
11358  while(allocIndex--)
11359  {
11360  Free(pAllocations[allocIndex]);
11361  }
11362  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
11363  }
11364 
11365  return res;
11366 }
11367 
11368 VkResult VmaBlockVector::AllocatePage(
11369  VmaPool hCurrentPool,
11370  uint32_t currentFrameIndex,
11371  VkDeviceSize size,
11372  VkDeviceSize alignment,
11373  const VmaAllocationCreateInfo& createInfo,
11374  VmaSuballocationType suballocType,
11375  VmaAllocation* pAllocation)
11376 {
11377  const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
11378  bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
11379  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
11380  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
11381  const bool canCreateNewBlock =
11382  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
11383  (m_Blocks.size() < m_MaxBlockCount);
11384  uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
11385 
11386  // If linear algorithm is used, canMakeOtherLost is available only when used as a ring buffer,
11387  // which in turn is available only when maxBlockCount = 1.
11388  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
11389  {
11390  canMakeOtherLost = false;
11391  }
11392 
11393  // Upper address can only be used with linear allocator and within single memory block.
11394  if(isUpperAddress &&
11395  (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
11396  {
11397  return VK_ERROR_FEATURE_NOT_PRESENT;
11398  }
11399 
11400  // Validate strategy.
11401  switch(strategy)
11402  {
11403  case 0:
11404  strategy = VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT;
11405  break;
11406  case VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT:
11407  case VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT:
11408  case VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT:
11409  break;
11410  default:
11411  return VK_ERROR_FEATURE_NOT_PRESENT;
11412  }
11413 
11414  // Early reject: requested allocation size is larger than the maximum block size for this block vector.
11415  if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
11416  {
11417  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11418  }
11419 
11420  /*
11421  Under certain conditions, this whole section can be skipped for optimization, so
11422  we move on directly to trying to allocate with canMakeOtherLost. That's the case
11423  e.g. for custom pools with linear algorithm.
11424  */
11425  if(!canMakeOtherLost || canCreateNewBlock)
11426  {
11427  // 1. Search existing allocations. Try to allocate without making other allocations lost.
11428  VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
11430 
11431  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
11432  {
11433  // Use only last block.
11434  if(!m_Blocks.empty())
11435  {
11436  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
11437  VMA_ASSERT(pCurrBlock);
11438  VkResult res = AllocateFromBlock(
11439  pCurrBlock,
11440  hCurrentPool,
11441  currentFrameIndex,
11442  size,
11443  alignment,
11444  allocFlagsCopy,
11445  createInfo.pUserData,
11446  suballocType,
11447  strategy,
11448  pAllocation);
11449  if(res == VK_SUCCESS)
11450  {
11451  VMA_DEBUG_LOG(" Returned from last block #%u", (uint32_t)(m_Blocks.size() - 1));
11452  return VK_SUCCESS;
11453  }
11454  }
11455  }
11456  else
11457  {
11458  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
11459  {
11460  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
11461  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
11462  {
11463  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11464  VMA_ASSERT(pCurrBlock);
11465  VkResult res = AllocateFromBlock(
11466  pCurrBlock,
11467  hCurrentPool,
11468  currentFrameIndex,
11469  size,
11470  alignment,
11471  allocFlagsCopy,
11472  createInfo.pUserData,
11473  suballocType,
11474  strategy,
11475  pAllocation);
11476  if(res == VK_SUCCESS)
11477  {
11478  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
11479  return VK_SUCCESS;
11480  }
11481  }
11482  }
11483  else // WORST_FIT, FIRST_FIT
11484  {
11485  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
11486  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
11487  {
11488  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11489  VMA_ASSERT(pCurrBlock);
11490  VkResult res = AllocateFromBlock(
11491  pCurrBlock,
11492  hCurrentPool,
11493  currentFrameIndex,
11494  size,
11495  alignment,
11496  allocFlagsCopy,
11497  createInfo.pUserData,
11498  suballocType,
11499  strategy,
11500  pAllocation);
11501  if(res == VK_SUCCESS)
11502  {
11503  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
11504  return VK_SUCCESS;
11505  }
11506  }
11507  }
11508  }
11509 
11510  // 2. Try to create new block.
11511  if(canCreateNewBlock)
11512  {
11513  // Calculate optimal size for new block.
11514  VkDeviceSize newBlockSize = m_PreferredBlockSize;
11515  uint32_t newBlockSizeShift = 0;
11516  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
11517 
11518  if(!m_ExplicitBlockSize)
11519  {
11520  // Allocate 1/8, 1/4, 1/2 as first blocks.
11521  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
11522  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
11523  {
11524  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
11525  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
11526  {
11527  newBlockSize = smallerNewBlockSize;
11528  ++newBlockSizeShift;
11529  }
11530  else
11531  {
11532  break;
11533  }
11534  }
11535  }
11536 
11537  size_t newBlockIndex = 0;
11538  VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
11539  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
11540  if(!m_ExplicitBlockSize)
11541  {
11542  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
11543  {
11544  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
11545  if(smallerNewBlockSize >= size)
11546  {
11547  newBlockSize = smallerNewBlockSize;
11548  ++newBlockSizeShift;
11549  res = CreateBlock(newBlockSize, &newBlockIndex);
11550  }
11551  else
11552  {
11553  break;
11554  }
11555  }
11556  }
11557 
11558  if(res == VK_SUCCESS)
11559  {
11560  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
11561  VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
11562 
11563  res = AllocateFromBlock(
11564  pBlock,
11565  hCurrentPool,
11566  currentFrameIndex,
11567  size,
11568  alignment,
11569  allocFlagsCopy,
11570  createInfo.pUserData,
11571  suballocType,
11572  strategy,
11573  pAllocation);
11574  if(res == VK_SUCCESS)
11575  {
11576  VMA_DEBUG_LOG(" Created new block Size=%llu", newBlockSize);
11577  return VK_SUCCESS;
11578  }
11579  else
11580  {
11581  // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
11582  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11583  }
11584  }
11585  }
11586  }
11587 
11588  // 3. Try to allocate from existing blocks with making other allocations lost.
11589  if(canMakeOtherLost)
11590  {
11591  uint32_t tryIndex = 0;
11592  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
11593  {
11594  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
11595  VmaAllocationRequest bestRequest = {};
11596  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
11597 
11598  // 1. Search existing allocations.
11599  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
11600  {
11601  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
11602  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
11603  {
11604  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11605  VMA_ASSERT(pCurrBlock);
11606  VmaAllocationRequest currRequest = {};
11607  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
11608  currentFrameIndex,
11609  m_FrameInUseCount,
11610  m_BufferImageGranularity,
11611  size,
11612  alignment,
11613  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
11614  suballocType,
11615  canMakeOtherLost,
11616  strategy,
11617  &currRequest))
11618  {
11619  const VkDeviceSize currRequestCost = currRequest.CalcCost();
11620  if(pBestRequestBlock == VMA_NULL ||
11621  currRequestCost < bestRequestCost)
11622  {
11623  pBestRequestBlock = pCurrBlock;
11624  bestRequest = currRequest;
11625  bestRequestCost = currRequestCost;
11626 
11627  if(bestRequestCost == 0)
11628  {
11629  break;
11630  }
11631  }
11632  }
11633  }
11634  }
11635  else // WORST_FIT, FIRST_FIT
11636  {
11637  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
11638  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
11639  {
11640  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11641  VMA_ASSERT(pCurrBlock);
11642  VmaAllocationRequest currRequest = {};
11643  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
11644  currentFrameIndex,
11645  m_FrameInUseCount,
11646  m_BufferImageGranularity,
11647  size,
11648  alignment,
11649  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
11650  suballocType,
11651  canMakeOtherLost,
11652  strategy,
11653  &currRequest))
11654  {
11655  const VkDeviceSize currRequestCost = currRequest.CalcCost();
11656  if(pBestRequestBlock == VMA_NULL ||
11657  currRequestCost < bestRequestCost ||
11658  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
11659  {
11660  pBestRequestBlock = pCurrBlock;
11661  bestRequest = currRequest;
11662  bestRequestCost = currRequestCost;
11663 
11664  if(bestRequestCost == 0 ||
11665  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
11666  {
11667  break;
11668  }
11669  }
11670  }
11671  }
11672  }
11673 
11674  if(pBestRequestBlock != VMA_NULL)
11675  {
11676  if(mapped)
11677  {
11678  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
11679  if(res != VK_SUCCESS)
11680  {
11681  return res;
11682  }
11683  }
11684 
11685  if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
11686  currentFrameIndex,
11687  m_FrameInUseCount,
11688  &bestRequest))
11689  {
11690  // Allocating from this block will make it non-empty, so we no longer have an empty block.
11691  if(pBestRequestBlock->m_pMetadata->IsEmpty())
11692  {
11693  m_HasEmptyBlock = false;
11694  }
11695  // Allocate from this pBlock.
11696  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
11697  pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, isUpperAddress, *pAllocation);
11698  (*pAllocation)->InitBlockAllocation(
11699  hCurrentPool,
11700  pBestRequestBlock,
11701  bestRequest.offset,
11702  alignment,
11703  size,
11704  suballocType,
11705  mapped,
11706  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
11707  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
11708  VMA_DEBUG_LOG("    Returned from existing block");
11709  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
11710  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
11711  {
11712  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
11713  }
11714  if(IsCorruptionDetectionEnabled())
11715  {
11716  VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
11717  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
11718  }
11719  return VK_SUCCESS;
11720  }
11721  // else: Some allocations must have been touched while we were here. Next try.
11722  }
11723  else
11724  {
11725  // Could not find place in any of the blocks - break outer loop.
11726  break;
11727  }
11728  }
11729  /* Maximum number of tries exceeded - a very unlikely event when many other
11730  threads are simultaneously touching allocations, making it impossible to mark
11731  them as lost while we try to allocate. */
11732  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
11733  {
11734  return VK_ERROR_TOO_MANY_OBJECTS;
11735  }
11736  }
11737 
11738  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11739 }
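
// The three allocation stages above (search existing blocks, create a new
// block, make other allocations lost) are steered by the public strategy
// flags. A minimal caller-side sketch, illustrative only (buffer parameters
// below are placeholders, not taken from this file):
#if 0
VkBufferCreateInfo bufInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufInfo.size = 65536;
bufInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
// Prefer tight packing (best-fit search in forward block order) over speed:
allocCreateInfo.flags = VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT;

VkBuffer buf = VK_NULL_HANDLE;
VmaAllocation alloc = VK_NULL_HANDLE;
VkResult res = vmaCreateBuffer(allocator, &bufInfo, &allocCreateInfo, &buf, &alloc, VMA_NULL);
#endif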
11740 
11741 void VmaBlockVector::Free(
11742  VmaAllocation hAllocation)
11743 {
11744  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
11745 
11746  // Scope for lock.
11747  {
11748  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
11749 
11750  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
11751 
11752  if(IsCorruptionDetectionEnabled())
11753  {
11754  VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
11755  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
11756  }
11757 
11758  if(hAllocation->IsPersistentMap())
11759  {
11760  pBlock->Unmap(m_hAllocator, 1);
11761  }
11762 
11763  pBlock->m_pMetadata->Free(hAllocation);
11764  VMA_HEAVY_ASSERT(pBlock->Validate());
11765 
11766  VMA_DEBUG_LOG("  Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
11767 
11768  // pBlock became empty after this deallocation.
11769  if(pBlock->m_pMetadata->IsEmpty())
11770  {
11771  // There is already an empty Allocation - we don't want two, so delete this one.
11772  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
11773  {
11774  pBlockToDelete = pBlock;
11775  Remove(pBlock);
11776  }
11777  // We now have our first empty block.
11778  else
11779  {
11780  m_HasEmptyBlock = true;
11781  }
11782  }
11783  // pBlock didn't become empty, but we have another empty block - find and free that one.
11784  // (This is optional, heuristics.)
11785  else if(m_HasEmptyBlock)
11786  {
11787  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
11788  if(pLastBlock->m_pMetadata->IsEmpty() && m_Blocks.size() > m_MinBlockCount)
11789  {
11790  pBlockToDelete = pLastBlock;
11791  m_Blocks.pop_back();
11792  m_HasEmptyBlock = false;
11793  }
11794  }
11795 
11796  IncrementallySortBlocks();
11797  }
11798 
11799  // Destruction of a free block. Deferred until this point, outside of the
11800  // mutex lock, for performance reasons.
11801  if(pBlockToDelete != VMA_NULL)
11802  {
11803  VMA_DEBUG_LOG("    Deleted empty block");
11804  pBlockToDelete->Destroy(m_hAllocator);
11805  vma_delete(m_hAllocator, pBlockToDelete);
11806  }
11807 }
11808 
11809 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
11810 {
11811  VkDeviceSize result = 0;
11812  for(size_t i = m_Blocks.size(); i--; )
11813  {
11814  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
11815  if(result >= m_PreferredBlockSize)
11816  {
11817  break;
11818  }
11819  }
11820  return result;
11821 }
11822 
11823 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
11824 {
11825  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
11826  {
11827  if(m_Blocks[blockIndex] == pBlock)
11828  {
11829  VmaVectorRemove(m_Blocks, blockIndex);
11830  return;
11831  }
11832  }
11833  VMA_ASSERT(0);
11834 }
11835 
11836 void VmaBlockVector::IncrementallySortBlocks()
11837 {
11838  if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
11839  {
11840  // Bubble sort only until first swap.
11841  for(size_t i = 1; i < m_Blocks.size(); ++i)
11842  {
11843  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
11844  {
11845  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
11846  return;
11847  }
11848  }
11849  }
11850 }
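
// IncrementallySortBlocks performs at most one adjacent swap per call, so a
// nearly-sorted m_Blocks converges over repeated calls at O(n) per call
// instead of a full O(n log n) sort on every allocation/free. A stand-alone
// sketch of the same idea (an assumed generic helper, not part of this library):
#if 0
#include <vector>
#include <utility>

template<typename T, typename Less>
void IncrementalSortStep(std::vector<T>& v, Less less)
{
    for(size_t i = 1; i < v.size(); ++i)
    {
        if(less(v[i], v[i - 1]))
        {
            std::swap(v[i - 1], v[i]); // At most one swap, then stop.
            return;
        }
    }
}
#endif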
11851 
11852 VkResult VmaBlockVector::AllocateFromBlock(
11853  VmaDeviceMemoryBlock* pBlock,
11854  VmaPool hCurrentPool,
11855  uint32_t currentFrameIndex,
11856  VkDeviceSize size,
11857  VkDeviceSize alignment,
11858  VmaAllocationCreateFlags allocFlags,
11859  void* pUserData,
11860  VmaSuballocationType suballocType,
11861  uint32_t strategy,
11862  VmaAllocation* pAllocation)
11863 {
11864  VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
11865  const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
11866  const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
11867  const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
11868 
11869  VmaAllocationRequest currRequest = {};
11870  if(pBlock->m_pMetadata->CreateAllocationRequest(
11871  currentFrameIndex,
11872  m_FrameInUseCount,
11873  m_BufferImageGranularity,
11874  size,
11875  alignment,
11876  isUpperAddress,
11877  suballocType,
11878  false, // canMakeOtherLost
11879  strategy,
11880  &currRequest))
11881  {
11882  // Allocate from pCurrBlock.
11883  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
11884 
11885  if(mapped)
11886  {
11887  VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
11888  if(res != VK_SUCCESS)
11889  {
11890  return res;
11891  }
11892  }
11893 
11894  // We no longer have an empty Allocation.
11895  if(pBlock->m_pMetadata->IsEmpty())
11896  {
11897  m_HasEmptyBlock = false;
11898  }
11899 
11900  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
11901  pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, isUpperAddress, *pAllocation);
11902  (*pAllocation)->InitBlockAllocation(
11903  hCurrentPool,
11904  pBlock,
11905  currRequest.offset,
11906  alignment,
11907  size,
11908  suballocType,
11909  mapped,
11910  (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
11911  VMA_HEAVY_ASSERT(pBlock->Validate());
11912  (*pAllocation)->SetUserData(m_hAllocator, pUserData);
11913  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
11914  {
11915  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
11916  }
11917  if(IsCorruptionDetectionEnabled())
11918  {
11919  VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
11920  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
11921  }
11922  return VK_SUCCESS;
11923  }
11924  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11925 }
11926 
11927 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
11928 {
11929  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
11930  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
11931  allocInfo.allocationSize = blockSize;
11932  VkDeviceMemory mem = VK_NULL_HANDLE;
11933  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
11934  if(res < 0)
11935  {
11936  return res;
11937  }
11938 
11939  // New VkDeviceMemory successfully created.
11940 
11941  // Create new Allocation for it.
11942  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
11943  pBlock->Init(
11944  m_hAllocator,
11945  m_MemoryTypeIndex,
11946  mem,
11947  allocInfo.allocationSize,
11948  m_NextBlockId++,
11949  m_Algorithm);
11950 
11951  m_Blocks.push_back(pBlock);
11952  if(pNewBlockIndex != VMA_NULL)
11953  {
11954  *pNewBlockIndex = m_Blocks.size() - 1;
11955  }
11956 
11957  return VK_SUCCESS;
11958 }
11959 
11960 void VmaBlockVector::ApplyDefragmentationMovesCpu(
11961  class VmaBlockVectorDefragmentationContext* pDefragCtx,
11962  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves)
11963 {
11964  const size_t blockCount = m_Blocks.size();
11965  const bool isNonCoherent = m_hAllocator->IsMemoryTypeNonCoherent(m_MemoryTypeIndex);
11966 
11967  enum BLOCK_FLAG
11968  {
11969  BLOCK_FLAG_USED = 0x00000001,
11970  BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION = 0x00000002,
11971  };
11972 
11973  struct BlockInfo
11974  {
11975  uint32_t flags;
11976  void* pMappedData;
11977  };
11978  VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> >
11979  blockInfo(blockCount, VmaStlAllocator<BlockInfo>(m_hAllocator->GetAllocationCallbacks()));
11980  memset(blockInfo.data(), 0, blockCount * sizeof(BlockInfo));
11981 
11982  // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
11983  const size_t moveCount = moves.size();
11984  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
11985  {
11986  const VmaDefragmentationMove& move = moves[moveIndex];
11987  blockInfo[move.srcBlockIndex].flags |= BLOCK_FLAG_USED;
11988  blockInfo[move.dstBlockIndex].flags |= BLOCK_FLAG_USED;
11989  }
11990 
11991  VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
11992 
11993  // Go over all blocks. Get mapped pointer or map if necessary.
11994  for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
11995  {
11996  BlockInfo& currBlockInfo = blockInfo[blockIndex];
11997  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
11998  if((currBlockInfo.flags & BLOCK_FLAG_USED) != 0)
11999  {
12000  currBlockInfo.pMappedData = pBlock->GetMappedData();
12001  // It is not originally mapped - map it.
12002  if(currBlockInfo.pMappedData == VMA_NULL)
12003  {
12004  pDefragCtx->res = pBlock->Map(m_hAllocator, 1, &currBlockInfo.pMappedData);
12005  if(pDefragCtx->res == VK_SUCCESS)
12006  {
12007  currBlockInfo.flags |= BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION;
12008  }
12009  }
12010  }
12011  }
12012 
12013  // Go over all moves. Do actual data transfer.
12014  if(pDefragCtx->res == VK_SUCCESS)
12015  {
12016  const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
12017  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
12018 
12019  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
12020  {
12021  const VmaDefragmentationMove& move = moves[moveIndex];
12022 
12023  const BlockInfo& srcBlockInfo = blockInfo[move.srcBlockIndex];
12024  const BlockInfo& dstBlockInfo = blockInfo[move.dstBlockIndex];
12025 
12026  VMA_ASSERT(srcBlockInfo.pMappedData && dstBlockInfo.pMappedData);
12027 
12028  // Invalidate source.
12029  if(isNonCoherent)
12030  {
12031  VmaDeviceMemoryBlock* const pSrcBlock = m_Blocks[move.srcBlockIndex];
12032  memRange.memory = pSrcBlock->GetDeviceMemory();
12033  memRange.offset = VmaAlignDown(move.srcOffset, nonCoherentAtomSize);
12034  memRange.size = VMA_MIN(
12035  VmaAlignUp(move.size + (move.srcOffset - memRange.offset), nonCoherentAtomSize),
12036  pSrcBlock->m_pMetadata->GetSize() - memRange.offset);
12037  (*m_hAllocator->GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
12038  }
12039 
12040  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
12041  memmove(
12042  reinterpret_cast<char*>(dstBlockInfo.pMappedData) + move.dstOffset,
12043  reinterpret_cast<char*>(srcBlockInfo.pMappedData) + move.srcOffset,
12044  static_cast<size_t>(move.size));
12045 
12046  if(IsCorruptionDetectionEnabled())
12047  {
12048  VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset - VMA_DEBUG_MARGIN);
12049  VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset + move.size);
12050  }
12051 
12052  // Flush destination.
12053  if(isNonCoherent)
12054  {
12055  VmaDeviceMemoryBlock* const pDstBlock = m_Blocks[move.dstBlockIndex];
12056  memRange.memory = pDstBlock->GetDeviceMemory();
12057  memRange.offset = VmaAlignDown(move.dstOffset, nonCoherentAtomSize);
12058  memRange.size = VMA_MIN(
12059  VmaAlignUp(move.size + (move.dstOffset - memRange.offset), nonCoherentAtomSize),
12060  pDstBlock->m_pMetadata->GetSize() - memRange.offset);
12061  (*m_hAllocator->GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
12062  }
12063  }
12064  }
12065 
12066  // Go over all blocks in reverse order. Unmap those that were mapped just for defragmentation.
12067  // Regardless of pCtx->res == VK_SUCCESS.
12068  for(size_t blockIndex = blockCount; blockIndex--; )
12069  {
12070  const BlockInfo& currBlockInfo = blockInfo[blockIndex];
12071  if((currBlockInfo.flags & BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION) != 0)
12072  {
12073  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
12074  pBlock->Unmap(m_hAllocator, 1);
12075  }
12076  }
12077 }
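
// Worked example of the flush/invalidate range math above, assuming
// nonCoherentAtomSize = 64, move.srcOffset = 100, move.size = 50:
//   memRange.offset = VmaAlignDown(100, 64) = 64
//   memRange.size   = VmaAlignUp(50 + (100 - 64), 64) = VmaAlignUp(86, 64) = 128
// i.e. both ends of the range are expanded to whole non-coherent atoms (and
// clamped to the block size), as vkFlushMappedMemoryRanges and
// vkInvalidateMappedMemoryRanges require for non-coherent memory.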
12078 
12079 void VmaBlockVector::ApplyDefragmentationMovesGpu(
12080  class VmaBlockVectorDefragmentationContext* pDefragCtx,
12081  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
12082  VkCommandBuffer commandBuffer)
12083 {
12084  const size_t blockCount = m_Blocks.size();
12085 
12086  pDefragCtx->blockContexts.resize(blockCount);
12087  memset(pDefragCtx->blockContexts.data(), 0, blockCount * sizeof(VmaBlockDefragmentationContext));
12088 
12089  // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
12090  const size_t moveCount = moves.size();
12091  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
12092  {
12093  const VmaDefragmentationMove& move = moves[moveIndex];
12094  pDefragCtx->blockContexts[move.srcBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
12095  pDefragCtx->blockContexts[move.dstBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
12096  }
12097 
12098  VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
12099 
12100  // Go over all blocks. Create and bind buffer for whole block if necessary.
12101  {
12102  VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
12103  bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT |
12104  VK_BUFFER_USAGE_TRANSFER_DST_BIT;
12105 
12106  for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
12107  {
12108  VmaBlockDefragmentationContext& currBlockCtx = pDefragCtx->blockContexts[blockIndex];
12109  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
12110  if((currBlockCtx.flags & VmaBlockDefragmentationContext::BLOCK_FLAG_USED) != 0)
12111  {
12112  bufCreateInfo.size = pBlock->m_pMetadata->GetSize();
12113  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkCreateBuffer)(
12114  m_hAllocator->m_hDevice, &bufCreateInfo, m_hAllocator->GetAllocationCallbacks(), &currBlockCtx.hBuffer);
12115  if(pDefragCtx->res == VK_SUCCESS)
12116  {
12117  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkBindBufferMemory)(
12118  m_hAllocator->m_hDevice, currBlockCtx.hBuffer, pBlock->GetDeviceMemory(), 0);
12119  }
12120  }
12121  }
12122  }
12123 
12124  // Go over all moves. Post data transfer commands to command buffer.
12125  if(pDefragCtx->res == VK_SUCCESS)
12126  {
12127  const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
12128  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
12129 
12130  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
12131  {
12132  const VmaDefragmentationMove& move = moves[moveIndex];
12133 
12134  const VmaBlockDefragmentationContext& srcBlockCtx = pDefragCtx->blockContexts[move.srcBlockIndex];
12135  const VmaBlockDefragmentationContext& dstBlockCtx = pDefragCtx->blockContexts[move.dstBlockIndex];
12136 
12137  VMA_ASSERT(srcBlockCtx.hBuffer && dstBlockCtx.hBuffer);
12138 
12139  VkBufferCopy region = {
12140  move.srcOffset,
12141  move.dstOffset,
12142  move.size };
12143  (*m_hAllocator->GetVulkanFunctions().vkCmdCopyBuffer)(
12144  commandBuffer, srcBlockCtx.hBuffer, dstBlockCtx.hBuffer, 1, &region);
12145  }
12146  }
12147 
12148  // Save buffers to defrag context for later destruction.
12149  if(pDefragCtx->res == VK_SUCCESS && moveCount > 0)
12150  {
12151  pDefragCtx->res = VK_NOT_READY;
12152  }
12153 }
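
// Note on the VK_NOT_READY result above: the copy commands are only recorded
// into the user-provided command buffer here, not executed. The temporary
// whole-block buffers therefore must stay alive until that command buffer has
// completed on the GPU; they are destroyed later, in DefragmentationEnd().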
12154 
12155 void VmaBlockVector::FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats)
12156 {
12157  m_HasEmptyBlock = false;
12158  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
12159  {
12160  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
12161  if(pBlock->m_pMetadata->IsEmpty())
12162  {
12163  if(m_Blocks.size() > m_MinBlockCount)
12164  {
12165  if(pDefragmentationStats != VMA_NULL)
12166  {
12167  ++pDefragmentationStats->deviceMemoryBlocksFreed;
12168  pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
12169  }
12170 
12171  VmaVectorRemove(m_Blocks, blockIndex);
12172  pBlock->Destroy(m_hAllocator);
12173  vma_delete(m_hAllocator, pBlock);
12174  }
12175  else
12176  {
12177  m_HasEmptyBlock = true;
12178  }
12179  }
12180  }
12181 }
12182 
12183 #if VMA_STATS_STRING_ENABLED
12184 
12185 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
12186 {
12187  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12188 
12189  json.BeginObject();
12190 
12191  if(m_IsCustomPool)
12192  {
12193  json.WriteString("MemoryTypeIndex");
12194  json.WriteNumber(m_MemoryTypeIndex);
12195 
12196  json.WriteString("BlockSize");
12197  json.WriteNumber(m_PreferredBlockSize);
12198 
12199  json.WriteString("BlockCount");
12200  json.BeginObject(true);
12201  if(m_MinBlockCount > 0)
12202  {
12203  json.WriteString("Min");
12204  json.WriteNumber((uint64_t)m_MinBlockCount);
12205  }
12206  if(m_MaxBlockCount < SIZE_MAX)
12207  {
12208  json.WriteString("Max");
12209  json.WriteNumber((uint64_t)m_MaxBlockCount);
12210  }
12211  json.WriteString("Cur");
12212  json.WriteNumber((uint64_t)m_Blocks.size());
12213  json.EndObject();
12214 
12215  if(m_FrameInUseCount > 0)
12216  {
12217  json.WriteString("FrameInUseCount");
12218  json.WriteNumber(m_FrameInUseCount);
12219  }
12220 
12221  if(m_Algorithm != 0)
12222  {
12223  json.WriteString("Algorithm");
12224  json.WriteString(VmaAlgorithmToStr(m_Algorithm));
12225  }
12226  }
12227  else
12228  {
12229  json.WriteString("PreferredBlockSize");
12230  json.WriteNumber(m_PreferredBlockSize);
12231  }
12232 
12233  json.WriteString("Blocks");
12234  json.BeginObject();
12235  for(size_t i = 0; i < m_Blocks.size(); ++i)
12236  {
12237  json.BeginString();
12238  json.ContinueString(m_Blocks[i]->GetId());
12239  json.EndString();
12240 
12241  m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
12242  }
12243  json.EndObject();
12244 
12245  json.EndObject();
12246 }
12247 
12248 #endif // #if VMA_STATS_STRING_ENABLED
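
// The detailed map printed above is reachable through the public statistics
// API. A hedged usage sketch (error handling omitted; printf assumes <cstdio>):
#if 0
char* statsString = VMA_NULL;
vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE = include detailed map
printf("%s\n", statsString);
vmaFreeStatsString(allocator, statsString);
#endif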
12249 
12250 void VmaBlockVector::Defragment(
12251  class VmaBlockVectorDefragmentationContext* pCtx,
12252  VmaDefragmentationStats* pStats,
12253  VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
12254  VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
12255  VkCommandBuffer commandBuffer)
12256 {
12257  pCtx->res = VK_SUCCESS;
12258 
12259  const VkMemoryPropertyFlags memPropFlags =
12260  m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags;
12261  const bool isHostVisible = (memPropFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0;
12262  const bool isHostCoherent = (memPropFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0;
12263 
12264  const bool canDefragmentOnCpu = maxCpuBytesToMove > 0 && maxCpuAllocationsToMove > 0 &&
12265  isHostVisible;
12266  const bool canDefragmentOnGpu = maxGpuBytesToMove > 0 && maxGpuAllocationsToMove > 0 &&
12267  (VMA_DEBUG_DETECT_CORRUPTION == 0 || !(isHostVisible && isHostCoherent));
12268 
12269  // There are options to defragment this memory type.
12270  if(canDefragmentOnCpu || canDefragmentOnGpu)
12271  {
12272  bool defragmentOnGpu;
12273  // There is only one option to defragment this memory type.
12274  if(canDefragmentOnGpu != canDefragmentOnCpu)
12275  {
12276  defragmentOnGpu = canDefragmentOnGpu;
12277  }
12278  // Both options are available: Heuristics to choose the best one.
12279  else
12280  {
12281  defragmentOnGpu = (memPropFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0 ||
12282  m_hAllocator->IsIntegratedGpu();
12283  }
12284 
12285  bool overlappingMoveSupported = !defragmentOnGpu;
12286 
12287  if(m_hAllocator->m_UseMutex)
12288  {
12289  m_Mutex.LockWrite();
12290  pCtx->mutexLocked = true;
12291  }
12292 
12293  pCtx->Begin(overlappingMoveSupported);
12294 
12295  // Defragment.
12296 
12297  const VkDeviceSize maxBytesToMove = defragmentOnGpu ? maxGpuBytesToMove : maxCpuBytesToMove;
12298  const uint32_t maxAllocationsToMove = defragmentOnGpu ? maxGpuAllocationsToMove : maxCpuAllocationsToMove;
12299  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> > moves =
12300  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >(VmaStlAllocator<VmaDefragmentationMove>(m_hAllocator->GetAllocationCallbacks()));
12301  pCtx->res = pCtx->GetAlgorithm()->Defragment(moves, maxBytesToMove, maxAllocationsToMove);
12302 
12303  // Accumulate statistics.
12304  if(pStats != VMA_NULL)
12305  {
12306  const VkDeviceSize bytesMoved = pCtx->GetAlgorithm()->GetBytesMoved();
12307  const uint32_t allocationsMoved = pCtx->GetAlgorithm()->GetAllocationsMoved();
12308  pStats->bytesMoved += bytesMoved;
12309  pStats->allocationsMoved += allocationsMoved;
12310  VMA_ASSERT(bytesMoved <= maxBytesToMove);
12311  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
12312  if(defragmentOnGpu)
12313  {
12314  maxGpuBytesToMove -= bytesMoved;
12315  maxGpuAllocationsToMove -= allocationsMoved;
12316  }
12317  else
12318  {
12319  maxCpuBytesToMove -= bytesMoved;
12320  maxCpuAllocationsToMove -= allocationsMoved;
12321  }
12322  }
12323 
12324  if(pCtx->res >= VK_SUCCESS)
12325  {
12326  if(defragmentOnGpu)
12327  {
12328  ApplyDefragmentationMovesGpu(pCtx, moves, commandBuffer);
12329  }
12330  else
12331  {
12332  ApplyDefragmentationMovesCpu(pCtx, moves);
12333  }
12334  }
12335  }
12336 }
12337 
12338 void VmaBlockVector::DefragmentationEnd(
12339  class VmaBlockVectorDefragmentationContext* pCtx,
12340  VmaDefragmentationStats* pStats)
12341 {
12342  // Destroy buffers.
12343  for(size_t blockIndex = pCtx->blockContexts.size(); blockIndex--; )
12344  {
12345  VmaBlockDefragmentationContext& blockCtx = pCtx->blockContexts[blockIndex];
12346  if(blockCtx.hBuffer)
12347  {
12348  (*m_hAllocator->GetVulkanFunctions().vkDestroyBuffer)(
12349  m_hAllocator->m_hDevice, blockCtx.hBuffer, m_hAllocator->GetAllocationCallbacks());
12350  }
12351  }
12352 
12353  if(pCtx->res >= VK_SUCCESS)
12354  {
12355  FreeEmptyBlocks(pStats);
12356  }
12357 
12358  if(pCtx->mutexLocked)
12359  {
12360  VMA_ASSERT(m_hAllocator->m_UseMutex);
12361  m_Mutex.UnlockWrite();
12362  }
12363 }
12364 
12365 size_t VmaBlockVector::CalcAllocationCount() const
12366 {
12367  size_t result = 0;
12368  for(size_t i = 0; i < m_Blocks.size(); ++i)
12369  {
12370  result += m_Blocks[i]->m_pMetadata->GetAllocationCount();
12371  }
12372  return result;
12373 }
12374 
12375 bool VmaBlockVector::IsBufferImageGranularityConflictPossible() const
12376 {
12377  if(m_BufferImageGranularity == 1)
12378  {
12379  return false;
12380  }
12381  VmaSuballocationType lastSuballocType = VMA_SUBALLOCATION_TYPE_FREE;
12382  for(size_t i = 0, count = m_Blocks.size(); i < count; ++i)
12383  {
12384  VmaDeviceMemoryBlock* const pBlock = m_Blocks[i];
12385  VMA_ASSERT(m_Algorithm == 0);
12386  VmaBlockMetadata_Generic* const pMetadata = (VmaBlockMetadata_Generic*)pBlock->m_pMetadata;
12387  if(pMetadata->IsBufferImageGranularityConflictPossible(m_BufferImageGranularity, lastSuballocType))
12388  {
12389  return true;
12390  }
12391  }
12392  return false;
12393 }
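
// Background: m_BufferImageGranularity comes from
// VkPhysicalDeviceLimits::bufferImageGranularity. When it is greater than 1,
// a linear resource (buffer) and an optimal-tiling image placed within the
// same granularity-sized page of one VkDeviceMemory can alias, so
// suballocations of different types need extra separation. The check above is
// used to rule out the fast defragmentation algorithm whenever such a
// conflict could occur.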
12394 
12395 void VmaBlockVector::MakePoolAllocationsLost(
12396  uint32_t currentFrameIndex,
12397  size_t* pLostAllocationCount)
12398 {
12399  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
12400  size_t lostAllocationCount = 0;
12401  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12402  {
12403  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
12404  VMA_ASSERT(pBlock);
12405  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
12406  }
12407  if(pLostAllocationCount != VMA_NULL)
12408  {
12409  *pLostAllocationCount = lostAllocationCount;
12410  }
12411 }
12412 
12413 VkResult VmaBlockVector::CheckCorruption()
12414 {
12415  if(!IsCorruptionDetectionEnabled())
12416  {
12417  return VK_ERROR_FEATURE_NOT_PRESENT;
12418  }
12419 
12420  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12421  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12422  {
12423  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
12424  VMA_ASSERT(pBlock);
12425  VkResult res = pBlock->CheckCorruption(m_hAllocator);
12426  if(res != VK_SUCCESS)
12427  {
12428  return res;
12429  }
12430  }
12431  return VK_SUCCESS;
12432 }
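
// Corruption checks are exposed publicly. Illustrative call (returns
// VK_ERROR_FEATURE_NOT_PRESENT unless VMA_DEBUG_DETECT_CORRUPTION and a
// non-zero VMA_DEBUG_MARGIN were enabled at compile time):
#if 0
VkResult res = vmaCheckCorruption(allocator, UINT32_MAX); // check all memory types
#endif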
12433 
12434 void VmaBlockVector::AddStats(VmaStats* pStats)
12435 {
12436  const uint32_t memTypeIndex = m_MemoryTypeIndex;
12437  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
12438 
12439  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12440 
12441  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12442  {
12443  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
12444  VMA_ASSERT(pBlock);
12445  VMA_HEAVY_ASSERT(pBlock->Validate());
12446  VmaStatInfo allocationStatInfo;
12447  pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
12448  VmaAddStatInfo(pStats->total, allocationStatInfo);
12449  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
12450  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
12451  }
12452 }
12453 
12454 ////////////////////////////////////////////////////////////////////////////////
12455 // VmaDefragmentationAlgorithm_Generic members definition
12456 
12457 VmaDefragmentationAlgorithm_Generic::VmaDefragmentationAlgorithm_Generic(
12458  VmaAllocator hAllocator,
12459  VmaBlockVector* pBlockVector,
12460  uint32_t currentFrameIndex,
12461  bool overlappingMoveSupported) :
12462  VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
12463  m_AllAllocations(false),
12464  m_AllocationCount(0),
12465  m_BytesMoved(0),
12466  m_AllocationsMoved(0),
12467  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
12468 {
12469  // Create block info for each block.
12470  const size_t blockCount = m_pBlockVector->m_Blocks.size();
12471  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
12472  {
12473  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
12474  pBlockInfo->m_OriginalBlockIndex = blockIndex;
12475  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
12476  m_Blocks.push_back(pBlockInfo);
12477  }
12478 
12479  // Sort them by m_pBlock pointer value.
12480  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
12481 }
12482 
12483 VmaDefragmentationAlgorithm_Generic::~VmaDefragmentationAlgorithm_Generic()
12484 {
12485  for(size_t i = m_Blocks.size(); i--; )
12486  {
12487  vma_delete(m_hAllocator, m_Blocks[i]);
12488  }
12489 }
12490 
12491 void VmaDefragmentationAlgorithm_Generic::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
12492 {
12493  // Now that we are inside VmaBlockVector::m_Mutex, we can make the final check that this allocation was not lost.
12494  if(hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
12495  {
12496  VmaDeviceMemoryBlock* pBlock = hAlloc->GetBlock();
12497  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
12498  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
12499  {
12500  AllocationInfo allocInfo = AllocationInfo(hAlloc, pChanged);
12501  (*it)->m_Allocations.push_back(allocInfo);
12502  }
12503  else
12504  {
12505  VMA_ASSERT(0);
12506  }
12507 
12508  ++m_AllocationCount;
12509  }
12510 }
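
// Because m_Blocks was sorted by m_pBlock pointer value in the constructor,
// VmaBinaryFindFirstNotLess above locates the BlockInfo for a given block in
// O(log n) instead of scanning the vector linearly.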
12511 
12512 VkResult VmaDefragmentationAlgorithm_Generic::DefragmentRound(
12513  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
12514  VkDeviceSize maxBytesToMove,
12515  uint32_t maxAllocationsToMove)
12516 {
12517  if(m_Blocks.empty())
12518  {
12519  return VK_SUCCESS;
12520  }
12521 
12522  // This is a choice based on research.
12523  // Option 1:
12524  uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT;
12525  // Option 2:
12526  //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT;
12527  // Option 3:
12528  //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT;
12529 
12530  size_t srcBlockMinIndex = 0;
12531  // When using FAST_ALGORITHM, move allocations only out of the last of the blocks that contain non-movable allocations.
12532  /*
12533  if(m_AlgorithmFlags & VMA_DEFRAGMENTATION_FAST_ALGORITHM_BIT)
12534  {
12535  const size_t blocksWithNonMovableCount = CalcBlocksWithNonMovableCount();
12536  if(blocksWithNonMovableCount > 0)
12537  {
12538  srcBlockMinIndex = blocksWithNonMovableCount - 1;
12539  }
12540  }
12541  */
12542 
12543  size_t srcBlockIndex = m_Blocks.size() - 1;
12544  size_t srcAllocIndex = SIZE_MAX;
12545  for(;;)
12546  {
12547  // 1. Find next allocation to move.
12548  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
12549  // 1.2. Then start from last to first m_Allocations.
12550  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
12551  {
12552  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
12553  {
12554  // Finished: no more allocations to process.
12555  if(srcBlockIndex == srcBlockMinIndex)
12556  {
12557  return VK_SUCCESS;
12558  }
12559  else
12560  {
12561  --srcBlockIndex;
12562  srcAllocIndex = SIZE_MAX;
12563  }
12564  }
12565  else
12566  {
12567  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
12568  }
12569  }
12570 
12571  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
12572  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
12573 
12574  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
12575  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
12576  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
12577  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
12578 
12579  // 2. Try to find new place for this allocation in preceding or current block.
12580  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
12581  {
12582  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
12583  VmaAllocationRequest dstAllocRequest;
12584  if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
12585  m_CurrentFrameIndex,
12586  m_pBlockVector->GetFrameInUseCount(),
12587  m_pBlockVector->GetBufferImageGranularity(),
12588  size,
12589  alignment,
12590  false, // upperAddress
12591  suballocType,
12592  false, // canMakeOtherLost
12593  strategy,
12594  &dstAllocRequest) &&
12595  MoveMakesSense(
12596  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
12597  {
12598  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
12599 
12600  // Reached limit on number of allocations or bytes to move.
12601  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
12602  (m_BytesMoved + size > maxBytesToMove))
12603  {
12604  return VK_SUCCESS;
12605  }
12606 
12607  VmaDefragmentationMove move;
12608  move.srcBlockIndex = pSrcBlockInfo->m_OriginalBlockIndex;
12609  move.dstBlockIndex = pDstBlockInfo->m_OriginalBlockIndex;
12610  move.srcOffset = srcOffset;
12611  move.dstOffset = dstAllocRequest.offset;
12612  move.size = size;
12613  moves.push_back(move);
12614 
12615  pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
12616  dstAllocRequest,
12617  suballocType,
12618  size,
12619  false, // upperAddress
12620  allocInfo.m_hAllocation);
12621  pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);
12622 
12623  allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
12624 
12625  if(allocInfo.m_pChanged != VMA_NULL)
12626  {
12627  *allocInfo.m_pChanged = VK_TRUE;
12628  }
12629 
12630  ++m_AllocationsMoved;
12631  m_BytesMoved += size;
12632 
12633  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
12634 
12635  break;
12636  }
12637  }
12638 
12639  // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round.
12640 
12641  if(srcAllocIndex > 0)
12642  {
12643  --srcAllocIndex;
12644  }
12645  else
12646  {
12647  if(srcBlockIndex > 0)
12648  {
12649  --srcBlockIndex;
12650  srcAllocIndex = SIZE_MAX;
12651  }
12652  else
12653  {
12654  return VK_SUCCESS;
12655  }
12656  }
12657  }
12658 }
12659 
12660 size_t VmaDefragmentationAlgorithm_Generic::CalcBlocksWithNonMovableCount() const
12661 {
12662  size_t result = 0;
12663  for(size_t i = 0; i < m_Blocks.size(); ++i)
12664  {
12665  if(m_Blocks[i]->m_HasNonMovableAllocations)
12666  {
12667  ++result;
12668  }
12669  }
12670  return result;
12671 }
12672 
12673 VkResult VmaDefragmentationAlgorithm_Generic::Defragment(
12674  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
12675  VkDeviceSize maxBytesToMove,
12676  uint32_t maxAllocationsToMove)
12677 {
12678  if(!m_AllAllocations && m_AllocationCount == 0)
12679  {
12680  return VK_SUCCESS;
12681  }
12682 
12683  const size_t blockCount = m_Blocks.size();
12684  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
12685  {
12686  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
12687 
12688  if(m_AllAllocations)
12689  {
12690  VmaBlockMetadata_Generic* pMetadata = (VmaBlockMetadata_Generic*)pBlockInfo->m_pBlock->m_pMetadata;
12691  for(VmaSuballocationList::const_iterator it = pMetadata->m_Suballocations.begin();
12692  it != pMetadata->m_Suballocations.end();
12693  ++it)
12694  {
12695  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
12696  {
12697  AllocationInfo allocInfo = AllocationInfo(it->hAllocation, VMA_NULL);
12698  pBlockInfo->m_Allocations.push_back(allocInfo);
12699  }
12700  }
12701  }
12702 
12703  pBlockInfo->CalcHasNonMovableAllocations();
12704 
12705  // This is a choice based on research.
12706  // Option 1:
12707  pBlockInfo->SortAllocationsByOffsetDescending();
12708  // Option 2:
12709  //pBlockInfo->SortAllocationsBySizeDescending();
12710  }
12711 
12712  // Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
12713  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
12714 
12715  // This is a choice based on research.
12716  const uint32_t roundCount = 2;
12717 
12718  // Execute defragmentation rounds (the main part).
12719  VkResult result = VK_SUCCESS;
12720  for(uint32_t round = 0; (round < roundCount) && (result == VK_SUCCESS); ++round)
12721  {
12722  result = DefragmentRound(moves, maxBytesToMove, maxAllocationsToMove);
12723  }
12724 
12725  return result;
12726 }
12727 
12728 bool VmaDefragmentationAlgorithm_Generic::MoveMakesSense(
12729  size_t dstBlockIndex, VkDeviceSize dstOffset,
12730  size_t srcBlockIndex, VkDeviceSize srcOffset)
12731 {
12732  if(dstBlockIndex < srcBlockIndex)
12733  {
12734  return true;
12735  }
12736  if(dstBlockIndex > srcBlockIndex)
12737  {
12738  return false;
12739  }
12740  if(dstOffset < srcOffset)
12741  {
12742  return true;
12743  }
12744  return false;
12745 }
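
// Equivalently, a move "makes sense" iff the destination precedes the source
// in lexicographic (blockIndex, offset) order, e.g.:
#if 0
return std::make_pair(dstBlockIndex, dstOffset) <
       std::make_pair(srcBlockIndex, srcOffset); // assumes <utility>
#endif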
12746 
12747 ////////////////////////////////////////////////////////////////////////////////
12748 // VmaDefragmentationAlgorithm_Fast
12749 
12750 VmaDefragmentationAlgorithm_Fast::VmaDefragmentationAlgorithm_Fast(
12751  VmaAllocator hAllocator,
12752  VmaBlockVector* pBlockVector,
12753  uint32_t currentFrameIndex,
12754  bool overlappingMoveSupported) :
12755  VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
12756  m_OverlappingMoveSupported(overlappingMoveSupported),
12757  m_AllocationCount(0),
12758  m_AllAllocations(false),
12759  m_BytesMoved(0),
12760  m_AllocationsMoved(0),
12761  m_BlockInfos(VmaStlAllocator<BlockInfo>(hAllocator->GetAllocationCallbacks()))
12762 {
12763  VMA_ASSERT(VMA_DEBUG_MARGIN == 0);
12764 
12765 }
12766 
12767 VmaDefragmentationAlgorithm_Fast::~VmaDefragmentationAlgorithm_Fast()
12768 {
12769 }
12770 
12771 VkResult VmaDefragmentationAlgorithm_Fast::Defragment(
12772  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
12773  VkDeviceSize maxBytesToMove,
12774  uint32_t maxAllocationsToMove)
12775 {
12776  VMA_ASSERT(m_AllAllocations || m_pBlockVector->CalcAllocationCount() == m_AllocationCount);
12777 
12778  const size_t blockCount = m_pBlockVector->GetBlockCount();
12779  if(blockCount == 0 || maxBytesToMove == 0 || maxAllocationsToMove == 0)
12780  {
12781  return VK_SUCCESS;
12782  }
12783 
12784  PreprocessMetadata();
12785 
12786  // Sort blocks in order from most "destination" to most "source".
12787 
12788  m_BlockInfos.resize(blockCount);
12789  for(size_t i = 0; i < blockCount; ++i)
12790  {
12791  m_BlockInfos[i].origBlockIndex = i;
12792  }
12793 
12794  VMA_SORT(m_BlockInfos.begin(), m_BlockInfos.end(), [this](const BlockInfo& lhs, const BlockInfo& rhs) -> bool {
12795  return m_pBlockVector->GetBlock(lhs.origBlockIndex)->m_pMetadata->GetSumFreeSize() <
12796  m_pBlockVector->GetBlock(rhs.origBlockIndex)->m_pMetadata->GetSumFreeSize();
12797  });
12798 
12799  // THE MAIN ALGORITHM
12800 
12801  FreeSpaceDatabase freeSpaceDb;
12802 
12803  size_t dstBlockInfoIndex = 0;
12804  size_t dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
12805  VmaDeviceMemoryBlock* pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
12806  VmaBlockMetadata_Generic* pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
12807  VkDeviceSize dstBlockSize = pDstMetadata->GetSize();
12808  VkDeviceSize dstOffset = 0;
12809 
12810  bool end = false;
12811  for(size_t srcBlockInfoIndex = 0; !end && srcBlockInfoIndex < blockCount; ++srcBlockInfoIndex)
12812  {
12813  const size_t srcOrigBlockIndex = m_BlockInfos[srcBlockInfoIndex].origBlockIndex;
12814  VmaDeviceMemoryBlock* const pSrcBlock = m_pBlockVector->GetBlock(srcOrigBlockIndex);
12815  VmaBlockMetadata_Generic* const pSrcMetadata = (VmaBlockMetadata_Generic*)pSrcBlock->m_pMetadata;
12816  for(VmaSuballocationList::iterator srcSuballocIt = pSrcMetadata->m_Suballocations.begin();
12817  !end && srcSuballocIt != pSrcMetadata->m_Suballocations.end(); )
12818  {
12819  VmaAllocation_T* const pAlloc = srcSuballocIt->hAllocation;
12820  const VkDeviceSize srcAllocAlignment = pAlloc->GetAlignment();
12821  const VkDeviceSize srcAllocSize = srcSuballocIt->size;
12822  if(m_AllocationsMoved == maxAllocationsToMove ||
12823  m_BytesMoved + srcAllocSize > maxBytesToMove)
12824  {
12825  end = true;
12826  break;
12827  }
12828  const VkDeviceSize srcAllocOffset = srcSuballocIt->offset;
12829 
12830  // Try to place it in one of the free spaces from the database.
12831  size_t freeSpaceInfoIndex;
12832  VkDeviceSize dstAllocOffset;
12833  if(freeSpaceDb.Fetch(srcAllocAlignment, srcAllocSize,
12834  freeSpaceInfoIndex, dstAllocOffset))
12835  {
12836  size_t freeSpaceOrigBlockIndex = m_BlockInfos[freeSpaceInfoIndex].origBlockIndex;
12837  VmaDeviceMemoryBlock* pFreeSpaceBlock = m_pBlockVector->GetBlock(freeSpaceOrigBlockIndex);
12838  VmaBlockMetadata_Generic* pFreeSpaceMetadata = (VmaBlockMetadata_Generic*)pFreeSpaceBlock->m_pMetadata;
12839  VkDeviceSize freeSpaceBlockSize = pFreeSpaceMetadata->GetSize();
12840 
12841  // Same block
12842  if(freeSpaceInfoIndex == srcBlockInfoIndex)
12843  {
12844  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
12845 
12846  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
12847 
12848  VmaSuballocation suballoc = *srcSuballocIt;
12849  suballoc.offset = dstAllocOffset;
12850  suballoc.hAllocation->ChangeOffset(dstAllocOffset);
12851  m_BytesMoved += srcAllocSize;
12852  ++m_AllocationsMoved;
12853 
12854  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
12855  ++nextSuballocIt;
12856  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
12857  srcSuballocIt = nextSuballocIt;
12858 
12859  InsertSuballoc(pFreeSpaceMetadata, suballoc);
12860 
12861  VmaDefragmentationMove move = {
12862  srcOrigBlockIndex, freeSpaceOrigBlockIndex,
12863  srcAllocOffset, dstAllocOffset,
12864  srcAllocSize };
12865  moves.push_back(move);
12866  }
12867  // Different block
12868  else
12869  {
12870  // MOVE OPTION 2: Move the allocation to a different block.
12871 
12872  VMA_ASSERT(freeSpaceInfoIndex < srcBlockInfoIndex);
12873 
12874  VmaSuballocation suballoc = *srcSuballocIt;
12875  suballoc.offset = dstAllocOffset;
12876  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pFreeSpaceBlock, dstAllocOffset);
12877  m_BytesMoved += srcAllocSize;
12878  ++m_AllocationsMoved;
12879 
12880  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
12881  ++nextSuballocIt;
12882  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
12883  srcSuballocIt = nextSuballocIt;
12884 
12885  InsertSuballoc(pFreeSpaceMetadata, suballoc);
12886 
12887  VmaDefragmentationMove move = {
12888  srcOrigBlockIndex, freeSpaceOrigBlockIndex,
12889  srcAllocOffset, dstAllocOffset,
12890  srcAllocSize };
12891  moves.push_back(move);
12892  }
12893  }
12894  else
12895  {
12896  dstAllocOffset = VmaAlignUp(dstOffset, srcAllocAlignment);
12897 
12898  // If the allocation doesn't fit before the end of dstBlock, forward to next block.
12899  while(dstBlockInfoIndex < srcBlockInfoIndex &&
12900  dstAllocOffset + srcAllocSize > dstBlockSize)
12901  {
12902  // But before that, register remaining free space at the end of dst block.
12903  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, dstBlockSize - dstOffset);
12904 
12905  ++dstBlockInfoIndex;
12906  dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
12907  pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
12908  pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
12909  dstBlockSize = pDstMetadata->GetSize();
12910  dstOffset = 0;
12911  dstAllocOffset = 0;
12912  }
12913 
12914  // Same block
12915  if(dstBlockInfoIndex == srcBlockInfoIndex)
12916  {
12917  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
12918 
12919  const bool overlap = dstAllocOffset + srcAllocSize > srcAllocOffset;
12920 
12921  bool skipOver = overlap;
12922  if(overlap && m_OverlappingMoveSupported && dstAllocOffset < srcAllocOffset)
12923  {
12924  // If destination and source places overlap, skip the move if it would
12925  // move the allocation by only < 1/64 of its size.
12926  skipOver = (srcAllocOffset - dstAllocOffset) * 64 < srcAllocSize;
12927  }
12928 
12929  if(skipOver)
12930  {
12931  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, srcAllocOffset - dstOffset);
12932 
12933  dstOffset = srcAllocOffset + srcAllocSize;
12934  ++srcSuballocIt;
12935  }
12936  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
12937  else
12938  {
12939  srcSuballocIt->offset = dstAllocOffset;
12940  srcSuballocIt->hAllocation->ChangeOffset(dstAllocOffset);
12941  dstOffset = dstAllocOffset + srcAllocSize;
12942  m_BytesMoved += srcAllocSize;
12943  ++m_AllocationsMoved;
12944  ++srcSuballocIt;
12945  VmaDefragmentationMove move = {
12946  srcOrigBlockIndex, dstOrigBlockIndex,
12947  srcAllocOffset, dstAllocOffset,
12948  srcAllocSize };
12949  moves.push_back(move);
12950  }
12951  }
12952  // Different block
12953  else
12954  {
12955  // MOVE OPTION 2: Move the allocation to a different block.
12956 
12957  VMA_ASSERT(dstBlockInfoIndex < srcBlockInfoIndex);
12958  VMA_ASSERT(dstAllocOffset + srcAllocSize <= dstBlockSize);
12959 
12960  VmaSuballocation suballoc = *srcSuballocIt;
12961  suballoc.offset = dstAllocOffset;
12962  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlock, dstAllocOffset);
12963  dstOffset = dstAllocOffset + srcAllocSize;
12964  m_BytesMoved += srcAllocSize;
12965  ++m_AllocationsMoved;
12966 
12967  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
12968  ++nextSuballocIt;
12969  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
12970  srcSuballocIt = nextSuballocIt;
12971 
12972  pDstMetadata->m_Suballocations.push_back(suballoc);
12973 
12974  VmaDefragmentationMove move = {
12975  srcOrigBlockIndex, dstOrigBlockIndex,
12976  srcAllocOffset, dstAllocOffset,
12977  srcAllocSize };
12978  moves.push_back(move);
12979  }
12980  }
12981  }
12982  }
12983 
12984  m_BlockInfos.clear();
12985 
12986  PostprocessMetadata();
12987 
12988  return VK_SUCCESS;
12989 }
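
// Summary of the fast algorithm above: blocks are sorted from most
// "destination" (least free space) to most "source", then allocations are
// slid toward the front in a single pass, with a small free-space database
// catching the holes left behind by alignment or by skipped-over allocations.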
12990 
12991 void VmaDefragmentationAlgorithm_Fast::PreprocessMetadata()
12992 {
12993  const size_t blockCount = m_pBlockVector->GetBlockCount();
12994  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
12995  {
12996  VmaBlockMetadata_Generic* const pMetadata =
12997  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
12998  pMetadata->m_FreeCount = 0;
12999  pMetadata->m_SumFreeSize = pMetadata->GetSize();
13000  pMetadata->m_FreeSuballocationsBySize.clear();
13001  for(VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
13002  it != pMetadata->m_Suballocations.end(); )
13003  {
13004  if(it->type == VMA_SUBALLOCATION_TYPE_FREE)
13005  {
13006  VmaSuballocationList::iterator nextIt = it;
13007  ++nextIt;
13008  pMetadata->m_Suballocations.erase(it);
13009  it = nextIt;
13010  }
13011  else
13012  {
13013  ++it;
13014  }
13015  }
13016  }
13017 }
13018 
13019 void VmaDefragmentationAlgorithm_Fast::PostprocessMetadata()
13020 {
13021  const size_t blockCount = m_pBlockVector->GetBlockCount();
13022  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
13023  {
13024  VmaBlockMetadata_Generic* const pMetadata =
13025  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
13026  const VkDeviceSize blockSize = pMetadata->GetSize();
13027 
13028  // No allocations in this block - entire area is free.
13029  if(pMetadata->m_Suballocations.empty())
13030  {
13031  pMetadata->m_FreeCount = 1;
13032  //pMetadata->m_SumFreeSize is already set to blockSize.
13033  VmaSuballocation suballoc = {
13034  0, // offset
13035  blockSize, // size
13036  VMA_NULL, // hAllocation
13037  VMA_SUBALLOCATION_TYPE_FREE };
13038  pMetadata->m_Suballocations.push_back(suballoc);
13039  pMetadata->RegisterFreeSuballocation(pMetadata->m_Suballocations.begin());
13040  }
13041  // There are some allocations in this block.
13042  else
13043  {
13044  VkDeviceSize offset = 0;
13045  VmaSuballocationList::iterator it;
13046  for(it = pMetadata->m_Suballocations.begin();
13047  it != pMetadata->m_Suballocations.end();
13048  ++it)
13049  {
13050  VMA_ASSERT(it->type != VMA_SUBALLOCATION_TYPE_FREE);
13051  VMA_ASSERT(it->offset >= offset);
13052 
13053  // Need to insert preceding free space.
13054  if(it->offset > offset)
13055  {
13056  ++pMetadata->m_FreeCount;
13057  const VkDeviceSize freeSize = it->offset - offset;
13058  VmaSuballocation suballoc = {
13059  offset, // offset
13060  freeSize, // size
13061  VMA_NULL, // hAllocation
13062  VMA_SUBALLOCATION_TYPE_FREE };
13063  VmaSuballocationList::iterator precedingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
13064  if(freeSize >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
13065  {
13066  pMetadata->m_FreeSuballocationsBySize.push_back(precedingFreeIt);
13067  }
13068  }
13069 
13070  pMetadata->m_SumFreeSize -= it->size;
13071  offset = it->offset + it->size;
13072  }
13073 
13074  // Need to insert trailing free space.
13075  if(offset < blockSize)
13076  {
13077  ++pMetadata->m_FreeCount;
13078  const VkDeviceSize freeSize = blockSize - offset;
13079  VmaSuballocation suballoc = {
13080  offset, // offset
13081  freeSize, // size
13082  VMA_NULL, // hAllocation
13083  VMA_SUBALLOCATION_TYPE_FREE };
13084  VMA_ASSERT(it == pMetadata->m_Suballocations.end());
13085  VmaSuballocationList::iterator trailingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
13086  if(freeSize > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
13087  {
13088  pMetadata->m_FreeSuballocationsBySize.push_back(trailingFreeIt);
13089  }
13090  }
13091 
13092  VMA_SORT(
13093  pMetadata->m_FreeSuballocationsBySize.begin(),
13094  pMetadata->m_FreeSuballocationsBySize.end(),
13095  VmaSuballocationItemSizeLess());
13096  }
13097 
13098  VMA_HEAVY_ASSERT(pMetadata->Validate());
13099  }
13100 }
13101 
13102 void VmaDefragmentationAlgorithm_Fast::InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc)
13103 {
13104  // TODO: Optimize somehow. Remember iterator instead of searching for it linearly.
13105  VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
13106  while(it != pMetadata->m_Suballocations.end() &&
13107  it->offset < suballoc.offset)
13108  {
13109  // Advance past suballocations that start before the new one.
13110  ++it;
13111  }
13112  // Insert before the first suballocation at an equal or greater offset.
13113  pMetadata->m_Suballocations.insert(it, suballoc);
13114 }
13115 
13116 ////////////////////////////////////////////////////////////////////////////////
13117 // VmaBlockVectorDefragmentationContext
13118 
13119 VmaBlockVectorDefragmentationContext::VmaBlockVectorDefragmentationContext(
13120  VmaAllocator hAllocator,
13121  VmaPool hCustomPool,
13122  VmaBlockVector* pBlockVector,
13123  uint32_t currFrameIndex,
13124  uint32_t algorithmFlags) :
13125  res(VK_SUCCESS),
13126  mutexLocked(false),
13127  blockContexts(VmaStlAllocator<VmaBlockDefragmentationContext>(hAllocator->GetAllocationCallbacks())),
13128  m_hAllocator(hAllocator),
13129  m_hCustomPool(hCustomPool),
13130  m_pBlockVector(pBlockVector),
13131  m_CurrFrameIndex(currFrameIndex),
13132  m_AlgorithmFlags(algorithmFlags),
13133  m_pAlgorithm(VMA_NULL),
13134  m_Allocations(VmaStlAllocator<AllocInfo>(hAllocator->GetAllocationCallbacks())),
13135  m_AllAllocations(false)
13136 {
13137 }
13138 
13139 VmaBlockVectorDefragmentationContext::~VmaBlockVectorDefragmentationContext()
13140 {
13141  vma_delete(m_hAllocator, m_pAlgorithm);
13142 }
13143 
13144 void VmaBlockVectorDefragmentationContext::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
13145 {
13146  AllocInfo info = { hAlloc, pChanged };
13147  m_Allocations.push_back(info);
13148 }
13149 
13150 void VmaBlockVectorDefragmentationContext::Begin(bool overlappingMoveSupported)
13151 {
13152  const bool allAllocations = m_AllAllocations ||
13153  m_Allocations.size() == m_pBlockVector->CalcAllocationCount();
13154 
13155  /********************************
13156  HERE IS THE CHOICE OF DEFRAGMENTATION ALGORITHM.
13157  ********************************/
13158 
13159  /*
13160  Fast algorithm is supported only when certain criteria are met:
13161  - VMA_DEBUG_MARGIN is 0.
13162  - All allocations in this block vector are movable.
13163  - There is no possibility of image/buffer granularity conflict.
13164  */
13165  if(VMA_DEBUG_MARGIN == 0 &&
13166  allAllocations &&
13167  !m_pBlockVector->IsBufferImageGranularityConflictPossible())
13168  {
13169  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Fast)(
13170  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
13171  }
13172  else
13173  {
13174  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Generic)(
13175  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
13176  }
13177 
13178  if(allAllocations)
13179  {
13180  m_pAlgorithm->AddAll();
13181  }
13182  else
13183  {
13184  for(size_t i = 0, count = m_Allocations.size(); i < count; ++i)
13185  {
13186  m_pAlgorithm->AddAllocation(m_Allocations[i].hAlloc, m_Allocations[i].pChanged);
13187  }
13188  }
13189 }
13190 
13192 // VmaDefragmentationContext
13193 
13194 VmaDefragmentationContext_T::VmaDefragmentationContext_T(
13195  VmaAllocator hAllocator,
13196  uint32_t currFrameIndex,
13197  uint32_t flags,
13198  VmaDefragmentationStats* pStats) :
13199  m_hAllocator(hAllocator),
13200  m_CurrFrameIndex(currFrameIndex),
13201  m_Flags(flags),
13202  m_pStats(pStats),
13203  m_CustomPoolContexts(VmaStlAllocator<VmaBlockVectorDefragmentationContext*>(hAllocator->GetAllocationCallbacks()))
13204 {
13205  memset(m_DefaultPoolContexts, 0, sizeof(m_DefaultPoolContexts));
13206 }
13207 
13208 VmaDefragmentationContext_T::~VmaDefragmentationContext_T()
13209 {
13210  for(size_t i = m_CustomPoolContexts.size(); i--; )
13211  {
13212  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[i];
13213  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
13214  vma_delete(m_hAllocator, pBlockVectorCtx);
13215  }
13216  for(size_t i = m_hAllocator->m_MemProps.memoryTypeCount; i--; )
13217  {
13218  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[i];
13219  if(pBlockVectorCtx)
13220  {
13221  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
13222  vma_delete(m_hAllocator, pBlockVectorCtx);
13223  }
13224  }
13225 }
13226 
13227 void VmaDefragmentationContext_T::AddPools(uint32_t poolCount, VmaPool* pPools)
13228 {
13229  for(uint32_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
13230  {
13231  VmaPool pool = pPools[poolIndex];
13232  VMA_ASSERT(pool);
13233  // Pools with algorithm other than default are not defragmented.
13234  if(pool->m_BlockVector.GetAlgorithm() == 0)
13235  {
13236  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
13237 
13238  for(size_t i = m_CustomPoolContexts.size(); i--; )
13239  {
13240  if(m_CustomPoolContexts[i]->GetCustomPool() == pool)
13241  {
13242  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
13243  break;
13244  }
13245  }
13246 
13247  if(!pBlockVectorDefragCtx)
13248  {
13249  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
13250  m_hAllocator,
13251  pool,
13252  &pool->m_BlockVector,
13253  m_CurrFrameIndex,
13254  m_Flags);
13255  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
13256  }
13257 
13258  pBlockVectorDefragCtx->AddAll();
13259  }
13260  }
13261 }
13262 
13263 void VmaDefragmentationContext_T::AddAllocations(
13264  uint32_t allocationCount,
13265  VmaAllocation* pAllocations,
13266  VkBool32* pAllocationsChanged)
13267 {
13268  // Dispatch pAllocations among defragmentation contexts. Create them when necessary.
13269  for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
13270  {
13271  const VmaAllocation hAlloc = pAllocations[allocIndex];
13272  VMA_ASSERT(hAlloc);
13273  // DedicatedAlloc cannot be defragmented.
13274  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
13275  // Lost allocation cannot be defragmented.
13276  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
13277  {
13278  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
13279 
13280  const VmaPool hAllocPool = hAlloc->GetPool();
13281  // This allocation belongs to custom pool.
13282  if(hAllocPool != VK_NULL_HANDLE)
13283  {
13284  // Pools with algorithm other than default are not defragmented.
13285  if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
13286  {
13287  for(size_t i = m_CustomPoolContexts.size(); i--; )
13288  {
13289  if(m_CustomPoolContexts[i]->GetCustomPool() == hAllocPool)
13290  {
13291  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
13292  break;
13293  }
13294  }
13295  if(!pBlockVectorDefragCtx)
13296  {
13297  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
13298  m_hAllocator,
13299  hAllocPool,
13300  &hAllocPool->m_BlockVector,
13301  m_CurrFrameIndex,
13302  m_Flags);
13303  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
13304  }
13305  }
13306  }
13307  // This allocation belongs to default pool.
13308  else
13309  {
13310  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
13311  pBlockVectorDefragCtx = m_DefaultPoolContexts[memTypeIndex];
13312  if(!pBlockVectorDefragCtx)
13313  {
13314  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
13315  m_hAllocator,
13316  VMA_NULL, // hCustomPool
13317  m_hAllocator->m_pBlockVectors[memTypeIndex],
13318  m_CurrFrameIndex,
13319  m_Flags);
13320  m_DefaultPoolContexts[memTypeIndex] = pBlockVectorDefragCtx;
13321  }
13322  }
13323 
13324  if(pBlockVectorDefragCtx)
13325  {
13326  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
13327  &pAllocationsChanged[allocIndex] : VMA_NULL;
13328  pBlockVectorDefragCtx->AddAllocation(hAlloc, pChanged);
13329  }
13330  }
13331  }
13332 }
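// Client-side counterpart of AddAllocations(): the optional
// pAllocationsChanged array receives VK_TRUE for every allocation that was
// actually moved. A hedged sketch; `allocs` and `allocCount` are hypothetical
// names for an array of VmaAllocation handles owned by the caller:
#if 0 // illustrative sketch, not part of the library
std::vector<VkBool32> allocationsChanged(allocCount);
VmaDefragmentationInfo2 defragInfo = {};
defragInfo.allocationCount = allocCount;
defragInfo.pAllocations = allocs;
defragInfo.pAllocationsChanged = allocationsChanged.data();
#endif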
13333 
13334 VkResult VmaDefragmentationContext_T::Defragment(
13335  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
13336  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
13337  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats)
13338 {
13339  if(pStats)
13340  {
13341  memset(pStats, 0, sizeof(VmaDefragmentationStats));
13342  }
13343 
13344  if(commandBuffer == VK_NULL_HANDLE)
13345  {
13346  maxGpuBytesToMove = 0;
13347  maxGpuAllocationsToMove = 0;
13348  }
13349 
13350  VkResult res = VK_SUCCESS;
13351 
13352  // Process default pools.
13353  for(uint32_t memTypeIndex = 0;
13354  memTypeIndex < m_hAllocator->GetMemoryTypeCount() && res >= VK_SUCCESS;
13355  ++memTypeIndex)
13356  {
13357  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
13358  if(pBlockVectorCtx)
13359  {
13360  VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
13361  pBlockVectorCtx->GetBlockVector()->Defragment(
13362  pBlockVectorCtx,
13363  pStats,
13364  maxCpuBytesToMove, maxCpuAllocationsToMove,
13365  maxGpuBytesToMove, maxGpuAllocationsToMove,
13366  commandBuffer);
13367  if(pBlockVectorCtx->res != VK_SUCCESS)
13368  {
13369  res = pBlockVectorCtx->res;
13370  }
13371  }
13372  }
13373 
13374  // Process custom pools.
13375  for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
13376  customCtxIndex < customCtxCount && res >= VK_SUCCESS;
13377  ++customCtxIndex)
13378  {
13379  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
13380  VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
13381  pBlockVectorCtx->GetBlockVector()->Defragment(
13382  pBlockVectorCtx,
13383  pStats,
13384  maxCpuBytesToMove, maxCpuAllocationsToMove,
13385  maxGpuBytesToMove, maxGpuAllocationsToMove,
13386  commandBuffer);
13387  if(pBlockVectorCtx->res != VK_SUCCESS)
13388  {
13389  res = pBlockVectorCtx->res;
13390  }
13391  }
13392 
13393  return res;
13394 }
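// End-to-end usage of the Defragment() pass above through the public API.
// A hedged, CPU-only sketch: passing VK_NULL_HANDLE as commandBuffer zeroes
// the GPU limits, exactly as the function above does. `allocator`, `allocs`
// and `allocCount` are assumed to exist in the caller.
#if 0 // illustrative sketch, not part of the library
VmaDefragmentationInfo2 defragInfo = {};
defragInfo.allocationCount = allocCount;
defragInfo.pAllocations = allocs;
defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE; // no byte limit
defragInfo.maxCpuAllocationsToMove = UINT32_MAX; // no count limit
defragInfo.commandBuffer = VK_NULL_HANDLE; // CPU-only: GPU limits become 0

VmaDefragmentationStats stats = {};
VmaDefragmentationContext defragCtx = VMA_NULL;
VkResult res = vmaDefragmentationBegin(allocator, &defragInfo, &stats, &defragCtx);
// Ending a context that already completed (defragCtx == VMA_NULL) is safe.
vmaDefragmentationEnd(allocator, defragCtx);
#endif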
13395 
13396 ////////////////////////////////////////////////////////////////////////////////
13397 // VmaRecorder
13398 
13399 #if VMA_RECORDING_ENABLED
13400 
13401 VmaRecorder::VmaRecorder() :
13402  m_UseMutex(true),
13403  m_Flags(0),
13404  m_File(VMA_NULL),
13405  m_Freq(INT64_MAX),
13406  m_StartCounter(INT64_MAX)
13407 {
13408 }
13409 
13410 VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
13411 {
13412  m_UseMutex = useMutex;
13413  m_Flags = settings.flags;
13414 
13415  QueryPerformanceFrequency((LARGE_INTEGER*)&m_Freq);
13416  QueryPerformanceCounter((LARGE_INTEGER*)&m_StartCounter);
13417 
13418  // Open file for writing.
13419  errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
13420  if(err != 0)
13421  {
13422  return VK_ERROR_INITIALIZATION_FAILED;
13423  }
13424 
13425  // Write header.
13426  fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
13427  fprintf(m_File, "%s\n", "1,5");
13428 
13429  return VK_SUCCESS;
13430 }
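// How a recording such as the file opened above is enabled from client code:
// the application points VmaAllocatorCreateInfo::pRecordSettings at a
// VmaRecordSettings struct. A hedged sketch; `physicalDevice` and `device`
// are assumed valid, and VMA_RECORDING_ENABLED must be defined to 1.
#if 0 // illustrative sketch, not part of the library
VmaRecordSettings recordSettings = {};
recordSettings.flags = VMA_RECORD_FLUSH_AFTER_CALL_BIT; // see Flush() below
recordSettings.pFilePath = "vma_recording.csv";

VmaAllocatorCreateInfo allocatorInfo = {};
allocatorInfo.physicalDevice = physicalDevice;
allocatorInfo.device = device;
allocatorInfo.pRecordSettings = &recordSettings;

VmaAllocator allocator = VK_NULL_HANDLE;
VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
#endif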
13431 
13432 VmaRecorder::~VmaRecorder()
13433 {
13434  if(m_File != VMA_NULL)
13435  {
13436  fclose(m_File);
13437  }
13438 }
13439 
13440 void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
13441 {
13442  CallParams callParams;
13443  GetBasicParams(callParams);
13444 
13445  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13446  fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
13447  Flush();
13448 }
13449 
13450 void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
13451 {
13452  CallParams callParams;
13453  GetBasicParams(callParams);
13454 
13455  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13456  fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
13457  Flush();
13458 }
13459 
13460 void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
13461 {
13462  CallParams callParams;
13463  GetBasicParams(callParams);
13464 
13465  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13466  fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
13467  createInfo.memoryTypeIndex,
13468  createInfo.flags,
13469  createInfo.blockSize,
13470  (uint64_t)createInfo.minBlockCount,
13471  (uint64_t)createInfo.maxBlockCount,
13472  createInfo.frameInUseCount,
13473  pool);
13474  Flush();
13475 }
13476 
13477 void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
13478 {
13479  CallParams callParams;
13480  GetBasicParams(callParams);
13481 
13482  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13483  fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
13484  pool);
13485  Flush();
13486 }
13487 
13488 void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
13489  const VkMemoryRequirements& vkMemReq,
13490  const VmaAllocationCreateInfo& createInfo,
13491  VmaAllocation allocation)
13492 {
13493  CallParams callParams;
13494  GetBasicParams(callParams);
13495 
13496  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13497  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
13498  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13499  vkMemReq.size,
13500  vkMemReq.alignment,
13501  vkMemReq.memoryTypeBits,
13502  createInfo.flags,
13503  createInfo.usage,
13504  createInfo.requiredFlags,
13505  createInfo.preferredFlags,
13506  createInfo.memoryTypeBits,
13507  createInfo.pool,
13508  allocation,
13509  userDataStr.GetString());
13510  Flush();
13511 }
13512 
13513 void VmaRecorder::RecordAllocateMemoryPages(uint32_t frameIndex,
13514  const VkMemoryRequirements& vkMemReq,
13515  const VmaAllocationCreateInfo& createInfo,
13516  uint64_t allocationCount,
13517  const VmaAllocation* pAllocations)
13518 {
13519  CallParams callParams;
13520  GetBasicParams(callParams);
13521 
13522  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13523  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
13524  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryPages,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,", callParams.threadId, callParams.time, frameIndex,
13525  vkMemReq.size,
13526  vkMemReq.alignment,
13527  vkMemReq.memoryTypeBits,
13528  createInfo.flags,
13529  createInfo.usage,
13530  createInfo.requiredFlags,
13531  createInfo.preferredFlags,
13532  createInfo.memoryTypeBits,
13533  createInfo.pool);
13534  PrintPointerList(allocationCount, pAllocations);
13535  fprintf(m_File, ",%s\n", userDataStr.GetString());
13536  Flush();
13537 }
13538 
13539 void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
13540  const VkMemoryRequirements& vkMemReq,
13541  bool requiresDedicatedAllocation,
13542  bool prefersDedicatedAllocation,
13543  const VmaAllocationCreateInfo& createInfo,
13544  VmaAllocation allocation)
13545 {
13546  CallParams callParams;
13547  GetBasicParams(callParams);
13548 
13549  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13550  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
13551  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13552  vkMemReq.size,
13553  vkMemReq.alignment,
13554  vkMemReq.memoryTypeBits,
13555  requiresDedicatedAllocation ? 1 : 0,
13556  prefersDedicatedAllocation ? 1 : 0,
13557  createInfo.flags,
13558  createInfo.usage,
13559  createInfo.requiredFlags,
13560  createInfo.preferredFlags,
13561  createInfo.memoryTypeBits,
13562  createInfo.pool,
13563  allocation,
13564  userDataStr.GetString());
13565  Flush();
13566 }
13567 
13568 void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
13569  const VkMemoryRequirements& vkMemReq,
13570  bool requiresDedicatedAllocation,
13571  bool prefersDedicatedAllocation,
13572  const VmaAllocationCreateInfo& createInfo,
13573  VmaAllocation allocation)
13574 {
13575  CallParams callParams;
13576  GetBasicParams(callParams);
13577 
13578  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13579  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
13580  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13581  vkMemReq.size,
13582  vkMemReq.alignment,
13583  vkMemReq.memoryTypeBits,
13584  requiresDedicatedAllocation ? 1 : 0,
13585  prefersDedicatedAllocation ? 1 : 0,
13586  createInfo.flags,
13587  createInfo.usage,
13588  createInfo.requiredFlags,
13589  createInfo.preferredFlags,
13590  createInfo.memoryTypeBits,
13591  createInfo.pool,
13592  allocation,
13593  userDataStr.GetString());
13594  Flush();
13595 }
13596 
13597 void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
13598  VmaAllocation allocation)
13599 {
13600  CallParams callParams;
13601  GetBasicParams(callParams);
13602 
13603  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13604  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
13605  allocation);
13606  Flush();
13607 }
13608 
13609 void VmaRecorder::RecordFreeMemoryPages(uint32_t frameIndex,
13610  uint64_t allocationCount,
13611  const VmaAllocation* pAllocations)
13612 {
13613  CallParams callParams;
13614  GetBasicParams(callParams);
13615 
13616  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13617  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemoryPages,", callParams.threadId, callParams.time, frameIndex);
13618  PrintPointerList(allocationCount, pAllocations);
13619  fprintf(m_File, "\n");
13620  Flush();
13621 }
13622 
13623 void VmaRecorder::RecordResizeAllocation(
13624  uint32_t frameIndex,
13625  VmaAllocation allocation,
13626  VkDeviceSize newSize)
13627 {
13628  CallParams callParams;
13629  GetBasicParams(callParams);
13630 
13631  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13632  fprintf(m_File, "%u,%.3f,%u,vmaResizeAllocation,%p,%llu\n", callParams.threadId, callParams.time, frameIndex,
13633  allocation, newSize);
13634  Flush();
13635 }
13636 
13637 void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
13638  VmaAllocation allocation,
13639  const void* pUserData)
13640 {
13641  CallParams callParams;
13642  GetBasicParams(callParams);
13643 
13644  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13645  UserDataString userDataStr(
13646  allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
13647  pUserData);
13648  fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13649  allocation,
13650  userDataStr.GetString());
13651  Flush();
13652 }
13653 
13654 void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
13655  VmaAllocation allocation)
13656 {
13657  CallParams callParams;
13658  GetBasicParams(callParams);
13659 
13660  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13661  fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
13662  allocation);
13663  Flush();
13664 }
13665 
13666 void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
13667  VmaAllocation allocation)
13668 {
13669  CallParams callParams;
13670  GetBasicParams(callParams);
13671 
13672  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13673  fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
13674  allocation);
13675  Flush();
13676 }
13677 
13678 void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
13679  VmaAllocation allocation)
13680 {
13681  CallParams callParams;
13682  GetBasicParams(callParams);
13683 
13684  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13685  fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
13686  allocation);
13687  Flush();
13688 }
13689 
13690 void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
13691  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
13692 {
13693  CallParams callParams;
13694  GetBasicParams(callParams);
13695 
13696  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13697  fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
13698  allocation,
13699  offset,
13700  size);
13701  Flush();
13702 }
13703 
13704 void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
13705  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
13706 {
13707  CallParams callParams;
13708  GetBasicParams(callParams);
13709 
13710  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13711  fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
13712  allocation,
13713  offset,
13714  size);
13715  Flush();
13716 }
13717 
13718 void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
13719  const VkBufferCreateInfo& bufCreateInfo,
13720  const VmaAllocationCreateInfo& allocCreateInfo,
13721  VmaAllocation allocation)
13722 {
13723  CallParams callParams;
13724  GetBasicParams(callParams);
13725 
13726  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13727  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
13728  fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13729  bufCreateInfo.flags,
13730  bufCreateInfo.size,
13731  bufCreateInfo.usage,
13732  bufCreateInfo.sharingMode,
13733  allocCreateInfo.flags,
13734  allocCreateInfo.usage,
13735  allocCreateInfo.requiredFlags,
13736  allocCreateInfo.preferredFlags,
13737  allocCreateInfo.memoryTypeBits,
13738  allocCreateInfo.pool,
13739  allocation,
13740  userDataStr.GetString());
13741  Flush();
13742 }
13743 
13744 void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
13745  const VkImageCreateInfo& imageCreateInfo,
13746  const VmaAllocationCreateInfo& allocCreateInfo,
13747  VmaAllocation allocation)
13748 {
13749  CallParams callParams;
13750  GetBasicParams(callParams);
13751 
13752  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13753  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
13754  fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13755  imageCreateInfo.flags,
13756  imageCreateInfo.imageType,
13757  imageCreateInfo.format,
13758  imageCreateInfo.extent.width,
13759  imageCreateInfo.extent.height,
13760  imageCreateInfo.extent.depth,
13761  imageCreateInfo.mipLevels,
13762  imageCreateInfo.arrayLayers,
13763  imageCreateInfo.samples,
13764  imageCreateInfo.tiling,
13765  imageCreateInfo.usage,
13766  imageCreateInfo.sharingMode,
13767  imageCreateInfo.initialLayout,
13768  allocCreateInfo.flags,
13769  allocCreateInfo.usage,
13770  allocCreateInfo.requiredFlags,
13771  allocCreateInfo.preferredFlags,
13772  allocCreateInfo.memoryTypeBits,
13773  allocCreateInfo.pool,
13774  allocation,
13775  userDataStr.GetString());
13776  Flush();
13777 }
13778 
13779 void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
13780  VmaAllocation allocation)
13781 {
13782  CallParams callParams;
13783  GetBasicParams(callParams);
13784 
13785  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13786  fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
13787  allocation);
13788  Flush();
13789 }
13790 
13791 void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
13792  VmaAllocation allocation)
13793 {
13794  CallParams callParams;
13795  GetBasicParams(callParams);
13796 
13797  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13798  fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
13799  allocation);
13800  Flush();
13801 }
13802 
13803 void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
13804  VmaAllocation allocation)
13805 {
13806  CallParams callParams;
13807  GetBasicParams(callParams);
13808 
13809  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13810  fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
13811  allocation);
13812  Flush();
13813 }
13814 
13815 void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
13816  VmaAllocation allocation)
13817 {
13818  CallParams callParams;
13819  GetBasicParams(callParams);
13820 
13821  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13822  fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
13823  allocation);
13824  Flush();
13825 }
13826 
13827 void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
13828  VmaPool pool)
13829 {
13830  CallParams callParams;
13831  GetBasicParams(callParams);
13832 
13833  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13834  fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
13835  pool);
13836  Flush();
13837 }
13838 
13839 void VmaRecorder::RecordDefragmentationBegin(uint32_t frameIndex,
13840  const VmaDefragmentationInfo2& info,
13841  VmaDefragmentationContext ctx)
13842 {
13843  CallParams callParams;
13844  GetBasicParams(callParams);
13845 
13846  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13847  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationBegin,%u,", callParams.threadId, callParams.time, frameIndex,
13848  info.flags);
13849  PrintPointerList(info.allocationCount, info.pAllocations);
13850  fprintf(m_File, ",");
13851  PrintPointerList(info.poolCount, info.pPools);
13852  fprintf(m_File, ",%llu,%u,%llu,%u,%p,%p\n",
13853  info.maxCpuBytesToMove,
13854  info.maxCpuAllocationsToMove,
13855  info.maxGpuBytesToMove,
13856  info.maxGpuAllocationsToMove,
13857  info.commandBuffer,
13858  ctx);
13859  Flush();
13860 }
13861 
13862 void VmaRecorder::RecordDefragmentationEnd(uint32_t frameIndex,
13863  VmaDefragmentationContext ctx)
13864 {
13865  CallParams callParams;
13866  GetBasicParams(callParams);
13867 
13868  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13869  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationEnd,%p\n", callParams.threadId, callParams.time, frameIndex,
13870  ctx);
13871  Flush();
13872 }
13873 
13874 VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
13875 {
13876  if(pUserData != VMA_NULL)
13877  {
13878  if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
13879  {
13880  m_Str = (const char*)pUserData;
13881  }
13882  else
13883  {
13884  sprintf_s(m_PtrStr, "%p", pUserData);
13885  m_Str = m_PtrStr;
13886  }
13887  }
13888  else
13889  {
13890  m_Str = "";
13891  }
13892 }
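// The branch above writes pUserData verbatim only when the allocation carries
// VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT; otherwise the raw pointer
// value is printed. A hedged sketch of the matching client-side setup:
#if 0 // illustrative sketch, not part of the library
VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
allocCreateInfo.flags = VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT;
allocCreateInfo.pUserData = (void*)"Texture: hero_albedo"; // string is copied
#endif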
13893 
13894 void VmaRecorder::WriteConfiguration(
13895  const VkPhysicalDeviceProperties& devProps,
13896  const VkPhysicalDeviceMemoryProperties& memProps,
13897  bool dedicatedAllocationExtensionEnabled)
13898 {
13899  fprintf(m_File, "Config,Begin\n");
13900 
13901  fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
13902  fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
13903  fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
13904  fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
13905  fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
13906  fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);
13907 
13908  fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
13909  fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
13910  fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);
13911 
13912  fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
13913  for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
13914  {
13915  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
13916  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
13917  }
13918  fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
13919  for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
13920  {
13921  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
13922  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
13923  }
13924 
13925  fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);
13926 
13927  fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
13928  fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
13929  fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
13930  fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
13931  fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
13932  fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
13933  fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
13934  fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
13935  fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
13936 
13937  fprintf(m_File, "Config,End\n");
13938 }
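// For reference, the beginning of a recording produced by Init() and
// WriteConfiguration() looks roughly like this (the format lines follow the
// fprintf calls above; the numeric values are made up for illustration):
//
//   Vulkan Memory Allocator,Calls recording
//   1,5
//   Config,Begin
//   PhysicalDevice,apiVersion,4198400
//   PhysicalDevice,vendorID,4098
//   ...
//   Config,End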
13939 
13940 void VmaRecorder::GetBasicParams(CallParams& outParams)
13941 {
13942  outParams.threadId = GetCurrentThreadId();
13943 
13944  LARGE_INTEGER counter;
13945  QueryPerformanceCounter(&counter);
13946  outParams.time = (double)(counter.QuadPart - m_StartCounter) / (double)m_Freq;
13947 }
13948 
13949 void VmaRecorder::PrintPointerList(uint64_t count, const VmaAllocation* pItems)
13950 {
13951  if(count)
13952  {
13953  fprintf(m_File, "%p", pItems[0]);
13954  for(uint64_t i = 1; i < count; ++i)
13955  {
13956  fprintf(m_File, " %p", pItems[i]);
13957  }
13958  }
13959 }
13960 
13961 void VmaRecorder::Flush()
13962 {
13963  if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
13964  {
13965  fflush(m_File);
13966  }
13967 }
13968 
13969 #endif // #if VMA_RECORDING_ENABLED
13970 
13971 ////////////////////////////////////////////////////////////////////////////////
13972 // VmaAllocator_T
13973 
13974 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
13975  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
13976  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
13977  m_hDevice(pCreateInfo->device),
13978  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
13979  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
13980  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
13981  m_PreferredLargeHeapBlockSize(0),
13982  m_PhysicalDevice(pCreateInfo->physicalDevice),
13983  m_CurrentFrameIndex(0),
13984  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
13985  m_NextPoolId(0)
13986 #if VMA_RECORDING_ENABLED
13987  ,m_pRecorder(VMA_NULL)
13988 #endif
13989 {
13990  if(VMA_DEBUG_DETECT_CORRUPTION)
13991  {
13992  // Needs to be a multiple of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
13993  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
13994  }
13995 
13996  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
13997 
13998 #if !(VMA_DEDICATED_ALLOCATION)
13999  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0)
14000  {
14001  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
14002  }
14003 #endif
14004 
14005  memset(&m_DeviceMemoryCallbacks, 0, sizeof(m_DeviceMemoryCallbacks));
14006  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
14007  memset(&m_MemProps, 0, sizeof(m_MemProps));
14008 
14009  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
14010  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
14011 
14012  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
14013  {
14014  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
14015  }
14016 
14017  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
14018  {
14019  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
14020  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
14021  }
14022 
14023  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
14024 
14025  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
14026  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
14027 
14028  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
14029  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
14030  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
14031  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
14032 
14033  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
14034  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
14035 
14036  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
14037  {
14038  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
14039  {
14040  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
14041  if(limit != VK_WHOLE_SIZE)
14042  {
14043  m_HeapSizeLimit[heapIndex] = limit;
14044  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
14045  {
14046  m_MemProps.memoryHeaps[heapIndex].size = limit;
14047  }
14048  }
14049  }
14050  }
14051 
14052  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
14053  {
14054  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
14055 
14056  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
14057  this,
14058  memTypeIndex,
14059  preferredBlockSize,
14060  0,
14061  SIZE_MAX,
14062  GetBufferImageGranularity(),
14063  pCreateInfo->frameInUseCount,
14064  false, // isCustomPool
14065  false, // explicitBlockSize
14066  false); // linearAlgorithm
14067  // No need to call m_pBlockVectors[memTypeIndex]->CreateMinBlocks here,
14068  // because minBlockCount is 0.
14069  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
14070 
14071  }
14072 }
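// The pHeapSizeLimit handling in the constructor above can be driven from
// client code like this hedged sketch, which caps heap 0 and leaves the
// remaining heaps unrestricted:
#if 0 // illustrative sketch, not part of the library
VkDeviceSize heapSizeLimits[VK_MAX_MEMORY_HEAPS];
for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
{
    heapSizeLimits[i] = VK_WHOLE_SIZE; // VK_WHOLE_SIZE means "no limit"
}
heapSizeLimits[0] = 512ull * 1024 * 1024; // cap heap 0 at 512 MiB

VmaAllocatorCreateInfo allocatorInfo = {};
allocatorInfo.physicalDevice = physicalDevice;
allocatorInfo.device = device;
allocatorInfo.pHeapSizeLimit = heapSizeLimits;
#endif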
14073 
14074 VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
14075 {
14076  VkResult res = VK_SUCCESS;
14077 
14078  if(pCreateInfo->pRecordSettings != VMA_NULL &&
14079  !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
14080  {
14081 #if VMA_RECORDING_ENABLED
14082  m_pRecorder = vma_new(this, VmaRecorder)();
14083  res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
14084  if(res != VK_SUCCESS)
14085  {
14086  return res;
14087  }
14088  m_pRecorder->WriteConfiguration(
14089  m_PhysicalDeviceProperties,
14090  m_MemProps,
14091  m_UseKhrDedicatedAllocation);
14092  m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
14093 #else
14094  VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
14095  return VK_ERROR_FEATURE_NOT_PRESENT;
14096 #endif
14097  }
14098 
14099  return res;
14100 }
14101 
14102 VmaAllocator_T::~VmaAllocator_T()
14103 {
14104 #if VMA_RECORDING_ENABLED
14105  if(m_pRecorder != VMA_NULL)
14106  {
14107  m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
14108  vma_delete(this, m_pRecorder);
14109  }
14110 #endif
14111 
14112  VMA_ASSERT(m_Pools.empty());
14113 
14114  for(size_t i = GetMemoryTypeCount(); i--; )
14115  {
14116  vma_delete(this, m_pDedicatedAllocations[i]);
14117  vma_delete(this, m_pBlockVectors[i]);
14118  }
14119 }
14120 
14121 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
14122 {
14123 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
14124  m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
14125  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
14126  m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
14127  m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
14128  m_VulkanFunctions.vkMapMemory = &vkMapMemory;
14129  m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
14130  m_VulkanFunctions.vkFlushMappedMemoryRanges = &vkFlushMappedMemoryRanges;
14131  m_VulkanFunctions.vkInvalidateMappedMemoryRanges = &vkInvalidateMappedMemoryRanges;
14132  m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
14133  m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
14134  m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
14135  m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
14136  m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
14137  m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
14138  m_VulkanFunctions.vkCreateImage = &vkCreateImage;
14139  m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
14140  m_VulkanFunctions.vkCmdCopyBuffer = &vkCmdCopyBuffer;
14141 #if VMA_DEDICATED_ALLOCATION
14142  if(m_UseKhrDedicatedAllocation)
14143  {
14144  m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
14145  (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
14146  m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
14147  (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
14148  }
14149 #endif // #if VMA_DEDICATED_ALLOCATION
14150 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
14151 
14152 #define VMA_COPY_IF_NOT_NULL(funcName) \
14153  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
14154 
14155  if(pVulkanFunctions != VMA_NULL)
14156  {
14157  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
14158  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
14159  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
14160  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
14161  VMA_COPY_IF_NOT_NULL(vkMapMemory);
14162  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
14163  VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
14164  VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
14165  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
14166  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
14167  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
14168  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
14169  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
14170  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
14171  VMA_COPY_IF_NOT_NULL(vkCreateImage);
14172  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
14173  VMA_COPY_IF_NOT_NULL(vkCmdCopyBuffer);
14174 #if VMA_DEDICATED_ALLOCATION
14175  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
14176  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
14177 #endif
14178  }
14179 
14180 #undef VMA_COPY_IF_NOT_NULL
14181 
14182  // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
14183  // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
14184  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
14185  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
14186  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
14187  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
14188  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
14189  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
14190  VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
14191  VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
14192  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
14193  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
14194  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
14195  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
14196  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
14197  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
14198  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
14199  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
14200  VMA_ASSERT(m_VulkanFunctions.vkCmdCopyBuffer != VMA_NULL);
14201 #if VMA_DEDICATED_ALLOCATION
14202  if(m_UseKhrDedicatedAllocation)
14203  {
14204  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
14205  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
14206  }
14207 #endif
14208 }
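// With VMA_STATIC_VULKAN_FUNCTIONS defined to 0, every pointer asserted above
// must come in through VmaAllocatorCreateInfo::pVulkanFunctions. A hedged
// sketch using statically linked entry points (a dynamic loader such as volk
// works the same way, just with its own function pointers):
#if 0 // illustrative sketch, not part of the library
VmaVulkanFunctions vulkanFunctions = {};
vulkanFunctions.vkGetPhysicalDeviceProperties = vkGetPhysicalDeviceProperties;
vulkanFunctions.vkGetPhysicalDeviceMemoryProperties = vkGetPhysicalDeviceMemoryProperties;
vulkanFunctions.vkAllocateMemory = vkAllocateMemory;
vulkanFunctions.vkFreeMemory = vkFreeMemory;
vulkanFunctions.vkMapMemory = vkMapMemory;
vulkanFunctions.vkUnmapMemory = vkUnmapMemory;
vulkanFunctions.vkFlushMappedMemoryRanges = vkFlushMappedMemoryRanges;
vulkanFunctions.vkInvalidateMappedMemoryRanges = vkInvalidateMappedMemoryRanges;
vulkanFunctions.vkBindBufferMemory = vkBindBufferMemory;
vulkanFunctions.vkBindImageMemory = vkBindImageMemory;
vulkanFunctions.vkGetBufferMemoryRequirements = vkGetBufferMemoryRequirements;
vulkanFunctions.vkGetImageMemoryRequirements = vkGetImageMemoryRequirements;
vulkanFunctions.vkCreateBuffer = vkCreateBuffer;
vulkanFunctions.vkDestroyBuffer = vkDestroyBuffer;
vulkanFunctions.vkCreateImage = vkCreateImage;
vulkanFunctions.vkDestroyImage = vkDestroyImage;
vulkanFunctions.vkCmdCopyBuffer = vkCmdCopyBuffer;
// Plus the two *2KHR functions when dedicated allocation is enabled.

VmaAllocatorCreateInfo allocatorInfo = {};
allocatorInfo.pVulkanFunctions = &vulkanFunctions;
#endif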
14209 
14210 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
14211 {
14212  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
14213  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
14214  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
14215  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
14216 }
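// Worked example with the default configuration: VMA_SMALL_HEAP_MAX_SIZE is
// 1 GiB and VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE is 256 MiB. A 256 MiB heap
// therefore counts as small and gets 256 MiB / 8 = 32 MiB blocks, while an
// 8 GiB heap gets the preferred 256 MiB block size.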
14217 
14218 VkResult VmaAllocator_T::AllocateMemoryOfType(
14219  VkDeviceSize size,
14220  VkDeviceSize alignment,
14221  bool dedicatedAllocation,
14222  VkBuffer dedicatedBuffer,
14223  VkImage dedicatedImage,
14224  const VmaAllocationCreateInfo& createInfo,
14225  uint32_t memTypeIndex,
14226  VmaSuballocationType suballocType,
14227  size_t allocationCount,
14228  VmaAllocation* pAllocations)
14229 {
14230  VMA_ASSERT(pAllocations != VMA_NULL);
14231  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, AllocationCount=%zu, Size=%llu", memTypeIndex, allocationCount, size);
14232 
14233  VmaAllocationCreateInfo finalCreateInfo = createInfo;
14234 
14235  // If memory type is not HOST_VISIBLE, disable MAPPED.
14236  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
14237  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
14238  {
14239  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
14240  }
14241 
14242  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
14243  VMA_ASSERT(blockVector);
14244 
14245  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
14246  bool preferDedicatedMemory =
14247  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
14248  dedicatedAllocation ||
14249  // Heuristics: Allocate dedicated memory if requested size is greater than half of preferred block size.
14250  size > preferredBlockSize / 2;
14251 
14252  if(preferDedicatedMemory &&
14253  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
14254  finalCreateInfo.pool == VK_NULL_HANDLE)
14255  {
14256  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
14257  }
14258 
14259  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
14260  {
14261  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14262  {
14263  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14264  }
14265  else
14266  {
14267  return AllocateDedicatedMemory(
14268  size,
14269  suballocType,
14270  memTypeIndex,
14271  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
14272  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
14273  finalCreateInfo.pUserData,
14274  dedicatedBuffer,
14275  dedicatedImage,
14276  allocationCount,
14277  pAllocations);
14278  }
14279  }
14280  else
14281  {
14282  VkResult res = blockVector->Allocate(
14283  VK_NULL_HANDLE, // hCurrentPool
14284  m_CurrentFrameIndex.load(),
14285  size,
14286  alignment,
14287  finalCreateInfo,
14288  suballocType,
14289  allocationCount,
14290  pAllocations);
14291  if(res == VK_SUCCESS)
14292  {
14293  return res;
14294  }
14295 
14296  // Block allocation failed. Try dedicated memory.
14297  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14298  {
14299  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14300  }
14301  else
14302  {
14303  res = AllocateDedicatedMemory(
14304  size,
14305  suballocType,
14306  memTypeIndex,
14307  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
14308  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
14309  finalCreateInfo.pUserData,
14310  dedicatedBuffer,
14311  dedicatedImage,
14312  allocationCount,
14313  pAllocations);
14314  if(res == VK_SUCCESS)
14315  {
14316  // Succeeded: AllocateDedicatedMemory function already filled pAllocations, nothing more to do here.
14317  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
14318  return VK_SUCCESS;
14319  }
14320  else
14321  {
14322  // Everything failed: Return error code.
14323  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
14324  return res;
14325  }
14326  }
14327  }
14328 }
14329 
14330 VkResult VmaAllocator_T::AllocateDedicatedMemory(
14331  VkDeviceSize size,
14332  VmaSuballocationType suballocType,
14333  uint32_t memTypeIndex,
14334  bool map,
14335  bool isUserDataString,
14336  void* pUserData,
14337  VkBuffer dedicatedBuffer,
14338  VkImage dedicatedImage,
14339  size_t allocationCount,
14340  VmaAllocation* pAllocations)
14341 {
14342  VMA_ASSERT(allocationCount > 0 && pAllocations);
14343 
14344  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
14345  allocInfo.memoryTypeIndex = memTypeIndex;
14346  allocInfo.allocationSize = size;
14347 
14348 #if VMA_DEDICATED_ALLOCATION
14349  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
14350  if(m_UseKhrDedicatedAllocation)
14351  {
14352  if(dedicatedBuffer != VK_NULL_HANDLE)
14353  {
14354  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
14355  dedicatedAllocInfo.buffer = dedicatedBuffer;
14356  allocInfo.pNext = &dedicatedAllocInfo;
14357  }
14358  else if(dedicatedImage != VK_NULL_HANDLE)
14359  {
14360  dedicatedAllocInfo.image = dedicatedImage;
14361  allocInfo.pNext = &dedicatedAllocInfo;
14362  }
14363  }
14364 #endif // #if VMA_DEDICATED_ALLOCATION
14365 
14366  size_t allocIndex;
14367  VkResult res;
14368  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
14369  {
14370  res = AllocateDedicatedMemoryPage(
14371  size,
14372  suballocType,
14373  memTypeIndex,
14374  allocInfo,
14375  map,
14376  isUserDataString,
14377  pUserData,
14378  pAllocations + allocIndex);
14379  if(res != VK_SUCCESS)
14380  {
14381  break;
14382  }
14383  }
14384 
14385  if(res == VK_SUCCESS)
14386  {
14387  // Register them in m_pDedicatedAllocations.
14388  {
14389  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
14390  AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
14391  VMA_ASSERT(pDedicatedAllocations);
14392  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
14393  {
14394  VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, pAllocations[allocIndex]);
14395  }
14396  }
14397 
14398  VMA_DEBUG_LOG(" Allocated DedicatedMemory Count=%zu, MemoryTypeIndex=#%u", allocationCount, memTypeIndex);
14399  }
14400  else
14401  {
14402  // Free all already created allocations.
14403  while(allocIndex--)
14404  {
14405  VmaAllocation currAlloc = pAllocations[allocIndex];
14406  VkDeviceMemory hMemory = currAlloc->GetMemory();
14407 
14408  /*
14409  There is no need to call this, because the Vulkan spec allows skipping vkUnmapMemory
14410  before vkFreeMemory.
14411 
14412  if(currAlloc->GetMappedData() != VMA_NULL)
14413  {
14414  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
14415  }
14416  */
14417 
14418  FreeVulkanMemory(memTypeIndex, currAlloc->GetSize(), hMemory);
14419 
14420  currAlloc->SetUserData(this, VMA_NULL);
14421  vma_delete(this, currAlloc);
14422  }
14423 
14424  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
14425  }
14426 
14427  return res;
14428 }
14429 
14430 VkResult VmaAllocator_T::AllocateDedicatedMemoryPage(
14431  VkDeviceSize size,
14432  VmaSuballocationType suballocType,
14433  uint32_t memTypeIndex,
14434  const VkMemoryAllocateInfo& allocInfo,
14435  bool map,
14436  bool isUserDataString,
14437  void* pUserData,
14438  VmaAllocation* pAllocation)
14439 {
14440  VkDeviceMemory hMemory = VK_NULL_HANDLE;
14441  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
14442  if(res < 0)
14443  {
14444  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
14445  return res;
14446  }
14447 
14448  void* pMappedData = VMA_NULL;
14449  if(map)
14450  {
14451  res = (*m_VulkanFunctions.vkMapMemory)(
14452  m_hDevice,
14453  hMemory,
14454  0,
14455  VK_WHOLE_SIZE,
14456  0,
14457  &pMappedData);
14458  if(res < 0)
14459  {
14460  VMA_DEBUG_LOG(" vkMapMemory FAILED");
14461  FreeVulkanMemory(memTypeIndex, size, hMemory);
14462  return res;
14463  }
14464  }
14465 
14466  *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load(), isUserDataString);
14467  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
14468  (*pAllocation)->SetUserData(this, pUserData);
14469  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
14470  {
14471  FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
14472  }
14473 
14474  return VK_SUCCESS;
14475 }
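// The map-at-creation path above is what VMA_ALLOCATION_CREATE_MAPPED_BIT
// requests. A hedged client-side sketch creating a persistently mapped
// staging buffer; `allocator` is assumed to exist:
#if 0 // illustrative sketch, not part of the library
VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufCreateInfo.size = 65536;
bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
allocCreateInfo.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT;

VkBuffer buf = VK_NULL_HANDLE;
VmaAllocation alloc = VK_NULL_HANDLE;
VmaAllocationInfo allocInfo = {};
vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
// allocInfo.pMappedData stays valid for the whole lifetime of the allocation.
#endif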
14476 
14477 void VmaAllocator_T::GetBufferMemoryRequirements(
14478  VkBuffer hBuffer,
14479  VkMemoryRequirements& memReq,
14480  bool& requiresDedicatedAllocation,
14481  bool& prefersDedicatedAllocation) const
14482 {
14483 #if VMA_DEDICATED_ALLOCATION
14484  if(m_UseKhrDedicatedAllocation)
14485  {
14486  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
14487  memReqInfo.buffer = hBuffer;
14488 
14489  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
14490 
14491  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
14492  memReq2.pNext = &memDedicatedReq;
14493 
14494  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
14495 
14496  memReq = memReq2.memoryRequirements;
14497  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
14498  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
14499  }
14500  else
14501 #endif // #if VMA_DEDICATED_ALLOCATION
14502  {
14503  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
14504  requiresDedicatedAllocation = false;
14505  prefersDedicatedAllocation = false;
14506  }
14507 }
14508 
14509 void VmaAllocator_T::GetImageMemoryRequirements(
14510  VkImage hImage,
14511  VkMemoryRequirements& memReq,
14512  bool& requiresDedicatedAllocation,
14513  bool& prefersDedicatedAllocation) const
14514 {
14515 #if VMA_DEDICATED_ALLOCATION
14516  if(m_UseKhrDedicatedAllocation)
14517  {
14518  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
14519  memReqInfo.image = hImage;
14520 
14521  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
14522 
14523  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
14524  memReq2.pNext = &memDedicatedReq;
14525 
14526  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
14527 
14528  memReq = memReq2.memoryRequirements;
14529  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
14530  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
14531  }
14532  else
14533 #endif // #if VMA_DEDICATED_ALLOCATION
14534  {
14535  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
14536  requiresDedicatedAllocation = false;
14537  prefersDedicatedAllocation = false;
14538  }
14539 }
14540 
14541 VkResult VmaAllocator_T::AllocateMemory(
14542  const VkMemoryRequirements& vkMemReq,
14543  bool requiresDedicatedAllocation,
14544  bool prefersDedicatedAllocation,
14545  VkBuffer dedicatedBuffer,
14546  VkImage dedicatedImage,
14547  const VmaAllocationCreateInfo& createInfo,
14548  VmaSuballocationType suballocType,
14549  size_t allocationCount,
14550  VmaAllocation* pAllocations)
14551 {
14552  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
14553 
14554  VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
14555 
14556  if(vkMemReq.size == 0)
14557  {
14558  return VK_ERROR_VALIDATION_FAILED_EXT;
14559  }
14560  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
14561  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14562  {
14563  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
14564  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14565  }
14566  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
14567  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0)
14568  {
14569  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
14570  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14571  }
14572  if(requiresDedicatedAllocation)
14573  {
14574  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14575  {
14576  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
14577  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14578  }
14579  if(createInfo.pool != VK_NULL_HANDLE)
14580  {
14581  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
14582  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14583  }
14584  }
14585  if((createInfo.pool != VK_NULL_HANDLE) &&
14586  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
14587  {
14588  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
14589  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14590  }
14591 
14592  if(createInfo.pool != VK_NULL_HANDLE)
14593  {
14594  const VkDeviceSize alignmentForPool = VMA_MAX(
14595  vkMemReq.alignment,
14596  GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
14597  return createInfo.pool->m_BlockVector.Allocate(
14598  createInfo.pool,
14599  m_CurrentFrameIndex.load(),
14600  vkMemReq.size,
14601  alignmentForPool,
14602  createInfo,
14603  suballocType,
14604  allocationCount,
14605  pAllocations);
14606  }
14607  else
14608  {
14609  // Bit mask of Vulkan memory types acceptable for this allocation.
14610  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
14611  uint32_t memTypeIndex = UINT32_MAX;
14612  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
14613  if(res == VK_SUCCESS)
14614  {
14615  VkDeviceSize alignmentForMemType = VMA_MAX(
14616  vkMemReq.alignment,
14617  GetMemoryTypeMinAlignment(memTypeIndex));
14618 
14619  res = AllocateMemoryOfType(
14620  vkMemReq.size,
14621  alignmentForMemType,
14622  requiresDedicatedAllocation || prefersDedicatedAllocation,
14623  dedicatedBuffer,
14624  dedicatedImage,
14625  createInfo,
14626  memTypeIndex,
14627  suballocType,
14628  allocationCount,
14629  pAllocations);
14630  // Succeeded on first try.
14631  if(res == VK_SUCCESS)
14632  {
14633  return res;
14634  }
14635  // Allocation from this memory type failed. Try other compatible memory types.
14636  else
14637  {
14638  for(;;)
14639  {
14640  // Remove old memTypeIndex from list of possibilities.
14641  memoryTypeBits &= ~(1u << memTypeIndex);
14642  // Find alternative memTypeIndex.
14643  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
14644  if(res == VK_SUCCESS)
14645  {
14646  alignmentForMemType = VMA_MAX(
14647  vkMemReq.alignment,
14648  GetMemoryTypeMinAlignment(memTypeIndex));
14649 
14650  res = AllocateMemoryOfType(
14651  vkMemReq.size,
14652  alignmentForMemType,
14653  requiresDedicatedAllocation || prefersDedicatedAllocation,
14654  dedicatedBuffer,
14655  dedicatedImage,
14656  createInfo,
14657  memTypeIndex,
14658  suballocType,
14659  allocationCount,
14660  pAllocations);
14661  // Allocation from this alternative memory type succeeded.
14662  if(res == VK_SUCCESS)
14663  {
14664  return res;
14665  }
14666  // else: Allocation from this memory type failed. Try next one - next loop iteration.
14667  }
14668  // No other matching memory type index could be found.
14669  else
14670  {
14671  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
14672  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14673  }
14674  }
14675  }
14676  }
14677  // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
14678  else
14679  return res;
14680  }
14681 }
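// The retry loop above is the internal analogue of calling
// vmaFindMemoryTypeIndex() repeatedly with unsuitable bits masked out. A
// hedged sketch of the public query; `memReq` is a VkMemoryRequirements
// assumed to have been obtained from vkGetBufferMemoryRequirements():
#if 0 // illustrative sketch, not part of the library
VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

uint32_t memTypeIndex = UINT32_MAX;
VkResult res = vmaFindMemoryTypeIndex(allocator, memReq.memoryTypeBits, &allocCreateInfo, &memTypeIndex);
#endif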
14682 
14683 void VmaAllocator_T::FreeMemory(
14684  size_t allocationCount,
14685  const VmaAllocation* pAllocations)
14686 {
14687  VMA_ASSERT(pAllocations);
14688 
14689  for(size_t allocIndex = allocationCount; allocIndex--; )
14690  {
14691  VmaAllocation allocation = pAllocations[allocIndex];
14692 
14693  if(allocation != VK_NULL_HANDLE)
14694  {
14695  if(TouchAllocation(allocation))
14696  {
14697  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
14698  {
14699  FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
14700  }
14701 
14702  switch(allocation->GetType())
14703  {
14704  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
14705  {
14706  VmaBlockVector* pBlockVector = VMA_NULL;
14707  VmaPool hPool = allocation->GetPool();
14708  if(hPool != VK_NULL_HANDLE)
14709  {
14710  pBlockVector = &hPool->m_BlockVector;
14711  }
14712  else
14713  {
14714  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
14715  pBlockVector = m_pBlockVectors[memTypeIndex];
14716  }
14717  pBlockVector->Free(allocation);
14718  }
14719  break;
14720  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
14721  FreeDedicatedMemory(allocation);
14722  break;
14723  default:
14724  VMA_ASSERT(0);
14725  }
14726  }
14727 
14728  allocation->SetUserData(this, VMA_NULL);
14729  vma_delete(this, allocation);
14730  }
14731  }
14732 }
14733 
14734 VkResult VmaAllocator_T::ResizeAllocation(
14735  const VmaAllocation alloc,
14736  VkDeviceSize newSize)
14737 {
14738  if(newSize == 0 || alloc->GetLastUseFrameIndex() == VMA_FRAME_INDEX_LOST)
14739  {
14740  return VK_ERROR_VALIDATION_FAILED_EXT;
14741  }
14742  if(newSize == alloc->GetSize())
14743  {
14744  return VK_SUCCESS;
14745  }
14746 
14747  switch(alloc->GetType())
14748  {
14749  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
14750  return VK_ERROR_FEATURE_NOT_PRESENT;
14751  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
14752  if(alloc->GetBlock()->m_pMetadata->ResizeAllocation(alloc, newSize))
14753  {
14754  alloc->ChangeSize(newSize);
14755  VMA_HEAVY_ASSERT(alloc->GetBlock()->m_pMetadata->Validate());
14756  return VK_SUCCESS;
14757  }
14758  else
14759  {
14760  return VK_ERROR_OUT_OF_POOL_MEMORY;
14761  }
14762  default:
14763  VMA_ASSERT(0);
14764  return VK_ERROR_VALIDATION_FAILED_EXT;
14765  }
14766 }
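// As the switch above shows, only block allocations can be resized in place.
// A hedged usage sketch; `alloc` and `newSizeInBytes` are hypothetical:
#if 0 // illustrative sketch, not part of the library
VkResult res = vmaResizeAllocation(allocator, alloc, newSizeInBytes);
// VK_ERROR_OUT_OF_POOL_MEMORY: no room to grow within the block.
// VK_ERROR_FEATURE_NOT_PRESENT: dedicated allocations cannot be resized.
#endif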
14767 
14768 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
14769 {
14770  // Initialize.
14771  InitStatInfo(pStats->total);
14772  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
14773  InitStatInfo(pStats->memoryType[i]);
14774  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
14775  InitStatInfo(pStats->memoryHeap[i]);
14776 
14777  // Process default pools.
14778  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
14779  {
14780  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
14781  VMA_ASSERT(pBlockVector);
14782  pBlockVector->AddStats(pStats);
14783  }
14784 
14785  // Process custom pools.
14786  {
14787  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
14788  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
14789  {
14790  m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
14791  }
14792  }
14793 
14794  // Process dedicated allocations.
14795  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
14796  {
14797  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
14798  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
14799  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
14800  VMA_ASSERT(pDedicatedAllocVector);
14801  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
14802  {
14803  VmaStatInfo allocationStatInfo;
14804  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
14805  VmaAddStatInfo(pStats->total, allocationStatInfo);
14806  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
14807  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
14808  }
14809  }
14810 
14811  // Postprocess.
14812  VmaPostprocessCalcStatInfo(pStats->total);
14813  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
14814  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
14815  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
14816  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
14817 }
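// The public entry point for the aggregation above is vmaCalculateStats().
// A hedged sketch printing the grand total:
#if 0 // illustrative sketch, not part of the library
VmaStats stats;
vmaCalculateStats(allocator, &stats);
printf("Used %llu B in %u allocations, %llu B unused\n",
    (unsigned long long)stats.total.usedBytes,
    stats.total.allocationCount,
    (unsigned long long)stats.total.unusedBytes);
#endif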
14818 
14819 static const uint32_t VMA_VENDOR_ID_AMD = 4098; // 0x1002
14820 
14821 VkResult VmaAllocator_T::DefragmentationBegin(
14822  const VmaDefragmentationInfo2& info,
14823  VmaDefragmentationStats* pStats,
14824  VmaDefragmentationContext* pContext)
14825 {
14826  if(info.pAllocationsChanged != VMA_NULL)
14827  {
14828  memset(info.pAllocationsChanged, 0, info.allocationCount * sizeof(VkBool32));
14829  }
14830 
14831  *pContext = vma_new(this, VmaDefragmentationContext_T)(
14832  this, m_CurrentFrameIndex.load(), info.flags, pStats);
14833 
14834  (*pContext)->AddPools(info.poolCount, info.pPools);
14835  (*pContext)->AddAllocations(
14836  info.allocationCount, info.pAllocations, info.pAllocationsChanged);
14837 
14838  VkResult res = (*pContext)->Defragment(
14839  info.maxCpuBytesToMove, info.maxCpuAllocationsToMove,
14840  info.maxGpuBytesToMove, info.maxGpuAllocationsToMove,
14841  info.commandBuffer, pStats);
14842 
14843  if(res != VK_NOT_READY)
14844  {
14845  vma_delete(this, *pContext);
14846  *pContext = VMA_NULL;
14847  }
14848 
14849  return res;
14850 }
14851 
14852 VkResult VmaAllocator_T::DefragmentationEnd(
14853  VmaDefragmentationContext context)
14854 {
14855  vma_delete(this, context);
14856  return VK_SUCCESS;
14857 }
14858 
14859 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
14860 {
14861  if(hAllocation->CanBecomeLost())
14862  {
14863  /*
14864  Warning: This is a carefully designed algorithm.
14865  Do not modify unless you really know what you're doing :)
14866  */
14867  const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
14868  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
14869  for(;;)
14870  {
14871  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
14872  {
14873  pAllocationInfo->memoryType = UINT32_MAX;
14874  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
14875  pAllocationInfo->offset = 0;
14876  pAllocationInfo->size = hAllocation->GetSize();
14877  pAllocationInfo->pMappedData = VMA_NULL;
14878  pAllocationInfo->pUserData = hAllocation->GetUserData();
14879  return;
14880  }
14881  else if(localLastUseFrameIndex == localCurrFrameIndex)
14882  {
14883  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
14884  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
14885  pAllocationInfo->offset = hAllocation->GetOffset();
14886  pAllocationInfo->size = hAllocation->GetSize();
14887  pAllocationInfo->pMappedData = VMA_NULL;
14888  pAllocationInfo->pUserData = hAllocation->GetUserData();
14889  return;
14890  }
14891  else // Last use time earlier than current time.
14892  {
14893  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
14894  {
14895  localLastUseFrameIndex = localCurrFrameIndex;
14896  }
14897  }
14898  }
14899  }
14900  else
14901  {
14902 #if VMA_STATS_STRING_ENABLED
14903  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
14904  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
14905  for(;;)
14906  {
14907  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
14908  if(localLastUseFrameIndex == localCurrFrameIndex)
14909  {
14910  break;
14911  }
14912  else // Last use time earlier than current time.
14913  {
14914  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
14915  {
14916  localLastUseFrameIndex = localCurrFrameIndex;
14917  }
14918  }
14919  }
14920 #endif
14921 
14922  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
14923  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
14924  pAllocationInfo->offset = hAllocation->GetOffset();
14925  pAllocationInfo->size = hAllocation->GetSize();
14926  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
14927  pAllocationInfo->pUserData = hAllocation->GetUserData();
14928  }
14929 }
14930 
14931 bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
14932 {
14933  // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
14934  if(hAllocation->CanBecomeLost())
14935  {
14936  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
14937  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
14938  for(;;)
14939  {
14940  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
14941  {
14942  return false;
14943  }
14944  else if(localLastUseFrameIndex == localCurrFrameIndex)
14945  {
14946  return true;
14947  }
14948  else // Last use time earlier than current time.
14949  {
14950  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
14951  {
14952  localLastUseFrameIndex = localCurrFrameIndex;
14953  }
14954  }
14955  }
14956  }
14957  else
14958  {
14959 #if VMA_STATS_STRING_ENABLED
14960  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
14961  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
14962  for(;;)
14963  {
14964  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
14965  if(localLastUseFrameIndex == localCurrFrameIndex)
14966  {
14967  break;
14968  }
14969  else // Last use time earlier than current time.
14970  {
14971  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
14972  {
14973  localLastUseFrameIndex = localCurrFrameIndex;
14974  }
14975  }
14976  }
14977 #endif
14978 
14979  return true;
14980  }
14981 }
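
A minimal sketch of the per-frame protocol that TouchAllocation() supports, via the public API; `allocator`, `alloc`, and `frameIndex` are assumed to be the caller's own objects, and the allocation is assumed to have been created with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT:

    void OnNewFrame(VmaAllocator allocator, VmaAllocation alloc, uint32_t frameIndex)
    {
        // Advance the allocator's frame counter first, so the compare-exchange
        // loop above stamps the allocation with the current frame index.
        vmaSetCurrentFrameIndex(allocator, frameIndex);
        if(vmaTouchAllocation(allocator, alloc) == VK_FALSE)
        {
            // The allocation is lost: free the stale handle and recreate the resource.
        }
    }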
14982 
14983 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
14984 {
14985  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
14986 
14987  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
14988 
14989  if(newCreateInfo.maxBlockCount == 0)
14990  {
14991  newCreateInfo.maxBlockCount = SIZE_MAX;
14992  }
14993  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
14994  {
14995  return VK_ERROR_INITIALIZATION_FAILED;
14996  }
14997 
14998  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
14999 
15000  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
15001 
15002  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
15003  if(res != VK_SUCCESS)
15004  {
15005  vma_delete(this, *pPool);
15006  *pPool = VMA_NULL;
15007  return res;
15008  }
15009 
15010  // Add to m_Pools.
15011  {
15012  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
15013  (*pPool)->SetId(m_NextPoolId++);
15014  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
15015  }
15016 
15017  return VK_SUCCESS;
15018 }
15019 
15020 void VmaAllocator_T::DestroyPool(VmaPool pool)
15021 {
15022  // Remove from m_Pools.
15023  {
15024  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
15025  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
15026  VMA_ASSERT(success && "Pool not found in Allocator.");
15027  }
15028 
15029  vma_delete(this, pool);
15030 }
15031 
15032 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
15033 {
15034  pool->m_BlockVector.GetPoolStats(pPoolStats);
15035 }
15036 
15037 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
15038 {
15039  m_CurrentFrameIndex.store(frameIndex);
15040 }
15041 
15042 void VmaAllocator_T::MakePoolAllocationsLost(
15043  VmaPool hPool,
15044  size_t* pLostAllocationCount)
15045 {
15046  hPool->m_BlockVector.MakePoolAllocationsLost(
15047  m_CurrentFrameIndex.load(),
15048  pLostAllocationCount);
15049 }
15050 
15051 VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
15052 {
15053  return hPool->m_BlockVector.CheckCorruption();
15054 }
15055 
15056 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
15057 {
15058  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
15059 
15060  // Process default pools.
15061  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15062  {
15063  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
15064  {
15065  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
15066  VMA_ASSERT(pBlockVector);
15067  VkResult localRes = pBlockVector->CheckCorruption();
15068  switch(localRes)
15069  {
15070  case VK_ERROR_FEATURE_NOT_PRESENT:
15071  break;
15072  case VK_SUCCESS:
15073  finalRes = VK_SUCCESS;
15074  break;
15075  default:
15076  return localRes;
15077  }
15078  }
15079  }
15080 
15081  // Process custom pools.
15082  {
15083  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
15084  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
15085  {
15086  if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
15087  {
15088  VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
15089  switch(localRes)
15090  {
15091  case VK_ERROR_FEATURE_NOT_PRESENT:
15092  break;
15093  case VK_SUCCESS:
15094  finalRes = VK_SUCCESS;
15095  break;
15096  default:
15097  return localRes;
15098  }
15099  }
15100  }
15101  }
15102 
15103  return finalRes;
15104 }
15105 
15106 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
15107 {
15108  *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST, false);
15109  (*pAllocation)->InitLost();
15110 }
15111 
15112 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
15113 {
15114  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
15115 
15116  VkResult res;
15117  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
15118  {
15119  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
15120  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
15121  {
15122  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
15123  if(res == VK_SUCCESS)
15124  {
15125  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
15126  }
15127  }
15128  else
15129  {
15130  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
15131  }
15132  }
15133  else
15134  {
15135  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
15136  }
15137 
15138  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
15139  {
15140  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
15141  }
15142 
15143  return res;
15144 }
15145 
15146 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
15147 {
15148  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
15149  {
15150  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
15151  }
15152 
15153  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
15154 
15155  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
15156  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
15157  {
15158  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
15159  m_HeapSizeLimit[heapIndex] += size;
15160  }
15161 }
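
The m_HeapSizeLimit bookkeeping above is driven by VmaAllocatorCreateInfo::pHeapSizeLimit. A sketch of capping heap 0 at allocator creation; `physicalDevice` and `device` are assumed to be the caller's Vulkan handles, and the array length must match the device's memoryHeapCount (VK_MAX_MEMORY_HEAPS is the safe upper bound):

    VmaAllocator CreateLimitedAllocator(VkPhysicalDevice physicalDevice, VkDevice device)
    {
        VkDeviceSize heapLimits[VK_MAX_MEMORY_HEAPS];
        for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
            heapLimits[i] = VK_WHOLE_SIZE; // VK_WHOLE_SIZE = no limit for that heap.
        heapLimits[0] = 512ull * 1024 * 1024; // Pretend heap 0 has only 512 MiB.

        VmaAllocatorCreateInfo allocatorInfo = {};
        allocatorInfo.physicalDevice = physicalDevice;
        allocatorInfo.device = device;
        allocatorInfo.pHeapSizeLimit = heapLimits;
        VmaAllocator allocator = VK_NULL_HANDLE;
        vmaCreateAllocator(&allocatorInfo, &allocator);
        return allocator;
    }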
15162 
15163 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
15164 {
15165  if(hAllocation->CanBecomeLost())
15166  {
15167  return VK_ERROR_MEMORY_MAP_FAILED;
15168  }
15169 
15170  switch(hAllocation->GetType())
15171  {
15172  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15173  {
15174  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
15175  char *pBytes = VMA_NULL;
15176  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
15177  if(res == VK_SUCCESS)
15178  {
15179  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
15180  hAllocation->BlockAllocMap();
15181  }
15182  return res;
15183  }
15184  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15185  return hAllocation->DedicatedAllocMap(this, ppData);
15186  default:
15187  VMA_ASSERT(0);
15188  return VK_ERROR_MEMORY_MAP_FAILED;
15189  }
15190 }
15191 
15192 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
15193 {
15194  switch(hAllocation->GetType())
15195  {
15196  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15197  {
15198  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
15199  hAllocation->BlockAllocUnmap();
15200  pBlock->Unmap(this, 1);
15201  }
15202  break;
15203  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15204  hAllocation->DedicatedAllocUnmap(this);
15205  break;
15206  default:
15207  VMA_ASSERT(0);
15208  }
15209 }
15210 
15211 VkResult VmaAllocator_T::BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer)
15212 {
15213  VkResult res = VK_SUCCESS;
15214  switch(hAllocation->GetType())
15215  {
15216  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15217  res = GetVulkanFunctions().vkBindBufferMemory(
15218  m_hDevice,
15219  hBuffer,
15220  hAllocation->GetMemory(),
15221  0); //memoryOffset
15222  break;
15223  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15224  {
15225  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
15226  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
15227  res = pBlock->BindBufferMemory(this, hAllocation, hBuffer);
15228  break;
15229  }
15230  default:
15231  VMA_ASSERT(0);
15232  }
15233  return res;
15234 }
15235 
15236 VkResult VmaAllocator_T::BindImageMemory(VmaAllocation hAllocation, VkImage hImage)
15237 {
15238  VkResult res = VK_SUCCESS;
15239  switch(hAllocation->GetType())
15240  {
15241  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15242  res = GetVulkanFunctions().vkBindImageMemory(
15243  m_hDevice,
15244  hImage,
15245  hAllocation->GetMemory(),
15246  0); //memoryOffset
15247  break;
15248  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15249  {
15250  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
15251  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
15252  res = pBlock->BindImageMemory(this, hAllocation, hImage);
15253  break;
15254  }
15255  default:
15256  VMA_ASSERT(0);
15257  }
15258  return res;
15259 }
15260 
15261 void VmaAllocator_T::FlushOrInvalidateAllocation(
15262  VmaAllocation hAllocation,
15263  VkDeviceSize offset, VkDeviceSize size,
15264  VMA_CACHE_OPERATION op)
15265 {
15266  const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex();
15267  if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
15268  {
15269  const VkDeviceSize allocationSize = hAllocation->GetSize();
15270  VMA_ASSERT(offset <= allocationSize);
15271 
15272  const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
15273 
15274  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
15275  memRange.memory = hAllocation->GetMemory();
15276 
15277  switch(hAllocation->GetType())
15278  {
15279  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15280  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
15281  if(size == VK_WHOLE_SIZE)
15282  {
15283  memRange.size = allocationSize - memRange.offset;
15284  }
15285  else
15286  {
15287  VMA_ASSERT(offset + size <= allocationSize);
15288  memRange.size = VMA_MIN(
15289  VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize),
15290  allocationSize - memRange.offset);
15291  }
15292  break;
15293 
15294  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15295  {
15296  // 1. Still within this allocation.
15297  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
15298  if(size == VK_WHOLE_SIZE)
15299  {
15300  size = allocationSize - offset;
15301  }
15302  else
15303  {
15304  VMA_ASSERT(offset + size <= allocationSize);
15305  }
15306  memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize);
15307 
15308  // 2. Adjust to whole block.
15309  const VkDeviceSize allocationOffset = hAllocation->GetOffset();
15310  VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
15311  const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();
15312  memRange.offset += allocationOffset;
15313  memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);
15314 
15315  break;
15316  }
15317 
15318  default:
15319  VMA_ASSERT(0);
15320  }
15321 
15322  switch(op)
15323  {
15324  case VMA_CACHE_FLUSH:
15325  (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
15326  break;
15327  case VMA_CACHE_INVALIDATE:
15328  (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
15329  break;
15330  default:
15331  VMA_ASSERT(0);
15332  }
15333  }
15334  // else: Just ignore this call.
15335 }
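
The caller-side counterpart of FlushOrInvalidateAllocation() is vmaFlushAllocation()/vmaInvalidateAllocation(). A sketch of writing through a HOST_VISIBLE but non-HOST_COHERENT allocation (requires <cstring> for memcpy; `allocator` and `alloc` are assumed to exist):

    VkResult UploadNonCoherent(VmaAllocator allocator, VmaAllocation alloc,
        const void* src, size_t size)
    {
        void* mapped = VMA_NULL;
        VkResult res = vmaMapMemory(allocator, alloc, &mapped);
        if(res == VK_SUCCESS)
        {
            memcpy(mapped, src, size);
            // Offset and size are expanded to nonCoherentAtomSize internally, as above.
            vmaFlushAllocation(allocator, alloc, 0, VK_WHOLE_SIZE);
            vmaUnmapMemory(allocator, alloc);
        }
        return res;
    }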
15336 
15337 void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
15338 {
15339  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
15340 
15341  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
15342  {
15343  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
15344  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
15345  VMA_ASSERT(pDedicatedAllocations);
15346  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
15347  VMA_ASSERT(success);
15348  }
15349 
15350  VkDeviceMemory hMemory = allocation->GetMemory();
15351 
15352  /*
15353  There is no need to call this, because the Vulkan spec allows skipping vkUnmapMemory
15354  before vkFreeMemory.
15355 
15356  if(allocation->GetMappedData() != VMA_NULL)
15357  {
15358  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
15359  }
15360  */
15361 
15362  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
15363 
15364  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
15365 }
15366 
15367 void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
15368 {
15369  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
15370  !hAllocation->CanBecomeLost() &&
15371  (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
15372  {
15373  void* pData = VMA_NULL;
15374  VkResult res = Map(hAllocation, &pData);
15375  if(res == VK_SUCCESS)
15376  {
15377  memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
15378  FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
15379  Unmap(hAllocation);
15380  }
15381  else
15382  {
15383  VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
15384  }
15385  }
15386 }
15387 
15388 #if VMA_STATS_STRING_ENABLED
15389 
15390 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
15391 {
15392  bool dedicatedAllocationsStarted = false;
15393  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15394  {
15395  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
15396  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
15397  VMA_ASSERT(pDedicatedAllocVector);
15398  if(pDedicatedAllocVector->empty() == false)
15399  {
15400  if(dedicatedAllocationsStarted == false)
15401  {
15402  dedicatedAllocationsStarted = true;
15403  json.WriteString("DedicatedAllocations");
15404  json.BeginObject();
15405  }
15406 
15407  json.BeginString("Type ");
15408  json.ContinueString(memTypeIndex);
15409  json.EndString();
15410 
15411  json.BeginArray();
15412 
15413  for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
15414  {
15415  json.BeginObject(true);
15416  const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
15417  hAlloc->PrintParameters(json);
15418  json.EndObject();
15419  }
15420 
15421  json.EndArray();
15422  }
15423  }
15424  if(dedicatedAllocationsStarted)
15425  {
15426  json.EndObject();
15427  }
15428 
15429  {
15430  bool allocationsStarted = false;
15431  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15432  {
15433  if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
15434  {
15435  if(allocationsStarted == false)
15436  {
15437  allocationsStarted = true;
15438  json.WriteString("DefaultPools");
15439  json.BeginObject();
15440  }
15441 
15442  json.BeginString("Type ");
15443  json.ContinueString(memTypeIndex);
15444  json.EndString();
15445 
15446  m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
15447  }
15448  }
15449  if(allocationsStarted)
15450  {
15451  json.EndObject();
15452  }
15453  }
15454 
15455  // Custom pools
15456  {
15457  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
15458  const size_t poolCount = m_Pools.size();
15459  if(poolCount > 0)
15460  {
15461  json.WriteString("Pools");
15462  json.BeginObject();
15463  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
15464  {
15465  json.BeginString();
15466  json.ContinueString(m_Pools[poolIndex]->GetId());
15467  json.EndString();
15468 
15469  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
15470  }
15471  json.EndObject();
15472  }
15473  }
15474 }
15475 
15476 #endif // #if VMA_STATS_STRING_ENABLED
15477 
15478 ////////////////////////////////////////////////////////////////////////////////
15479 // Public interface
15480 
15481 VkResult vmaCreateAllocator(
15482  const VmaAllocatorCreateInfo* pCreateInfo,
15483  VmaAllocator* pAllocator)
15484 {
15485  VMA_ASSERT(pCreateInfo && pAllocator);
15486  VMA_DEBUG_LOG("vmaCreateAllocator");
15487  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
15488  return (*pAllocator)->Init(pCreateInfo);
15489 }
15490 
15491 void vmaDestroyAllocator(
15492  VmaAllocator allocator)
15493 {
15494  if(allocator != VK_NULL_HANDLE)
15495  {
15496  VMA_DEBUG_LOG("vmaDestroyAllocator");
15497  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
15498  vma_delete(&allocationCallbacks, allocator);
15499  }
15500 }
15501 
15502 void vmaGetPhysicalDeviceProperties(
15503  VmaAllocator allocator,
15504  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
15505 {
15506  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
15507  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
15508 }
15509 
15510 void vmaGetMemoryProperties(
15511  VmaAllocator allocator,
15512  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
15513 {
15514  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
15515  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
15516 }
15517 
15518 void vmaGetMemoryTypeProperties(
15519  VmaAllocator allocator,
15520  uint32_t memoryTypeIndex,
15521  VkMemoryPropertyFlags* pFlags)
15522 {
15523  VMA_ASSERT(allocator && pFlags);
15524  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
15525  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
15526 }
15527 
15528 void vmaSetCurrentFrameIndex(
15529  VmaAllocator allocator,
15530  uint32_t frameIndex)
15531 {
15532  VMA_ASSERT(allocator);
15533  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
15534 
15535  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15536 
15537  allocator->SetCurrentFrameIndex(frameIndex);
15538 }
15539 
15540 void vmaCalculateStats(
15541  VmaAllocator allocator,
15542  VmaStats* pStats)
15543 {
15544  VMA_ASSERT(allocator && pStats);
15545  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15546  allocator->CalculateStats(pStats);
15547 }
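
A sketch of consuming the aggregated statistics returned above, assuming an existing `allocator` (requires <cstdio> for printf):

    void PrintTotals(VmaAllocator allocator)
    {
        VmaStats stats;
        vmaCalculateStats(allocator, &stats);
        printf("Used: %llu B in %u allocations, unused: %llu B\n",
            (unsigned long long)stats.total.usedBytes,
            stats.total.allocationCount,
            (unsigned long long)stats.total.unusedBytes);
    }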
15548 
15549 #if VMA_STATS_STRING_ENABLED
15550 
15551 void vmaBuildStatsString(
15552  VmaAllocator allocator,
15553  char** ppStatsString,
15554  VkBool32 detailedMap)
15555 {
15556  VMA_ASSERT(allocator && ppStatsString);
15557  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15558 
15559  VmaStringBuilder sb(allocator);
15560  {
15561  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
15562  json.BeginObject();
15563 
15564  VmaStats stats;
15565  allocator->CalculateStats(&stats);
15566 
15567  json.WriteString("Total");
15568  VmaPrintStatInfo(json, stats.total);
15569 
15570  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
15571  {
15572  json.BeginString("Heap ");
15573  json.ContinueString(heapIndex);
15574  json.EndString();
15575  json.BeginObject();
15576 
15577  json.WriteString("Size");
15578  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
15579 
15580  json.WriteString("Flags");
15581  json.BeginArray(true);
15582  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
15583  {
15584  json.WriteString("DEVICE_LOCAL");
15585  }
15586  json.EndArray();
15587 
15588  if(stats.memoryHeap[heapIndex].blockCount > 0)
15589  {
15590  json.WriteString("Stats");
15591  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
15592  }
15593 
15594  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
15595  {
15596  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
15597  {
15598  json.BeginString("Type ");
15599  json.ContinueString(typeIndex);
15600  json.EndString();
15601 
15602  json.BeginObject();
15603 
15604  json.WriteString("Flags");
15605  json.BeginArray(true);
15606  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
15607  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
15608  {
15609  json.WriteString("DEVICE_LOCAL");
15610  }
15611  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
15612  {
15613  json.WriteString("HOST_VISIBLE");
15614  }
15615  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
15616  {
15617  json.WriteString("HOST_COHERENT");
15618  }
15619  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
15620  {
15621  json.WriteString("HOST_CACHED");
15622  }
15623  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
15624  {
15625  json.WriteString("LAZILY_ALLOCATED");
15626  }
15627  json.EndArray();
15628 
15629  if(stats.memoryType[typeIndex].blockCount > 0)
15630  {
15631  json.WriteString("Stats");
15632  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
15633  }
15634 
15635  json.EndObject();
15636  }
15637  }
15638 
15639  json.EndObject();
15640  }
15641  if(detailedMap == VK_TRUE)
15642  {
15643  allocator->PrintDetailedMap(json);
15644  }
15645 
15646  json.EndObject();
15647  }
15648 
15649  const size_t len = sb.GetLength();
15650  char* const pChars = vma_new_array(allocator, char, len + 1);
15651  if(len > 0)
15652  {
15653  memcpy(pChars, sb.GetData(), len);
15654  }
15655  pChars[len] = '\0';
15656  *ppStatsString = pChars;
15657 }
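
A sketch of dumping the JSON built above; the returned string must go back through vmaFreeStatsString(), since it was allocated with the allocator's own callbacks:

    void DumpStatsJson(VmaAllocator allocator)
    {
        char* statsJson = VMA_NULL;
        vmaBuildStatsString(allocator, &statsJson, VK_TRUE); // VK_TRUE adds the detailed map.
        // ... write statsJson to a log or file ...
        vmaFreeStatsString(allocator, statsJson);
    }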
15658 
15659 void vmaFreeStatsString(
15660  VmaAllocator allocator,
15661  char* pStatsString)
15662 {
15663  if(pStatsString != VMA_NULL)
15664  {
15665  VMA_ASSERT(allocator);
15666  size_t len = strlen(pStatsString);
15667  vma_delete_array(allocator, pStatsString, len + 1);
15668  }
15669 }
15670 
15671 #endif // #if VMA_STATS_STRING_ENABLED
15672 
15673 /*
15674 This function is not protected by any mutex because it just reads immutable data.
15675 */
15676 VkResult vmaFindMemoryTypeIndex(
15677  VmaAllocator allocator,
15678  uint32_t memoryTypeBits,
15679  const VmaAllocationCreateInfo* pAllocationCreateInfo,
15680  uint32_t* pMemoryTypeIndex)
15681 {
15682  VMA_ASSERT(allocator != VK_NULL_HANDLE);
15683  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
15684  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
15685 
15686  if(pAllocationCreateInfo->memoryTypeBits != 0)
15687  {
15688  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
15689  }
15690 
15691  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
15692  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
15693 
15694  const bool mapped = (pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
15695  if(mapped)
15696  {
15697  preferredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
15698  }
15699 
15700  // Convert usage to requiredFlags and preferredFlags.
15701  switch(pAllocationCreateInfo->usage)
15702  {
15703  case VMA_MEMORY_USAGE_UNKNOWN:
15704  break;
15705  case VMA_MEMORY_USAGE_GPU_ONLY:
15706  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
15707  {
15708  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
15709  }
15710  break;
15711  case VMA_MEMORY_USAGE_CPU_ONLY:
15712  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
15713  break;
15714  case VMA_MEMORY_USAGE_CPU_TO_GPU:
15715  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
15716  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
15717  {
15718  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
15719  }
15720  break;
15721  case VMA_MEMORY_USAGE_GPU_TO_CPU:
15722  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
15723  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
15724  break;
15725  default:
15726  break;
15727  }
15728 
15729  *pMemoryTypeIndex = UINT32_MAX;
15730  uint32_t minCost = UINT32_MAX;
15731  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
15732  memTypeIndex < allocator->GetMemoryTypeCount();
15733  ++memTypeIndex, memTypeBit <<= 1)
15734  {
15735  // This memory type is acceptable according to memoryTypeBits bitmask.
15736  if((memTypeBit & memoryTypeBits) != 0)
15737  {
15738  const VkMemoryPropertyFlags currFlags =
15739  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
15740  // This memory type contains requiredFlags.
15741  if((requiredFlags & ~currFlags) == 0)
15742  {
15743  // Calculate cost as number of bits from preferredFlags not present in this memory type.
15744  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
15745  // Remember memory type with lowest cost.
15746  if(currCost < minCost)
15747  {
15748  *pMemoryTypeIndex = memTypeIndex;
15749  if(currCost == 0)
15750  {
15751  return VK_SUCCESS;
15752  }
15753  minCost = currCost;
15754  }
15755  }
15756  }
15757  }
15758  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
15759 }
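
A sketch of a typical call, assuming an existing `allocator`; passing UINT32_MAX as memoryTypeBits means every memory type is considered:

    uint32_t FindStagingMemoryType(VmaAllocator allocator)
    {
        VmaAllocationCreateInfo allocCreateInfo = {};
        allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_TO_GPU;
        uint32_t memTypeIndex = UINT32_MAX;
        vmaFindMemoryTypeIndex(allocator, UINT32_MAX, &allocCreateInfo, &memTypeIndex);
        return memTypeIndex; // Usable e.g. as VmaPoolCreateInfo::memoryTypeIndex.
    }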
15760 
15761 VkResult vmaFindMemoryTypeIndexForBufferInfo(
15762  VmaAllocator allocator,
15763  const VkBufferCreateInfo* pBufferCreateInfo,
15764  const VmaAllocationCreateInfo* pAllocationCreateInfo,
15765  uint32_t* pMemoryTypeIndex)
15766 {
15767  VMA_ASSERT(allocator != VK_NULL_HANDLE);
15768  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
15769  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
15770  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
15771 
15772  const VkDevice hDev = allocator->m_hDevice;
15773  VkBuffer hBuffer = VK_NULL_HANDLE;
15774  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
15775  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
15776  if(res == VK_SUCCESS)
15777  {
15778  VkMemoryRequirements memReq = {};
15779  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
15780  hDev, hBuffer, &memReq);
15781 
15782  res = vmaFindMemoryTypeIndex(
15783  allocator,
15784  memReq.memoryTypeBits,
15785  pAllocationCreateInfo,
15786  pMemoryTypeIndex);
15787 
15788  allocator->GetVulkanFunctions().vkDestroyBuffer(
15789  hDev, hBuffer, allocator->GetAllocationCallbacks());
15790  }
15791  return res;
15792 }
15793 
15794 VkResult vmaFindMemoryTypeIndexForImageInfo(
15795  VmaAllocator allocator,
15796  const VkImageCreateInfo* pImageCreateInfo,
15797  const VmaAllocationCreateInfo* pAllocationCreateInfo,
15798  uint32_t* pMemoryTypeIndex)
15799 {
15800  VMA_ASSERT(allocator != VK_NULL_HANDLE);
15801  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
15802  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
15803  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
15804 
15805  const VkDevice hDev = allocator->m_hDevice;
15806  VkImage hImage = VK_NULL_HANDLE;
15807  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
15808  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
15809  if(res == VK_SUCCESS)
15810  {
15811  VkMemoryRequirements memReq = {};
15812  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
15813  hDev, hImage, &memReq);
15814 
15815  res = vmaFindMemoryTypeIndex(
15816  allocator,
15817  memReq.memoryTypeBits,
15818  pAllocationCreateInfo,
15819  pMemoryTypeIndex);
15820 
15821  allocator->GetVulkanFunctions().vkDestroyImage(
15822  hDev, hImage, allocator->GetAllocationCallbacks());
15823  }
15824  return res;
15825 }
15826 
15827 VkResult vmaCreatePool(
15828  VmaAllocator allocator,
15829  const VmaPoolCreateInfo* pCreateInfo,
15830  VmaPool* pPool)
15831 {
15832  VMA_ASSERT(allocator && pCreateInfo && pPool);
15833 
15834  VMA_DEBUG_LOG("vmaCreatePool");
15835 
15836  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15837 
15838  VkResult res = allocator->CreatePool(pCreateInfo, pPool);
15839 
15840 #if VMA_RECORDING_ENABLED
15841  if(allocator->GetRecorder() != VMA_NULL)
15842  {
15843  allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
15844  }
15845 #endif
15846 
15847  return res;
15848 }
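
A sketch of a custom pool round trip, assuming `allocator` and a `memTypeIndex` found e.g. via vmaFindMemoryTypeIndex():

    VkResult CreateCustomPool(VmaAllocator allocator, uint32_t memTypeIndex, VmaPool* pPool)
    {
        VmaPoolCreateInfo poolCreateInfo = {};
        poolCreateInfo.memoryTypeIndex = memTypeIndex;
        poolCreateInfo.blockSize = 64ull * 1024 * 1024; // 0 would mean the default preferred size.
        poolCreateInfo.minBlockCount = 1; // Pre-created by CreateMinBlocks() above.
        poolCreateInfo.maxBlockCount = 0; // Translated to SIZE_MAX (unlimited) above.
        // Allocations target the pool via VmaAllocationCreateInfo::pool;
        // destroy it later with vmaDestroyPool().
        return vmaCreatePool(allocator, &poolCreateInfo, pPool);
    }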
15849 
15850 void vmaDestroyPool(
15851  VmaAllocator allocator,
15852  VmaPool pool)
15853 {
15854  VMA_ASSERT(allocator);
15855 
15856  if(pool == VK_NULL_HANDLE)
15857  {
15858  return;
15859  }
15860 
15861  VMA_DEBUG_LOG("vmaDestroyPool");
15862 
15863  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15864 
15865 #if VMA_RECORDING_ENABLED
15866  if(allocator->GetRecorder() != VMA_NULL)
15867  {
15868  allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
15869  }
15870 #endif
15871 
15872  allocator->DestroyPool(pool);
15873 }
15874 
15875 void vmaGetPoolStats(
15876  VmaAllocator allocator,
15877  VmaPool pool,
15878  VmaPoolStats* pPoolStats)
15879 {
15880  VMA_ASSERT(allocator && pool && pPoolStats);
15881 
15882  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15883 
15884  allocator->GetPoolStats(pool, pPoolStats);
15885 }
15886 
15887 void vmaMakePoolAllocationsLost(
15888  VmaAllocator allocator,
15889  VmaPool pool,
15890  size_t* pLostAllocationCount)
15891 {
15892  VMA_ASSERT(allocator && pool);
15893 
15894  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15895 
15896 #if VMA_RECORDING_ENABLED
15897  if(allocator->GetRecorder() != VMA_NULL)
15898  {
15899  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
15900  }
15901 #endif
15902 
15903  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
15904 }
15905 
15906 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
15907 {
15908  VMA_ASSERT(allocator && pool);
15909 
15910  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15911 
15912  VMA_DEBUG_LOG("vmaCheckPoolCorruption");
15913 
15914  return allocator->CheckPoolCorruption(pool);
15915 }
15916 
15917 VkResult vmaAllocateMemory(
15918  VmaAllocator allocator,
15919  const VkMemoryRequirements* pVkMemoryRequirements,
15920  const VmaAllocationCreateInfo* pCreateInfo,
15921  VmaAllocation* pAllocation,
15922  VmaAllocationInfo* pAllocationInfo)
15923 {
15924  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
15925 
15926  VMA_DEBUG_LOG("vmaAllocateMemory");
15927 
15928  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15929 
15930  VkResult result = allocator->AllocateMemory(
15931  *pVkMemoryRequirements,
15932  false, // requiresDedicatedAllocation
15933  false, // prefersDedicatedAllocation
15934  VK_NULL_HANDLE, // dedicatedBuffer
15935  VK_NULL_HANDLE, // dedicatedImage
15936  *pCreateInfo,
15937  VMA_SUBALLOCATION_TYPE_UNKNOWN,
15938  1, // allocationCount
15939  pAllocation);
15940 
15941 #if VMA_RECORDING_ENABLED
15942  if(allocator->GetRecorder() != VMA_NULL)
15943  {
15944  allocator->GetRecorder()->RecordAllocateMemory(
15945  allocator->GetCurrentFrameIndex(),
15946  *pVkMemoryRequirements,
15947  *pCreateInfo,
15948  *pAllocation);
15949  }
15950 #endif
15951 
15952  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
15953  {
15954  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
15955  }
15956 
15957  return result;
15958 }
15959 
15960 VkResult vmaAllocateMemoryPages(
15961  VmaAllocator allocator,
15962  const VkMemoryRequirements* pVkMemoryRequirements,
15963  const VmaAllocationCreateInfo* pCreateInfo,
15964  size_t allocationCount,
15965  VmaAllocation* pAllocations,
15966  VmaAllocationInfo* pAllocationInfo)
15967 {
15968  if(allocationCount == 0)
15969  {
15970  return VK_SUCCESS;
15971  }
15972 
15973  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocations);
15974 
15975  VMA_DEBUG_LOG("vmaAllocateMemoryPages");
15976 
15977  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15978 
15979  VkResult result = allocator->AllocateMemory(
15980  *pVkMemoryRequirements,
15981  false, // requiresDedicatedAllocation
15982  false, // prefersDedicatedAllocation
15983  VK_NULL_HANDLE, // dedicatedBuffer
15984  VK_NULL_HANDLE, // dedicatedImage
15985  *pCreateInfo,
15986  VMA_SUBALLOCATION_TYPE_UNKNOWN,
15987  allocationCount,
15988  pAllocations);
15989 
15990 #if VMA_RECORDING_ENABLED
15991  if(allocator->GetRecorder() != VMA_NULL)
15992  {
15993  allocator->GetRecorder()->RecordAllocateMemoryPages(
15994  allocator->GetCurrentFrameIndex(),
15995  *pVkMemoryRequirements,
15996  *pCreateInfo,
15997  (uint64_t)allocationCount,
15998  pAllocations);
15999  }
16000 #endif
16001 
16002  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
16003  {
16004  for(size_t i = 0; i < allocationCount; ++i)
16005  {
16006  allocator->GetAllocationInfo(pAllocations[i], pAllocationInfo + i);
16007  }
16008  }
16009 
16010  return result;
16011 }
16012 
16013 VkResult vmaAllocateMemoryForBuffer(
16014  VmaAllocator allocator,
16015  VkBuffer buffer,
16016  const VmaAllocationCreateInfo* pCreateInfo,
16017  VmaAllocation* pAllocation,
16018  VmaAllocationInfo* pAllocationInfo)
16019 {
16020  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
16021 
16022  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
16023 
16024  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16025 
16026  VkMemoryRequirements vkMemReq = {};
16027  bool requiresDedicatedAllocation = false;
16028  bool prefersDedicatedAllocation = false;
16029  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
16030  requiresDedicatedAllocation,
16031  prefersDedicatedAllocation);
16032 
16033  VkResult result = allocator->AllocateMemory(
16034  vkMemReq,
16035  requiresDedicatedAllocation,
16036  prefersDedicatedAllocation,
16037  buffer, // dedicatedBuffer
16038  VK_NULL_HANDLE, // dedicatedImage
16039  *pCreateInfo,
16040  VMA_SUBALLOCATION_TYPE_BUFFER,
16041  1, // allocationCount
16042  pAllocation);
16043 
16044 #if VMA_RECORDING_ENABLED
16045  if(allocator->GetRecorder() != VMA_NULL)
16046  {
16047  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
16048  allocator->GetCurrentFrameIndex(),
16049  vkMemReq,
16050  requiresDedicatedAllocation,
16051  prefersDedicatedAllocation,
16052  *pCreateInfo,
16053  *pAllocation);
16054  }
16055 #endif
16056 
16057  if(pAllocationInfo && result == VK_SUCCESS)
16058  {
16059  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
16060  }
16061 
16062  return result;
16063 }
16064 
16065 VkResult vmaAllocateMemoryForImage(
16066  VmaAllocator allocator,
16067  VkImage image,
16068  const VmaAllocationCreateInfo* pCreateInfo,
16069  VmaAllocation* pAllocation,
16070  VmaAllocationInfo* pAllocationInfo)
16071 {
16072  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
16073 
16074  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
16075 
16076  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16077 
16078  VkMemoryRequirements vkMemReq = {};
16079  bool requiresDedicatedAllocation = false;
16080  bool prefersDedicatedAllocation = false;
16081  allocator->GetImageMemoryRequirements(image, vkMemReq,
16082  requiresDedicatedAllocation, prefersDedicatedAllocation);
16083 
16084  VkResult result = allocator->AllocateMemory(
16085  vkMemReq,
16086  requiresDedicatedAllocation,
16087  prefersDedicatedAllocation,
16088  VK_NULL_HANDLE, // dedicatedBuffer
16089  image, // dedicatedImage
16090  *pCreateInfo,
16091  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
16092  1, // allocationCount
16093  pAllocation);
16094 
16095 #if VMA_RECORDING_ENABLED
16096  if(allocator->GetRecorder() != VMA_NULL)
16097  {
16098  allocator->GetRecorder()->RecordAllocateMemoryForImage(
16099  allocator->GetCurrentFrameIndex(),
16100  vkMemReq,
16101  requiresDedicatedAllocation,
16102  prefersDedicatedAllocation,
16103  *pCreateInfo,
16104  *pAllocation);
16105  }
16106 #endif
16107 
16108  if(pAllocationInfo && result == VK_SUCCESS)
16109  {
16110  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
16111  }
16112 
16113  return result;
16114 }
16115 
16116 void vmaFreeMemory(
16117  VmaAllocator allocator,
16118  VmaAllocation allocation)
16119 {
16120  VMA_ASSERT(allocator);
16121 
16122  if(allocation == VK_NULL_HANDLE)
16123  {
16124  return;
16125  }
16126 
16127  VMA_DEBUG_LOG("vmaFreeMemory");
16128 
16129  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16130 
16131 #if VMA_RECORDING_ENABLED
16132  if(allocator->GetRecorder() != VMA_NULL)
16133  {
16134  allocator->GetRecorder()->RecordFreeMemory(
16135  allocator->GetCurrentFrameIndex(),
16136  allocation);
16137  }
16138 #endif
16139 
16140  allocator->FreeMemory(
16141  1, // allocationCount
16142  &allocation);
16143 }
16144 
16145 void vmaFreeMemoryPages(
16146  VmaAllocator allocator,
16147  size_t allocationCount,
16148  VmaAllocation* pAllocations)
16149 {
16150  if(allocationCount == 0)
16151  {
16152  return;
16153  }
16154 
16155  VMA_ASSERT(allocator);
16156 
16157  VMA_DEBUG_LOG("vmaFreeMemoryPages");
16158 
16159  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16160 
16161 #if VMA_RECORDING_ENABLED
16162  if(allocator->GetRecorder() != VMA_NULL)
16163  {
16164  allocator->GetRecorder()->RecordFreeMemoryPages(
16165  allocator->GetCurrentFrameIndex(),
16166  (uint64_t)allocationCount,
16167  pAllocations);
16168  }
16169 #endif
16170 
16171  allocator->FreeMemory(allocationCount, pAllocations);
16172 }
16173 
16174 VkResult vmaResizeAllocation(
16175  VmaAllocator allocator,
16176  VmaAllocation allocation,
16177  VkDeviceSize newSize)
16178 {
16179  VMA_ASSERT(allocator && allocation);
16180 
16181  VMA_DEBUG_LOG("vmaResizeAllocation");
16182 
16183  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16184 
16185 #if VMA_RECORDING_ENABLED
16186  if(allocator->GetRecorder() != VMA_NULL)
16187  {
16188  allocator->GetRecorder()->RecordResizeAllocation(
16189  allocator->GetCurrentFrameIndex(),
16190  allocation,
16191  newSize);
16192  }
16193 #endif
16194 
16195  return allocator->ResizeAllocation(allocation, newSize);
16196 }
16197 
16198 void vmaGetAllocationInfo(
16199  VmaAllocator allocator,
16200  VmaAllocation allocation,
16201  VmaAllocationInfo* pAllocationInfo)
16202 {
16203  VMA_ASSERT(allocator && allocation && pAllocationInfo);
16204 
16205  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16206 
16207 #if VMA_RECORDING_ENABLED
16208  if(allocator->GetRecorder() != VMA_NULL)
16209  {
16210  allocator->GetRecorder()->RecordGetAllocationInfo(
16211  allocator->GetCurrentFrameIndex(),
16212  allocation);
16213  }
16214 #endif
16215 
16216  allocator->GetAllocationInfo(allocation, pAllocationInfo);
16217 }
16218 
16219 VkBool32 vmaTouchAllocation(
16220  VmaAllocator allocator,
16221  VmaAllocation allocation)
16222 {
16223  VMA_ASSERT(allocator && allocation);
16224 
16225  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16226 
16227 #if VMA_RECORDING_ENABLED
16228  if(allocator->GetRecorder() != VMA_NULL)
16229  {
16230  allocator->GetRecorder()->RecordTouchAllocation(
16231  allocator->GetCurrentFrameIndex(),
16232  allocation);
16233  }
16234 #endif
16235 
16236  return allocator->TouchAllocation(allocation);
16237 }
16238 
16239 void vmaSetAllocationUserData(
16240  VmaAllocator allocator,
16241  VmaAllocation allocation,
16242  void* pUserData)
16243 {
16244  VMA_ASSERT(allocator && allocation);
16245 
16246  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16247 
16248  allocation->SetUserData(allocator, pUserData);
16249 
16250 #if VMA_RECORDING_ENABLED
16251  if(allocator->GetRecorder() != VMA_NULL)
16252  {
16253  allocator->GetRecorder()->RecordSetAllocationUserData(
16254  allocator->GetCurrentFrameIndex(),
16255  allocation,
16256  pUserData);
16257  }
16258 #endif
16259 }
16260 
16261 void vmaCreateLostAllocation(
16262  VmaAllocator allocator,
16263  VmaAllocation* pAllocation)
16264 {
16265  VMA_ASSERT(allocator && pAllocation);
16266 
16267  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
16268 
16269  allocator->CreateLostAllocation(pAllocation);
16270 
16271 #if VMA_RECORDING_ENABLED
16272  if(allocator->GetRecorder() != VMA_NULL)
16273  {
16274  allocator->GetRecorder()->RecordCreateLostAllocation(
16275  allocator->GetCurrentFrameIndex(),
16276  *pAllocation);
16277  }
16278 #endif
16279 }
16280 
16281 VkResult vmaMapMemory(
16282  VmaAllocator allocator,
16283  VmaAllocation allocation,
16284  void** ppData)
16285 {
16286  VMA_ASSERT(allocator && allocation && ppData);
16287 
16288  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16289 
16290  VkResult res = allocator->Map(allocation, ppData);
16291 
16292 #if VMA_RECORDING_ENABLED
16293  if(allocator->GetRecorder() != VMA_NULL)
16294  {
16295  allocator->GetRecorder()->RecordMapMemory(
16296  allocator->GetCurrentFrameIndex(),
16297  allocation);
16298  }
16299 #endif
16300 
16301  return res;
16302 }
16303 
16304 void vmaUnmapMemory(
16305  VmaAllocator allocator,
16306  VmaAllocation allocation)
16307 {
16308  VMA_ASSERT(allocator && allocation);
16309 
16310  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16311 
16312 #if VMA_RECORDING_ENABLED
16313  if(allocator->GetRecorder() != VMA_NULL)
16314  {
16315  allocator->GetRecorder()->RecordUnmapMemory(
16316  allocator->GetCurrentFrameIndex(),
16317  allocation);
16318  }
16319 #endif
16320 
16321  allocator->Unmap(allocation);
16322 }
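
Explicit map/unmap pairs can be avoided entirely with persistent mapping. A sketch using VMA_ALLOCATION_CREATE_MAPPED_BIT, assuming an existing `allocator`:

    void CreatePersistentlyMappedBuffer(VmaAllocator allocator)
    {
        VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
        bufCreateInfo.size = 65536;
        bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;

        VmaAllocationCreateInfo allocCreateInfo = {};
        allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
        allocCreateInfo.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT; // Mapped for its whole lifetime.

        VkBuffer buf = VK_NULL_HANDLE;
        VmaAllocation alloc = VK_NULL_HANDLE;
        VmaAllocationInfo allocInfo = {};
        if(vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo,
            &buf, &alloc, &allocInfo) == VK_SUCCESS)
        {
            // allocInfo.pMappedData stays valid; no vmaMapMemory()/vmaUnmapMemory() needed.
        }
    }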
16323 
16324 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
16325 {
16326  VMA_ASSERT(allocator && allocation);
16327 
16328  VMA_DEBUG_LOG("vmaFlushAllocation");
16329 
16330  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16331 
16332  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
16333 
16334 #if VMA_RECORDING_ENABLED
16335  if(allocator->GetRecorder() != VMA_NULL)
16336  {
16337  allocator->GetRecorder()->RecordFlushAllocation(
16338  allocator->GetCurrentFrameIndex(),
16339  allocation, offset, size);
16340  }
16341 #endif
16342 }
16343 
16344 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
16345 {
16346  VMA_ASSERT(allocator && allocation);
16347 
16348  VMA_DEBUG_LOG("vmaInvalidateAllocation");
16349 
16350  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16351 
16352  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
16353 
16354 #if VMA_RECORDING_ENABLED
16355  if(allocator->GetRecorder() != VMA_NULL)
16356  {
16357  allocator->GetRecorder()->RecordInvalidateAllocation(
16358  allocator->GetCurrentFrameIndex(),
16359  allocation, offset, size);
16360  }
16361 #endif
16362 }
16363 
16364 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
16365 {
16366  VMA_ASSERT(allocator);
16367 
16368  VMA_DEBUG_LOG("vmaCheckCorruption");
16369 
16370  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16371 
16372  return allocator->CheckCorruption(memoryTypeBits);
16373 }
16374 
16375 VkResult vmaDefragment(
16376  VmaAllocator allocator,
16377  VmaAllocation* pAllocations,
16378  size_t allocationCount,
16379  VkBool32* pAllocationsChanged,
16380  const VmaDefragmentationInfo *pDefragmentationInfo,
16381  VmaDefragmentationStats* pDefragmentationStats)
16382 {
16383  // Deprecated interface, reimplemented using new one.
16384 
16385  VmaDefragmentationInfo2 info2 = {};
16386  info2.allocationCount = (uint32_t)allocationCount;
16387  info2.pAllocations = pAllocations;
16388  info2.pAllocationsChanged = pAllocationsChanged;
16389  if(pDefragmentationInfo != VMA_NULL)
16390  {
16391  info2.maxCpuAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
16392  info2.maxCpuBytesToMove = pDefragmentationInfo->maxBytesToMove;
16393  }
16394  else
16395  {
16396  info2.maxCpuAllocationsToMove = UINT32_MAX;
16397  info2.maxCpuBytesToMove = VK_WHOLE_SIZE;
16398  }
16399  // info2.flags, maxGpuAllocationsToMove, maxGpuBytesToMove, commandBuffer deliberately left zero.
16400 
16401  VmaDefragmentationContext ctx = VK_NULL_HANDLE;
16402  VkResult res = vmaDefragmentationBegin(allocator, &info2, pDefragmentationStats, &ctx);
16403  if(res == VK_NOT_READY)
16404  {
16405  res = vmaDefragmentationEnd( allocator, ctx);
16406  }
16407  return res;
16408 }
16409 
16410 VkResult vmaDefragmentationBegin(
16411  VmaAllocator allocator,
16412  const VmaDefragmentationInfo2* pInfo,
16413  VmaDefragmentationStats* pStats,
16414  VmaDefragmentationContext *pContext)
16415 {
16416  VMA_ASSERT(allocator && pInfo && pContext);
16417 
16418  // Degenerate case: Nothing to defragment.
16419  if(pInfo->allocationCount == 0 && pInfo->poolCount == 0)
16420  {
16421  return VK_SUCCESS;
16422  }
16423 
16424  VMA_ASSERT(pInfo->allocationCount == 0 || pInfo->pAllocations != VMA_NULL);
16425  VMA_ASSERT(pInfo->poolCount == 0 || pInfo->pPools != VMA_NULL);
16426  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->allocationCount, pInfo->pAllocations));
16427  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->poolCount, pInfo->pPools));
16428 
16429  VMA_DEBUG_LOG("vmaDefragmentationBegin");
16430 
16431  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16432 
16433  VkResult res = allocator->DefragmentationBegin(*pInfo, pStats, pContext);
16434 
16435 #if VMA_RECORDING_ENABLED
16436  if(allocator->GetRecorder() != VMA_NULL)
16437  {
16438  allocator->GetRecorder()->RecordDefragmentationBegin(
16439  allocator->GetCurrentFrameIndex(), *pInfo, *pContext);
16440  }
16441 #endif
16442 
16443  return res;
16444 }
16445 
16446 VkResult vmaDefragmentationEnd(
16447  VmaAllocator allocator,
16448  VmaDefragmentationContext context)
16449 {
16450  VMA_ASSERT(allocator);
16451 
16452  VMA_DEBUG_LOG("vmaDefragmentationEnd");
16453 
16454  if(context != VK_NULL_HANDLE)
16455  {
16456  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16457 
16458 #if VMA_RECORDING_ENABLED
16459  if(allocator->GetRecorder() != VMA_NULL)
16460  {
16461  allocator->GetRecorder()->RecordDefragmentationEnd(
16462  allocator->GetCurrentFrameIndex(), context);
16463  }
16464 #endif
16465 
16466  return allocator->DefragmentationEnd(context);
16467  }
16468  else
16469  {
16470  return VK_SUCCESS;
16471  }
16472 }
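
A sketch of a CPU-only pass through the begin/end pair above, assuming `allocator` and an array of `allocCount` allocations whose resources are currently idle (requires <vector>):

    void DefragmentCpu(VmaAllocator allocator, VmaAllocation* allocs, uint32_t allocCount)
    {
        std::vector<VkBool32> changed(allocCount);
        VmaDefragmentationInfo2 defragInfo = {};
        defragInfo.allocationCount = allocCount;
        defragInfo.pAllocations = allocs;
        defragInfo.pAllocationsChanged = changed.data();
        defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
        defragInfo.maxCpuAllocationsToMove = UINT32_MAX;
        // No commandBuffer given, so only host-visible memory can be moved.
        VmaDefragmentationContext defragCtx = VK_NULL_HANDLE;
        vmaDefragmentationBegin(allocator, &defragInfo, VMA_NULL, &defragCtx);
        vmaDefragmentationEnd(allocator, defragCtx); // Accepts VK_NULL_HANDLE, as above.
        // Where changed[i] == VK_TRUE, recreate and rebind the affected buffers/images
        // and refresh anything previously read via vmaGetAllocationInfo().
    }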
16473 
16474 VkResult vmaBindBufferMemory(
16475  VmaAllocator allocator,
16476  VmaAllocation allocation,
16477  VkBuffer buffer)
16478 {
16479  VMA_ASSERT(allocator && allocation && buffer);
16480 
16481  VMA_DEBUG_LOG("vmaBindBufferMemory");
16482 
16483  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16484 
16485  return allocator->BindBufferMemory(allocation, buffer);
16486 }
16487 
16488 VkResult vmaBindImageMemory(
16489  VmaAllocator allocator,
16490  VmaAllocation allocation,
16491  VkImage image)
16492 {
16493  VMA_ASSERT(allocator && allocation && image);
16494 
16495  VMA_DEBUG_LOG("vmaBindImageMemory");
16496 
16497  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16498 
16499  return allocator->BindImageMemory(allocation, image);
16500 }
16501 
16502 VkResult vmaCreateBuffer(
16503  VmaAllocator allocator,
16504  const VkBufferCreateInfo* pBufferCreateInfo,
16505  const VmaAllocationCreateInfo* pAllocationCreateInfo,
16506  VkBuffer* pBuffer,
16507  VmaAllocation* pAllocation,
16508  VmaAllocationInfo* pAllocationInfo)
16509 {
16510  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
16511 
16512  if(pBufferCreateInfo->size == 0)
16513  {
16514  return VK_ERROR_VALIDATION_FAILED_EXT;
16515  }
16516 
16517  VMA_DEBUG_LOG("vmaCreateBuffer");
16518 
16519  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16520 
16521  *pBuffer = VK_NULL_HANDLE;
16522  *pAllocation = VK_NULL_HANDLE;
16523 
16524  // 1. Create VkBuffer.
16525  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
16526  allocator->m_hDevice,
16527  pBufferCreateInfo,
16528  allocator->GetAllocationCallbacks(),
16529  pBuffer);
16530  if(res >= 0)
16531  {
16532  // 2. vkGetBufferMemoryRequirements.
16533  VkMemoryRequirements vkMemReq = {};
16534  bool requiresDedicatedAllocation = false;
16535  bool prefersDedicatedAllocation = false;
16536  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
16537  requiresDedicatedAllocation, prefersDedicatedAllocation);
16538 
16539  // Make sure alignment requirements for specific buffer usages reported
16540  // in Physical Device Properties are included in alignment reported by memory requirements.
16541  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
16542  {
16543  VMA_ASSERT(vkMemReq.alignment %
16544  allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
16545  }
16546  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
16547  {
16548  VMA_ASSERT(vkMemReq.alignment %
16549  allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
16550  }
16551  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
16552  {
16553  VMA_ASSERT(vkMemReq.alignment %
16554  allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
16555  }
16556 
16557  // 3. Allocate memory using allocator.
16558  res = allocator->AllocateMemory(
16559  vkMemReq,
16560  requiresDedicatedAllocation,
16561  prefersDedicatedAllocation,
16562  *pBuffer, // dedicatedBuffer
16563  VK_NULL_HANDLE, // dedicatedImage
16564  *pAllocationCreateInfo,
16565  VMA_SUBALLOCATION_TYPE_BUFFER,
16566  1, // allocationCount
16567  pAllocation);
16568 
16569 #if VMA_RECORDING_ENABLED
16570  if(allocator->GetRecorder() != VMA_NULL)
16571  {
16572  allocator->GetRecorder()->RecordCreateBuffer(
16573  allocator->GetCurrentFrameIndex(),
16574  *pBufferCreateInfo,
16575  *pAllocationCreateInfo,
16576  *pAllocation);
16577  }
16578 #endif
16579 
16580  if(res >= 0)
16581  {
16582  // 4. Bind buffer with memory.
16583  res = allocator->BindBufferMemory(*pAllocation, *pBuffer);
16584  if(res >= 0)
16585  {
16586  // All steps succeeded.
16587  #if VMA_STATS_STRING_ENABLED
16588  (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
16589  #endif
16590  if(pAllocationInfo != VMA_NULL)
16591  {
16592  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
16593  }
16594 
16595  return VK_SUCCESS;
16596  }
16597  allocator->FreeMemory(
16598  1, // allocationCount
16599  pAllocation);
16600  *pAllocation = VK_NULL_HANDLE;
16601  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
16602  *pBuffer = VK_NULL_HANDLE;
16603  return res;
16604  }
16605  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
16606  *pBuffer = VK_NULL_HANDLE;
16607  return res;
16608  }
16609  return res;
16610 }
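
A sketch of the create/allocate/bind sequence above from the caller's side, assuming an existing `allocator`:

    VkResult CreateVertexBuffer(VmaAllocator allocator, VkBuffer* pBuffer, VmaAllocation* pAlloc)
    {
        VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
        bufCreateInfo.size = 65536;
        bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

        VmaAllocationCreateInfo allocCreateInfo = {};
        allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

        // Last argument (VmaAllocationInfo*) is optional.
        // Pair with vmaDestroyBuffer(allocator, buffer, allocation), which frees both.
        return vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo,
            pBuffer, pAlloc, VMA_NULL);
    }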
16611 
16612 void vmaDestroyBuffer(
16613  VmaAllocator allocator,
16614  VkBuffer buffer,
16615  VmaAllocation allocation)
16616 {
16617  VMA_ASSERT(allocator);
16618 
16619  if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
16620  {
16621  return;
16622  }
16623 
16624  VMA_DEBUG_LOG("vmaDestroyBuffer");
16625 
16626  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16627 
16628 #if VMA_RECORDING_ENABLED
16629  if(allocator->GetRecorder() != VMA_NULL)
16630  {
16631  allocator->GetRecorder()->RecordDestroyBuffer(
16632  allocator->GetCurrentFrameIndex(),
16633  allocation);
16634  }
16635 #endif
16636 
16637  if(buffer != VK_NULL_HANDLE)
16638  {
16639  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
16640  }
16641 
16642  if(allocation != VK_NULL_HANDLE)
16643  {
16644  allocator->FreeMemory(
16645  1, // allocationCount
16646  &allocation);
16647  }
16648 }
16649 
16650 VkResult vmaCreateImage(
16651  VmaAllocator allocator,
16652  const VkImageCreateInfo* pImageCreateInfo,
16653  const VmaAllocationCreateInfo* pAllocationCreateInfo,
16654  VkImage* pImage,
16655  VmaAllocation* pAllocation,
16656  VmaAllocationInfo* pAllocationInfo)
16657 {
16658  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
16659 
16660  if(pImageCreateInfo->extent.width == 0 ||
16661  pImageCreateInfo->extent.height == 0 ||
16662  pImageCreateInfo->extent.depth == 0 ||
16663  pImageCreateInfo->mipLevels == 0 ||
16664  pImageCreateInfo->arrayLayers == 0)
16665  {
16666  return VK_ERROR_VALIDATION_FAILED_EXT;
16667  }
16668 
16669  VMA_DEBUG_LOG("vmaCreateImage");
16670 
16671  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16672 
16673  *pImage = VK_NULL_HANDLE;
16674  *pAllocation = VK_NULL_HANDLE;
16675 
16676  // 1. Create VkImage.
16677  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
16678  allocator->m_hDevice,
16679  pImageCreateInfo,
16680  allocator->GetAllocationCallbacks(),
16681  pImage);
16682  if(res >= 0)
16683  {
16684  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
16685  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
16686  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
16687 
16688  // 2. Allocate memory using allocator.
16689  VkMemoryRequirements vkMemReq = {};
16690  bool requiresDedicatedAllocation = false;
16691  bool prefersDedicatedAllocation = false;
16692  allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
16693  requiresDedicatedAllocation, prefersDedicatedAllocation);
16694 
16695  res = allocator->AllocateMemory(
16696  vkMemReq,
16697  requiresDedicatedAllocation,
16698  prefersDedicatedAllocation,
16699  VK_NULL_HANDLE, // dedicatedBuffer
16700  *pImage, // dedicatedImage
16701  *pAllocationCreateInfo,
16702  suballocType,
16703  1, // allocationCount
16704  pAllocation);
16705 
16706 #if VMA_RECORDING_ENABLED
16707  if(allocator->GetRecorder() != VMA_NULL)
16708  {
16709  allocator->GetRecorder()->RecordCreateImage(
16710  allocator->GetCurrentFrameIndex(),
16711  *pImageCreateInfo,
16712  *pAllocationCreateInfo,
16713  *pAllocation);
16714  }
16715 #endif
16716 
16717  if(res >= 0)
16718  {
16719  // 3. Bind image with memory.
16720  res = allocator->BindImageMemory(*pAllocation, *pImage);
16721  if(res >= 0)
16722  {
16723  // All steps succeeded.
16724  #if VMA_STATS_STRING_ENABLED
16725  (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
16726  #endif
16727  if(pAllocationInfo != VMA_NULL)
16728  {
16729  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
16730  }
16731 
16732  return VK_SUCCESS;
16733  }
16734  allocator->FreeMemory(
16735  1, // allocationCount
16736  pAllocation);
16737  *pAllocation = VK_NULL_HANDLE;
16738  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
16739  *pImage = VK_NULL_HANDLE;
16740  return res;
16741  }
16742  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
16743  *pImage = VK_NULL_HANDLE;
16744  return res;
16745  }
16746  return res;
16747 }
16748 
16749 void vmaDestroyImage(
16750  VmaAllocator allocator,
16751  VkImage image,
16752  VmaAllocation allocation)
16753 {
16754  VMA_ASSERT(allocator);
16755 
16756  if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
16757  {
16758  return;
16759  }
16760 
16761  VMA_DEBUG_LOG("vmaDestroyImage");
16762 
16763  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16764 
16765 #if VMA_RECORDING_ENABLED
16766  if(allocator->GetRecorder() != VMA_NULL)
16767  {
16768  allocator->GetRecorder()->RecordDestroyImage(
16769  allocator->GetCurrentFrameIndex(),
16770  allocation);
16771  }
16772 #endif
16773 
16774  if(image != VK_NULL_HANDLE)
16775  {
16776  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
16777  }
16778  if(allocation != VK_NULL_HANDLE)
16779  {
16780  allocator->FreeMemory(
16781  1, // allocationCount
16782  &allocation);
16783  }
16784 }
16785 
16786 #endif // #ifdef VMA_IMPLEMENTATION
diff --git a/src/Common.cpp b/src/Common.cpp
index ea7b9a1..790cdda 100644
--- a/src/Common.cpp
+++ b/src/Common.cpp
@@ -1,3 +1,25 @@
+//
+// Copyright (c) 2017-2019 Advanced Micro Devices, Inc. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+//
+
 #include "Common.h"
 
 #ifdef _WIN32
diff --git a/src/Common.h b/src/Common.h
index 714fa6a..f5954c5 100644
--- a/src/Common.h
+++ b/src/Common.h
@@ -1,3 +1,25 @@
+//
+// Copyright (c) 2017-2019 Advanced Micro Devices, Inc. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+//
+
 #ifndef COMMON_H_
 #define COMMON_H_
 
diff --git a/src/Shaders/Shader.frag b/src/Shaders/Shader.frag
index 6f1f9d3..4060483 100644
--- a/src/Shaders/Shader.frag
+++ b/src/Shaders/Shader.frag
@@ -1,5 +1,5 @@
 //
-// Copyright (c) 2017 Advanced Micro Devices, Inc. All rights reserved.
+// Copyright (c) 2017-2019 Advanced Micro Devices, Inc. All rights reserved.
 //
 // Permission is hereby granted, free of charge, to any person obtaining a copy
 // of this software and associated documentation files (the "Software"), to deal
diff --git a/src/Shaders/Shader.vert b/src/Shaders/Shader.vert
index c6e6cab..06ff262 100644
--- a/src/Shaders/Shader.vert
+++ b/src/Shaders/Shader.vert
@@ -1,5 +1,5 @@
 //
-// Copyright (c) 2017 Advanced Micro Devices, Inc. All rights reserved.
+// Copyright (c) 2017-2019 Advanced Micro Devices, Inc. All rights reserved.
 //
 // Permission is hereby granted, free of charge, to any person obtaining a copy
 // of this software and associated documentation files (the "Software"), to deal
diff --git a/src/Shaders/SparseBindingTest.comp b/src/Shaders/SparseBindingTest.comp
index 21c41ab..7c8889e 100644
--- a/src/Shaders/SparseBindingTest.comp
+++ b/src/Shaders/SparseBindingTest.comp
@@ -1,5 +1,5 @@
 //
-// Copyright (c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+// Copyright (c) 2018-2019 Advanced Micro Devices, Inc. All rights reserved.
 //
 // Permission is hereby granted, free of charge, to any person obtaining a copy
 // of this software and associated documentation files (the "Software"), to deal
diff --git a/src/SparseBindingTest.cpp b/src/SparseBindingTest.cpp
index 3722523..8fbd5e9 100644
--- a/src/SparseBindingTest.cpp
+++ b/src/SparseBindingTest.cpp
@@ -1,3 +1,25 @@
+//
+// Copyright (c) 2017-2019 Advanced Micro Devices, Inc. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+//
+
 #include "Common.h"
 #include "SparseBindingTest.h"
 
diff --git a/src/SparseBindingTest.h b/src/SparseBindingTest.h
index 8637c9c..69b95d6 100644
--- a/src/SparseBindingTest.h
+++ b/src/SparseBindingTest.h
@@ -1,3 +1,25 @@
+//
+// Copyright (c) 2017-2019 Advanced Micro Devices, Inc. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+//
+
 #pragma once
 
 #ifdef _WIN32
diff --git a/src/Tests.cpp b/src/Tests.cpp
index 058fedb..6ad99fb 100644
--- a/src/Tests.cpp
+++ b/src/Tests.cpp
@@ -1,3 +1,25 @@
+//
+// Copyright (c) 2017-2019 Advanced Micro Devices, Inc. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+//
+
 #include "Tests.h"
 #include "VmaUsage.h"
 #include "Common.h"
diff --git a/src/Tests.h b/src/Tests.h
index 9da4f6f..df79318 100644
--- a/src/Tests.h
+++ b/src/Tests.h
@@ -1,3 +1,25 @@
+//
+// Copyright (c) 2017-2019 Advanced Micro Devices, Inc. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+//
+
 #ifndef TESTS_H_
 #define TESTS_H_
 
diff --git a/src/VmaReplay/Common.cpp b/src/VmaReplay/Common.cpp
index a7c723f..104d390 100644
--- a/src/VmaReplay/Common.cpp
+++ b/src/VmaReplay/Common.cpp
@@ -1,3 +1,25 @@
+//
+// Copyright (c) 2017-2019 Advanced Micro Devices, Inc. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+//
+
 #include "Common.h"
 
 bool StrRangeToPtrList(const StrRange& s, std::vector& out)
diff --git a/src/VmaReplay/Common.h b/src/VmaReplay/Common.h
index d0548bf..fa2dfe9 100644
--- a/src/VmaReplay/Common.h
+++ b/src/VmaReplay/Common.h
@@ -1,3 +1,25 @@
+//
+// Copyright (c) 2017-2019 Advanced Micro Devices, Inc. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+//
+
 #pragma once
 
 #include "VmaUsage.h"
diff --git a/src/VmaReplay/Constants.cpp b/src/VmaReplay/Constants.cpp
index 9f19d13..e353f49 100644
--- a/src/VmaReplay/Constants.cpp
+++ b/src/VmaReplay/Constants.cpp
@@ -1,3 +1,25 @@
+//
+// Copyright (c) 2017-2019 Advanced Micro Devices, Inc. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+//
+
 #include "Common.h"
 #include "Constants.h"
 
diff --git a/src/VmaReplay/Constants.h b/src/VmaReplay/Constants.h
index 2c58f68..e479168 100644
--- a/src/VmaReplay/Constants.h
+++ b/src/VmaReplay/Constants.h
@@ -1,3 +1,25 @@
+//
+// Copyright (c) 2017-2019 Advanced Micro Devices, Inc. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+//
+
 #pragma once
 
 extern const int RESULT_EXCEPTION;
diff --git a/src/VmaReplay/VmaReplay.cpp b/src/VmaReplay/VmaReplay.cpp
index 4228009..31a53ed 100644
--- a/src/VmaReplay/VmaReplay.cpp
+++ b/src/VmaReplay/VmaReplay.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright (c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+// Copyright (c) 2018-2019 Advanced Micro Devices, Inc. All rights reserved.
 //
 // Permission is hereby granted, free of charge, to any person obtaining a copy
 // of this software and associated documentation files (the "Software"), to deal
diff --git a/src/VmaReplay/VmaUsage.cpp b/src/VmaReplay/VmaUsage.cpp
index d2d035b..ee781cc 100644
--- a/src/VmaReplay/VmaUsage.cpp
+++ b/src/VmaReplay/VmaUsage.cpp
@@ -1,2 +1,24 @@
+//
+// Copyright (c) 2017-2019 Advanced Micro Devices, Inc. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+//
+
 #define VMA_IMPLEMENTATION
 #include "VmaUsage.h"
diff --git a/src/VmaReplay/VmaUsage.h b/src/VmaReplay/VmaUsage.h
index 31c2b59..ec00a29 100644
--- a/src/VmaReplay/VmaUsage.h
+++ b/src/VmaReplay/VmaUsage.h
@@ -1,3 +1,25 @@
+//
+// Copyright (c) 2017-2019 Advanced Micro Devices, Inc. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+//
+
 #pragma once
 
 #define NOMINMAX
diff --git a/src/VmaUsage.cpp b/src/VmaUsage.cpp
index 2ebf69c..6dc7d7b 100644
--- a/src/VmaUsage.cpp
+++ b/src/VmaUsage.cpp
@@ -1,3 +1,25 @@
+//
+// Copyright (c) 2017-2019 Advanced Micro Devices, Inc. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+//
+
 /*
 In exactly one CPP file define macro VMA_IMPLEMENTATION and then include
 vk_mem_alloc.h to include definitions of its internal implementation
diff --git a/src/VmaUsage.h b/src/VmaUsage.h
index f785ba2..b8761ad 100644
--- a/src/VmaUsage.h
+++ b/src/VmaUsage.h
@@ -1,3 +1,25 @@
+//
+// Copyright (c) 2017-2019 Advanced Micro Devices, Inc. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+//
+
 #ifndef VMA_USAGE_H_
 #define VMA_USAGE_H_
 
diff --git a/src/VulkanSample.cpp b/src/VulkanSample.cpp
index f4b0f27..7884048 100644
--- a/src/VulkanSample.cpp
+++ b/src/VulkanSample.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved.
+// Copyright (c) 2017-2019 Advanced Micro Devices, Inc. All rights reserved.
 //
 // Permission is hereby granted, free of charge, to any person obtaining a copy
 // of this software and associated documentation files (the "Software"), to deal
diff --git a/src/vk_mem_alloc.h b/src/vk_mem_alloc.h
index 4a621f6..c49b8d4 100644
--- a/src/vk_mem_alloc.h
+++ b/src/vk_mem_alloc.h
@@ -1,5 +1,5 @@
 //
-// Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved.
+// Copyright (c) 2017-2019 Advanced Micro Devices, Inc. All rights reserved.
 //
 // Permission is hereby granted, free of charge, to any person obtaining a copy
 // of this software and associated documentation files (the "Software"), to deal