From bdb89a93eb746c51de359517417c62be18c94742 Mon Sep 17 00:00:00 2001
From: Adam Sawicki
Date: Thu, 13 Dec 2018 11:56:30 +0100
Subject: [PATCH] Final fixes before v2.2.0 release.

Minor fixes after tests. Rebuilt binaries and documentation again.
---
 bin/VmaReplay_Release_vs2017.exe        | Bin 214016 -> 214016 bytes
 bin/VulkanSample_Release_vs2017.exe     | Bin 291328 -> 291328 bytes
 docs/html/vk__mem__alloc_8h.html        | 10 +++++-----
 docs/html/vk__mem__alloc_8h_source.html |  2 +-
 src/Tests.cpp                           |  5 +++++
 src/vk_mem_alloc.h                      |  9 +++------
 6 files changed, 14 insertions(+), 12 deletions(-)

diff --git a/bin/VmaReplay_Release_vs2017.exe b/bin/VmaReplay_Release_vs2017.exe
index e05eca01114977298049020b4833c1a25d635d30..04f6b77adda85e731f4d40586888fffdf1439c2b 100644
GIT binary patch
delta 106
[base85-encoded binary delta omitted]
delta 106
[base85-encoded binary delta omitted]

diff --git a/bin/VulkanSample_Release_vs2017.exe b/bin/VulkanSample_Release_vs2017.exe
index 99c7d7431e588069eca1291587e518ff6d0eaff4..f5df2b4c2ece0c6084cb415211f3b16d2c82ff54 100644
GIT binary patch
delta 52421
[base85-encoded binary delta omitted]
delta 52327
[base85-encoded binary delta omitted]

diff --git a/docs/html/vk__mem__alloc_8h.html b/docs/html/vk__mem__alloc_8h.html
index b775504..becb3d2 100644
--- a/docs/html/vk__mem__alloc_8h.html
+++ b/docs/html/vk__mem__alloc_8h.html
@@ -1285,7 +1285,7 @@ Functions
   pCreateInfo        Creation parameters for each allocation.
   allocationCount    Number of allocations to make.
   [out] pAllocations     Pointer to array that will be filled with handles to created allocations.
-  [out] pAlocationInfo   Optional. Pointer to array that will be filled with parameters of created allocations.
+  [out] pAllocationInfo  Optional. Pointer to array that will be filled with parameters of created allocations.
@@ -1873,10 +1873,10 @@ Functions

Begins defragmentation process.

Parameters
-  allocator    Allocator object.
-  pInfo        Structure filled with parameters of defragmentation.
-  pStats       [out] Optional. Statistics of defragmentation. You can pass null if you are not interested in this information.
-  pContext     [out] Context object that must be passed to vmaDefragmentationEnd() to finish defragmentation.
+  allocator         Allocator object.
+  pInfo             Structure filled with parameters of defragmentation.
+  [out] pStats      Optional. Statistics of defragmentation. You can pass null if you are not interested in this information.
+  [out] pContext    Context object that must be passed to vmaDefragmentationEnd() to finish defragmentation.
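A hedged sketch of the begin/end pair described above. It assumes the VmaDefragmentationInfo2 structure that v2.2 introduces for this API (an assumption, since the structure itself is not shown in this patch) and an existing array of allocations:

    /* CPU-side defragmentation of a set of allocations (sketch). */
    VmaDefragmentationInfo2 defragInfo = {0};
    defragInfo.allocationCount = allocationCount;
    defragInfo.pAllocations = allocations;

    VmaDefragmentationStats stats;   /* optional, may be NULL */
    VmaDefragmentationContext ctx;
    VkResult res = vmaDefragmentationBegin(allocator, &defragInfo, &stats, &ctx);
    /* ... once defragmentation has completed ... */
    vmaDefragmentationEnd(allocator, ctx);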
diff --git a/docs/html/vk__mem__alloc_8h_source.html b/docs/html/vk__mem__alloc_8h_source.html
index 8bf1edd..6acbaab 100644
--- a/docs/html/vk__mem__alloc_8h_source.html
+++ b/docs/html/vk__mem__alloc_8h_source.html
@@ -65,7 +65,7 @@ $(function() {
vk_mem_alloc.h
-Go to the documentation of this file.
1 //
2 // Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved.
3 //
4 // Permission is hereby granted, free of charge, to any person obtaining a copy
5 // of this software and associated documentation files (the "Software"), to deal
6 // in the Software without restriction, including without limitation the rights
7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 // copies of the Software, and to permit persons to whom the Software is
9 // furnished to do so, subject to the following conditions:
10 //
11 // The above copyright notice and this permission notice shall be included in
12 // all copies or substantial portions of the Software.
13 //
14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
20 // THE SOFTWARE.
21 //
22 
23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
25 
26 #ifdef __cplusplus
27 extern "C" {
28 #endif
29 
1614 /*
1615 Define this macro to 0/1 to disable/enable support for recording functionality,
1616 available through VmaAllocatorCreateInfo::pRecordSettings.
1617 */
1618 #ifndef VMA_RECORDING_ENABLED
1619  #ifdef _WIN32
1620  #define VMA_RECORDING_ENABLED 1
1621  #else
1622  #define VMA_RECORDING_ENABLED 0
1623  #endif
1624 #endif
1625 
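/* Usage sketch (illustrative; not part of the header): the block above means
   the recording feature can be forced on or off by defining the macro before
   this header is included, for example: */
#define VMA_RECORDING_ENABLED 0
#include "vk_mem_alloc.h"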
1626 #ifndef NOMINMAX
1627  #define NOMINMAX // For windows.h
1628 #endif
1629 
1630 #ifndef VULKAN_H_
1631  #include <vulkan/vulkan.h>
1632 #endif
1633 
1634 #if VMA_RECORDING_ENABLED
1635  #include <windows.h>
1636 #endif
1637 
1638 #if !defined(VMA_DEDICATED_ALLOCATION)
1639  #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
1640  #define VMA_DEDICATED_ALLOCATION 1
1641  #else
1642  #define VMA_DEDICATED_ALLOCATION 0
1643  #endif
1644 #endif
1645 
1655 VK_DEFINE_HANDLE(VmaAllocator)
1656 
1657 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
1659  VmaAllocator allocator,
1660  uint32_t memoryType,
1661  VkDeviceMemory memory,
1662  VkDeviceSize size);
1664 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
1665  VmaAllocator allocator,
1666  uint32_t memoryType,
1667  VkDeviceMemory memory,
1668  VkDeviceSize size);
1669 
1683 
1713 
1716 typedef VkFlags VmaAllocatorCreateFlags;
1717 
1722 typedef struct VmaVulkanFunctions {
1723  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
1724  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
1725  PFN_vkAllocateMemory vkAllocateMemory;
1726  PFN_vkFreeMemory vkFreeMemory;
1727  PFN_vkMapMemory vkMapMemory;
1728  PFN_vkUnmapMemory vkUnmapMemory;
1729  PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
1730  PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
1731  PFN_vkBindBufferMemory vkBindBufferMemory;
1732  PFN_vkBindImageMemory vkBindImageMemory;
1733  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
1734  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
1735  PFN_vkCreateBuffer vkCreateBuffer;
1736  PFN_vkDestroyBuffer vkDestroyBuffer;
1737  PFN_vkCreateImage vkCreateImage;
1738  PFN_vkDestroyImage vkDestroyImage;
1739  PFN_vkCmdCopyBuffer vkCmdCopyBuffer;
1740 #if VMA_DEDICATED_ALLOCATION
1741  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
1742  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
1743 #endif
1744 } VmaVulkanFunctions;
1745 
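/* Usage sketch (illustrative; not part of the header): when the Vulkan loader
   is linked statically, the struct above can simply be pointed at the global
   entry points. The remaining members follow the same pattern. */
VmaVulkanFunctions vulkanFunctions;
vulkanFunctions.vkGetPhysicalDeviceProperties = vkGetPhysicalDeviceProperties;
vulkanFunctions.vkGetPhysicalDeviceMemoryProperties = vkGetPhysicalDeviceMemoryProperties;
vulkanFunctions.vkAllocateMemory = vkAllocateMemory;
vulkanFunctions.vkFreeMemory = vkFreeMemory;
/* ... assign the rest of the members the same way ... */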
1747 typedef enum VmaRecordFlagBits {
1753 } VmaRecordFlagBits;
1754 
1757 typedef VkFlags VmaRecordFlags;
1758 
1760 typedef struct VmaRecordSettings
1761 {
1771  const char* pFilePath;
1772 } VmaRecordSettings;
1773 
1775 typedef struct VmaAllocatorCreateInfo
1776 {
1780 
1781  VkPhysicalDevice physicalDevice;
1783 
1784  VkDevice device;
1786 
1789 
1790  const VkAllocationCallbacks* pAllocationCallbacks;
1792 
1832  const VkDeviceSize* pHeapSizeLimit;
1852 } VmaAllocatorCreateInfo;
1853 
1855 VkResult vmaCreateAllocator(
1856  const VmaAllocatorCreateInfo* pCreateInfo,
1857  VmaAllocator* pAllocator);
1858 
1860 void vmaDestroyAllocator(
1861  VmaAllocator allocator);
1862 
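/* Usage sketch (illustrative; not part of the header): creating and destroying
   the allocator, assuming physicalDevice and device were created beforehand
   by the application. */
VmaAllocatorCreateInfo allocatorInfo = {0};
allocatorInfo.physicalDevice = physicalDevice;
allocatorInfo.device = device;

VmaAllocator allocator;
VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
/* ... use the allocator ... */
vmaDestroyAllocator(allocator);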
1867 void vmaGetPhysicalDeviceProperties(
1868  VmaAllocator allocator,
1869  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
1870 
1875 void vmaGetMemoryProperties(
1876  VmaAllocator allocator,
1877  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
1878 
1885 void vmaGetMemoryTypeProperties(
1886  VmaAllocator allocator,
1887  uint32_t memoryTypeIndex,
1888  VkMemoryPropertyFlags* pFlags);
1889 
1898 void vmaSetCurrentFrameIndex(
1899  VmaAllocator allocator,
1900  uint32_t frameIndex);
1901 
1904 typedef struct VmaStatInfo
1905 {
1907  uint32_t blockCount;
1913  VkDeviceSize usedBytes;
1915  VkDeviceSize unusedBytes;
1918 } VmaStatInfo;
1919 
1921 typedef struct VmaStats
1922 {
1923  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
1924  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
1926 } VmaStats;
1927 
1929 void vmaCalculateStats(
1930  VmaAllocator allocator,
1931  VmaStats* pStats);
1932 
1933 #define VMA_STATS_STRING_ENABLED 1
1934 
1935 #if VMA_STATS_STRING_ENABLED
1936 
1938 
1940 void vmaBuildStatsString(
1941  VmaAllocator allocator,
1942  char** ppStatsString,
1943  VkBool32 detailedMap);
1944 
1945 void vmaFreeStatsString(
1946  VmaAllocator allocator,
1947  char* pStatsString);
1948 
1949 #endif // #if VMA_STATS_STRING_ENABLED
1950 
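/* Usage sketch (illustrative; not part of the header): dumping the allocator
   state as a string and releasing it. Requires <stdio.h> for printf. */
#if VMA_STATS_STRING_ENABLED
char* statsString = NULL;
vmaBuildStatsString(allocator, &statsString, VK_TRUE); /* VK_TRUE = detailed map */
printf("%s\n", statsString);
vmaFreeStatsString(allocator, statsString);
#endif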
1959 VK_DEFINE_HANDLE(VmaPool)
1960 
1961 typedef enum VmaMemoryUsage
1962 {
2011 } VmaMemoryUsage;
2012 
2027 
2082 
2098 
2108 
2115 
2119 
2120 typedef struct VmaAllocationCreateInfo
2121 {
2134  VkMemoryPropertyFlags requiredFlags;
2139  VkMemoryPropertyFlags preferredFlags;
2147  uint32_t memoryTypeBits;
2160  void* pUserData;
2161 } VmaAllocationCreateInfo;
2162 
2179 VkResult vmaFindMemoryTypeIndex(
2180  VmaAllocator allocator,
2181  uint32_t memoryTypeBits,
2182  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2183  uint32_t* pMemoryTypeIndex);
2184 
2197 VkResult vmaFindMemoryTypeIndexForBufferInfo(
2198  VmaAllocator allocator,
2199  const VkBufferCreateInfo* pBufferCreateInfo,
2200  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2201  uint32_t* pMemoryTypeIndex);
2202 
2215 VkResult vmaFindMemoryTypeIndexForImageInfo(
2216  VmaAllocator allocator,
2217  const VkImageCreateInfo* pImageCreateInfo,
2218  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2219  uint32_t* pMemoryTypeIndex);
2220 
typedef enum VmaPoolCreateFlagBits {
 VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT = 0x00000002,
 VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT = 0x00000004,
 VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT = 0x00000008,
 VMA_POOL_CREATE_ALGORITHM_MASK =
 VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT |
 VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT,
 VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaPoolCreateFlagBits;

2278 typedef VkFlags VmaPoolCreateFlags;
2279 
2282 typedef struct VmaPoolCreateInfo {
 uint32_t memoryTypeIndex;
 VmaPoolCreateFlags flags;
2297  VkDeviceSize blockSize;
 size_t minBlockCount;
 size_t maxBlockCount;
 uint32_t frameInUseCount;
} VmaPoolCreateInfo;
2326 
2329 typedef struct VmaPoolStats {
2332  VkDeviceSize size;
2335  VkDeviceSize unusedSize;
 size_t allocationCount;
 size_t unusedRangeCount;
2348  VkDeviceSize unusedRangeSizeMax;
2351  size_t blockCount;
2352 } VmaPoolStats;
2353 
2360 VkResult vmaCreatePool(
2361  VmaAllocator allocator,
2362  const VmaPoolCreateInfo* pCreateInfo,
2363  VmaPool* pPool);
2364 
2367 void vmaDestroyPool(
2368  VmaAllocator allocator,
2369  VmaPool pool);
2370 
2377 void vmaGetPoolStats(
2378  VmaAllocator allocator,
2379  VmaPool pool,
2380  VmaPoolStats* pPoolStats);
2381 
2388 void vmaMakePoolAllocationsLost(
2389  VmaAllocator allocator,
2390  VmaPool pool,
2391  size_t* pLostAllocationCount);
2392 
2407 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool);
2408 
2433 VK_DEFINE_HANDLE(VmaAllocation)
2434 
2435 
2437 typedef struct VmaAllocationInfo {
2442  uint32_t memoryType;
2451  VkDeviceMemory deviceMemory;
2456  VkDeviceSize offset;
2461  VkDeviceSize size;
2475  void* pUserData;
2476 } VmaAllocationInfo;
2477 
2488 VkResult vmaAllocateMemory(
2489  VmaAllocator allocator,
2490  const VkMemoryRequirements* pVkMemoryRequirements,
2491  const VmaAllocationCreateInfo* pCreateInfo,
2492  VmaAllocation* pAllocation,
2493  VmaAllocationInfo* pAllocationInfo);
2494 
2514 VkResult vmaAllocateMemoryPages(
2515  VmaAllocator allocator,
2516  const VkMemoryRequirements* pVkMemoryRequirements,
2517  const VmaAllocationCreateInfo* pCreateInfo,
2518  size_t allocationCount,
2519  VmaAllocation* pAllocations,
2520  VmaAllocationInfo* pAllocationInfo);
2521 
2528 VkResult vmaAllocateMemoryForBuffer(
2529  VmaAllocator allocator,
2530  VkBuffer buffer,
2531  const VmaAllocationCreateInfo* pCreateInfo,
2532  VmaAllocation* pAllocation,
2533  VmaAllocationInfo* pAllocationInfo);
2534 
2536 VkResult vmaAllocateMemoryForImage(
2537  VmaAllocator allocator,
2538  VkImage image,
2539  const VmaAllocationCreateInfo* pCreateInfo,
2540  VmaAllocation* pAllocation,
2541  VmaAllocationInfo* pAllocationInfo);
2542 
2547 void vmaFreeMemory(
2548  VmaAllocator allocator,
2549  VmaAllocation allocation);
2550 
2561 void vmaFreeMemoryPages(
2562  VmaAllocator allocator,
2563  size_t allocationCount,
2564  VmaAllocation* pAllocations);
2565 
2586 VkResult vmaResizeAllocation(
2587  VmaAllocator allocator,
2588  VmaAllocation allocation,
2589  VkDeviceSize newSize);
2590 
2607 void vmaGetAllocationInfo(
2608  VmaAllocator allocator,
2609  VmaAllocation allocation,
2610  VmaAllocationInfo* pAllocationInfo);
2611 
2626 VkBool32 vmaTouchAllocation(
2627  VmaAllocator allocator,
2628  VmaAllocation allocation);
2629 
2643 void vmaSetAllocationUserData(
2644  VmaAllocator allocator,
2645  VmaAllocation allocation,
2646  void* pUserData);
2647 
2658 void vmaCreateLostAllocation(
2659  VmaAllocator allocator,
2660  VmaAllocation* pAllocation);
2661 
2696 VkResult vmaMapMemory(
2697  VmaAllocator allocator,
2698  VmaAllocation allocation,
2699  void** ppData);
2700 
2705 void vmaUnmapMemory(
2706  VmaAllocator allocator,
2707  VmaAllocation allocation);
2708 
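/*
Example (a minimal sketch; `allocation`, `srcData` and `dataSize` are hypothetical
names for an existing host-visible allocation and the data to upload):

    void* mappedData;
    if(vmaMapMemory(allocator, allocation, &mappedData) == VK_SUCCESS)
    {
        memcpy(mappedData, srcData, dataSize);
        vmaUnmapMemory(allocator, allocation);
    }
*/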
2721 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
2722 
2735 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
2736 
2753 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits);
2754 
2761 VK_DEFINE_HANDLE(VmaDefragmentationContext)
2762 
2763 typedef enum VmaDefragmentationFlagBits {
 VMA_DEFRAGMENTATION_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaDefragmentationFlagBits;
2767 typedef VkFlags VmaDefragmentationFlags;
2768 
2773 typedef struct VmaDefragmentationInfo2 {
 VmaDefragmentationFlags flags;
 uint32_t allocationCount;
 VmaAllocation* pAllocations;
 VkBool32* pAllocationsChanged;
2797  uint32_t poolCount;
 VmaPool* pPools;
2818  VkDeviceSize maxCpuBytesToMove;
 uint32_t maxCpuAllocationsToMove;
2828  VkDeviceSize maxGpuBytesToMove;
 uint32_t maxGpuAllocationsToMove;
2842  VkCommandBuffer commandBuffer;
2843 } VmaDefragmentationInfo2;
2844 
2849 typedef struct VmaDefragmentationInfo {
2854  VkDeviceSize maxBytesToMove;
 uint32_t maxAllocationsToMove;
} VmaDefragmentationInfo;
2861 
2863 typedef struct VmaDefragmentationStats {
2865  VkDeviceSize bytesMoved;
2867  VkDeviceSize bytesFreed;
 uint32_t allocationsMoved;
 uint32_t deviceMemoryBlocksFreed;
} VmaDefragmentationStats;
2873 
2900 VkResult vmaDefragmentationBegin(
2901  VmaAllocator allocator,
2902  const VmaDefragmentationInfo2* pInfo,
2903  VmaDefragmentationStats* pStats,
2904  VmaDefragmentationContext *pContext);
2905 
2911 VkResult vmaDefragmentationEnd(
2912  VmaAllocator allocator,
2913  VmaDefragmentationContext context);
2914 
2955 VkResult vmaDefragment(
2956  VmaAllocator allocator,
2957  VmaAllocation* pAllocations,
2958  size_t allocationCount,
2959  VkBool32* pAllocationsChanged,
2960  const VmaDefragmentationInfo *pDefragmentationInfo,
2961  VmaDefragmentationStats* pDefragmentationStats);
2962 
2975 VkResult vmaBindBufferMemory(
2976  VmaAllocator allocator,
2977  VmaAllocation allocation,
2978  VkBuffer buffer);
2979 
2992 VkResult vmaBindImageMemory(
2993  VmaAllocator allocator,
2994  VmaAllocation allocation,
2995  VkImage image);
2996 
3023 VkResult vmaCreateBuffer(
3024  VmaAllocator allocator,
3025  const VkBufferCreateInfo* pBufferCreateInfo,
3026  const VmaAllocationCreateInfo* pAllocationCreateInfo,
3027  VkBuffer* pBuffer,
3028  VmaAllocation* pAllocation,
3029  VmaAllocationInfo* pAllocationInfo);
3030 
3042 void vmaDestroyBuffer(
3043  VmaAllocator allocator,
3044  VkBuffer buffer,
3045  VmaAllocation allocation);
3046 
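/*
Example of the typical create/destroy pair above (a minimal sketch; assumes a
valid `allocator`):

    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufCreateInfo.size = 65536;
    bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkBuffer buf;
    VmaAllocation alloc;
    if(vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, nullptr) == VK_SUCCESS)
    {
        // ... use the buffer ...
        vmaDestroyBuffer(allocator, buf, alloc);
    }
*/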
3048 VkResult vmaCreateImage(
3049  VmaAllocator allocator,
3050  const VkImageCreateInfo* pImageCreateInfo,
3051  const VmaAllocationCreateInfo* pAllocationCreateInfo,
3052  VkImage* pImage,
3053  VmaAllocation* pAllocation,
3054  VmaAllocationInfo* pAllocationInfo);
3055 
3067 void vmaDestroyImage(
3068  VmaAllocator allocator,
3069  VkImage image,
3070  VmaAllocation allocation);
3071 
3072 #ifdef __cplusplus
3073 }
3074 #endif
3075 
3076 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
3077 
3078 // For Visual Studio IntelliSense.
3079 #if defined(__cplusplus) && defined(__INTELLISENSE__)
3080 #define VMA_IMPLEMENTATION
3081 #endif
3082 
3083 #ifdef VMA_IMPLEMENTATION
3084 #undef VMA_IMPLEMENTATION
3085 
3086 #include <cstdint>
3087 #include <cstdlib>
3088 #include <cstring>
3089 
3090 /*******************************************************************************
3091 CONFIGURATION SECTION
3092 
3093 Define some of these macros before each #include of this header, or change them
3094 here if you need behavior other than the default for your environment.
3095 */
3096 
3097 /*
3098 Define this macro to 1 to make the library fetch pointers to Vulkan functions
3099 internally, like:
3100 
3101  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
3102 
3103 Define to 0 if you are going to provide your own pointers to Vulkan functions via
3104 VmaAllocatorCreateInfo::pVulkanFunctions.
3105 */
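/*
For example, with VMA_STATIC_VULKAN_FUNCTIONS defined to 0, the pointers can be
provided like this (a minimal sketch; only two members shown, the remaining
members of VmaVulkanFunctions are filled the same way):

    VmaVulkanFunctions vulkanFunctions = {};
    vulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
    vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    // ...

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.pVulkanFunctions = &vulkanFunctions;
*/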
3106 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
3107 #define VMA_STATIC_VULKAN_FUNCTIONS 1
3108 #endif
3109 
3110 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
3111 //#define VMA_USE_STL_CONTAINERS 1
3112 
3113 /* Set this macro to 1 to make the library include and use the STL containers:
3114 std::pair, std::vector, std::list, std::unordered_map.
3115 
3116 Set it to 0 or leave it undefined to make the library use its own implementation of
3117 the containers.
3118 */
3119 #if VMA_USE_STL_CONTAINERS
3120  #define VMA_USE_STL_VECTOR 1
3121  #define VMA_USE_STL_UNORDERED_MAP 1
3122  #define VMA_USE_STL_LIST 1
3123 #endif
3124 
3125 #ifndef VMA_USE_STL_SHARED_MUTEX
3126  // Requires at least Visual Studio 2015 Update 2.
3127  #if defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023918
3128  #define VMA_USE_STL_SHARED_MUTEX 1
3129  #endif
3130 #endif
3131 
3132 #if VMA_USE_STL_VECTOR
3133  #include <vector>
3134 #endif
3135 
3136 #if VMA_USE_STL_UNORDERED_MAP
3137  #include <unordered_map>
3138 #endif
3139 
3140 #if VMA_USE_STL_LIST
3141  #include <list>
3142 #endif
3143 
3144 /*
3145 The following headers are used in this CONFIGURATION section only, so feel free to
3146 remove them if they are not needed.
3147 */
3148 #include <cassert> // for assert
3149 #include <algorithm> // for min, max
3150 #include <mutex>
3151 #include <atomic> // for std::atomic
3152 
3153 #ifndef VMA_NULL
3154  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
3155  #define VMA_NULL nullptr
3156 #endif
3157 
3158 #if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
3159 #include <cstdlib>
3160 void *aligned_alloc(size_t alignment, size_t size)
3161 {
3162  // alignment must be >= sizeof(void*)
3163  if(alignment < sizeof(void*))
3164  {
3165  alignment = sizeof(void*);
3166  }
3167 
3168  return memalign(alignment, size);
3169 }
3170 #elif defined(__APPLE__) || defined(__ANDROID__)
3171 #include <cstdlib>
3172 void *aligned_alloc(size_t alignment, size_t size)
3173 {
3174  // alignment must be >= sizeof(void*)
3175  if(alignment < sizeof(void*))
3176  {
3177  alignment = sizeof(void*);
3178  }
3179 
3180  void *pointer;
3181  if(posix_memalign(&pointer, alignment, size) == 0)
3182  return pointer;
3183  return VMA_NULL;
3184 }
3185 #endif
3186 
3187 // If your compiler is not compatible with C++11 and the definition of the
3188 // aligned_alloc() function is missing, uncommenting the following line may help:
3189 
3190 //#include <malloc.h>
3191 
3192 // Normal assert to check for programmer's errors, especially in Debug configuration.
3193 #ifndef VMA_ASSERT
3194  #ifdef _DEBUG
3195  #define VMA_ASSERT(expr) assert(expr)
3196  #else
3197  #define VMA_ASSERT(expr)
3198  #endif
3199 #endif
3200 
3201 // Assert that will be called very often, e.g. inside data structures like operator[].
3202 // Making it non-empty can make the program slow.
3203 #ifndef VMA_HEAVY_ASSERT
3204  #ifdef _DEBUG
3205  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
3206  #else
3207  #define VMA_HEAVY_ASSERT(expr)
3208  #endif
3209 #endif
3210 
3211 #ifndef VMA_ALIGN_OF
3212  #define VMA_ALIGN_OF(type) (__alignof(type))
3213 #endif
3214 
3215 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
3216  #if defined(_WIN32)
3217  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
3218  #else
3219  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
3220  #endif
3221 #endif
3222 
3223 #ifndef VMA_SYSTEM_FREE
3224  #if defined(_WIN32)
3225  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
3226  #else
3227  #define VMA_SYSTEM_FREE(ptr) free(ptr)
3228  #endif
3229 #endif
3230 
3231 #ifndef VMA_MIN
3232  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
3233 #endif
3234 
3235 #ifndef VMA_MAX
3236  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
3237 #endif
3238 
3239 #ifndef VMA_SWAP
3240  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
3241 #endif
3242 
3243 #ifndef VMA_SORT
3244  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
3245 #endif
3246 
3247 #ifndef VMA_DEBUG_LOG
3248  #define VMA_DEBUG_LOG(format, ...)
3249  /*
3250  #define VMA_DEBUG_LOG(format, ...) do { \
3251  printf(format, __VA_ARGS__); \
3252  printf("\n"); \
3253  } while(false)
3254  */
3255 #endif
3256 
3257 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
3258 #if VMA_STATS_STRING_ENABLED
3259  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
3260  {
3261  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
3262  }
3263  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
3264  {
3265  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
3266  }
3267  static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
3268  {
3269  snprintf(outStr, strLen, "%p", ptr);
3270  }
3271 #endif
3272 
3273 #ifndef VMA_MUTEX
3274  class VmaMutex
3275  {
3276  public:
3277  void Lock() { m_Mutex.lock(); }
3278  void Unlock() { m_Mutex.unlock(); }
3279  private:
3280  std::mutex m_Mutex;
3281  };
3282  #define VMA_MUTEX VmaMutex
3283 #endif
3284 
3285 // Read-write mutex, where "read" is shared access, "write" is exclusive access.
3286 #ifndef VMA_RW_MUTEX
3287  #if VMA_USE_STL_SHARED_MUTEX
3288  // Use std::shared_mutex from C++17.
3289  #include <shared_mutex>
3290  class VmaRWMutex
3291  {
3292  public:
3293  void LockRead() { m_Mutex.lock_shared(); }
3294  void UnlockRead() { m_Mutex.unlock_shared(); }
3295  void LockWrite() { m_Mutex.lock(); }
3296  void UnlockWrite() { m_Mutex.unlock(); }
3297  private:
3298  std::shared_mutex m_Mutex;
3299  };
3300  #define VMA_RW_MUTEX VmaRWMutex
3301  #elif defined(_WIN32)
3302  // Use SRWLOCK from WinAPI.
3303  class VmaRWMutex
3304  {
3305  public:
3306  VmaRWMutex() { InitializeSRWLock(&m_Lock); }
3307  void LockRead() { AcquireSRWLockShared(&m_Lock); }
3308  void UnlockRead() { ReleaseSRWLockShared(&m_Lock); }
3309  void LockWrite() { AcquireSRWLockExclusive(&m_Lock); }
3310  void UnlockWrite() { ReleaseSRWLockExclusive(&m_Lock); }
3311  private:
3312  SRWLOCK m_Lock;
3313  };
3314  #define VMA_RW_MUTEX VmaRWMutex
3315  #else
3316  // Less efficient fallback: Use normal mutex.
3317  class VmaRWMutex
3318  {
3319  public:
3320  void LockRead() { m_Mutex.Lock(); }
3321  void UnlockRead() { m_Mutex.Unlock(); }
3322  void LockWrite() { m_Mutex.Lock(); }
3323  void UnlockWrite() { m_Mutex.Unlock(); }
3324  private:
3325  VMA_MUTEX m_Mutex;
3326  };
3327  #define VMA_RW_MUTEX VmaRWMutex
3328  #endif // #if VMA_USE_STL_SHARED_MUTEX
3329 #endif // #ifndef VMA_RW_MUTEX
3330 
3331 /*
3332 If providing your own implementation, you need to implement a subset of std::atomic:
3333 
3334 - Constructor(uint32_t desired)
3335 - uint32_t load() const
3336 - void store(uint32_t desired)
3337 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
3338 */
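/*
For example, a minimal replacement could look like this (a sketch for
single-threaded use only; MyAtomicUint32 is a hypothetical name):

    class MyAtomicUint32
    {
    public:
        MyAtomicUint32(uint32_t desired) : m_Value(desired) { }
        uint32_t load() const { return m_Value; }
        void store(uint32_t desired) { m_Value = desired; }
        bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
        {
            if(m_Value == expected) { m_Value = desired; return true; }
            expected = m_Value;
            return false;
        }
    private:
        uint32_t m_Value;
    };
    #define VMA_ATOMIC_UINT32 MyAtomicUint32
*/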
3339 #ifndef VMA_ATOMIC_UINT32
3340  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
3341 #endif
3342 
3343 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
3344 
3348  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
3349 #endif
3350 
3351 #ifndef VMA_DEBUG_ALIGNMENT
3352 
3356  #define VMA_DEBUG_ALIGNMENT (1)
3357 #endif
3358 
3359 #ifndef VMA_DEBUG_MARGIN
3360 
3364  #define VMA_DEBUG_MARGIN (0)
3365 #endif
3366 
3367 #ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
3368 
3372  #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
3373 #endif
3374 
3375 #ifndef VMA_DEBUG_DETECT_CORRUPTION
3376 
3381  #define VMA_DEBUG_DETECT_CORRUPTION (0)
3382 #endif
3383 
3384 #ifndef VMA_DEBUG_GLOBAL_MUTEX
3385 
3389  #define VMA_DEBUG_GLOBAL_MUTEX (0)
3390 #endif
3391 
3392 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
3393 
3397  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
3398 #endif
3399 
3400 #ifndef VMA_SMALL_HEAP_MAX_SIZE
3401  #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
3403 #endif
3404 
3405 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
3406  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
3408 #endif
3409 
3410 #ifndef VMA_CLASS_NO_COPY
3411  #define VMA_CLASS_NO_COPY(className) \
3412  private: \
3413  className(const className&) = delete; \
3414  className& operator=(const className&) = delete;
3415 #endif
3416 
3417 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
3418 
3419 // Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
3420 static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;
3421 
3422 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
3423 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
3424 
3425 /*******************************************************************************
3426 END OF CONFIGURATION
3427 */
3428 
3429 static const uint32_t VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET = 0x10000000u;
3430 
3431 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
3432  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
3433 
3434 // Returns number of bits set to 1 in (v).
3435 static inline uint32_t VmaCountBitsSet(uint32_t v)
3436 {
3437  uint32_t c = v - ((v >> 1) & 0x55555555);
3438  c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
3439  c = ((c >> 4) + c) & 0x0F0F0F0F;
3440  c = ((c >> 8) + c) & 0x00FF00FF;
3441  c = ((c >> 16) + c) & 0x0000FFFF;
3442  return c;
3443 }
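// For example: VmaCountBitsSet(0x0000000Bu) == 3, because 0xB is binary 1011.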
3444 
3445 // Aligns given value up to the nearest multiple of align value. For example: VmaAlignUp(11, 8) = 16.
3446 // Use types like uint32_t, uint64_t as T.
3447 template <typename T>
3448 static inline T VmaAlignUp(T val, T align)
3449 {
3450  return (val + align - 1) / align * align;
3451 }
3452 // Aligns given value down to the nearest multiple of align value. For example: VmaAlignDown(11, 8) = 8.
3453 // Use types like uint32_t, uint64_t as T.
3454 template <typename T>
3455 static inline T VmaAlignDown(T val, T align)
3456 {
3457  return val / align * align;
3458 }
3459 
3460 // Division with mathematical rounding to the nearest integer.
3461 template <typename T>
3462 static inline T VmaRoundDiv(T x, T y)
3463 {
3464  return (x + (y / (T)2)) / y;
3465 }
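// For example: VmaRoundDiv(10u, 4u) == 3 and VmaRoundDiv(9u, 4u) == 2 (intended for unsigned T).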
3466 
3467 /*
3468 Returns true if the given number is a power of two.
3469 T must be an unsigned integer, or a signed integer that is always nonnegative.
3470 Returns true for 0.
3471 */
3472 template <typename T>
3473 inline bool VmaIsPow2(T x)
3474 {
3475  return (x & (x-1)) == 0;
3476 }
3477 
3478 // Returns the smallest power of 2 greater than or equal to v.
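// For example: VmaNextPow2(17u) == 32, VmaNextPow2(32u) == 32.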
3479 static inline uint32_t VmaNextPow2(uint32_t v)
3480 {
3481  v--;
3482  v |= v >> 1;
3483  v |= v >> 2;
3484  v |= v >> 4;
3485  v |= v >> 8;
3486  v |= v >> 16;
3487  v++;
3488  return v;
3489 }
3490 static inline uint64_t VmaNextPow2(uint64_t v)
3491 {
3492  v--;
3493  v |= v >> 1;
3494  v |= v >> 2;
3495  v |= v >> 4;
3496  v |= v >> 8;
3497  v |= v >> 16;
3498  v |= v >> 32;
3499  v++;
3500  return v;
3501 }
3502 
3503 // Returns the largest power of 2 less than or equal to v.
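// For example: VmaPrevPow2(17u) == 16, VmaPrevPow2(16u) == 16.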
3504 static inline uint32_t VmaPrevPow2(uint32_t v)
3505 {
3506  v |= v >> 1;
3507  v |= v >> 2;
3508  v |= v >> 4;
3509  v |= v >> 8;
3510  v |= v >> 16;
3511  v = v ^ (v >> 1);
3512  return v;
3513 }
3514 static inline uint64_t VmaPrevPow2(uint64_t v)
3515 {
3516  v |= v >> 1;
3517  v |= v >> 2;
3518  v |= v >> 4;
3519  v |= v >> 8;
3520  v |= v >> 16;
3521  v |= v >> 32;
3522  v = v ^ (v >> 1);
3523  return v;
3524 }
3525 
3526 static inline bool VmaStrIsEmpty(const char* pStr)
3527 {
3528  return pStr == VMA_NULL || *pStr == '\0';
3529 }
3530 
3531 static const char* VmaAlgorithmToStr(uint32_t algorithm)
3532 {
3533  switch(algorithm)
3534  {
3535  case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
3536  return "Linear";
3537  case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
3538  return "Buddy";
3539  case 0:
3540  return "Default";
3541  default:
3542  VMA_ASSERT(0);
3543  return "";
3544  }
3545 }
3546 
3547 #ifndef VMA_SORT
3548 
3549 template<typename Iterator, typename Compare>
3550 Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
3551 {
3552  Iterator centerValue = end; --centerValue;
3553  Iterator insertIndex = beg;
3554  for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
3555  {
3556  if(cmp(*memTypeIndex, *centerValue))
3557  {
3558  if(insertIndex != memTypeIndex)
3559  {
3560  VMA_SWAP(*memTypeIndex, *insertIndex);
3561  }
3562  ++insertIndex;
3563  }
3564  }
3565  if(insertIndex != centerValue)
3566  {
3567  VMA_SWAP(*insertIndex, *centerValue);
3568  }
3569  return insertIndex;
3570 }
3571 
3572 template<typename Iterator, typename Compare>
3573 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
3574 {
3575  if(beg < end)
3576  {
3577  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
3578  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
3579  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
3580  }
3581 }
3582 
3583 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
3584 
3585 #endif // #ifndef VMA_SORT
3586 
3587 /*
3588 Returns true if two memory blocks occupy overlapping pages.
3589 ResourceA must be at a lower memory offset than ResourceB.
3590 
3591 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
3592 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
3593 */
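// For example, with pageSize = 4096: a resource at offset 0 with size 4000 ends on page 0,
// so a second resource starting at offset 4000 shares that page (returns true),
// while one starting at offset 4096 begins on the next page (returns false).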
3594 static inline bool VmaBlocksOnSamePage(
3595  VkDeviceSize resourceAOffset,
3596  VkDeviceSize resourceASize,
3597  VkDeviceSize resourceBOffset,
3598  VkDeviceSize pageSize)
3599 {
3600  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
3601  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
3602  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
3603  VkDeviceSize resourceBStart = resourceBOffset;
3604  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
3605  return resourceAEndPage == resourceBStartPage;
3606 }
3607 
3608 enum VmaSuballocationType
3609 {
3610  VMA_SUBALLOCATION_TYPE_FREE = 0,
3611  VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
3612  VMA_SUBALLOCATION_TYPE_BUFFER = 2,
3613  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
3614  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
3615  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
3616  VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
3617 };
3618 
3619 /*
3620 Returns true if given suballocation types could conflict and must respect
3621 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
3622 or linear image and another one is optimal image. If type is unknown, behave
3623 conservatively.
3624 */
3625 static inline bool VmaIsBufferImageGranularityConflict(
3626  VmaSuballocationType suballocType1,
3627  VmaSuballocationType suballocType2)
3628 {
3629  if(suballocType1 > suballocType2)
3630  {
3631  VMA_SWAP(suballocType1, suballocType2);
3632  }
3633 
3634  switch(suballocType1)
3635  {
3636  case VMA_SUBALLOCATION_TYPE_FREE:
3637  return false;
3638  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
3639  return true;
3640  case VMA_SUBALLOCATION_TYPE_BUFFER:
3641  return
3642  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
3643  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3644  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
3645  return
3646  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
3647  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
3648  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3649  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
3650  return
3651  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3652  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
3653  return false;
3654  default:
3655  VMA_ASSERT(0);
3656  return true;
3657  }
3658 }
3659 
3660 static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
3661 {
3662  uint32_t* pDst = (uint32_t*)((char*)pData + offset);
3663  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
3664  for(size_t i = 0; i < numberCount; ++i, ++pDst)
3665  {
3666  *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
3667  }
3668 }
3669 
3670 static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
3671 {
3672  const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
3673  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
3674  for(size_t i = 0; i < numberCount; ++i, ++pSrc)
3675  {
3676  if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
3677  {
3678  return false;
3679  }
3680  }
3681  return true;
3682 }
3683 
3684 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
3685 struct VmaMutexLock
3686 {
3687  VMA_CLASS_NO_COPY(VmaMutexLock)
3688 public:
3689  VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
3690  m_pMutex(useMutex ? &mutex : VMA_NULL)
3691  { if(m_pMutex) { m_pMutex->Lock(); } }
3692  ~VmaMutexLock()
3693  { if(m_pMutex) { m_pMutex->Unlock(); } }
3694 private:
3695  VMA_MUTEX* m_pMutex;
3696 };
3697 
3698 // Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for reading.
3699 struct VmaMutexLockRead
3700 {
3701  VMA_CLASS_NO_COPY(VmaMutexLockRead)
3702 public:
3703  VmaMutexLockRead(VMA_RW_MUTEX& mutex, bool useMutex) :
3704  m_pMutex(useMutex ? &mutex : VMA_NULL)
3705  { if(m_pMutex) { m_pMutex->LockRead(); } }
3706  ~VmaMutexLockRead() { if(m_pMutex) { m_pMutex->UnlockRead(); } }
3707 private:
3708  VMA_RW_MUTEX* m_pMutex;
3709 };
3710 
3711 // Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for writing.
3712 struct VmaMutexLockWrite
3713 {
3714  VMA_CLASS_NO_COPY(VmaMutexLockWrite)
3715 public:
3716  VmaMutexLockWrite(VMA_RW_MUTEX& mutex, bool useMutex) :
3717  m_pMutex(useMutex ? &mutex : VMA_NULL)
3718  { if(m_pMutex) { m_pMutex->LockWrite(); } }
3719  ~VmaMutexLockWrite() { if(m_pMutex) { m_pMutex->UnlockWrite(); } }
3720 private:
3721  VMA_RW_MUTEX* m_pMutex;
3722 };
3723 
3724 #if VMA_DEBUG_GLOBAL_MUTEX
3725  static VMA_MUTEX gDebugGlobalMutex;
3726  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
3727 #else
3728  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
3729 #endif
3730 
3731 // Minimum size of a free suballocation to register it in the free suballocation collection.
3732 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
3733 
3734 /*
3735 Performs binary search and returns an iterator to the first element that is greater
3736 than or equal to (key), according to comparison (cmp).
3737 
3738 Cmp should return true if its first argument is less than its second argument.
3739 
3740 The returned iterator points at the found element if it is present in the collection,
3741 or at the place where a new element with value (key) should be inserted.
3742 */
3743 template <typename CmpLess, typename IterT, typename KeyT>
3744 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpLess cmp)
3745 {
3746  size_t down = 0, up = (end - beg);
3747  while(down < up)
3748  {
3749  const size_t mid = (down + up) / 2;
3750  if(cmp(*(beg+mid), key))
3751  {
3752  down = mid + 1;
3753  }
3754  else
3755  {
3756  up = mid;
3757  }
3758  }
3759  return beg + down;
3760 }
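// For example (hypothetical data): searching a sorted array {1, 3, 5} for key 4 with
// cmp = operator< returns an iterator to 5 - the position where 4 would be inserted.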
3761 
3762 /*
3763 Returns true if all pointers in the array are non-null and unique.
3764 Warning! O(n^2) complexity. Use only inside VMA_HEAVY_ASSERT.
3765 T must be pointer type, e.g. VmaAllocation, VmaPool.
3766 */
3767 template<typename T>
3768 static bool VmaValidatePointerArray(uint32_t count, const T* arr)
3769 {
3770  for(uint32_t i = 0; i < count; ++i)
3771  {
3772  const T iPtr = arr[i];
3773  if(iPtr == VMA_NULL)
3774  {
3775  return false;
3776  }
3777  for(uint32_t j = i + 1; j < count; ++j)
3778  {
3779  if(iPtr == arr[j])
3780  {
3781  return false;
3782  }
3783  }
3784  }
3785  return true;
3786 }
3787 
3789 // Memory allocation
3790 
3791 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
3792 {
3793  if((pAllocationCallbacks != VMA_NULL) &&
3794  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
3795  {
3796  return (*pAllocationCallbacks->pfnAllocation)(
3797  pAllocationCallbacks->pUserData,
3798  size,
3799  alignment,
3800  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
3801  }
3802  else
3803  {
3804  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
3805  }
3806 }
3807 
3808 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
3809 {
3810  if((pAllocationCallbacks != VMA_NULL) &&
3811  (pAllocationCallbacks->pfnFree != VMA_NULL))
3812  {
3813  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
3814  }
3815  else
3816  {
3817  VMA_SYSTEM_FREE(ptr);
3818  }
3819 }
3820 
3821 template<typename T>
3822 static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
3823 {
3824  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
3825 }
3826 
3827 template<typename T>
3828 static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
3829 {
3830  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
3831 }
3832 
3833 #define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
3834 
3835 #define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
3836 
3837 template<typename T>
3838 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
3839 {
3840  ptr->~T();
3841  VmaFree(pAllocationCallbacks, ptr);
3842 }
3843 
3844 template<typename T>
3845 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
3846 {
3847  if(ptr != VMA_NULL)
3848  {
3849  for(size_t i = count; i--; )
3850  {
3851  ptr[i].~T();
3852  }
3853  VmaFree(pAllocationCallbacks, ptr);
3854  }
3855 }
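/*
Usage sketch for the helpers above (MyPod is a hypothetical, trivially
constructible type):

    MyPod* obj = vma_new(pAllocationCallbacks, MyPod)();
    vma_delete(pAllocationCallbacks, obj);
*/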
3856 
3857 // STL-compatible allocator.
3858 template<typename T>
3859 class VmaStlAllocator
3860 {
3861 public:
3862  const VkAllocationCallbacks* const m_pCallbacks;
3863  typedef T value_type;
3864 
3865  VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
3866  template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }
3867 
3868  T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
3869  void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
3870 
3871  template<typename U>
3872  bool operator==(const VmaStlAllocator<U>& rhs) const
3873  {
3874  return m_pCallbacks == rhs.m_pCallbacks;
3875  }
3876  template<typename U>
3877  bool operator!=(const VmaStlAllocator<U>& rhs) const
3878  {
3879  return m_pCallbacks != rhs.m_pCallbacks;
3880  }
3881 
3882  VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
3883 };
3884 
3885 #if VMA_USE_STL_VECTOR
3886 
3887 #define VmaVector std::vector
3888 
3889 template<typename T, typename allocatorT>
3890 static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
3891 {
3892  vec.insert(vec.begin() + index, item);
3893 }
3894 
3895 template<typename T, typename allocatorT>
3896 static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
3897 {
3898  vec.erase(vec.begin() + index);
3899 }
3900 
3901 #else // #if VMA_USE_STL_VECTOR
3902 
3903 /* Class with interface compatible with subset of std::vector.
3904 T must be POD because constructors and destructors are not called and memcpy is
3905 used for these objects. */
3906 template<typename T, typename AllocatorT>
3907 class VmaVector
3908 {
3909 public:
3910  typedef T value_type;
3911 
3912  VmaVector(const AllocatorT& allocator) :
3913  m_Allocator(allocator),
3914  m_pArray(VMA_NULL),
3915  m_Count(0),
3916  m_Capacity(0)
3917  {
3918  }
3919 
3920  VmaVector(size_t count, const AllocatorT& allocator) :
3921  m_Allocator(allocator),
3922  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
3923  m_Count(count),
3924  m_Capacity(count)
3925  {
3926  }
3927 
3928  VmaVector(const VmaVector<T, AllocatorT>& src) :
3929  m_Allocator(src.m_Allocator),
3930  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
3931  m_Count(src.m_Count),
3932  m_Capacity(src.m_Count)
3933  {
3934  if(m_Count != 0)
3935  {
3936  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
3937  }
3938  }
3939 
3940  ~VmaVector()
3941  {
3942  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
3943  }
3944 
3945  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
3946  {
3947  if(&rhs != this)
3948  {
3949  resize(rhs.m_Count);
3950  if(m_Count != 0)
3951  {
3952  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
3953  }
3954  }
3955  return *this;
3956  }
3957 
3958  bool empty() const { return m_Count == 0; }
3959  size_t size() const { return m_Count; }
3960  T* data() { return m_pArray; }
3961  const T* data() const { return m_pArray; }
3962 
3963  T& operator[](size_t index)
3964  {
3965  VMA_HEAVY_ASSERT(index < m_Count);
3966  return m_pArray[index];
3967  }
3968  const T& operator[](size_t index) const
3969  {
3970  VMA_HEAVY_ASSERT(index < m_Count);
3971  return m_pArray[index];
3972  }
3973 
3974  T& front()
3975  {
3976  VMA_HEAVY_ASSERT(m_Count > 0);
3977  return m_pArray[0];
3978  }
3979  const T& front() const
3980  {
3981  VMA_HEAVY_ASSERT(m_Count > 0);
3982  return m_pArray[0];
3983  }
3984  T& back()
3985  {
3986  VMA_HEAVY_ASSERT(m_Count > 0);
3987  return m_pArray[m_Count - 1];
3988  }
3989  const T& back() const
3990  {
3991  VMA_HEAVY_ASSERT(m_Count > 0);
3992  return m_pArray[m_Count - 1];
3993  }
3994 
3995  void reserve(size_t newCapacity, bool freeMemory = false)
3996  {
3997  newCapacity = VMA_MAX(newCapacity, m_Count);
3998 
3999  if((newCapacity < m_Capacity) && !freeMemory)
4000  {
4001  newCapacity = m_Capacity;
4002  }
4003 
4004  if(newCapacity != m_Capacity)
4005  {
4006  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
4007  if(m_Count != 0)
4008  {
4009  memcpy(newArray, m_pArray, m_Count * sizeof(T));
4010  }
4011  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
4012  m_Capacity = newCapacity;
4013  m_pArray = newArray;
4014  }
4015  }
4016 
4017  void resize(size_t newCount, bool freeMemory = false)
4018  {
4019  size_t newCapacity = m_Capacity;
4020  if(newCount > m_Capacity)
4021  {
4022  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
4023  }
4024  else if(freeMemory)
4025  {
4026  newCapacity = newCount;
4027  }
4028 
4029  if(newCapacity != m_Capacity)
4030  {
4031  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
4032  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
4033  if(elementsToCopy != 0)
4034  {
4035  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
4036  }
4037  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
4038  m_Capacity = newCapacity;
4039  m_pArray = newArray;
4040  }
4041 
4042  m_Count = newCount;
4043  }
4044 
4045  void clear(bool freeMemory = false)
4046  {
4047  resize(0, freeMemory);
4048  }
4049 
4050  void insert(size_t index, const T& src)
4051  {
4052  VMA_HEAVY_ASSERT(index <= m_Count);
4053  const size_t oldCount = size();
4054  resize(oldCount + 1);
4055  if(index < oldCount)
4056  {
4057  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
4058  }
4059  m_pArray[index] = src;
4060  }
4061 
4062  void remove(size_t index)
4063  {
4064  VMA_HEAVY_ASSERT(index < m_Count);
4065  const size_t oldCount = size();
4066  if(index < oldCount - 1)
4067  {
4068  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
4069  }
4070  resize(oldCount - 1);
4071  }
4072 
4073  void push_back(const T& src)
4074  {
4075  const size_t newIndex = size();
4076  resize(newIndex + 1);
4077  m_pArray[newIndex] = src;
4078  }
4079 
4080  void pop_back()
4081  {
4082  VMA_HEAVY_ASSERT(m_Count > 0);
4083  resize(size() - 1);
4084  }
4085 
4086  void push_front(const T& src)
4087  {
4088  insert(0, src);
4089  }
4090 
4091  void pop_front()
4092  {
4093  VMA_HEAVY_ASSERT(m_Count > 0);
4094  remove(0);
4095  }
4096 
4097  typedef T* iterator;
4098 
4099  iterator begin() { return m_pArray; }
4100  iterator end() { return m_pArray + m_Count; }
4101 
4102 private:
4103  AllocatorT m_Allocator;
4104  T* m_pArray;
4105  size_t m_Count;
4106  size_t m_Capacity;
4107 };
4108 
4109 template<typename T, typename allocatorT>
4110 static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
4111 {
4112  vec.insert(index, item);
4113 }
4114 
4115 template<typename T, typename allocatorT>
4116 static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
4117 {
4118  vec.remove(index);
4119 }
4120 
4121 #endif // #if VMA_USE_STL_VECTOR
4122 
4123 template<typename CmpLess, typename VectorT>
4124 size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
4125 {
4126  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
4127  vector.data(),
4128  vector.data() + vector.size(),
4129  value,
4130  CmpLess()) - vector.data();
4131  VmaVectorInsert(vector, indexToInsert, value);
4132  return indexToInsert;
4133 }
4134 
4135 template<typename CmpLess, typename VectorT>
4136 bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
4137 {
4138  CmpLess comparator;
4139  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
4140  vector.begin(),
4141  vector.end(),
4142  value,
4143  comparator);
4144  if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
4145  {
4146  size_t indexToRemove = it - vector.begin();
4147  VmaVectorRemove(vector, indexToRemove);
4148  return true;
4149  }
4150  return false;
4151 }
4152 
4153 template<typename CmpLess, typename IterT, typename KeyT>
4154 IterT VmaVectorFindSorted(const IterT& beg, const IterT& end, const KeyT& value)
4155 {
4156  CmpLess comparator;
4157  IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
4158  beg, end, value, comparator);
4159  if(it == end ||
4160  (!comparator(*it, value) && !comparator(value, *it)))
4161  {
4162  return it;
4163  }
4164  return end;
4165 }
4166 
4168 // class VmaPoolAllocator
4169 
4170 /*
4171 Allocator for objects of type T, using a list of arrays (pools) to speed up
4172 allocation. The number of elements that can be allocated is not bounded, because
4173 the allocator can create multiple blocks.
4174 */
4175 template<typename T>
4176 class VmaPoolAllocator
4177 {
4178  VMA_CLASS_NO_COPY(VmaPoolAllocator)
4179 public:
4180  VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
4181  ~VmaPoolAllocator();
4182  void Clear();
4183  T* Alloc();
4184  void Free(T* ptr);
4185 
4186 private:
4187  union Item
4188  {
4189  uint32_t NextFreeIndex;
4190  T Value;
4191  };
4192 
4193  struct ItemBlock
4194  {
4195  Item* pItems;
4196  uint32_t FirstFreeIndex;
4197  };
4198 
4199  const VkAllocationCallbacks* m_pAllocationCallbacks;
4200  size_t m_ItemsPerBlock;
4201  VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;
4202 
4203  ItemBlock& CreateNewBlock();
4204 };
4205 
4206 template<typename T>
4207 VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
4208  m_pAllocationCallbacks(pAllocationCallbacks),
4209  m_ItemsPerBlock(itemsPerBlock),
4210  m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
4211 {
4212  VMA_ASSERT(itemsPerBlock > 0);
4213 }
4214 
4215 template<typename T>
4216 VmaPoolAllocator<T>::~VmaPoolAllocator()
4217 {
4218  Clear();
4219 }
4220 
4221 template<typename T>
4222 void VmaPoolAllocator<T>::Clear()
4223 {
4224  for(size_t i = m_ItemBlocks.size(); i--; )
4225  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
4226  m_ItemBlocks.clear();
4227 }
4228 
4229 template<typename T>
4230 T* VmaPoolAllocator<T>::Alloc()
4231 {
4232  for(size_t i = m_ItemBlocks.size(); i--; )
4233  {
4234  ItemBlock& block = m_ItemBlocks[i];
4235  // This block has some free items: Use the first one.
4236  if(block.FirstFreeIndex != UINT32_MAX)
4237  {
4238  Item* const pItem = &block.pItems[block.FirstFreeIndex];
4239  block.FirstFreeIndex = pItem->NextFreeIndex;
4240  return &pItem->Value;
4241  }
4242  }
4243 
4244  // No block has a free item: Create a new one and use it.
4245  ItemBlock& newBlock = CreateNewBlock();
4246  Item* const pItem = &newBlock.pItems[0];
4247  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
4248  return &pItem->Value;
4249 }
4250 
4251 template<typename T>
4252 void VmaPoolAllocator<T>::Free(T* ptr)
4253 {
4254  // Search all memory blocks to find ptr.
4255  for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
4256  {
4257  ItemBlock& block = m_ItemBlocks[i];
4258 
4259  // Casting to union.
4260  Item* pItemPtr;
4261  memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
4262 
4263  // Check if pItemPtr is in address range of this block.
4264  if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
4265  {
4266  const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
4267  pItemPtr->NextFreeIndex = block.FirstFreeIndex;
4268  block.FirstFreeIndex = index;
4269  return;
4270  }
4271  }
4272  VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
4273 }
4274 
4275 template<typename T>
4276 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
4277 {
4278  ItemBlock newBlock = {
4279  vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };
4280 
4281  m_ItemBlocks.push_back(newBlock);
4282 
4283  // Setup singly-linked list of all free items in this block.
4284  for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
4285  newBlock.pItems[i].NextFreeIndex = i + 1;
4286  newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
4287  return m_ItemBlocks.back();
4288 }
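/*
Usage sketch (hypothetical): a pool of uint32_t items, 64 per block.

    VmaPoolAllocator<uint32_t> pool(pAllocationCallbacks, 64);
    uint32_t* p = pool.Alloc();
    *p = 42;
    pool.Free(p);
*/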
4289 
4291 // class VmaRawList, VmaList
4292 
4293 #if VMA_USE_STL_LIST
4294 
4295 #define VmaList std::list
4296 
4297 #else // #if VMA_USE_STL_LIST
4298 
4299 template<typename T>
4300 struct VmaListItem
4301 {
4302  VmaListItem* pPrev;
4303  VmaListItem* pNext;
4304  T Value;
4305 };
4306 
4307 // Doubly linked list.
4308 template<typename T>
4309 class VmaRawList
4310 {
4311  VMA_CLASS_NO_COPY(VmaRawList)
4312 public:
4313  typedef VmaListItem<T> ItemType;
4314 
4315  VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
4316  ~VmaRawList();
4317  void Clear();
4318 
4319  size_t GetCount() const { return m_Count; }
4320  bool IsEmpty() const { return m_Count == 0; }
4321 
4322  ItemType* Front() { return m_pFront; }
4323  const ItemType* Front() const { return m_pFront; }
4324  ItemType* Back() { return m_pBack; }
4325  const ItemType* Back() const { return m_pBack; }
4326 
4327  ItemType* PushBack();
4328  ItemType* PushFront();
4329  ItemType* PushBack(const T& value);
4330  ItemType* PushFront(const T& value);
4331  void PopBack();
4332  void PopFront();
4333 
4334  // Item can be null - it means PushBack.
4335  ItemType* InsertBefore(ItemType* pItem);
4336  // Item can be null - it means PushFront.
4337  ItemType* InsertAfter(ItemType* pItem);
4338 
4339  ItemType* InsertBefore(ItemType* pItem, const T& value);
4340  ItemType* InsertAfter(ItemType* pItem, const T& value);
4341 
4342  void Remove(ItemType* pItem);
4343 
4344 private:
4345  const VkAllocationCallbacks* const m_pAllocationCallbacks;
4346  VmaPoolAllocator<ItemType> m_ItemAllocator;
4347  ItemType* m_pFront;
4348  ItemType* m_pBack;
4349  size_t m_Count;
4350 };
4351 
4352 template<typename T>
4353 VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
4354  m_pAllocationCallbacks(pAllocationCallbacks),
4355  m_ItemAllocator(pAllocationCallbacks, 128),
4356  m_pFront(VMA_NULL),
4357  m_pBack(VMA_NULL),
4358  m_Count(0)
4359 {
4360 }
4361 
4362 template<typename T>
4363 VmaRawList<T>::~VmaRawList()
4364 {
4365  // Intentionally not calling Clear, because that would spend unnecessary
4366  // computation returning all items to m_ItemAllocator as free.
4367 }
4368 
4369 template<typename T>
4370 void VmaRawList<T>::Clear()
4371 {
4372  if(IsEmpty() == false)
4373  {
4374  ItemType* pItem = m_pBack;
4375  while(pItem != VMA_NULL)
4376  {
4377  ItemType* const pPrevItem = pItem->pPrev;
4378  m_ItemAllocator.Free(pItem);
4379  pItem = pPrevItem;
4380  }
4381  m_pFront = VMA_NULL;
4382  m_pBack = VMA_NULL;
4383  m_Count = 0;
4384  }
4385 }
4386 
4387 template<typename T>
4388 VmaListItem<T>* VmaRawList<T>::PushBack()
4389 {
4390  ItemType* const pNewItem = m_ItemAllocator.Alloc();
4391  pNewItem->pNext = VMA_NULL;
4392  if(IsEmpty())
4393  {
4394  pNewItem->pPrev = VMA_NULL;
4395  m_pFront = pNewItem;
4396  m_pBack = pNewItem;
4397  m_Count = 1;
4398  }
4399  else
4400  {
4401  pNewItem->pPrev = m_pBack;
4402  m_pBack->pNext = pNewItem;
4403  m_pBack = pNewItem;
4404  ++m_Count;
4405  }
4406  return pNewItem;
4407 }
4408 
4409 template<typename T>
4410 VmaListItem<T>* VmaRawList<T>::PushFront()
4411 {
4412  ItemType* const pNewItem = m_ItemAllocator.Alloc();
4413  pNewItem->pPrev = VMA_NULL;
4414  if(IsEmpty())
4415  {
4416  pNewItem->pNext = VMA_NULL;
4417  m_pFront = pNewItem;
4418  m_pBack = pNewItem;
4419  m_Count = 1;
4420  }
4421  else
4422  {
4423  pNewItem->pNext = m_pFront;
4424  m_pFront->pPrev = pNewItem;
4425  m_pFront = pNewItem;
4426  ++m_Count;
4427  }
4428  return pNewItem;
4429 }
4430 
4431 template<typename T>
4432 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
4433 {
4434  ItemType* const pNewItem = PushBack();
4435  pNewItem->Value = value;
4436  return pNewItem;
4437 }
4438 
4439 template<typename T>
4440 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
4441 {
4442  ItemType* const pNewItem = PushFront();
4443  pNewItem->Value = value;
4444  return pNewItem;
4445 }
4446 
4447 template<typename T>
4448 void VmaRawList<T>::PopBack()
4449 {
4450  VMA_HEAVY_ASSERT(m_Count > 0);
4451  ItemType* const pBackItem = m_pBack;
4452  ItemType* const pPrevItem = pBackItem->pPrev;
4453  if(pPrevItem != VMA_NULL)
4454  {
4455  pPrevItem->pNext = VMA_NULL;
4456  }
4457  m_pBack = pPrevItem;
4458  m_ItemAllocator.Free(pBackItem);
4459  --m_Count;
4460 }
4461 
4462 template<typename T>
4463 void VmaRawList<T>::PopFront()
4464 {
4465  VMA_HEAVY_ASSERT(m_Count > 0);
4466  ItemType* const pFrontItem = m_pFront;
4467  ItemType* const pNextItem = pFrontItem->pNext;
4468  if(pNextItem != VMA_NULL)
4469  {
4470  pNextItem->pPrev = VMA_NULL;
4471  }
4472  m_pFront = pNextItem;
4473  m_ItemAllocator.Free(pFrontItem);
4474  --m_Count;
4475 }
4476 
4477 template<typename T>
4478 void VmaRawList<T>::Remove(ItemType* pItem)
4479 {
4480  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
4481  VMA_HEAVY_ASSERT(m_Count > 0);
4482 
4483  if(pItem->pPrev != VMA_NULL)
4484  {
4485  pItem->pPrev->pNext = pItem->pNext;
4486  }
4487  else
4488  {
4489  VMA_HEAVY_ASSERT(m_pFront == pItem);
4490  m_pFront = pItem->pNext;
4491  }
4492 
4493  if(pItem->pNext != VMA_NULL)
4494  {
4495  pItem->pNext->pPrev = pItem->pPrev;
4496  }
4497  else
4498  {
4499  VMA_HEAVY_ASSERT(m_pBack == pItem);
4500  m_pBack = pItem->pPrev;
4501  }
4502 
4503  m_ItemAllocator.Free(pItem);
4504  --m_Count;
4505 }
4506 
4507 template<typename T>
4508 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
4509 {
4510  if(pItem != VMA_NULL)
4511  {
4512  ItemType* const prevItem = pItem->pPrev;
4513  ItemType* const newItem = m_ItemAllocator.Alloc();
4514  newItem->pPrev = prevItem;
4515  newItem->pNext = pItem;
4516  pItem->pPrev = newItem;
4517  if(prevItem != VMA_NULL)
4518  {
4519  prevItem->pNext = newItem;
4520  }
4521  else
4522  {
4523  VMA_HEAVY_ASSERT(m_pFront == pItem);
4524  m_pFront = newItem;
4525  }
4526  ++m_Count;
4527  return newItem;
4528  }
4529  else
4530  return PushBack();
4531 }
4532 
4533 template<typename T>
4534 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
4535 {
4536  if(pItem != VMA_NULL)
4537  {
4538  ItemType* const nextItem = pItem->pNext;
4539  ItemType* const newItem = m_ItemAllocator.Alloc();
4540  newItem->pNext = nextItem;
4541  newItem->pPrev = pItem;
4542  pItem->pNext = newItem;
4543  if(nextItem != VMA_NULL)
4544  {
4545  nextItem->pPrev = newItem;
4546  }
4547  else
4548  {
4549  VMA_HEAVY_ASSERT(m_pBack == pItem);
4550  m_pBack = newItem;
4551  }
4552  ++m_Count;
4553  return newItem;
4554  }
4555  else
4556  return PushFront();
4557 }
4558 
4559 template<typename T>
4560 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
4561 {
4562  ItemType* const newItem = InsertBefore(pItem);
4563  newItem->Value = value;
4564  return newItem;
4565 }
4566 
4567 template<typename T>
4568 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
4569 {
4570  ItemType* const newItem = InsertAfter(pItem);
4571  newItem->Value = value;
4572  return newItem;
4573 }
4574 
4575 template<typename T, typename AllocatorT>
4576 class VmaList
4577 {
4578  VMA_CLASS_NO_COPY(VmaList)
4579 public:
4580  class iterator
4581  {
4582  public:
4583  iterator() :
4584  m_pList(VMA_NULL),
4585  m_pItem(VMA_NULL)
4586  {
4587  }
4588 
4589  T& operator*() const
4590  {
4591  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4592  return m_pItem->Value;
4593  }
4594  T* operator->() const
4595  {
4596  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4597  return &m_pItem->Value;
4598  }
4599 
4600  iterator& operator++()
4601  {
4602  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4603  m_pItem = m_pItem->pNext;
4604  return *this;
4605  }
4606  iterator& operator--()
4607  {
4608  if(m_pItem != VMA_NULL)
4609  {
4610  m_pItem = m_pItem->pPrev;
4611  }
4612  else
4613  {
4614  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
4615  m_pItem = m_pList->Back();
4616  }
4617  return *this;
4618  }
4619 
4620  iterator operator++(int)
4621  {
4622  iterator result = *this;
4623  ++*this;
4624  return result;
4625  }
4626  iterator operator--(int)
4627  {
4628  iterator result = *this;
4629  --*this;
4630  return result;
4631  }
4632 
4633  bool operator==(const iterator& rhs) const
4634  {
4635  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4636  return m_pItem == rhs.m_pItem;
4637  }
4638  bool operator!=(const iterator& rhs) const
4639  {
4640  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4641  return m_pItem != rhs.m_pItem;
4642  }
4643 
4644  private:
4645  VmaRawList<T>* m_pList;
4646  VmaListItem<T>* m_pItem;
4647 
4648  iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
4649  m_pList(pList),
4650  m_pItem(pItem)
4651  {
4652  }
4653 
4654  friend class VmaList<T, AllocatorT>;
4655  };
4656 
4657  class const_iterator
4658  {
4659  public:
4660  const_iterator() :
4661  m_pList(VMA_NULL),
4662  m_pItem(VMA_NULL)
4663  {
4664  }
4665 
4666  const_iterator(const iterator& src) :
4667  m_pList(src.m_pList),
4668  m_pItem(src.m_pItem)
4669  {
4670  }
4671 
4672  const T& operator*() const
4673  {
4674  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4675  return m_pItem->Value;
4676  }
4677  const T* operator->() const
4678  {
4679  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4680  return &m_pItem->Value;
4681  }
4682 
4683  const_iterator& operator++()
4684  {
4685  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4686  m_pItem = m_pItem->pNext;
4687  return *this;
4688  }
4689  const_iterator& operator--()
4690  {
4691  if(m_pItem != VMA_NULL)
4692  {
4693  m_pItem = m_pItem->pPrev;
4694  }
4695  else
4696  {
4697  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
4698  m_pItem = m_pList->Back();
4699  }
4700  return *this;
4701  }
4702 
4703  const_iterator operator++(int)
4704  {
4705  const_iterator result = *this;
4706  ++*this;
4707  return result;
4708  }
4709  const_iterator operator--(int)
4710  {
4711  const_iterator result = *this;
4712  --*this;
4713  return result;
4714  }
4715 
4716  bool operator==(const const_iterator& rhs) const
4717  {
4718  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4719  return m_pItem == rhs.m_pItem;
4720  }
4721  bool operator!=(const const_iterator& rhs) const
4722  {
4723  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4724  return m_pItem != rhs.m_pItem;
4725  }
4726 
4727  private:
4728  const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
4729  m_pList(pList),
4730  m_pItem(pItem)
4731  {
4732  }
4733 
4734  const VmaRawList<T>* m_pList;
4735  const VmaListItem<T>* m_pItem;
4736 
4737  friend class VmaList<T, AllocatorT>;
4738  };
4739 
4740  VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }
4741 
4742  bool empty() const { return m_RawList.IsEmpty(); }
4743  size_t size() const { return m_RawList.GetCount(); }
4744 
4745  iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
4746  iterator end() { return iterator(&m_RawList, VMA_NULL); }
4747 
4748  const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
4749  const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
4750 
4751  void clear() { m_RawList.Clear(); }
4752  void push_back(const T& value) { m_RawList.PushBack(value); }
4753  void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
4754  iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
4755 
4756 private:
4757  VmaRawList<T> m_RawList;
4758 };
4759 
4760 #endif // #if VMA_USE_STL_LIST
4761 
4763 // class VmaMap
4764 
4765 // Unused in this version.
4766 #if 0
4767 
4768 #if VMA_USE_STL_UNORDERED_MAP
4769 
4770 #define VmaPair std::pair
4771 
4772 #define VMA_MAP_TYPE(KeyT, ValueT) \
4773  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
4774 
4775 #else // #if VMA_USE_STL_UNORDERED_MAP
4776 
4777 template<typename T1, typename T2>
4778 struct VmaPair
4779 {
4780  T1 first;
4781  T2 second;
4782 
4783  VmaPair() : first(), second() { }
4784  VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
4785 };
4786 
4787 /* Class compatible with subset of interface of std::unordered_map.
4788 KeyT, ValueT must be POD because they will be stored in VmaVector.
4789 */
4790 template<typename KeyT, typename ValueT>
4791 class VmaMap
4792 {
4793 public:
4794  typedef VmaPair<KeyT, ValueT> PairType;
4795  typedef PairType* iterator;
4796 
4797  VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }
4798 
4799  iterator begin() { return m_Vector.begin(); }
4800  iterator end() { return m_Vector.end(); }
4801 
4802  void insert(const PairType& pair);
4803  iterator find(const KeyT& key);
4804  void erase(iterator it);
4805 
4806 private:
4807  VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
4808 };
4809 
4810 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
4811 
4812 template<typename FirstT, typename SecondT>
4813 struct VmaPairFirstLess
4814 {
4815  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
4816  {
4817  return lhs.first < rhs.first;
4818  }
4819  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
4820  {
4821  return lhs.first < rhsFirst;
4822  }
4823 };
4824 
4825 template<typename KeyT, typename ValueT>
4826 void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
4827 {
4828  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
4829  m_Vector.data(),
4830  m_Vector.data() + m_Vector.size(),
4831  pair,
4832  VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
4833  VmaVectorInsert(m_Vector, indexToInsert, pair);
4834 }
4835 
4836 template<typename KeyT, typename ValueT>
4837 VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
4838 {
4839  PairType* it = VmaBinaryFindFirstNotLess(
4840  m_Vector.data(),
4841  m_Vector.data() + m_Vector.size(),
4842  key,
4843  VmaPairFirstLess<KeyT, ValueT>());
4844  if((it != m_Vector.end()) && (it->first == key))
4845  {
4846  return it;
4847  }
4848  else
4849  {
4850  return m_Vector.end();
4851  }
4852 }
4853 
4854 template<typename KeyT, typename ValueT>
4855 void VmaMap<KeyT, ValueT>::erase(iterator it)
4856 {
4857  VmaVectorRemove(m_Vector, it - m_Vector.begin());
4858 }
4859 
4860 #endif // #if VMA_USE_STL_UNORDERED_MAP
4861 
4862 #endif // #if 0
4863 
4865 
4866 class VmaDeviceMemoryBlock;
4867 
4868 enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
4869 
4870 struct VmaAllocation_T
4871 {
4872  VMA_CLASS_NO_COPY(VmaAllocation_T)
4873 private:
4874  static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;
4875 
4876  enum FLAGS
4877  {
4878  FLAG_USER_DATA_STRING = 0x01,
4879  };
4880 
4881 public:
4882  enum ALLOCATION_TYPE
4883  {
4884  ALLOCATION_TYPE_NONE,
4885  ALLOCATION_TYPE_BLOCK,
4886  ALLOCATION_TYPE_DEDICATED,
4887  };
4888 
4889  VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
4890  m_Alignment(1),
4891  m_Size(0),
4892  m_pUserData(VMA_NULL),
4893  m_LastUseFrameIndex(currentFrameIndex),
4894  m_Type((uint8_t)ALLOCATION_TYPE_NONE),
4895  m_SuballocationType((uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN),
4896  m_MapCount(0),
4897  m_Flags(userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0)
4898  {
4899 #if VMA_STATS_STRING_ENABLED
4900  m_CreationFrameIndex = currentFrameIndex;
4901  m_BufferImageUsage = 0;
4902 #endif
4903  }
4904 
4905  ~VmaAllocation_T()
4906  {
4907  VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");
4908 
4909  // Check if owned string was freed.
4910  VMA_ASSERT(m_pUserData == VMA_NULL);
4911  }
4912 
4913  void InitBlockAllocation(
4914  VmaPool hPool,
4915  VmaDeviceMemoryBlock* block,
4916  VkDeviceSize offset,
4917  VkDeviceSize alignment,
4918  VkDeviceSize size,
4919  VmaSuballocationType suballocationType,
4920  bool mapped,
4921  bool canBecomeLost)
4922  {
4923  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
4924  VMA_ASSERT(block != VMA_NULL);
4925  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
4926  m_Alignment = alignment;
4927  m_Size = size;
4928  m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
4929  m_SuballocationType = (uint8_t)suballocationType;
4930  m_BlockAllocation.m_hPool = hPool;
4931  m_BlockAllocation.m_Block = block;
4932  m_BlockAllocation.m_Offset = offset;
4933  m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
4934  }
4935 
4936  void InitLost()
4937  {
4938  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
4939  VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
4940  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
4941  m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
4942  m_BlockAllocation.m_Block = VMA_NULL;
4943  m_BlockAllocation.m_Offset = 0;
4944  m_BlockAllocation.m_CanBecomeLost = true;
4945  }
4946 
4947  void ChangeBlockAllocation(
4948  VmaAllocator hAllocator,
4949  VmaDeviceMemoryBlock* block,
4950  VkDeviceSize offset);
4951 
4952  void ChangeSize(VkDeviceSize newSize);
4953  void ChangeOffset(VkDeviceSize newOffset);
4954 
4955  // A non-null pMappedData means the allocation was created with the MAPPED flag.
4956  void InitDedicatedAllocation(
4957  uint32_t memoryTypeIndex,
4958  VkDeviceMemory hMemory,
4959  VmaSuballocationType suballocationType,
4960  void* pMappedData,
4961  VkDeviceSize size)
4962  {
4963  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
4964  VMA_ASSERT(hMemory != VK_NULL_HANDLE);
4965  m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
4966  m_Alignment = 0;
4967  m_Size = size;
4968  m_SuballocationType = (uint8_t)suballocationType;
4969  m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
4970  m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
4971  m_DedicatedAllocation.m_hMemory = hMemory;
4972  m_DedicatedAllocation.m_pMappedData = pMappedData;
4973  }
4974 
4975  ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
4976  VkDeviceSize GetAlignment() const { return m_Alignment; }
4977  VkDeviceSize GetSize() const { return m_Size; }
4978  bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
4979  void* GetUserData() const { return m_pUserData; }
4980  void SetUserData(VmaAllocator hAllocator, void* pUserData);
4981  VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }
4982 
4983  VmaDeviceMemoryBlock* GetBlock() const
4984  {
4985  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
4986  return m_BlockAllocation.m_Block;
4987  }
4988  VkDeviceSize GetOffset() const;
4989  VkDeviceMemory GetMemory() const;
4990  uint32_t GetMemoryTypeIndex() const;
4991  bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
4992  void* GetMappedData() const;
4993  bool CanBecomeLost() const;
4994  VmaPool GetPool() const;
4995 
4996  uint32_t GetLastUseFrameIndex() const
4997  {
4998  return m_LastUseFrameIndex.load();
4999  }
5000  bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
5001  {
5002  return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
5003  }
5004  /*
5005  - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
5006  makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
5007  - Else, returns false.
5008 
5009  If hAllocation is already lost, assert - you should not call it then.
5010  If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
5011  */
5012  bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
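 /* Illustrative sketch (an assumption derived only from the contract documented
 above - not necessarily the actual implementation):

     uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
     for(;;)
     {
         if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
         {
             return false; // Still potentially in use - cannot be made lost.
         }
         if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
         {
             return true; // Successfully marked as lost.
         }
         // Another thread changed the index concurrently - retry with the new value.
     }
 */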
5013 
5014  void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
5015  {
5016  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
5017  outInfo.blockCount = 1;
5018  outInfo.allocationCount = 1;
5019  outInfo.unusedRangeCount = 0;
5020  outInfo.usedBytes = m_Size;
5021  outInfo.unusedBytes = 0;
5022  outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
5023  outInfo.unusedRangeSizeMin = UINT64_MAX;
5024  outInfo.unusedRangeSizeMax = 0;
5025  }
5026 
5027  void BlockAllocMap();
5028  void BlockAllocUnmap();
5029  VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
5030  void DedicatedAllocUnmap(VmaAllocator hAllocator);
5031 
5032 #if VMA_STATS_STRING_ENABLED
5033  uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
5034  uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }
5035 
5036  void InitBufferImageUsage(uint32_t bufferImageUsage)
5037  {
5038  VMA_ASSERT(m_BufferImageUsage == 0);
5039  m_BufferImageUsage = bufferImageUsage;
5040  }
5041 
5042  void PrintParameters(class VmaJsonWriter& json) const;
5043 #endif
5044 
5045 private:
5046  VkDeviceSize m_Alignment;
5047  VkDeviceSize m_Size;
5048  void* m_pUserData;
5049  VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
5050  uint8_t m_Type; // ALLOCATION_TYPE
5051  uint8_t m_SuballocationType; // VmaSuballocationType
5052  // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
5053  // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
5054  uint8_t m_MapCount;
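 // Example: m_MapCount == 0x82 means the allocation is persistently mapped
 // (bit 0x80 set) and additionally has two outstanding vmaMapMemory() calls
 // (0x82 & 0x7F == 2).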
5055  uint8_t m_Flags; // enum FLAGS
5056 
5057  // Allocation out of VmaDeviceMemoryBlock.
5058  struct BlockAllocation
5059  {
5060  VmaPool m_hPool; // Null if belongs to general memory.
5061  VmaDeviceMemoryBlock* m_Block;
5062  VkDeviceSize m_Offset;
5063  bool m_CanBecomeLost;
5064  };
5065 
5066  // Allocation for an object that has its own private VkDeviceMemory.
5067  struct DedicatedAllocation
5068  {
5069  uint32_t m_MemoryTypeIndex;
5070  VkDeviceMemory m_hMemory;
5071  void* m_pMappedData; // Not null means memory is mapped.
5072  };
5073 
5074  union
5075  {
5076  // Allocation out of VmaDeviceMemoryBlock.
5077  BlockAllocation m_BlockAllocation;
5078  // Allocation for an object that has its own private VkDeviceMemory.
5079  DedicatedAllocation m_DedicatedAllocation;
5080  };
5081 
5082 #if VMA_STATS_STRING_ENABLED
5083  uint32_t m_CreationFrameIndex;
5084  uint32_t m_BufferImageUsage; // 0 if unknown.
5085 #endif
5086 
5087  void FreeUserDataString(VmaAllocator hAllocator);
5088 };
5089 
5090 /*
5091 Represents a region of a VmaDeviceMemoryBlock that is either assigned and
5092 returned as an allocated memory block, or free.
5093 */
5094 struct VmaSuballocation
5095 {
5096  VkDeviceSize offset;
5097  VkDeviceSize size;
5098  VmaAllocation hAllocation;
5099  VmaSuballocationType type;
5100 };
5101 
5102 // Comparator for offsets.
5103 struct VmaSuballocationOffsetLess
5104 {
5105  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
5106  {
5107  return lhs.offset < rhs.offset;
5108  }
5109 };
5110 struct VmaSuballocationOffsetGreater
5111 {
5112  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
5113  {
5114  return lhs.offset > rhs.offset;
5115  }
5116 };
5117 
5118 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
5119 
5120 // Cost of making one more allocation lost, expressed as an equivalent number of bytes.
5121 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
5122 
5123 /*
5124 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
5125 
5126 If canMakeOtherLost was false:
5127 - item points to a FREE suballocation.
5128 - itemsToMakeLostCount is 0.
5129 
5130 If canMakeOtherLost was true:
5131 - item points to the first of a sequence of suballocations, which are either FREE,
5132  or point to VmaAllocations that can become lost.
5133 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
5134  the requested allocation to succeed.
5135 */
5136 struct VmaAllocationRequest
5137 {
5138  VkDeviceSize offset;
5139  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
5140  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
5141  VmaSuballocationList::iterator item;
5142  size_t itemsToMakeLostCount;
5143  void* customData;
5144 
5145  VkDeviceSize CalcCost() const
5146  {
5147  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
5148  }
5149 };
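// Example of the cost model: a request that overlaps two lost-able allocations
// totaling 256 bytes has CalcCost() == 256 + 2 * VMA_LOST_ALLOCATION_COST.
// With VMA_LOST_ALLOCATION_COST == 1 MiB, making allocations lost is heavily
// penalized compared to a request placed entirely in free space, whose
// sumItemSize and itemsToMakeLostCount are both 0, giving CalcCost() == 0.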
5150 
5151 /*
5152 Data structure used for bookkeeping of allocations and unused ranges of memory
5153 in a single VkDeviceMemory block.
5154 */
5155 class VmaBlockMetadata
5156 {
5157 public:
5158  VmaBlockMetadata(VmaAllocator hAllocator);
5159  virtual ~VmaBlockMetadata() { }
5160  virtual void Init(VkDeviceSize size) { m_Size = size; }
5161 
5162  // Validates all data structures inside this object. If not valid, returns false.
5163  virtual bool Validate() const = 0;
5164  VkDeviceSize GetSize() const { return m_Size; }
5165  virtual size_t GetAllocationCount() const = 0;
5166  virtual VkDeviceSize GetSumFreeSize() const = 0;
5167  virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
5168  // Returns true if this block is empty - contains only a single free suballocation.
5169  virtual bool IsEmpty() const = 0;
5170 
5171  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
5172  // Shouldn't modify blockCount.
5173  virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;
5174 
5175 #if VMA_STATS_STRING_ENABLED
5176  virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
5177 #endif
5178 
5179  // Tries to find a place for suballocation with given parameters inside this block.
5180  // If succeeded, fills pAllocationRequest and returns true.
5181  // If failed, returns false.
5182  virtual bool CreateAllocationRequest(
5183  uint32_t currentFrameIndex,
5184  uint32_t frameInUseCount,
5185  VkDeviceSize bufferImageGranularity,
5186  VkDeviceSize allocSize,
5187  VkDeviceSize allocAlignment,
5188  bool upperAddress,
5189  VmaSuballocationType allocType,
5190  bool canMakeOtherLost,
5191  // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* or VMA_ALLOCATION_INTERNAL_STRATEGY_* flags.
5192  uint32_t strategy,
5193  VmaAllocationRequest* pAllocationRequest) = 0;
5194 
5195  virtual bool MakeRequestedAllocationsLost(
5196  uint32_t currentFrameIndex,
5197  uint32_t frameInUseCount,
5198  VmaAllocationRequest* pAllocationRequest) = 0;
5199 
5200  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;
5201 
5202  virtual VkResult CheckCorruption(const void* pBlockData) = 0;
5203 
5204  // Makes actual allocation based on request. Request must already be checked and valid.
5205  virtual void Alloc(
5206  const VmaAllocationRequest& request,
5207  VmaSuballocationType type,
5208  VkDeviceSize allocSize,
5209  bool upperAddress,
5210  VmaAllocation hAllocation) = 0;
5211 
5212  // Frees suballocation assigned to given memory region.
5213  virtual void Free(const VmaAllocation allocation) = 0;
5214  virtual void FreeAtOffset(VkDeviceSize offset) = 0;
5215 
5216  // Tries to resize (grow or shrink) space for given allocation, in place.
5217  virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize) { return false; }
5218 
5219 protected:
5220  const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }
5221 
5222 #if VMA_STATS_STRING_ENABLED
5223  void PrintDetailedMap_Begin(class VmaJsonWriter& json,
5224  VkDeviceSize unusedBytes,
5225  size_t allocationCount,
5226  size_t unusedRangeCount) const;
5227  void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
5228  VkDeviceSize offset,
5229  VmaAllocation hAllocation) const;
5230  void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
5231  VkDeviceSize offset,
5232  VkDeviceSize size) const;
5233  void PrintDetailedMap_End(class VmaJsonWriter& json) const;
5234 #endif
5235 
5236 private:
5237  VkDeviceSize m_Size;
5238  const VkAllocationCallbacks* m_pAllocationCallbacks;
5239 };
5240 
5241 #define VMA_VALIDATE(cond) do { if(!(cond)) { \
5242  VMA_ASSERT(0 && "Validation failed: " #cond); \
5243  return false; \
5244  } } while(false)
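// Usage sketch (hypothetical check): inside a Validate() implementation one
// can write e.g.
//     VMA_VALIDATE(m_SumFreeSize <= GetSize());
// which asserts in debug builds and makes the enclosing function return false
// when the condition does not hold.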
5245 
5246 class VmaBlockMetadata_Generic : public VmaBlockMetadata
5247 {
5248  VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
5249 public:
5250  VmaBlockMetadata_Generic(VmaAllocator hAllocator);
5251  virtual ~VmaBlockMetadata_Generic();
5252  virtual void Init(VkDeviceSize size);
5253 
5254  virtual bool Validate() const;
5255  virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
5256  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
5257  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
5258  virtual bool IsEmpty() const;
5259 
5260  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
5261  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
5262 
5263 #if VMA_STATS_STRING_ENABLED
5264  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
5265 #endif
5266 
5267  virtual bool CreateAllocationRequest(
5268  uint32_t currentFrameIndex,
5269  uint32_t frameInUseCount,
5270  VkDeviceSize bufferImageGranularity,
5271  VkDeviceSize allocSize,
5272  VkDeviceSize allocAlignment,
5273  bool upperAddress,
5274  VmaSuballocationType allocType,
5275  bool canMakeOtherLost,
5276  uint32_t strategy,
5277  VmaAllocationRequest* pAllocationRequest);
5278 
5279  virtual bool MakeRequestedAllocationsLost(
5280  uint32_t currentFrameIndex,
5281  uint32_t frameInUseCount,
5282  VmaAllocationRequest* pAllocationRequest);
5283 
5284  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5285 
5286  virtual VkResult CheckCorruption(const void* pBlockData);
5287 
5288  virtual void Alloc(
5289  const VmaAllocationRequest& request,
5290  VmaSuballocationType type,
5291  VkDeviceSize allocSize,
5292  bool upperAddress,
5293  VmaAllocation hAllocation);
5294 
5295  virtual void Free(const VmaAllocation allocation);
5296  virtual void FreeAtOffset(VkDeviceSize offset);
5297 
5298  virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize);
5299 
5301  // For defragmentation
5302 
5303  bool IsBufferImageGranularityConflictPossible(
5304  VkDeviceSize bufferImageGranularity,
5305  VmaSuballocationType& inOutPrevSuballocType) const;
5306 
5307 private:
5308  friend class VmaDefragmentationAlgorithm_Generic;
5309  friend class VmaDefragmentationAlgorithm_Fast;
5310 
5311  uint32_t m_FreeCount;
5312  VkDeviceSize m_SumFreeSize;
5313  VmaSuballocationList m_Suballocations;
5314  // Suballocations that are free and have size greater than a certain threshold.
5315  // Sorted by size, ascending.
5316  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
5317 
5318  bool ValidateFreeSuballocationList() const;
5319 
5320  // Checks if the requested suballocation with given parameters can be placed in the given suballocItem.
5321  // If yes, fills pOffset and returns true. If no, returns false.
5322  bool CheckAllocation(
5323  uint32_t currentFrameIndex,
5324  uint32_t frameInUseCount,
5325  VkDeviceSize bufferImageGranularity,
5326  VkDeviceSize allocSize,
5327  VkDeviceSize allocAlignment,
5328  VmaSuballocationType allocType,
5329  VmaSuballocationList::const_iterator suballocItem,
5330  bool canMakeOtherLost,
5331  VkDeviceSize* pOffset,
5332  size_t* itemsToMakeLostCount,
5333  VkDeviceSize* pSumFreeSize,
5334  VkDeviceSize* pSumItemSize) const;
5335  // Given a free suballocation, merges it with the following one, which must also be free.
5336  void MergeFreeWithNext(VmaSuballocationList::iterator item);
5337  // Releases given suballocation, making it free.
5338  // Merges it with adjacent free suballocations if applicable.
5339  // Returns iterator to new free suballocation at this place.
5340  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
5341  // Given a free suballocation, inserts it into the sorted list
5342  // m_FreeSuballocationsBySize if its size is above the registration threshold.
5343  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
5344  // Given a free suballocation, removes it from the sorted list
5345  // m_FreeSuballocationsBySize if it was registered there.
5346  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
5347 };
5348 
5349 /*
5350 Allocations and their references in the internal data structure look like this:
5351 
5352 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
5353 
5354  0 +-------+
5355  | |
5356  | |
5357  | |
5358  +-------+
5359  | Alloc | 1st[m_1stNullItemsBeginCount]
5360  +-------+
5361  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
5362  +-------+
5363  | ... |
5364  +-------+
5365  | Alloc | 1st[1st.size() - 1]
5366  +-------+
5367  | |
5368  | |
5369  | |
5370 GetSize() +-------+
5371 
5372 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
5373 
5374  0 +-------+
5375  | Alloc | 2nd[0]
5376  +-------+
5377  | Alloc | 2nd[1]
5378  +-------+
5379  | ... |
5380  +-------+
5381  | Alloc | 2nd[2nd.size() - 1]
5382  +-------+
5383  | |
5384  | |
5385  | |
5386  +-------+
5387  | Alloc | 1st[m_1stNullItemsBeginCount]
5388  +-------+
5389  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
5390  +-------+
5391  | ... |
5392  +-------+
5393  | Alloc | 1st[1st.size() - 1]
5394  +-------+
5395  | |
5396 GetSize() +-------+
5397 
5398 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
5399 
5400  0 +-------+
5401  | |
5402  | |
5403  | |
5404  +-------+
5405  | Alloc | 1st[m_1stNullItemsBeginCount]
5406  +-------+
5407  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
5408  +-------+
5409  | ... |
5410  +-------+
5411  | Alloc | 1st[1st.size() - 1]
5412  +-------+
5413  | |
5414  | |
5415  | |
5416  +-------+
5417  | Alloc | 2nd[2nd.size() - 1]
5418  +-------+
5419  | ... |
5420  +-------+
5421  | Alloc | 2nd[1]
5422  +-------+
5423  | Alloc | 2nd[0]
5424 GetSize() +-------+
5425 
5426 */
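// Application-side usage sketch (illustrative, using the public API declared
// earlier in this header; `allocator` and the remaining pool parameters are
// assumed to be set up elsewhere):
//
//     VmaPoolCreateInfo poolInfo = {};
//     poolInfo.flags = VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT; // selects this metadata class
//     // ... fill memoryTypeIndex, blockSize, etc. ...
//     VmaPool pool;
//     vmaCreatePool(allocator, &poolInfo, &pool);
//
//     VmaAllocationCreateInfo allocInfo = {};
//     allocInfo.pool = pool;
//     // Default: allocates from the lower side (1st vector).
//     // Adding VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT allocates from the
//     // upper side (2nd vector), producing the double stack shown above.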
5427 class VmaBlockMetadata_Linear : public VmaBlockMetadata
5428 {
5429  VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
5430 public:
5431  VmaBlockMetadata_Linear(VmaAllocator hAllocator);
5432  virtual ~VmaBlockMetadata_Linear();
5433  virtual void Init(VkDeviceSize size);
5434 
5435  virtual bool Validate() const;
5436  virtual size_t GetAllocationCount() const;
5437  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
5438  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
5439  virtual bool IsEmpty() const { return GetAllocationCount() == 0; }
5440 
5441  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
5442  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
5443 
5444 #if VMA_STATS_STRING_ENABLED
5445  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
5446 #endif
5447 
5448  virtual bool CreateAllocationRequest(
5449  uint32_t currentFrameIndex,
5450  uint32_t frameInUseCount,
5451  VkDeviceSize bufferImageGranularity,
5452  VkDeviceSize allocSize,
5453  VkDeviceSize allocAlignment,
5454  bool upperAddress,
5455  VmaSuballocationType allocType,
5456  bool canMakeOtherLost,
5457  uint32_t strategy,
5458  VmaAllocationRequest* pAllocationRequest);
5459 
5460  virtual bool MakeRequestedAllocationsLost(
5461  uint32_t currentFrameIndex,
5462  uint32_t frameInUseCount,
5463  VmaAllocationRequest* pAllocationRequest);
5464 
5465  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5466 
5467  virtual VkResult CheckCorruption(const void* pBlockData);
5468 
5469  virtual void Alloc(
5470  const VmaAllocationRequest& request,
5471  VmaSuballocationType type,
5472  VkDeviceSize allocSize,
5473  bool upperAddress,
5474  VmaAllocation hAllocation);
5475 
5476  virtual void Free(const VmaAllocation allocation);
5477  virtual void FreeAtOffset(VkDeviceSize offset);
5478 
5479 private:
5480  /*
5481  There are two suballocation vectors, used in a ping-pong fashion.
5482  The one with index m_1stVectorIndex is called 1st.
5483  The one with index (m_1stVectorIndex ^ 1) is called 2nd.
5484  2nd can be non-empty only when 1st is not empty.
5485  When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
5486  */
5487  typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;
5488 
5489  enum SECOND_VECTOR_MODE
5490  {
5491  SECOND_VECTOR_EMPTY,
5492  /*
5493  Suballocations in 2nd vector are created later than the ones in 1st, but they
5494  all have smaller offsets.
5495  */
5496  SECOND_VECTOR_RING_BUFFER,
5497  /*
5498  Suballocations in 2nd vector are upper side of double stack.
5499  They all have offsets higher than those in 1st vector.
5500  Top of this stack means smaller offsets, but higher indices in this vector.
5501  */
5502  SECOND_VECTOR_DOUBLE_STACK,
5503  };
5504 
5505  VkDeviceSize m_SumFreeSize;
5506  SuballocationVectorType m_Suballocations0, m_Suballocations1;
5507  uint32_t m_1stVectorIndex;
5508  SECOND_VECTOR_MODE m_2ndVectorMode;
5509 
5510  SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
5511  SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
5512  const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
5513  const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
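 // Example: when m_1stVectorIndex == 0, 1st is m_Suballocations0 and 2nd is
 // m_Suballocations1; flipping m_1stVectorIndex swaps the roles of the two
 // vectors without copying any elements.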
5514 
5515  // Number of items in 1st vector with hAllocation = null at the beginning.
5516  size_t m_1stNullItemsBeginCount;
5517  // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
5518  size_t m_1stNullItemsMiddleCount;
5519  // Number of items in 2nd vector with hAllocation = null.
5520  size_t m_2ndNullItemsCount;
5521 
5522  bool ShouldCompact1st() const;
5523  void CleanupAfterFree();
5524 };
5525 
5526 /*
5527 - GetSize() is the original size of the allocated memory block.
5528 - m_UsableSize is this size aligned down to a power of two.
5529  All allocations and calculations happen relative to m_UsableSize.
5530 - GetUnusableSize() is the difference between them.
5531  It is reported as a separate, unused range, not available for allocations.
5532 
5533 The node at level 0 has size = m_UsableSize.
5534 Each subsequent level contains nodes with half the size of those on the previous level.
5535 m_LevelCount is the maximum number of levels to use in the current object.
5536 */
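// Worked example (illustrative numbers): for a 1 MiB block, m_UsableSize is
// 1048576 (already a power of two) and LevelToNodeSize() gives 1 MiB at level 0,
// 512 KiB at level 1, 256 KiB at level 2, and so on. A 300 KiB allocation is
// served from a 512 KiB node at level 1; the 212 KiB lost to internal
// fragmentation remains counted in m_SumFreeSize, as noted on that member below.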
5537 class VmaBlockMetadata_Buddy : public VmaBlockMetadata
5538 {
5539  VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
5540 public:
5541  VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
5542  virtual ~VmaBlockMetadata_Buddy();
5543  virtual void Init(VkDeviceSize size);
5544 
5545  virtual bool Validate() const;
5546  virtual size_t GetAllocationCount() const { return m_AllocationCount; }
5547  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
5548  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
5549  virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }
5550 
5551  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
5552  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
5553 
5554 #if VMA_STATS_STRING_ENABLED
5555  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
5556 #endif
5557 
5558  virtual bool CreateAllocationRequest(
5559  uint32_t currentFrameIndex,
5560  uint32_t frameInUseCount,
5561  VkDeviceSize bufferImageGranularity,
5562  VkDeviceSize allocSize,
5563  VkDeviceSize allocAlignment,
5564  bool upperAddress,
5565  VmaSuballocationType allocType,
5566  bool canMakeOtherLost,
5567  uint32_t strategy,
5568  VmaAllocationRequest* pAllocationRequest);
5569 
5570  virtual bool MakeRequestedAllocationsLost(
5571  uint32_t currentFrameIndex,
5572  uint32_t frameInUseCount,
5573  VmaAllocationRequest* pAllocationRequest);
5574 
5575  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5576 
5577  virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }
5578 
5579  virtual void Alloc(
5580  const VmaAllocationRequest& request,
5581  VmaSuballocationType type,
5582  VkDeviceSize allocSize,
5583  bool upperAddress,
5584  VmaAllocation hAllocation);
5585 
5586  virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
5587  virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }
5588 
5589 private:
5590  static const VkDeviceSize MIN_NODE_SIZE = 32;
5591  static const size_t MAX_LEVELS = 30;
5592 
5593  struct ValidationContext
5594  {
5595  size_t calculatedAllocationCount;
5596  size_t calculatedFreeCount;
5597  VkDeviceSize calculatedSumFreeSize;
5598 
5599  ValidationContext() :
5600  calculatedAllocationCount(0),
5601  calculatedFreeCount(0),
5602  calculatedSumFreeSize(0) { }
5603  };
5604 
5605  struct Node
5606  {
5607  VkDeviceSize offset;
5608  enum TYPE
5609  {
5610  TYPE_FREE,
5611  TYPE_ALLOCATION,
5612  TYPE_SPLIT,
5613  TYPE_COUNT
5614  } type;
5615  Node* parent;
5616  Node* buddy;
5617 
5618  union
5619  {
5620  struct
5621  {
5622  Node* prev;
5623  Node* next;
5624  } free;
5625  struct
5626  {
5627  VmaAllocation alloc;
5628  } allocation;
5629  struct
5630  {
5631  Node* leftChild;
5632  } split;
5633  };
5634  };
5635 
5636  // Size of the memory block aligned down to a power of two.
5637  VkDeviceSize m_UsableSize;
5638  uint32_t m_LevelCount;
5639 
5640  Node* m_Root;
5641  struct {
5642  Node* front;
5643  Node* back;
5644  } m_FreeList[MAX_LEVELS];
5645  // Number of nodes in the tree with type == TYPE_ALLOCATION.
5646  size_t m_AllocationCount;
5647  // Number of nodes in the tree with type == TYPE_FREE.
5648  size_t m_FreeCount;
5649  // This includes space wasted due to internal fragmentation. Doesn't include unusable size.
5650  VkDeviceSize m_SumFreeSize;
5651 
5652  VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
5653  void DeleteNode(Node* node);
5654  bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
5655  uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
5656  inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
5657  // Alloc passed just for validation. Can be null.
5658  void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
5659  void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
5660  // Adds node to the front of FreeList at given level.
5661  // node->type must be FREE.
5662  // node->free.prev, next can be undefined.
5663  void AddToFreeListFront(uint32_t level, Node* node);
5664  // Removes node from FreeList at given level.
5665  // node->type must be FREE.
5666  // node->free.prev, next stay untouched.
5667  void RemoveFromFreeList(uint32_t level, Node* node);
5668 
5669 #if VMA_STATS_STRING_ENABLED
5670  void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
5671 #endif
5672 };
5673 
5674 /*
5675 Represents a single block of device memory (`VkDeviceMemory`) with all the
5676 data about its regions (aka suballocations, #VmaAllocation), assigned and free.
5677 
5678 Thread-safety: This class must be externally synchronized.
5679 */
5680 class VmaDeviceMemoryBlock
5681 {
5682  VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
5683 public:
5684  VmaBlockMetadata* m_pMetadata;
5685 
5686  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
5687 
5688  ~VmaDeviceMemoryBlock()
5689  {
5690  VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
5691  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
5692  }
5693 
5694  // Always call after construction.
5695  void Init(
5696  VmaAllocator hAllocator,
5697  uint32_t newMemoryTypeIndex,
5698  VkDeviceMemory newMemory,
5699  VkDeviceSize newSize,
5700  uint32_t id,
5701  uint32_t algorithm);
5702  // Always call before destruction.
5703  void Destroy(VmaAllocator allocator);
5704 
5705  VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
5706  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
5707  uint32_t GetId() const { return m_Id; }
5708  void* GetMappedData() const { return m_pMappedData; }
5709 
5710  // Validates all data structures inside this object. If not valid, returns false.
5711  bool Validate() const;
5712 
5713  VkResult CheckCorruption(VmaAllocator hAllocator);
5714 
5715  // ppData can be null.
5716  VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
5717  void Unmap(VmaAllocator hAllocator, uint32_t count);
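 // Usage sketch (an assumption about the intended reference counting,
 // consistent with m_MapCount below):
 //     void* pData;
 //     block.Map(allocator, 1, &pData);  // m_MapCount 0 -> 1, calls vkMapMemory.
 //     block.Map(allocator, 1, &pData);  // m_MapCount 1 -> 2, reuses m_pMappedData.
 //     block.Unmap(allocator, 2);        // m_MapCount 2 -> 0, calls vkUnmapMemory.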
5718 
5719  VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
5720  VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
5721 
5722  VkResult BindBufferMemory(
5723  const VmaAllocator hAllocator,
5724  const VmaAllocation hAllocation,
5725  VkBuffer hBuffer);
5726  VkResult BindImageMemory(
5727  const VmaAllocator hAllocator,
5728  const VmaAllocation hAllocation,
5729  VkImage hImage);
5730 
5731 private:
5732  uint32_t m_MemoryTypeIndex;
5733  uint32_t m_Id;
5734  VkDeviceMemory m_hMemory;
5735 
5736  /*
5737  Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
5738  Also protects m_MapCount, m_pMappedData.
5739  Allocations, deallocations, and any change in m_pMetadata are protected by the parent's VmaBlockVector::m_Mutex.
5740  */
5741  VMA_MUTEX m_Mutex;
5742  uint32_t m_MapCount;
5743  void* m_pMappedData;
5744 };
5745 
5746 struct VmaPointerLess
5747 {
5748  bool operator()(const void* lhs, const void* rhs) const
5749  {
5750  return lhs < rhs;
5751  }
5752 };
5753 
5754 struct VmaDefragmentationMove
5755 {
5756  size_t srcBlockIndex;
5757  size_t dstBlockIndex;
5758  VkDeviceSize srcOffset;
5759  VkDeviceSize dstOffset;
5760  VkDeviceSize size;
5761 };
5762 
5763 class VmaDefragmentationAlgorithm;
5764 
5765 /*
5766 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
5767 Vulkan memory type.
5768 
5769 Synchronized internally with a mutex.
5770 */
5771 struct VmaBlockVector
5772 {
5773  VMA_CLASS_NO_COPY(VmaBlockVector)
5774 public:
5775  VmaBlockVector(
5776  VmaAllocator hAllocator,
5777  uint32_t memoryTypeIndex,
5778  VkDeviceSize preferredBlockSize,
5779  size_t minBlockCount,
5780  size_t maxBlockCount,
5781  VkDeviceSize bufferImageGranularity,
5782  uint32_t frameInUseCount,
5783  bool isCustomPool,
5784  bool explicitBlockSize,
5785  uint32_t algorithm);
5786  ~VmaBlockVector();
5787 
5788  VkResult CreateMinBlocks();
5789 
5790  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
5791  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
5792  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
5793  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
5794  uint32_t GetAlgorithm() const { return m_Algorithm; }
5795 
5796  void GetPoolStats(VmaPoolStats* pStats);
5797 
5798  bool IsEmpty() const { return m_Blocks.empty(); }
5799  bool IsCorruptionDetectionEnabled() const;
5800 
5801  VkResult Allocate(
5802  VmaPool hCurrentPool,
5803  uint32_t currentFrameIndex,
5804  VkDeviceSize size,
5805  VkDeviceSize alignment,
5806  const VmaAllocationCreateInfo& createInfo,
5807  VmaSuballocationType suballocType,
5808  size_t allocationCount,
5809  VmaAllocation* pAllocations);
5810 
5811  void Free(
5812  VmaAllocation hAllocation);
5813 
5814  // Adds statistics of this BlockVector to pStats.
5815  void AddStats(VmaStats* pStats);
5816 
5817 #if VMA_STATS_STRING_ENABLED
5818  void PrintDetailedMap(class VmaJsonWriter& json);
5819 #endif
5820 
5821  void MakePoolAllocationsLost(
5822  uint32_t currentFrameIndex,
5823  size_t* pLostAllocationCount);
5824  VkResult CheckCorruption();
5825 
5826  // Saves results in pCtx->res.
5827  void Defragment(
5828  class VmaBlockVectorDefragmentationContext* pCtx,
5829  VmaDefragmentationStats* pStats,
5830  VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
5831  VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
5832  VkCommandBuffer commandBuffer);
5833  void DefragmentationEnd(
5834  class VmaBlockVectorDefragmentationContext* pCtx,
5835  VmaDefragmentationStats* pStats);
5836 
5838  // To be used only while the m_Mutex is locked. Used during defragmentation.
5839 
5840  size_t GetBlockCount() const { return m_Blocks.size(); }
5841  VmaDeviceMemoryBlock* GetBlock(size_t index) const { return m_Blocks[index]; }
5842  size_t CalcAllocationCount() const;
5843  bool IsBufferImageGranularityConflictPossible() const;
5844 
5845 private:
5846  friend class VmaDefragmentationAlgorithm_Generic;
5847 
5848  const VmaAllocator m_hAllocator;
5849  const uint32_t m_MemoryTypeIndex;
5850  const VkDeviceSize m_PreferredBlockSize;
5851  const size_t m_MinBlockCount;
5852  const size_t m_MaxBlockCount;
5853  const VkDeviceSize m_BufferImageGranularity;
5854  const uint32_t m_FrameInUseCount;
5855  const bool m_IsCustomPool;
5856  const bool m_ExplicitBlockSize;
5857  const uint32_t m_Algorithm;
5858  /* There can be at most one block that is completely empty - a hysteresis
5859  to avoid the pessimistic case of alternating creation and destruction
5860  of a VkDeviceMemory. */
5861  bool m_HasEmptyBlock;
5862  VMA_RW_MUTEX m_Mutex;
5863  // Incrementally sorted by sumFreeSize, ascending.
5864  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
5865  uint32_t m_NextBlockId;
5866 
5867  VkDeviceSize CalcMaxBlockSize() const;
5868 
5869  // Finds and removes given block from vector.
5870  void Remove(VmaDeviceMemoryBlock* pBlock);
5871 
5872  // Performs a single step in sorting m_Blocks. They may not be fully sorted
5873  // after this call.
5874  void IncrementallySortBlocks();
5875 
5876  VkResult AllocatePage(
5877  VmaPool hCurrentPool,
5878  uint32_t currentFrameIndex,
5879  VkDeviceSize size,
5880  VkDeviceSize alignment,
5881  const VmaAllocationCreateInfo& createInfo,
5882  VmaSuballocationType suballocType,
5883  VmaAllocation* pAllocation);
5884 
5885  // To be used only without CAN_MAKE_OTHER_LOST flag.
5886  VkResult AllocateFromBlock(
5887  VmaDeviceMemoryBlock* pBlock,
5888  VmaPool hCurrentPool,
5889  uint32_t currentFrameIndex,
5890  VkDeviceSize size,
5891  VkDeviceSize alignment,
5892  VmaAllocationCreateFlags allocFlags,
5893  void* pUserData,
5894  VmaSuballocationType suballocType,
5895  uint32_t strategy,
5896  VmaAllocation* pAllocation);
5897 
5898  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
5899 
5900  // Saves result to pCtx->res.
5901  void ApplyDefragmentationMovesCpu(
5902  class VmaBlockVectorDefragmentationContext* pDefragCtx,
5903  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves);
5904  // Saves result to pCtx->res.
5905  void ApplyDefragmentationMovesGpu(
5906  class VmaBlockVectorDefragmentationContext* pDefragCtx,
5907  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
5908  VkCommandBuffer commandBuffer);
5909 
5910  /*
5911  Used during defragmentation. pDefragmentationStats is optional. It's in/out
5912  - updated with new data.
5913  */
5914  void FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats);
5915 };
5916 
5917 struct VmaPool_T
5918 {
5919  VMA_CLASS_NO_COPY(VmaPool_T)
5920 public:
5921  VmaBlockVector m_BlockVector;
5922 
5923  VmaPool_T(
5924  VmaAllocator hAllocator,
5925  const VmaPoolCreateInfo& createInfo,
5926  VkDeviceSize preferredBlockSize);
5927  ~VmaPool_T();
5928 
5929  uint32_t GetId() const { return m_Id; }
5930  void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }
5931 
5932 #if VMA_STATS_STRING_ENABLED
5933  //void PrintDetailedMap(class VmaStringBuilder& sb);
5934 #endif
5935 
5936 private:
5937  uint32_t m_Id;
5938 };
5939 
5940 /*
5941 Performs defragmentation:
5942 
5943 - Updates `pBlockVector->m_pMetadata`.
5944 - Updates allocations by calling ChangeBlockAllocation() or ChangeOffset().
5945 - Does not move actual data, only returns requested moves as `moves`.
5946 */
5947 class VmaDefragmentationAlgorithm
5948 {
5949  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm)
5950 public:
5951  VmaDefragmentationAlgorithm(
5952  VmaAllocator hAllocator,
5953  VmaBlockVector* pBlockVector,
5954  uint32_t currentFrameIndex) :
5955  m_hAllocator(hAllocator),
5956  m_pBlockVector(pBlockVector),
5957  m_CurrentFrameIndex(currentFrameIndex)
5958  {
5959  }
5960  virtual ~VmaDefragmentationAlgorithm()
5961  {
5962  }
5963 
5964  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) = 0;
5965  virtual void AddAll() = 0;
5966 
5967  virtual VkResult Defragment(
5968  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
5969  VkDeviceSize maxBytesToMove,
5970  uint32_t maxAllocationsToMove) = 0;
5971 
5972  virtual VkDeviceSize GetBytesMoved() const = 0;
5973  virtual uint32_t GetAllocationsMoved() const = 0;
5974 
5975 protected:
5976  VmaAllocator const m_hAllocator;
5977  VmaBlockVector* const m_pBlockVector;
5978  const uint32_t m_CurrentFrameIndex;
5979 
5980  struct AllocationInfo
5981  {
5982  VmaAllocation m_hAllocation;
5983  VkBool32* m_pChanged;
5984 
5985  AllocationInfo() :
5986  m_hAllocation(VK_NULL_HANDLE),
5987  m_pChanged(VMA_NULL)
5988  {
5989  }
5990  AllocationInfo(VmaAllocation hAlloc, VkBool32* pChanged) :
5991  m_hAllocation(hAlloc),
5992  m_pChanged(pChanged)
5993  {
5994  }
5995  };
5996 };
5997 
5998 class VmaDefragmentationAlgorithm_Generic : public VmaDefragmentationAlgorithm
5999 {
6000  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Generic)
6001 public:
6002  VmaDefragmentationAlgorithm_Generic(
6003  VmaAllocator hAllocator,
6004  VmaBlockVector* pBlockVector,
6005  uint32_t currentFrameIndex,
6006  bool overlappingMoveSupported);
6007  virtual ~VmaDefragmentationAlgorithm_Generic();
6008 
6009  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
6010  virtual void AddAll() { m_AllAllocations = true; }
6011 
6012  virtual VkResult Defragment(
6013  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6014  VkDeviceSize maxBytesToMove,
6015  uint32_t maxAllocationsToMove);
6016 
6017  virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
6018  virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
6019 
6020 private:
6021  uint32_t m_AllocationCount;
6022  bool m_AllAllocations;
6023 
6024  VkDeviceSize m_BytesMoved;
6025  uint32_t m_AllocationsMoved;
6026 
6027  struct AllocationInfoSizeGreater
6028  {
6029  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
6030  {
6031  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
6032  }
6033  };
6034 
6035  struct AllocationInfoOffsetGreater
6036  {
6037  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
6038  {
6039  return lhs.m_hAllocation->GetOffset() > rhs.m_hAllocation->GetOffset();
6040  }
6041  };
6042 
6043  struct BlockInfo
6044  {
6045  size_t m_OriginalBlockIndex;
6046  VmaDeviceMemoryBlock* m_pBlock;
6047  bool m_HasNonMovableAllocations;
6048  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
6049 
6050  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
6051  m_OriginalBlockIndex(SIZE_MAX),
6052  m_pBlock(VMA_NULL),
6053  m_HasNonMovableAllocations(true),
6054  m_Allocations(pAllocationCallbacks)
6055  {
6056  }
6057 
6058  void CalcHasNonMovableAllocations()
6059  {
6060  const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
6061  const size_t defragmentAllocCount = m_Allocations.size();
6062  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
6063  }
6064 
6065  void SortAllocationsBySizeDescending()
6066  {
6067  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
6068  }
6069 
6070  void SortAllocationsByOffsetDescending()
6071  {
6072  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoOffsetGreater());
6073  }
6074  };
6075 
6076  struct BlockPointerLess
6077  {
6078  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
6079  {
6080  return pLhsBlockInfo->m_pBlock < pRhsBlock;
6081  }
6082  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
6083  {
6084  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
6085  }
6086  };
6087 
6088  // 1. Blocks with some non-movable allocations go first.
6089  // 2. Blocks with smaller sumFreeSize go first.
6090  struct BlockInfoCompareMoveDestination
6091  {
6092  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
6093  {
6094  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
6095  {
6096  return true;
6097  }
6098  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
6099  {
6100  return false;
6101  }
6102  if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
6103  {
6104  return true;
6105  }
6106  return false;
6107  }
6108  };
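 // Example of the resulting order: given block A (has non-movable allocations,
 // 1 MiB free), block B (all movable, 512 KiB free) and block C (all movable,
 // 2 MiB free), destinations are tried as A, B, C - data is packed first into
 // blocks that cannot be emptied anyway, then into the fullest remaining ones,
 // so the emptiest blocks keep the best chance of being freed entirely.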
6109 
6110  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
6111  BlockInfoVector m_Blocks;
6112 
6113  VkResult DefragmentRound(
6114  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6115  VkDeviceSize maxBytesToMove,
6116  uint32_t maxAllocationsToMove);
6117 
6118  size_t CalcBlocksWithNonMovableCount() const;
6119 
6120  static bool MoveMakesSense(
6121  size_t dstBlockIndex, VkDeviceSize dstOffset,
6122  size_t srcBlockIndex, VkDeviceSize srcOffset);
6123 };
6124 
6125 class VmaDefragmentationAlgorithm_Fast : public VmaDefragmentationAlgorithm
6126 {
6127  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Fast)
6128 public:
6129  VmaDefragmentationAlgorithm_Fast(
6130  VmaAllocator hAllocator,
6131  VmaBlockVector* pBlockVector,
6132  uint32_t currentFrameIndex,
6133  bool overlappingMoveSupported);
6134  virtual ~VmaDefragmentationAlgorithm_Fast();
6135 
6136  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) { ++m_AllocationCount; }
6137  virtual void AddAll() { m_AllAllocations = true; }
6138 
6139  virtual VkResult Defragment(
6140  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6141  VkDeviceSize maxBytesToMove,
6142  uint32_t maxAllocationsToMove);
6143 
6144  virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
6145  virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
6146 
6147 private:
6148  struct BlockInfo
6149  {
6150  size_t origBlockIndex;
6151  };
6152 
6153  class FreeSpaceDatabase
6154  {
6155  public:
6156  FreeSpaceDatabase()
6157  {
6158  FreeSpace s = {};
6159  s.blockInfoIndex = SIZE_MAX;
6160  for(size_t i = 0; i < MAX_COUNT; ++i)
6161  {
6162  m_FreeSpaces[i] = s;
6163  }
6164  }
6165 
6166  void Register(size_t blockInfoIndex, VkDeviceSize offset, VkDeviceSize size)
6167  {
6168  if(size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
6169  {
6170  return;
6171  }
6172 
6173  // Find first invalid or the smallest structure.
6174  size_t bestIndex = SIZE_MAX;
6175  for(size_t i = 0; i < MAX_COUNT; ++i)
6176  {
6177  // Empty structure.
6178  if(m_FreeSpaces[i].blockInfoIndex == SIZE_MAX)
6179  {
6180  bestIndex = i;
6181  break;
6182  }
6183  if(m_FreeSpaces[i].size < size &&
6184  (bestIndex == SIZE_MAX || m_FreeSpaces[bestIndex].size > m_FreeSpaces[i].size))
6185  {
6186  bestIndex = i;
6187  }
6188  }
6189 
6190  if(bestIndex != SIZE_MAX)
6191  {
6192  m_FreeSpaces[bestIndex].blockInfoIndex = blockInfoIndex;
6193  m_FreeSpaces[bestIndex].offset = offset;
6194  m_FreeSpaces[bestIndex].size = size;
6195  }
6196  }
6197 
6198  bool Fetch(VkDeviceSize alignment, VkDeviceSize size,
6199  size_t& outBlockInfoIndex, VkDeviceSize& outDstOffset)
6200  {
6201  size_t bestIndex = SIZE_MAX;
6202  VkDeviceSize bestFreeSpaceAfter = 0;
6203  for(size_t i = 0; i < MAX_COUNT; ++i)
6204  {
6205  // Structure is valid.
6206  if(m_FreeSpaces[i].blockInfoIndex != SIZE_MAX)
6207  {
6208  const VkDeviceSize dstOffset = VmaAlignUp(m_FreeSpaces[i].offset, alignment);
6209  // Allocation fits into this structure.
6210  if(dstOffset + size <= m_FreeSpaces[i].offset + m_FreeSpaces[i].size)
6211  {
6212  const VkDeviceSize freeSpaceAfter = (m_FreeSpaces[i].offset + m_FreeSpaces[i].size) -
6213  (dstOffset + size);
6214  if(bestIndex == SIZE_MAX || freeSpaceAfter > bestFreeSpaceAfter)
6215  {
6216  bestIndex = i;
6217  bestFreeSpaceAfter = freeSpaceAfter;
6218  }
6219  }
6220  }
6221  }
6222 
6223  if(bestIndex != SIZE_MAX)
6224  {
6225  outBlockInfoIndex = m_FreeSpaces[bestIndex].blockInfoIndex;
6226  outDstOffset = VmaAlignUp(m_FreeSpaces[bestIndex].offset, alignment);
6227 
6228  if(bestFreeSpaceAfter >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
6229  {
6230  // Leave this structure for remaining empty space.
6231  const VkDeviceSize alignmentPlusSize = (outDstOffset - m_FreeSpaces[bestIndex].offset) + size;
6232  m_FreeSpaces[bestIndex].offset += alignmentPlusSize;
6233  m_FreeSpaces[bestIndex].size -= alignmentPlusSize;
6234  }
6235  else
6236  {
6237  // This structure becomes invalid.
6238  m_FreeSpaces[bestIndex].blockInfoIndex = SIZE_MAX;
6239  }
6240 
6241  return true;
6242  }
6243 
6244  return false;
6245  }
6246 
6247  private:
6248  static const size_t MAX_COUNT = 4;
6249 
6250  struct FreeSpace
6251  {
6252  size_t blockInfoIndex; // SIZE_MAX means this structure is invalid.
6253  VkDeviceSize offset;
6254  VkDeviceSize size;
6255  } m_FreeSpaces[MAX_COUNT];
6256  };
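 // Usage sketch (illustrative): the fast algorithm can register ranges it has
 // vacated and fetch them back as move destinations, e.g.
 //     FreeSpaceDatabase db;
 //     db.Register(blockInfoIndex, offset, size); // remember a freed range
 //     size_t dstBlock; VkDeviceSize dstOffset;
 //     if(db.Fetch(alignment, allocSize, dstBlock, dstOffset))
 //     {
 //         // Move the allocation to (dstBlock, dstOffset).
 //     }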
6257 
6258  const bool m_OverlappingMoveSupported;
6259 
6260  uint32_t m_AllocationCount;
6261  bool m_AllAllocations;
6262 
6263  VkDeviceSize m_BytesMoved;
6264  uint32_t m_AllocationsMoved;
6265 
6266  VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> > m_BlockInfos;
6267 
6268  void PreprocessMetadata();
6269  void PostprocessMetadata();
6270  void InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc);
6271 };
6272 
6273 struct VmaBlockDefragmentationContext
6274 {
6275 private:
6276  VMA_CLASS_NO_COPY(VmaBlockDefragmentationContext)
6277 public:
6278  enum BLOCK_FLAG
6279  {
6280  BLOCK_FLAG_USED = 0x00000001,
6281  };
6282  uint32_t flags;
6283  VkBuffer hBuffer;
6284 
6285  VmaBlockDefragmentationContext() :
6286  flags(0),
6287  hBuffer(VK_NULL_HANDLE)
6288  {
6289  }
6290 };
6291 
6292 class VmaBlockVectorDefragmentationContext
6293 {
6294  VMA_CLASS_NO_COPY(VmaBlockVectorDefragmentationContext)
6295 public:
6296  VkResult res;
6297  bool mutexLocked;
6298  VmaVector< VmaBlockDefragmentationContext, VmaStlAllocator<VmaBlockDefragmentationContext> > blockContexts;
6299 
6300  VmaBlockVectorDefragmentationContext(
6301  VmaAllocator hAllocator,
6302  VmaPool hCustomPool, // Optional.
6303  VmaBlockVector* pBlockVector,
6304  uint32_t currFrameIndex,
6305  uint32_t flags);
6306  ~VmaBlockVectorDefragmentationContext();
6307 
6308  VmaPool GetCustomPool() const { return m_hCustomPool; }
6309  VmaBlockVector* GetBlockVector() const { return m_pBlockVector; }
6310  VmaDefragmentationAlgorithm* GetAlgorithm() const { return m_pAlgorithm; }
6311 
6312  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
6313  void AddAll() { m_AllAllocations = true; }
6314 
6315  void Begin(bool overlappingMoveSupported);
6316 
6317 private:
6318  const VmaAllocator m_hAllocator;
6319  // Null if not from custom pool.
6320  const VmaPool m_hCustomPool;
6321  // Redundant, kept for convenience so it doesn't have to be fetched from m_hCustomPool->m_BlockVector or m_hAllocator->m_pBlockVectors.
6322  VmaBlockVector* const m_pBlockVector;
6323  const uint32_t m_CurrFrameIndex;
6324  const uint32_t m_AlgorithmFlags;
6325  // Owner of this object.
6326  VmaDefragmentationAlgorithm* m_pAlgorithm;
6327 
6328  struct AllocInfo
6329  {
6330  VmaAllocation hAlloc;
6331  VkBool32* pChanged;
6332  };
6333  // Used between constructor and Begin.
6334  VmaVector< AllocInfo, VmaStlAllocator<AllocInfo> > m_Allocations;
6335  bool m_AllAllocations;
6336 };
6337 
6338 struct VmaDefragmentationContext_T
6339 {
6340 private:
6341  VMA_CLASS_NO_COPY(VmaDefragmentationContext_T)
6342 public:
6343  VmaDefragmentationContext_T(
6344  VmaAllocator hAllocator,
6345  uint32_t currFrameIndex,
6346  uint32_t flags,
6347  VmaDefragmentationStats* pStats);
6348  ~VmaDefragmentationContext_T();
6349 
6350  void AddPools(uint32_t poolCount, VmaPool* pPools);
6351  void AddAllocations(
6352  uint32_t allocationCount,
6353  VmaAllocation* pAllocations,
6354  VkBool32* pAllocationsChanged);
6355 
6356  /*
6357  Returns:
6358  - `VK_SUCCESS` if succeeded and object can be destroyed immediately.
6359  - `VK_NOT_READY` if succeeded but the object must remain alive until vmaDefragmentationEnd().
6360  - A negative value if an error occurred; the object can be destroyed immediately.
6361  */
6362  VkResult Defragment(
6363  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
6364  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
6365  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats);
6366 
6367 private:
6368  const VmaAllocator m_hAllocator;
6369  const uint32_t m_CurrFrameIndex;
6370  const uint32_t m_Flags;
6371  VmaDefragmentationStats* const m_pStats;
6372  // Owner of these objects.
6373  VmaBlockVectorDefragmentationContext* m_DefaultPoolContexts[VK_MAX_MEMORY_TYPES];
6374  // Owner of these objects.
6375  VmaVector< VmaBlockVectorDefragmentationContext*, VmaStlAllocator<VmaBlockVectorDefragmentationContext*> > m_CustomPoolContexts;
6376 };
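// Application-side usage sketch (illustrative; `allocator`, `allocs`,
// `allocCount` and the command-buffer handling are assumed):
//
//     VmaDefragmentationInfo2 info = {};
//     info.allocationCount = allocCount;
//     info.pAllocations = allocs;
//     VmaDefragmentationStats stats = {};
//     VmaDefragmentationContext ctx;
//     VkResult res = vmaDefragmentationBegin(allocator, &info, &stats, &ctx);
//     // VK_SUCCESS: defragmentation finished on the CPU.
//     // VK_NOT_READY: execute the recorded command buffer and wait first.
//     vmaDefragmentationEnd(allocator, ctx);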
6377 
6378 #if VMA_RECORDING_ENABLED
6379 
6380 class VmaRecorder
6381 {
6382 public:
6383  VmaRecorder();
6384  VkResult Init(const VmaRecordSettings& settings, bool useMutex);
6385  void WriteConfiguration(
6386  const VkPhysicalDeviceProperties& devProps,
6387  const VkPhysicalDeviceMemoryProperties& memProps,
6388  bool dedicatedAllocationExtensionEnabled);
6389  ~VmaRecorder();
6390 
6391  void RecordCreateAllocator(uint32_t frameIndex);
6392  void RecordDestroyAllocator(uint32_t frameIndex);
6393  void RecordCreatePool(uint32_t frameIndex,
6394  const VmaPoolCreateInfo& createInfo,
6395  VmaPool pool);
6396  void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
6397  void RecordAllocateMemory(uint32_t frameIndex,
6398  const VkMemoryRequirements& vkMemReq,
6399  const VmaAllocationCreateInfo& createInfo,
6400  VmaAllocation allocation);
6401  void RecordAllocateMemoryPages(uint32_t frameIndex,
6402  const VkMemoryRequirements& vkMemReq,
6403  const VmaAllocationCreateInfo& createInfo,
6404  uint64_t allocationCount,
6405  const VmaAllocation* pAllocations);
6406  void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
6407  const VkMemoryRequirements& vkMemReq,
6408  bool requiresDedicatedAllocation,
6409  bool prefersDedicatedAllocation,
6410  const VmaAllocationCreateInfo& createInfo,
6411  VmaAllocation allocation);
6412  void RecordAllocateMemoryForImage(uint32_t frameIndex,
6413  const VkMemoryRequirements& vkMemReq,
6414  bool requiresDedicatedAllocation,
6415  bool prefersDedicatedAllocation,
6416  const VmaAllocationCreateInfo& createInfo,
6417  VmaAllocation allocation);
6418  void RecordFreeMemory(uint32_t frameIndex,
6419  VmaAllocation allocation);
6420  void RecordFreeMemoryPages(uint32_t frameIndex,
6421  uint64_t allocationCount,
6422  const VmaAllocation* pAllocations);
6423  void RecordResizeAllocation(
6424  uint32_t frameIndex,
6425  VmaAllocation allocation,
6426  VkDeviceSize newSize);
6427  void RecordSetAllocationUserData(uint32_t frameIndex,
6428  VmaAllocation allocation,
6429  const void* pUserData);
6430  void RecordCreateLostAllocation(uint32_t frameIndex,
6431  VmaAllocation allocation);
6432  void RecordMapMemory(uint32_t frameIndex,
6433  VmaAllocation allocation);
6434  void RecordUnmapMemory(uint32_t frameIndex,
6435  VmaAllocation allocation);
6436  void RecordFlushAllocation(uint32_t frameIndex,
6437  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
6438  void RecordInvalidateAllocation(uint32_t frameIndex,
6439  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
6440  void RecordCreateBuffer(uint32_t frameIndex,
6441  const VkBufferCreateInfo& bufCreateInfo,
6442  const VmaAllocationCreateInfo& allocCreateInfo,
6443  VmaAllocation allocation);
6444  void RecordCreateImage(uint32_t frameIndex,
6445  const VkImageCreateInfo& imageCreateInfo,
6446  const VmaAllocationCreateInfo& allocCreateInfo,
6447  VmaAllocation allocation);
6448  void RecordDestroyBuffer(uint32_t frameIndex,
6449  VmaAllocation allocation);
6450  void RecordDestroyImage(uint32_t frameIndex,
6451  VmaAllocation allocation);
6452  void RecordTouchAllocation(uint32_t frameIndex,
6453  VmaAllocation allocation);
6454  void RecordGetAllocationInfo(uint32_t frameIndex,
6455  VmaAllocation allocation);
6456  void RecordMakePoolAllocationsLost(uint32_t frameIndex,
6457  VmaPool pool);
6458  void RecordDefragmentationBegin(uint32_t frameIndex,
6459  const VmaDefragmentationInfo2& info,
6460  VmaDefragmentationContext ctx);
6461  void RecordDefragmentationEnd(uint32_t frameIndex,
6462  VmaDefragmentationContext ctx);
6463 
6464 private:
6465  struct CallParams
6466  {
6467  uint32_t threadId;
6468  double time;
6469  };
6470 
6471  class UserDataString
6472  {
6473  public:
6474  UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
6475  const char* GetString() const { return m_Str; }
6476 
6477  private:
6478  char m_PtrStr[17];
6479  const char* m_Str;
6480  };
6481 
6482  bool m_UseMutex;
6483  VmaRecordFlags m_Flags;
6484  FILE* m_File;
6485  VMA_MUTEX m_FileMutex;
6486  int64_t m_Freq;
6487  int64_t m_StartCounter;
6488 
6489  void GetBasicParams(CallParams& outParams);
6490 
6491  // T must be a pointer type, e.g. VmaAllocation, VmaPool.
6492  template<typename T>
6493  void PrintPointerList(uint64_t count, const T* pItems)
6494  {
6495  if(count)
6496  {
6497  fprintf(m_File, "%p", pItems[0]);
6498  for(uint64_t i = 1; i < count; ++i)
6499  {
6500  fprintf(m_File, " %p", pItems[i]);
6501  }
6502  }
6503  }
6504 
6505  void PrintPointerList(uint64_t count, const VmaAllocation* pItems);
6506  void Flush();
6507 };
6508 
6509 #endif // #if VMA_RECORDING_ENABLED
6510 
6511 // Main allocator object.
6512 struct VmaAllocator_T
6513 {
6514  VMA_CLASS_NO_COPY(VmaAllocator_T)
6515 public:
6516  bool m_UseMutex;
6517  bool m_UseKhrDedicatedAllocation;
6518  VkDevice m_hDevice;
6519  bool m_AllocationCallbacksSpecified;
6520  VkAllocationCallbacks m_AllocationCallbacks;
6521  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
6522 
6523  // Number of bytes free out of limit, or VK_WHOLE_SIZE if no limit for that heap.
6524  VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
6525  VMA_MUTEX m_HeapSizeLimitMutex;
6526 
6527  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
6528  VkPhysicalDeviceMemoryProperties m_MemProps;
6529 
6530  // Default pools.
6531  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
6532 
6533  // Each vector is sorted by memory (handle value).
6534  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
6535  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
6536  VMA_RW_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
6537 
6538  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
6539  VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
6540  ~VmaAllocator_T();
6541 
6542  const VkAllocationCallbacks* GetAllocationCallbacks() const
6543  {
6544  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
6545  }
6546  const VmaVulkanFunctions& GetVulkanFunctions() const
6547  {
6548  return m_VulkanFunctions;
6549  }
6550 
6551  VkDeviceSize GetBufferImageGranularity() const
6552  {
6553  return VMA_MAX(
6554  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
6555  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
6556  }
6557 
6558  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
6559  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
6560 
6561  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
6562  {
6563  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
6564  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
6565  }
6566  // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
6567  bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
6568  {
6569  return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
6570  VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
6571  }
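 // Worked example of the mask test above (illustrative): a type with flags
 // HOST_VISIBLE | HOST_CACHED masks down to exactly HOST_VISIBLE, so it is
 // reported non-coherent; a HOST_VISIBLE | HOST_COHERENT type keeps both bits
 // after masking, compares unequal, and is therefore coherent.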
6572  // Minimum alignment for all allocations in the specified memory type.
6573  VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
6574  {
6575  return IsMemoryTypeNonCoherent(memTypeIndex) ?
6576  VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
6577  (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
6578  }
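 // Hedged numeric example: on a device reporting nonCoherentAtomSize = 64, a
 // non-coherent host-visible type yields a minimum alignment of 64 (assuming
 // VMA_DEBUG_ALIGNMENT <= 64), so mapped-range flush/invalidate offsets can be
 // rounded to atom boundaries safely; 64 is only an assumed sample value.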
6579 
6580  bool IsIntegratedGpu() const
6581  {
6582  return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
6583  }
6584 
6585 #if VMA_RECORDING_ENABLED
6586  VmaRecorder* GetRecorder() const { return m_pRecorder; }
6587 #endif
6588 
6589  void GetBufferMemoryRequirements(
6590  VkBuffer hBuffer,
6591  VkMemoryRequirements& memReq,
6592  bool& requiresDedicatedAllocation,
6593  bool& prefersDedicatedAllocation) const;
6594  void GetImageMemoryRequirements(
6595  VkImage hImage,
6596  VkMemoryRequirements& memReq,
6597  bool& requiresDedicatedAllocation,
6598  bool& prefersDedicatedAllocation) const;
6599 
6600  // Main allocation function.
6601  VkResult AllocateMemory(
6602  const VkMemoryRequirements& vkMemReq,
6603  bool requiresDedicatedAllocation,
6604  bool prefersDedicatedAllocation,
6605  VkBuffer dedicatedBuffer,
6606  VkImage dedicatedImage,
6607  const VmaAllocationCreateInfo& createInfo,
6608  VmaSuballocationType suballocType,
6609  size_t allocationCount,
6610  VmaAllocation* pAllocations);
6611 
6612  // Main deallocation function.
6613  void FreeMemory(
6614  size_t allocationCount,
6615  const VmaAllocation* pAllocations);
6616 
6617  VkResult ResizeAllocation(
6618  const VmaAllocation alloc,
6619  VkDeviceSize newSize);
6620 
6621  void CalculateStats(VmaStats* pStats);
6622 
6623 #if VMA_STATS_STRING_ENABLED
6624  void PrintDetailedMap(class VmaJsonWriter& json);
6625 #endif
6626 
6627  VkResult DefragmentationBegin(
6628  const VmaDefragmentationInfo2& info,
6629  VmaDefragmentationStats* pStats,
6630  VmaDefragmentationContext* pContext);
6631  VkResult DefragmentationEnd(
6632  VmaDefragmentationContext context);
6633 
6634  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
6635  bool TouchAllocation(VmaAllocation hAllocation);
6636 
6637  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
6638  void DestroyPool(VmaPool pool);
6639  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
6640 
6641  void SetCurrentFrameIndex(uint32_t frameIndex);
6642  uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }
6643 
6644  void MakePoolAllocationsLost(
6645  VmaPool hPool,
6646  size_t* pLostAllocationCount);
6647  VkResult CheckPoolCorruption(VmaPool hPool);
6648  VkResult CheckCorruption(uint32_t memoryTypeBits);
6649 
6650  void CreateLostAllocation(VmaAllocation* pAllocation);
6651 
6652  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
6653  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
6654 
6655  VkResult Map(VmaAllocation hAllocation, void** ppData);
6656  void Unmap(VmaAllocation hAllocation);
6657 
6658  VkResult BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer);
6659  VkResult BindImageMemory(VmaAllocation hAllocation, VkImage hImage);
6660 
6661  void FlushOrInvalidateAllocation(
6662  VmaAllocation hAllocation,
6663  VkDeviceSize offset, VkDeviceSize size,
6664  VMA_CACHE_OPERATION op);
6665 
6666  void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);
6667 
6668 private:
6669  VkDeviceSize m_PreferredLargeHeapBlockSize;
6670 
6671  VkPhysicalDevice m_PhysicalDevice;
6672  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
6673 
6674  VMA_RW_MUTEX m_PoolsMutex;
6675  // Protected by m_PoolsMutex. Sorted by pointer value.
6676  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
6677  uint32_t m_NextPoolId;
6678 
6679  VmaVulkanFunctions m_VulkanFunctions;
6680 
6681 #if VMA_RECORDING_ENABLED
6682  VmaRecorder* m_pRecorder;
6683 #endif
6684 
6685  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
6686 
6687  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
6688 
6689  VkResult AllocateMemoryOfType(
6690  VkDeviceSize size,
6691  VkDeviceSize alignment,
6692  bool dedicatedAllocation,
6693  VkBuffer dedicatedBuffer,
6694  VkImage dedicatedImage,
6695  const VmaAllocationCreateInfo& createInfo,
6696  uint32_t memTypeIndex,
6697  VmaSuballocationType suballocType,
6698  size_t allocationCount,
6699  VmaAllocation* pAllocations);
6700 
6701  // Helper function only to be used inside AllocateDedicatedMemory.
6702  VkResult AllocateDedicatedMemoryPage(
6703  VkDeviceSize size,
6704  VmaSuballocationType suballocType,
6705  uint32_t memTypeIndex,
6706  const VkMemoryAllocateInfo& allocInfo,
6707  bool map,
6708  bool isUserDataString,
6709  void* pUserData,
6710  VmaAllocation* pAllocation);
6711 
6712  // Allocates and registers new VkDeviceMemory specifically for dedicated allocations.
6713  VkResult AllocateDedicatedMemory(
6714  VkDeviceSize size,
6715  VmaSuballocationType suballocType,
6716  uint32_t memTypeIndex,
6717  bool map,
6718  bool isUserDataString,
6719  void* pUserData,
6720  VkBuffer dedicatedBuffer,
6721  VkImage dedicatedImage,
6722  size_t allocationCount,
6723  VmaAllocation* pAllocations);
6724 
6725  // Frees the given allocation, which must have been created as dedicated memory, and unregisters it.
6726  void FreeDedicatedMemory(VmaAllocation allocation);
6727 };
6728 
6729 ////////////////////////////////////////////////////////////////////////////////
6730 // Memory allocation #2 after VmaAllocator_T definition
6731 
6732 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
6733 {
6734  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
6735 }
6736 
6737 static void VmaFree(VmaAllocator hAllocator, void* ptr)
6738 {
6739  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
6740 }
6741 
6742 template<typename T>
6743 static T* VmaAllocate(VmaAllocator hAllocator)
6744 {
6745  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
6746 }
6747 
6748 template<typename T>
6749 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
6750 {
6751  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
6752 }
6753 
6754 template<typename T>
6755 static void vma_delete(VmaAllocator hAllocator, T* ptr)
6756 {
6757  if(ptr != VMA_NULL)
6758  {
6759  ptr->~T();
6760  VmaFree(hAllocator, ptr);
6761  }
6762 }
6763 
6764 template<typename T>
6765 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
6766 {
6767  if(ptr != VMA_NULL)
6768  {
6769  for(size_t i = count; i--; )
6770  ptr[i].~T();
6771  VmaFree(hAllocator, ptr);
6772  }
6773 }
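// Hedged usage sketch: these helpers return raw, uninitialized storage, so
// construction pairs with placement new via the vma_new / vma_new_array macros
// defined earlier in this file, mirrored by vma_delete / vma_delete_array --
// as in FreeUserDataString() further below:
//
//   char* const str = vma_new_array(hAllocator, char, strLen + 1);
//   // ... use str ...
//   vma_delete_array(hAllocator, str, strLen + 1);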
6774 
6775 ////////////////////////////////////////////////////////////////////////////////
6776 // VmaStringBuilder
6777 
6778 #if VMA_STATS_STRING_ENABLED
6779 
6780 class VmaStringBuilder
6781 {
6782 public:
6783  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
6784  size_t GetLength() const { return m_Data.size(); }
6785  const char* GetData() const { return m_Data.data(); }
6786 
6787  void Add(char ch) { m_Data.push_back(ch); }
6788  void Add(const char* pStr);
6789  void AddNewLine() { Add('\n'); }
6790  void AddNumber(uint32_t num);
6791  void AddNumber(uint64_t num);
6792  void AddPointer(const void* ptr);
6793 
6794 private:
6795  VmaVector< char, VmaStlAllocator<char> > m_Data;
6796 };
6797 
6798 void VmaStringBuilder::Add(const char* pStr)
6799 {
6800  const size_t strLen = strlen(pStr);
6801  if(strLen > 0)
6802  {
6803  const size_t oldCount = m_Data.size();
6804  m_Data.resize(oldCount + strLen);
6805  memcpy(m_Data.data() + oldCount, pStr, strLen);
6806  }
6807 }
6808 
6809 void VmaStringBuilder::AddNumber(uint32_t num)
6810 {
6811  char buf[11];
6812  VmaUint32ToStr(buf, sizeof(buf), num);
6813  Add(buf);
6814 }
6815 
6816 void VmaStringBuilder::AddNumber(uint64_t num)
6817 {
6818  char buf[21];
6819  VmaUint64ToStr(buf, sizeof(buf), num);
6820  Add(buf);
6821 }
6822 
6823 void VmaStringBuilder::AddPointer(const void* ptr)
6824 {
6825  char buf[21];
6826  VmaPtrToStr(buf, sizeof(buf), ptr);
6827  Add(buf);
6828 }
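// Hedged usage sketch (not part of the original source): the builder appends
// into a growable char vector and never null-terminates, so callers append a
// trailing '\0' themselves before using GetData() as a C string:
//
//   VmaStringBuilder sb(hAllocator);
//   sb.Add("allocations: ");
//   sb.AddNumber(42u); // uint32_t overload; buf[11] = 10 digits + terminator
//   sb.Add('\0');
//   printf("%s", sb.GetData());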
6829 
6830 #endif // #if VMA_STATS_STRING_ENABLED
6831 
6832 ////////////////////////////////////////////////////////////////////////////////
6833 // VmaJsonWriter
6834 
6835 #if VMA_STATS_STRING_ENABLED
6836 
6837 class VmaJsonWriter
6838 {
6839  VMA_CLASS_NO_COPY(VmaJsonWriter)
6840 public:
6841  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
6842  ~VmaJsonWriter();
6843 
6844  void BeginObject(bool singleLine = false);
6845  void EndObject();
6846 
6847  void BeginArray(bool singleLine = false);
6848  void EndArray();
6849 
6850  void WriteString(const char* pStr);
6851  void BeginString(const char* pStr = VMA_NULL);
6852  void ContinueString(const char* pStr);
6853  void ContinueString(uint32_t n);
6854  void ContinueString(uint64_t n);
6855  void ContinueString_Pointer(const void* ptr);
6856  void EndString(const char* pStr = VMA_NULL);
6857 
6858  void WriteNumber(uint32_t n);
6859  void WriteNumber(uint64_t n);
6860  void WriteBool(bool b);
6861  void WriteNull();
6862 
6863 private:
6864  static const char* const INDENT;
6865 
6866  enum COLLECTION_TYPE
6867  {
6868  COLLECTION_TYPE_OBJECT,
6869  COLLECTION_TYPE_ARRAY,
6870  };
6871  struct StackItem
6872  {
6873  COLLECTION_TYPE type;
6874  uint32_t valueCount;
6875  bool singleLineMode;
6876  };
6877 
6878  VmaStringBuilder& m_SB;
6879  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
6880  bool m_InsideString;
6881 
6882  void BeginValue(bool isString);
6883  void WriteIndent(bool oneLess = false);
6884 };
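// Illustrative call pattern (assumed usage, consistent with VmaPrintStatInfo
// further below): inside an object, keys and values must strictly alternate,
// which BeginValue() enforces via the valueCount parity assert.
//
//   VmaJsonWriter json(pAllocationCallbacks, sb);
//   json.BeginObject();
//   json.WriteString("Size"); // key
//   json.WriteNumber(256u);   // value
//   json.EndObject();         // emits: { "Size": 256 } (plus indentation)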
6885 
6886 const char* const VmaJsonWriter::INDENT = " ";
6887 
6888 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
6889  m_SB(sb),
6890  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
6891  m_InsideString(false)
6892 {
6893 }
6894 
6895 VmaJsonWriter::~VmaJsonWriter()
6896 {
6897  VMA_ASSERT(!m_InsideString);
6898  VMA_ASSERT(m_Stack.empty());
6899 }
6900 
6901 void VmaJsonWriter::BeginObject(bool singleLine)
6902 {
6903  VMA_ASSERT(!m_InsideString);
6904 
6905  BeginValue(false);
6906  m_SB.Add('{');
6907 
6908  StackItem item;
6909  item.type = COLLECTION_TYPE_OBJECT;
6910  item.valueCount = 0;
6911  item.singleLineMode = singleLine;
6912  m_Stack.push_back(item);
6913 }
6914 
6915 void VmaJsonWriter::EndObject()
6916 {
6917  VMA_ASSERT(!m_InsideString);
6918 
6919  WriteIndent(true);
6920  m_SB.Add('}');
6921 
6922  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
6923  m_Stack.pop_back();
6924 }
6925 
6926 void VmaJsonWriter::BeginArray(bool singleLine)
6927 {
6928  VMA_ASSERT(!m_InsideString);
6929 
6930  BeginValue(false);
6931  m_SB.Add('[');
6932 
6933  StackItem item;
6934  item.type = COLLECTION_TYPE_ARRAY;
6935  item.valueCount = 0;
6936  item.singleLineMode = singleLine;
6937  m_Stack.push_back(item);
6938 }
6939 
6940 void VmaJsonWriter::EndArray()
6941 {
6942  VMA_ASSERT(!m_InsideString);
6943 
6944  WriteIndent(true);
6945  m_SB.Add(']');
6946 
6947  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
6948  m_Stack.pop_back();
6949 }
6950 
6951 void VmaJsonWriter::WriteString(const char* pStr)
6952 {
6953  BeginString(pStr);
6954  EndString();
6955 }
6956 
6957 void VmaJsonWriter::BeginString(const char* pStr)
6958 {
6959  VMA_ASSERT(!m_InsideString);
6960 
6961  BeginValue(true);
6962  m_SB.Add('"');
6963  m_InsideString = true;
6964  if(pStr != VMA_NULL && pStr[0] != '\0')
6965  {
6966  ContinueString(pStr);
6967  }
6968 }
6969 
6970 void VmaJsonWriter::ContinueString(const char* pStr)
6971 {
6972  VMA_ASSERT(m_InsideString);
6973 
6974  const size_t strLen = strlen(pStr);
6975  for(size_t i = 0; i < strLen; ++i)
6976  {
6977  char ch = pStr[i];
6978  if(ch == '\\')
6979  {
6980  m_SB.Add("\\\\");
6981  }
6982  else if(ch == '"')
6983  {
6984  m_SB.Add("\\\"");
6985  }
6986  else if(ch >= 32)
6987  {
6988  m_SB.Add(ch);
6989  }
6990  else switch(ch)
6991  {
6992  case '\b':
6993  m_SB.Add("\\b");
6994  break;
6995  case '\f':
6996  m_SB.Add("\\f");
6997  break;
6998  case '\n':
6999  m_SB.Add("\\n");
7000  break;
7001  case '\r':
7002  m_SB.Add("\\r");
7003  break;
7004  case '\t':
7005  m_SB.Add("\\t");
7006  break;
7007  default:
7008  VMA_ASSERT(0 && "Character not currently supported.");
7009  break;
7010  }
7011  }
7012 }
7013 
7014 void VmaJsonWriter::ContinueString(uint32_t n)
7015 {
7016  VMA_ASSERT(m_InsideString);
7017  m_SB.AddNumber(n);
7018 }
7019 
7020 void VmaJsonWriter::ContinueString(uint64_t n)
7021 {
7022  VMA_ASSERT(m_InsideString);
7023  m_SB.AddNumber(n);
7024 }
7025 
7026 void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
7027 {
7028  VMA_ASSERT(m_InsideString);
7029  m_SB.AddPointer(ptr);
7030 }
7031 
7032 void VmaJsonWriter::EndString(const char* pStr)
7033 {
7034  VMA_ASSERT(m_InsideString);
7035  if(pStr != VMA_NULL && pStr[0] != '\0')
7036  {
7037  ContinueString(pStr);
7038  }
7039  m_SB.Add('"');
7040  m_InsideString = false;
7041 }
7042 
7043 void VmaJsonWriter::WriteNumber(uint32_t n)
7044 {
7045  VMA_ASSERT(!m_InsideString);
7046  BeginValue(false);
7047  m_SB.AddNumber(n);
7048 }
7049 
7050 void VmaJsonWriter::WriteNumber(uint64_t n)
7051 {
7052  VMA_ASSERT(!m_InsideString);
7053  BeginValue(false);
7054  m_SB.AddNumber(n);
7055 }
7056 
7057 void VmaJsonWriter::WriteBool(bool b)
7058 {
7059  VMA_ASSERT(!m_InsideString);
7060  BeginValue(false);
7061  m_SB.Add(b ? "true" : "false");
7062 }
7063 
7064 void VmaJsonWriter::WriteNull()
7065 {
7066  VMA_ASSERT(!m_InsideString);
7067  BeginValue(false);
7068  m_SB.Add("null");
7069 }
7070 
7071 void VmaJsonWriter::BeginValue(bool isString)
7072 {
7073  if(!m_Stack.empty())
7074  {
7075  StackItem& currItem = m_Stack.back();
7076  if(currItem.type == COLLECTION_TYPE_OBJECT &&
7077  currItem.valueCount % 2 == 0)
7078  {
7079  VMA_ASSERT(isString);
7080  }
7081 
7082  if(currItem.type == COLLECTION_TYPE_OBJECT &&
7083  currItem.valueCount % 2 != 0)
7084  {
7085  m_SB.Add(": ");
7086  }
7087  else if(currItem.valueCount > 0)
7088  {
7089  m_SB.Add(", ");
7090  WriteIndent();
7091  }
7092  else
7093  {
7094  WriteIndent();
7095  }
7096  ++currItem.valueCount;
7097  }
7098 }
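// Sketch of the separator logic above (illustrative): in an object, an even
// valueCount means a key is due (hence the isString assert) and a non-first
// element is preceded by ", " plus indentation, while an odd valueCount means
// a value is due and is preceded by ": "; the very first element of any
// collection gets indentation only.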
7099 
7100 void VmaJsonWriter::WriteIndent(bool oneLess)
7101 {
7102  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
7103  {
7104  m_SB.AddNewLine();
7105 
7106  size_t count = m_Stack.size();
7107  if(count > 0 && oneLess)
7108  {
7109  --count;
7110  }
7111  for(size_t i = 0; i < count; ++i)
7112  {
7113  m_SB.Add(INDENT);
7114  }
7115  }
7116 }
7117 
7118 #endif // #if VMA_STATS_STRING_ENABLED
7119 
7120 ////////////////////////////////////////////////////////////////////////////////
7121 
7122 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
7123 {
7124  if(IsUserDataString())
7125  {
7126  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
7127 
7128  FreeUserDataString(hAllocator);
7129 
7130  if(pUserData != VMA_NULL)
7131  {
7132  const char* const newStrSrc = (char*)pUserData;
7133  const size_t newStrLen = strlen(newStrSrc);
7134  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
7135  memcpy(newStrDst, newStrSrc, newStrLen + 1);
7136  m_pUserData = newStrDst;
7137  }
7138  }
7139  else
7140  {
7141  m_pUserData = pUserData;
7142  }
7143 }
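// Illustrative contrast (not from the original source): for allocations made
// with VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT, the branch above
// deep-copies the string into internally owned memory, so the caller's buffer
// may go out of scope afterwards; otherwise m_pUserData keeps the caller's raw
// pointer verbatim and the caller retains ownership:
//
//   char local[] = "mesh #12";
//   vmaSetAllocationUserData(allocator, alloc, local); // safe only when copied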
7144 
7145 void VmaAllocation_T::ChangeBlockAllocation(
7146  VmaAllocator hAllocator,
7147  VmaDeviceMemoryBlock* block,
7148  VkDeviceSize offset)
7149 {
7150  VMA_ASSERT(block != VMA_NULL);
7151  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
7152 
7153  // Move mapping reference counter from old block to new block.
7154  if(block != m_BlockAllocation.m_Block)
7155  {
7156  uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
7157  if(IsPersistentMap())
7158  ++mapRefCount;
7159  m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
7160  block->Map(hAllocator, mapRefCount, VMA_NULL);
7161  }
7162 
7163  m_BlockAllocation.m_Block = block;
7164  m_BlockAllocation.m_Offset = offset;
7165 }
7166 
7167 void VmaAllocation_T::ChangeSize(VkDeviceSize newSize)
7168 {
7169  VMA_ASSERT(newSize > 0);
7170  m_Size = newSize;
7171 }
7172 
7173 void VmaAllocation_T::ChangeOffset(VkDeviceSize newOffset)
7174 {
7175  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
7176  m_BlockAllocation.m_Offset = newOffset;
7177 }
7178 
7179 VkDeviceSize VmaAllocation_T::GetOffset() const
7180 {
7181  switch(m_Type)
7182  {
7183  case ALLOCATION_TYPE_BLOCK:
7184  return m_BlockAllocation.m_Offset;
7185  case ALLOCATION_TYPE_DEDICATED:
7186  return 0;
7187  default:
7188  VMA_ASSERT(0);
7189  return 0;
7190  }
7191 }
7192 
7193 VkDeviceMemory VmaAllocation_T::GetMemory() const
7194 {
7195  switch(m_Type)
7196  {
7197  case ALLOCATION_TYPE_BLOCK:
7198  return m_BlockAllocation.m_Block->GetDeviceMemory();
7199  case ALLOCATION_TYPE_DEDICATED:
7200  return m_DedicatedAllocation.m_hMemory;
7201  default:
7202  VMA_ASSERT(0);
7203  return VK_NULL_HANDLE;
7204  }
7205 }
7206 
7207 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
7208 {
7209  switch(m_Type)
7210  {
7211  case ALLOCATION_TYPE_BLOCK:
7212  return m_BlockAllocation.m_Block->GetMemoryTypeIndex();
7213  case ALLOCATION_TYPE_DEDICATED:
7214  return m_DedicatedAllocation.m_MemoryTypeIndex;
7215  default:
7216  VMA_ASSERT(0);
7217  return UINT32_MAX;
7218  }
7219 }
7220 
7221 void* VmaAllocation_T::GetMappedData() const
7222 {
7223  switch(m_Type)
7224  {
7225  case ALLOCATION_TYPE_BLOCK:
7226  if(m_MapCount != 0)
7227  {
7228  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
7229  VMA_ASSERT(pBlockData != VMA_NULL);
7230  return (char*)pBlockData + m_BlockAllocation.m_Offset;
7231  }
7232  else
7233  {
7234  return VMA_NULL;
7235  }
7236  break;
7237  case ALLOCATION_TYPE_DEDICATED:
7238  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
7239  return m_DedicatedAllocation.m_pMappedData;
7240  default:
7241  VMA_ASSERT(0);
7242  return VMA_NULL;
7243  }
7244 }
7245 
7246 bool VmaAllocation_T::CanBecomeLost() const
7247 {
7248  switch(m_Type)
7249  {
7250  case ALLOCATION_TYPE_BLOCK:
7251  return m_BlockAllocation.m_CanBecomeLost;
7252  case ALLOCATION_TYPE_DEDICATED:
7253  return false;
7254  default:
7255  VMA_ASSERT(0);
7256  return false;
7257  }
7258 }
7259 
7260 VmaPool VmaAllocation_T::GetPool() const
7261 {
7262  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
7263  return m_BlockAllocation.m_hPool;
7264 }
7265 
7266 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
7267 {
7268  VMA_ASSERT(CanBecomeLost());
7269 
7270  /*
7271  Warning: This is a carefully designed algorithm.
7272  Do not modify unless you really know what you're doing :)
7273  */
7274  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
7275  for(;;)
7276  {
7277  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
7278  {
7279  VMA_ASSERT(0);
7280  return false;
7281  }
7282  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
7283  {
7284  return false;
7285  }
7286  else // Last use time earlier than current time.
7287  {
7288  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
7289  {
7290  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
7291  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
7292  return true;
7293  }
7294  }
7295  }
7296 }
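// A minimal sketch of the retry pattern above, assuming a std::atomic-style
// CompareExchangeLastUseFrameIndex (hypothetical names, for illustration only):
//
//   uint32_t expected = m_LastUseFrameIndex.load();
//   for(;;)
//   {
//       if(expected + frameInUseCount >= currentFrameIndex)
//           return false; // still potentially in use by the GPU
//       if(m_LastUseFrameIndex.compare_exchange_weak(expected, VMA_FRAME_INDEX_LOST))
//           return true;  // successfully marked LOST
//       // CAS failed: 'expected' was refreshed with the current value; retry.
//   }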
7297 
7298 #if VMA_STATS_STRING_ENABLED
7299 
7300 // Entries correspond to values of enum VmaSuballocationType.
7301 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
7302  "FREE",
7303  "UNKNOWN",
7304  "BUFFER",
7305  "IMAGE_UNKNOWN",
7306  "IMAGE_LINEAR",
7307  "IMAGE_OPTIMAL",
7308 };
7309 
7310 void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
7311 {
7312  json.WriteString("Type");
7313  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);
7314 
7315  json.WriteString("Size");
7316  json.WriteNumber(m_Size);
7317 
7318  if(m_pUserData != VMA_NULL)
7319  {
7320  json.WriteString("UserData");
7321  if(IsUserDataString())
7322  {
7323  json.WriteString((const char*)m_pUserData);
7324  }
7325  else
7326  {
7327  json.BeginString();
7328  json.ContinueString_Pointer(m_pUserData);
7329  json.EndString();
7330  }
7331  }
7332 
7333  json.WriteString("CreationFrameIndex");
7334  json.WriteNumber(m_CreationFrameIndex);
7335 
7336  json.WriteString("LastUseFrameIndex");
7337  json.WriteNumber(GetLastUseFrameIndex());
7338 
7339  if(m_BufferImageUsage != 0)
7340  {
7341  json.WriteString("Usage");
7342  json.WriteNumber(m_BufferImageUsage);
7343  }
7344 }
7345 
7346 #endif
7347 
7348 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
7349 {
7350  VMA_ASSERT(IsUserDataString());
7351  if(m_pUserData != VMA_NULL)
7352  {
7353  char* const oldStr = (char*)m_pUserData;
7354  const size_t oldStrLen = strlen(oldStr);
7355  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
7356  m_pUserData = VMA_NULL;
7357  }
7358 }
7359 
7360 void VmaAllocation_T::BlockAllocMap()
7361 {
7362  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
7363 
7364  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
7365  {
7366  ++m_MapCount;
7367  }
7368  else
7369  {
7370  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
7371  }
7372 }
7373 
7374 void VmaAllocation_T::BlockAllocUnmap()
7375 {
7376  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
7377 
7378  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
7379  {
7380  --m_MapCount;
7381  }
7382  else
7383  {
7384  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
7385  }
7386 }
7387 
7388 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
7389 {
7390  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
7391 
7392  if(m_MapCount != 0)
7393  {
7394  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
7395  {
7396  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
7397  *ppData = m_DedicatedAllocation.m_pMappedData;
7398  ++m_MapCount;
7399  return VK_SUCCESS;
7400  }
7401  else
7402  {
7403  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
7404  return VK_ERROR_MEMORY_MAP_FAILED;
7405  }
7406  }
7407  else
7408  {
7409  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
7410  hAllocator->m_hDevice,
7411  m_DedicatedAllocation.m_hMemory,
7412  0, // offset
7413  VK_WHOLE_SIZE,
7414  0, // flags
7415  ppData);
7416  if(result == VK_SUCCESS)
7417  {
7418  m_DedicatedAllocation.m_pMappedData = *ppData;
7419  m_MapCount = 1;
7420  }
7421  return result;
7422  }
7423 }
7424 
7425 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
7426 {
7427  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
7428 
7429  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
7430  {
7431  --m_MapCount;
7432  if(m_MapCount == 0)
7433  {
7434  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
7435  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
7436  hAllocator->m_hDevice,
7437  m_DedicatedAllocation.m_hMemory);
7438  }
7439  }
7440  else
7441  {
7442  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
7443  }
7444 }
7445 
7446 #if VMA_STATS_STRING_ENABLED
7447 
7448 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
7449 {
7450  json.BeginObject();
7451 
7452  json.WriteString("Blocks");
7453  json.WriteNumber(stat.blockCount);
7454 
7455  json.WriteString("Allocations");
7456  json.WriteNumber(stat.allocationCount);
7457 
7458  json.WriteString("UnusedRanges");
7459  json.WriteNumber(stat.unusedRangeCount);
7460 
7461  json.WriteString("UsedBytes");
7462  json.WriteNumber(stat.usedBytes);
7463 
7464  json.WriteString("UnusedBytes");
7465  json.WriteNumber(stat.unusedBytes);
7466 
7467  if(stat.allocationCount > 1)
7468  {
7469  json.WriteString("AllocationSize");
7470  json.BeginObject(true);
7471  json.WriteString("Min");
7472  json.WriteNumber(stat.allocationSizeMin);
7473  json.WriteString("Avg");
7474  json.WriteNumber(stat.allocationSizeAvg);
7475  json.WriteString("Max");
7476  json.WriteNumber(stat.allocationSizeMax);
7477  json.EndObject();
7478  }
7479 
7480  if(stat.unusedRangeCount > 1)
7481  {
7482  json.WriteString("UnusedRangeSize");
7483  json.BeginObject(true);
7484  json.WriteString("Min");
7485  json.WriteNumber(stat.unusedRangeSizeMin);
7486  json.WriteString("Avg");
7487  json.WriteNumber(stat.unusedRangeSizeAvg);
7488  json.WriteString("Max");
7489  json.WriteNumber(stat.unusedRangeSizeMax);
7490  json.EndObject();
7491  }
7492 
7493  json.EndObject();
7494 }
7495 
7496 #endif // #if VMA_STATS_STRING_ENABLED
7497 
7498 struct VmaSuballocationItemSizeLess
7499 {
7500  bool operator()(
7501  const VmaSuballocationList::iterator lhs,
7502  const VmaSuballocationList::iterator rhs) const
7503  {
7504  return lhs->size < rhs->size;
7505  }
7506  bool operator()(
7507  const VmaSuballocationList::iterator lhs,
7508  VkDeviceSize rhsSize) const
7509  {
7510  return lhs->size < rhsSize;
7511  }
7512 };
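// Hedged usage note: the second operator() overload enables heterogeneous
// comparison (iterator vs. plain VkDeviceSize), which is what lets
// CreateAllocationRequest below binary-search the size-sorted array directly
// against a requested size:
//
//   VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
//       m_FreeSuballocationsBySize.data(),
//       m_FreeSuballocationsBySize.data() + count,
//       requiredSize, // VkDeviceSize, compared against each item's size
//       VmaSuballocationItemSizeLess());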
7513 
7514 
7515 ////////////////////////////////////////////////////////////////////////////////
7516 // class VmaBlockMetadata
7517 
7518 VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
7519  m_Size(0),
7520  m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
7521 {
7522 }
7523 
7524 #if VMA_STATS_STRING_ENABLED
7525 
7526 void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
7527  VkDeviceSize unusedBytes,
7528  size_t allocationCount,
7529  size_t unusedRangeCount) const
7530 {
7531  json.BeginObject();
7532 
7533  json.WriteString("TotalBytes");
7534  json.WriteNumber(GetSize());
7535 
7536  json.WriteString("UnusedBytes");
7537  json.WriteNumber(unusedBytes);
7538 
7539  json.WriteString("Allocations");
7540  json.WriteNumber((uint64_t)allocationCount);
7541 
7542  json.WriteString("UnusedRanges");
7543  json.WriteNumber((uint64_t)unusedRangeCount);
7544 
7545  json.WriteString("Suballocations");
7546  json.BeginArray();
7547 }
7548 
7549 void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
7550  VkDeviceSize offset,
7551  VmaAllocation hAllocation) const
7552 {
7553  json.BeginObject(true);
7554 
7555  json.WriteString("Offset");
7556  json.WriteNumber(offset);
7557 
7558  hAllocation->PrintParameters(json);
7559 
7560  json.EndObject();
7561 }
7562 
7563 void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
7564  VkDeviceSize offset,
7565  VkDeviceSize size) const
7566 {
7567  json.BeginObject(true);
7568 
7569  json.WriteString("Offset");
7570  json.WriteNumber(offset);
7571 
7572  json.WriteString("Type");
7573  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);
7574 
7575  json.WriteString("Size");
7576  json.WriteNumber(size);
7577 
7578  json.EndObject();
7579 }
7580 
7581 void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
7582 {
7583  json.EndArray();
7584  json.EndObject();
7585 }
7586 
7587 #endif // #if VMA_STATS_STRING_ENABLED
7588 
7589 ////////////////////////////////////////////////////////////////////////////////
7590 // class VmaBlockMetadata_Generic
7591 
7592 VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
7593  VmaBlockMetadata(hAllocator),
7594  m_FreeCount(0),
7595  m_SumFreeSize(0),
7596  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
7597  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
7598 {
7599 }
7600 
7601 VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
7602 {
7603 }
7604 
7605 void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
7606 {
7607  VmaBlockMetadata::Init(size);
7608 
7609  m_FreeCount = 1;
7610  m_SumFreeSize = size;
7611 
7612  VmaSuballocation suballoc = {};
7613  suballoc.offset = 0;
7614  suballoc.size = size;
7615  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
7616  suballoc.hAllocation = VK_NULL_HANDLE;
7617 
7618  VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
7619  m_Suballocations.push_back(suballoc);
7620  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
7621  --suballocItem;
7622  m_FreeSuballocationsBySize.push_back(suballocItem);
7623 }
7624 
7625 bool VmaBlockMetadata_Generic::Validate() const
7626 {
7627  VMA_VALIDATE(!m_Suballocations.empty());
7628 
7629  // Expected offset of new suballocation as calculated from previous ones.
7630  VkDeviceSize calculatedOffset = 0;
7631  // Expected number of free suballocations as calculated from traversing their list.
7632  uint32_t calculatedFreeCount = 0;
7633  // Expected sum size of free suballocations as calculated from traversing their list.
7634  VkDeviceSize calculatedSumFreeSize = 0;
7635  // Expected number of free suballocations that should be registered in
7636  // m_FreeSuballocationsBySize calculated from traversing their list.
7637  size_t freeSuballocationsToRegister = 0;
7638  // True if previous visited suballocation was free.
7639  bool prevFree = false;
7640 
7641  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
7642  suballocItem != m_Suballocations.cend();
7643  ++suballocItem)
7644  {
7645  const VmaSuballocation& subAlloc = *suballocItem;
7646 
7647  // Actual offset of this suballocation doesn't match expected one.
7648  VMA_VALIDATE(subAlloc.offset == calculatedOffset);
7649 
7650  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
7651  // Two adjacent free suballocations are invalid. They should be merged.
7652  VMA_VALIDATE(!prevFree || !currFree);
7653 
7654  VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));
7655 
7656  if(currFree)
7657  {
7658  calculatedSumFreeSize += subAlloc.size;
7659  ++calculatedFreeCount;
7660  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
7661  {
7662  ++freeSuballocationsToRegister;
7663  }
7664 
7665  // Margin required between allocations - every free space must be at least that large.
7666  VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
7667  }
7668  else
7669  {
7670  VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
7671  VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);
7672 
7673  // Margin required between allocations - previous allocation must be free.
7674  VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
7675  }
7676 
7677  calculatedOffset += subAlloc.size;
7678  prevFree = currFree;
7679  }
7680 
7681  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
7682  // match expected one.
7683  VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);
7684 
7685  VkDeviceSize lastSize = 0;
7686  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
7687  {
7688  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
7689 
7690  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
7691  VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
7692  // They must be sorted by size ascending.
7693  VMA_VALIDATE(suballocItem->size >= lastSize);
7694 
7695  lastSize = suballocItem->size;
7696  }
7697 
7698  // Check if totals match calculated values.
7699  VMA_VALIDATE(ValidateFreeSuballocationList());
7700  VMA_VALIDATE(calculatedOffset == GetSize());
7701  VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
7702  VMA_VALIDATE(calculatedFreeCount == m_FreeCount);
7703 
7704  return true;
7705 }
7706 
7707 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
7708 {
7709  if(!m_FreeSuballocationsBySize.empty())
7710  {
7711  return m_FreeSuballocationsBySize.back()->size;
7712  }
7713  else
7714  {
7715  return 0;
7716  }
7717 }
7718 
7719 bool VmaBlockMetadata_Generic::IsEmpty() const
7720 {
7721  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
7722 }
7723 
7724 void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
7725 {
7726  outInfo.blockCount = 1;
7727 
7728  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
7729  outInfo.allocationCount = rangeCount - m_FreeCount;
7730  outInfo.unusedRangeCount = m_FreeCount;
7731 
7732  outInfo.unusedBytes = m_SumFreeSize;
7733  outInfo.usedBytes = GetSize() - outInfo.unusedBytes;
7734 
7735  outInfo.allocationSizeMin = UINT64_MAX;
7736  outInfo.allocationSizeMax = 0;
7737  outInfo.unusedRangeSizeMin = UINT64_MAX;
7738  outInfo.unusedRangeSizeMax = 0;
7739 
7740  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
7741  suballocItem != m_Suballocations.cend();
7742  ++suballocItem)
7743  {
7744  const VmaSuballocation& suballoc = *suballocItem;
7745  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
7746  {
7747  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
7748  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
7749  }
7750  else
7751  {
7752  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
7753  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
7754  }
7755  }
7756 }
7757 
7758 void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
7759 {
7760  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
7761 
7762  inoutStats.size += GetSize();
7763  inoutStats.unusedSize += m_SumFreeSize;
7764  inoutStats.allocationCount += rangeCount - m_FreeCount;
7765  inoutStats.unusedRangeCount += m_FreeCount;
7766  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
7767 }
7768 
7769 #if VMA_STATS_STRING_ENABLED
7770 
7771 void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
7772 {
7773  PrintDetailedMap_Begin(json,
7774  m_SumFreeSize, // unusedBytes
7775  m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
7776  m_FreeCount); // unusedRangeCount
7777 
7778  size_t i = 0;
7779  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
7780  suballocItem != m_Suballocations.cend();
7781  ++suballocItem, ++i)
7782  {
7783  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
7784  {
7785  PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
7786  }
7787  else
7788  {
7789  PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
7790  }
7791  }
7792 
7793  PrintDetailedMap_End(json);
7794 }
7795 
7796 #endif // #if VMA_STATS_STRING_ENABLED
7797 
7798 bool VmaBlockMetadata_Generic::CreateAllocationRequest(
7799  uint32_t currentFrameIndex,
7800  uint32_t frameInUseCount,
7801  VkDeviceSize bufferImageGranularity,
7802  VkDeviceSize allocSize,
7803  VkDeviceSize allocAlignment,
7804  bool upperAddress,
7805  VmaSuballocationType allocType,
7806  bool canMakeOtherLost,
7807  uint32_t strategy,
7808  VmaAllocationRequest* pAllocationRequest)
7809 {
7810  VMA_ASSERT(allocSize > 0);
7811  VMA_ASSERT(!upperAddress);
7812  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
7813  VMA_ASSERT(pAllocationRequest != VMA_NULL);
7814  VMA_HEAVY_ASSERT(Validate());
7815 
7816  // There is not enough total free space in this block to fulfill the request: Early return.
7817  if(canMakeOtherLost == false &&
7818  m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
7819  {
7820  return false;
7821  }
7822 
7823  // New algorithm, efficiently searching freeSuballocationsBySize.
7824  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
7825  if(freeSuballocCount > 0)
7826  {
7827  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
7828  {
7829  // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
7830  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
7831  m_FreeSuballocationsBySize.data(),
7832  m_FreeSuballocationsBySize.data() + freeSuballocCount,
7833  allocSize + 2 * VMA_DEBUG_MARGIN,
7834  VmaSuballocationItemSizeLess());
7835  size_t index = it - m_FreeSuballocationsBySize.data();
7836  for(; index < freeSuballocCount; ++index)
7837  {
7838  if(CheckAllocation(
7839  currentFrameIndex,
7840  frameInUseCount,
7841  bufferImageGranularity,
7842  allocSize,
7843  allocAlignment,
7844  allocType,
7845  m_FreeSuballocationsBySize[index],
7846  false, // canMakeOtherLost
7847  &pAllocationRequest->offset,
7848  &pAllocationRequest->itemsToMakeLostCount,
7849  &pAllocationRequest->sumFreeSize,
7850  &pAllocationRequest->sumItemSize))
7851  {
7852  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
7853  return true;
7854  }
7855  }
7856  }
7857  else if(strategy == VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET)
7858  {
7859  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
7860  it != m_Suballocations.end();
7861  ++it)
7862  {
7863  if(it->type == VMA_SUBALLOCATION_TYPE_FREE && CheckAllocation(
7864  currentFrameIndex,
7865  frameInUseCount,
7866  bufferImageGranularity,
7867  allocSize,
7868  allocAlignment,
7869  allocType,
7870  it,
7871  false, // canMakeOtherLost
7872  &pAllocationRequest->offset,
7873  &pAllocationRequest->itemsToMakeLostCount,
7874  &pAllocationRequest->sumFreeSize,
7875  &pAllocationRequest->sumItemSize))
7876  {
7877  pAllocationRequest->item = it;
7878  return true;
7879  }
7880  }
7881  }
7882  else // WORST_FIT, FIRST_FIT
7883  {
7884  // Search starting from biggest suballocations.
7885  for(size_t index = freeSuballocCount; index--; )
7886  {
7887  if(CheckAllocation(
7888  currentFrameIndex,
7889  frameInUseCount,
7890  bufferImageGranularity,
7891  allocSize,
7892  allocAlignment,
7893  allocType,
7894  m_FreeSuballocationsBySize[index],
7895  false, // canMakeOtherLost
7896  &pAllocationRequest->offset,
7897  &pAllocationRequest->itemsToMakeLostCount,
7898  &pAllocationRequest->sumFreeSize,
7899  &pAllocationRequest->sumItemSize))
7900  {
7901  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
7902  return true;
7903  }
7904  }
7905  }
7906  }
7907 
7908  if(canMakeOtherLost)
7909  {
7910  // Brute-force algorithm. TODO: Come up with something better.
7911 
7912  pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
7913  pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;
7914 
7915  VmaAllocationRequest tmpAllocRequest = {};
7916  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
7917  suballocIt != m_Suballocations.end();
7918  ++suballocIt)
7919  {
7920  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
7921  suballocIt->hAllocation->CanBecomeLost())
7922  {
7923  if(CheckAllocation(
7924  currentFrameIndex,
7925  frameInUseCount,
7926  bufferImageGranularity,
7927  allocSize,
7928  allocAlignment,
7929  allocType,
7930  suballocIt,
7931  canMakeOtherLost,
7932  &tmpAllocRequest.offset,
7933  &tmpAllocRequest.itemsToMakeLostCount,
7934  &tmpAllocRequest.sumFreeSize,
7935  &tmpAllocRequest.sumItemSize))
7936  {
7937  tmpAllocRequest.item = suballocIt;
7938 
7939  if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost() ||
7940  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
7941  {
7942  *pAllocationRequest = tmpAllocRequest;
7943  }
7944  }
7945  }
7946  }
7947 
7948  if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
7949  {
7950  return true;
7951  }
7952  }
7953 
7954  return false;
7955 }
7956 
7957 bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
7958  uint32_t currentFrameIndex,
7959  uint32_t frameInUseCount,
7960  VmaAllocationRequest* pAllocationRequest)
7961 {
7962  while(pAllocationRequest->itemsToMakeLostCount > 0)
7963  {
7964  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
7965  {
7966  ++pAllocationRequest->item;
7967  }
7968  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
7969  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
7970  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
7971  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
7972  {
7973  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
7974  --pAllocationRequest->itemsToMakeLostCount;
7975  }
7976  else
7977  {
7978  return false;
7979  }
7980  }
7981 
7982  VMA_HEAVY_ASSERT(Validate());
7983  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
7984  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
7985 
7986  return true;
7987 }
7988 
7989 uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
7990 {
7991  uint32_t lostAllocationCount = 0;
7992  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
7993  it != m_Suballocations.end();
7994  ++it)
7995  {
7996  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
7997  it->hAllocation->CanBecomeLost() &&
7998  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
7999  {
8000  it = FreeSuballocation(it);
8001  ++lostAllocationCount;
8002  }
8003  }
8004  return lostAllocationCount;
8005 }
8006 
8007 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
8008 {
8009  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
8010  it != m_Suballocations.end();
8011  ++it)
8012  {
8013  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
8014  {
8015  if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
8016  {
8017  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
8018  return VK_ERROR_VALIDATION_FAILED_EXT;
8019  }
8020  if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
8021  {
8022  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
8023  return VK_ERROR_VALIDATION_FAILED_EXT;
8024  }
8025  }
8026  }
8027 
8028  return VK_SUCCESS;
8029 }
8030 
8031 void VmaBlockMetadata_Generic::Alloc(
8032  const VmaAllocationRequest& request,
8033  VmaSuballocationType type,
8034  VkDeviceSize allocSize,
8035  bool upperAddress,
8036  VmaAllocation hAllocation)
8037 {
8038  VMA_ASSERT(!upperAddress);
8039  VMA_ASSERT(request.item != m_Suballocations.end());
8040  VmaSuballocation& suballoc = *request.item;
8041  // Given suballocation is a free block.
8042  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8043  // Given offset is inside this suballocation.
8044  VMA_ASSERT(request.offset >= suballoc.offset);
8045  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
8046  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
8047  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
8048 
8049  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
8050  // it to become used.
8051  UnregisterFreeSuballocation(request.item);
8052 
8053  suballoc.offset = request.offset;
8054  suballoc.size = allocSize;
8055  suballoc.type = type;
8056  suballoc.hAllocation = hAllocation;
8057 
8058  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
8059  if(paddingEnd)
8060  {
8061  VmaSuballocation paddingSuballoc = {};
8062  paddingSuballoc.offset = request.offset + allocSize;
8063  paddingSuballoc.size = paddingEnd;
8064  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8065  VmaSuballocationList::iterator next = request.item;
8066  ++next;
8067  const VmaSuballocationList::iterator paddingEndItem =
8068  m_Suballocations.insert(next, paddingSuballoc);
8069  RegisterFreeSuballocation(paddingEndItem);
8070  }
8071 
8072  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
8073  if(paddingBegin)
8074  {
8075  VmaSuballocation paddingSuballoc = {};
8076  paddingSuballoc.offset = request.offset - paddingBegin;
8077  paddingSuballoc.size = paddingBegin;
8078  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8079  const VmaSuballocationList::iterator paddingBeginItem =
8080  m_Suballocations.insert(request.item, paddingSuballoc);
8081  RegisterFreeSuballocation(paddingBeginItem);
8082  }
8083 
8084  // Update totals.
8085  m_FreeCount = m_FreeCount - 1;
8086  if(paddingBegin > 0)
8087  {
8088  ++m_FreeCount;
8089  }
8090  if(paddingEnd > 0)
8091  {
8092  ++m_FreeCount;
8093  }
8094  m_SumFreeSize -= allocSize;
8095 }
8096 
8097 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
8098 {
8099  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
8100  suballocItem != m_Suballocations.end();
8101  ++suballocItem)
8102  {
8103  VmaSuballocation& suballoc = *suballocItem;
8104  if(suballoc.hAllocation == allocation)
8105  {
8106  FreeSuballocation(suballocItem);
8107  VMA_HEAVY_ASSERT(Validate());
8108  return;
8109  }
8110  }
8111  VMA_ASSERT(0 && "Not found!");
8112 }
8113 
8114 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
8115 {
8116  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
8117  suballocItem != m_Suballocations.end();
8118  ++suballocItem)
8119  {
8120  VmaSuballocation& suballoc = *suballocItem;
8121  if(suballoc.offset == offset)
8122  {
8123  FreeSuballocation(suballocItem);
8124  return;
8125  }
8126  }
8127  VMA_ASSERT(0 && "Not found!");
8128 }
8129 
8130 bool VmaBlockMetadata_Generic::ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize)
8131 {
8132  typedef VmaSuballocationList::iterator iter_type;
8133  for(iter_type suballocItem = m_Suballocations.begin();
8134  suballocItem != m_Suballocations.end();
8135  ++suballocItem)
8136  {
8137  VmaSuballocation& suballoc = *suballocItem;
8138  if(suballoc.hAllocation == alloc)
8139  {
8140  iter_type nextItem = suballocItem;
8141  ++nextItem;
8142 
8143  // Should have been ensured at a higher level.
8144  VMA_ASSERT(newSize != alloc->GetSize() && newSize > 0);
8145 
8146  // Shrinking.
8147  if(newSize < alloc->GetSize())
8148  {
8149  const VkDeviceSize sizeDiff = suballoc.size - newSize;
8150 
8151  // There is next item.
8152  if(nextItem != m_Suballocations.end())
8153  {
8154  // Next item is free.
8155  if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8156  {
8157  // Grow this next item backward.
8158  UnregisterFreeSuballocation(nextItem);
8159  nextItem->offset -= sizeDiff;
8160  nextItem->size += sizeDiff;
8161  RegisterFreeSuballocation(nextItem);
8162  }
8163  // Next item is not free.
8164  else
8165  {
8166  // Create free item after current one.
8167  VmaSuballocation newFreeSuballoc;
8168  newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
8169  newFreeSuballoc.offset = suballoc.offset + newSize;
8170  newFreeSuballoc.size = sizeDiff;
8171  newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8172  iter_type newFreeSuballocIt = m_Suballocations.insert(nextItem, newFreeSuballoc);
8173  RegisterFreeSuballocation(newFreeSuballocIt);
8174 
8175  ++m_FreeCount;
8176  }
8177  }
8178  // This is the last item.
8179  else
8180  {
8181  // Create free item at the end.
8182  VmaSuballocation newFreeSuballoc;
8183  newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
8184  newFreeSuballoc.offset = suballoc.offset + newSize;
8185  newFreeSuballoc.size = sizeDiff;
8186  newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8187  m_Suballocations.push_back(newFreeSuballoc);
8188 
8189  iter_type newFreeSuballocIt = m_Suballocations.end();
8190  RegisterFreeSuballocation(--newFreeSuballocIt);
8191 
8192  ++m_FreeCount;
8193  }
8194 
8195  suballoc.size = newSize;
8196  m_SumFreeSize += sizeDiff;
8197  }
8198  // Growing.
8199  else
8200  {
8201  const VkDeviceSize sizeDiff = newSize - suballoc.size;
8202 
8203  // There is next item.
8204  if(nextItem != m_Suballocations.end())
8205  {
8206  // Next item is free.
8207  if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8208  {
8209  // There is not enough free space, including margin.
8210  if(nextItem->size < sizeDiff + VMA_DEBUG_MARGIN)
8211  {
8212  return false;
8213  }
8214 
8215  // There is more free space than required.
8216  if(nextItem->size > sizeDiff)
8217  {
8218  // Move and shrink this next item.
8219  UnregisterFreeSuballocation(nextItem);
8220  nextItem->offset += sizeDiff;
8221  nextItem->size -= sizeDiff;
8222  RegisterFreeSuballocation(nextItem);
8223  }
8224  // There is exactly the amount of free space required.
8225  else
8226  {
8227  // Remove this next free item.
8228  UnregisterFreeSuballocation(nextItem);
8229  m_Suballocations.erase(nextItem);
8230  --m_FreeCount;
8231  }
8232  }
8233  // Next item is not free - there is no space to grow.
8234  else
8235  {
8236  return false;
8237  }
8238  }
8239  // This is the last item - there is no space to grow.
8240  else
8241  {
8242  return false;
8243  }
8244 
8245  suballoc.size = newSize;
8246  m_SumFreeSize -= sizeDiff;
8247  }
8248 
8249  // We cannot call Validate() here because the alloc object is updated to its new size outside of this call.
8250  return true;
8251  }
8252  }
8253  VMA_ASSERT(0 && "Not found!");
8254  return false;
8255 }
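// Worked example of the shrink path above (illustrative numbers): shrinking a
// 256-byte suballocation at offset 1024 to 192 bytes either grows an adjacent
// free neighbor backward to start at offset 1216, or, if the neighbor is used,
// inserts a new 64-byte free item at offset 1216; either way m_SumFreeSize
// grows by the 64-byte difference.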
8256 
8257 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
8258 {
8259  VkDeviceSize lastSize = 0;
8260  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
8261  {
8262  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
8263 
8264  VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
8265  VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
8266  VMA_VALIDATE(it->size >= lastSize);
8267  lastSize = it->size;
8268  }
8269  return true;
8270 }
8271 
8272 bool VmaBlockMetadata_Generic::CheckAllocation(
8273  uint32_t currentFrameIndex,
8274  uint32_t frameInUseCount,
8275  VkDeviceSize bufferImageGranularity,
8276  VkDeviceSize allocSize,
8277  VkDeviceSize allocAlignment,
8278  VmaSuballocationType allocType,
8279  VmaSuballocationList::const_iterator suballocItem,
8280  bool canMakeOtherLost,
8281  VkDeviceSize* pOffset,
8282  size_t* itemsToMakeLostCount,
8283  VkDeviceSize* pSumFreeSize,
8284  VkDeviceSize* pSumItemSize) const
8285 {
8286  VMA_ASSERT(allocSize > 0);
8287  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
8288  VMA_ASSERT(suballocItem != m_Suballocations.cend());
8289  VMA_ASSERT(pOffset != VMA_NULL);
8290 
8291  *itemsToMakeLostCount = 0;
8292  *pSumFreeSize = 0;
8293  *pSumItemSize = 0;
8294 
8295  if(canMakeOtherLost)
8296  {
8297  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8298  {
8299  *pSumFreeSize = suballocItem->size;
8300  }
8301  else
8302  {
8303  if(suballocItem->hAllocation->CanBecomeLost() &&
8304  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
8305  {
8306  ++*itemsToMakeLostCount;
8307  *pSumItemSize = suballocItem->size;
8308  }
8309  else
8310  {
8311  return false;
8312  }
8313  }
8314 
8315  // Remaining size is too small for this request: Early return.
8316  if(GetSize() - suballocItem->offset < allocSize)
8317  {
8318  return false;
8319  }
8320 
8321  // Start from offset equal to beginning of this suballocation.
8322  *pOffset = suballocItem->offset;
8323 
8324  // Apply VMA_DEBUG_MARGIN at the beginning.
8325  if(VMA_DEBUG_MARGIN > 0)
8326  {
8327  *pOffset += VMA_DEBUG_MARGIN;
8328  }
8329 
8330  // Apply alignment.
8331  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
8332 
8333  // Check previous suballocations for BufferImageGranularity conflicts.
8334  // Make bigger alignment if necessary.
8335  if(bufferImageGranularity > 1)
8336  {
8337  bool bufferImageGranularityConflict = false;
8338  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
8339  while(prevSuballocItem != m_Suballocations.cbegin())
8340  {
8341  --prevSuballocItem;
8342  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
8343  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
8344  {
8345  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
8346  {
8347  bufferImageGranularityConflict = true;
8348  break;
8349  }
8350  }
8351  else
8352  // Already on previous page.
8353  break;
8354  }
8355  if(bufferImageGranularityConflict)
8356  {
8357  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
8358  }
8359  }
8360 
8361  // Now that we have final *pOffset, check if we are past suballocItem.
8362  // If yes, return false - this function should be called for another suballocItem as starting point.
8363  if(*pOffset >= suballocItem->offset + suballocItem->size)
8364  {
8365  return false;
8366  }
8367 
8368  // Calculate padding at the beginning based on current offset.
8369  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
8370 
8371  // Calculate required margin at the end.
8372  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
8373 
8374  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
8375  // Another early return check.
8376  if(suballocItem->offset + totalSize > GetSize())
8377  {
8378  return false;
8379  }
8380 
8381  // Advance lastSuballocItem until desired size is reached.
8382  // Update itemsToMakeLostCount.
8383  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
8384  if(totalSize > suballocItem->size)
8385  {
8386  VkDeviceSize remainingSize = totalSize - suballocItem->size;
8387  while(remainingSize > 0)
8388  {
8389  ++lastSuballocItem;
8390  if(lastSuballocItem == m_Suballocations.cend())
8391  {
8392  return false;
8393  }
8394  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8395  {
8396  *pSumFreeSize += lastSuballocItem->size;
8397  }
8398  else
8399  {
8400  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
8401  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
8402  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
8403  {
8404  ++*itemsToMakeLostCount;
8405  *pSumItemSize += lastSuballocItem->size;
8406  }
8407  else
8408  {
8409  return false;
8410  }
8411  }
8412  remainingSize = (lastSuballocItem->size < remainingSize) ?
8413  remainingSize - lastSuballocItem->size : 0;
8414  }
8415  }
8416 
8417  // Check next suballocations for BufferImageGranularity conflicts.
8418  // If conflict exists, we must mark more allocations lost or fail.
8419  if(bufferImageGranularity > 1)
8420  {
8421  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
8422  ++nextSuballocItem;
8423  while(nextSuballocItem != m_Suballocations.cend())
8424  {
8425  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
8426  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
8427  {
8428  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
8429  {
8430  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
8431  if(nextSuballoc.hAllocation->CanBecomeLost() &&
8432  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
8433  {
8434  ++*itemsToMakeLostCount;
8435  }
8436  else
8437  {
8438  return false;
8439  }
8440  }
8441  }
8442  else
8443  {
8444  // Already on next page.
8445  break;
8446  }
8447  ++nextSuballocItem;
8448  }
8449  }
8450  }
8451  else
8452  {
8453  const VmaSuballocation& suballoc = *suballocItem;
8454  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8455 
8456  *pSumFreeSize = suballoc.size;
8457 
8458  // Size of this suballocation is too small for this request: Early return.
8459  if(suballoc.size < allocSize)
8460  {
8461  return false;
8462  }
8463 
8464  // Start from offset equal to beginning of this suballocation.
8465  *pOffset = suballoc.offset;
8466 
8467  // Apply VMA_DEBUG_MARGIN at the beginning.
8468  if(VMA_DEBUG_MARGIN > 0)
8469  {
8470  *pOffset += VMA_DEBUG_MARGIN;
8471  }
8472 
8473  // Apply alignment.
8474  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
8475 
8476  // Check previous suballocations for BufferImageGranularity conflicts.
8477  // Make bigger alignment if necessary.
8478  if(bufferImageGranularity > 1)
8479  {
8480  bool bufferImageGranularityConflict = false;
8481  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
8482  while(prevSuballocItem != m_Suballocations.cbegin())
8483  {
8484  --prevSuballocItem;
8485  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
8486  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
8487  {
8488  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
8489  {
8490  bufferImageGranularityConflict = true;
8491  break;
8492  }
8493  }
8494  else
8495  // Already on previous page.
8496  break;
8497  }
8498  if(bufferImageGranularityConflict)
8499  {
8500  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
8501  }
8502  }
8503 
8504  // Calculate padding at the beginning based on current offset.
8505  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
8506 
8507  // Calculate required margin at the end.
8508  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
8509 
8510  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
8511  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
8512  {
8513  return false;
8514  }
8515 
8516  // Check next suballocations for BufferImageGranularity conflicts.
8517  // If conflict exists, allocation cannot be made here.
8518  if(bufferImageGranularity > 1)
8519  {
8520  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
8521  ++nextSuballocItem;
8522  while(nextSuballocItem != m_Suballocations.cend())
8523  {
8524  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
8525  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
8526  {
8527  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
8528  {
8529  return false;
8530  }
8531  }
8532  else
8533  {
8534  // Already on next page.
8535  break;
8536  }
8537  ++nextSuballocItem;
8538  }
8539  }
8540  }
8541 
8542  // All tests passed: Success. pOffset is already filled.
8543  return true;
8544 }
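// Note on BufferImageGranularity (illustrative only): a conflict occurs when a
// linear resource (buffer) and a non-linear resource (optimally tiled image)
// would share the same page of bufferImageGranularity bytes. For example, with
// bufferImageGranularity = 4096, an image ending at offset 0x11F0 forces a
// following buffer to start at 0x2000 or later - hence the extra
// VmaAlignUp(*pOffset, bufferImageGranularity) performed above when a conflict
// with a previous suballocation is detected.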
8545 
8546 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
8547 {
8548  VMA_ASSERT(item != m_Suballocations.end());
8549  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
8550 
8551  VmaSuballocationList::iterator nextItem = item;
8552  ++nextItem;
8553  VMA_ASSERT(nextItem != m_Suballocations.end());
8554  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
8555 
8556  item->size += nextItem->size;
8557  --m_FreeCount;
8558  m_Suballocations.erase(nextItem);
8559 }
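// Example: merging two adjacent free suballocations.
//
//   before: ... | FREE item (size A) | FREE nextItem (size B) | ...
//   after:  ... | FREE item (size A + B) | ...
//
// m_FreeCount decreases by one because two free ranges become a single range.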
8560 
8561 VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
8562 {
8563  // Change this suballocation to be marked as free.
8564  VmaSuballocation& suballoc = *suballocItem;
8565  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8566  suballoc.hAllocation = VK_NULL_HANDLE;
8567 
8568  // Update totals.
8569  ++m_FreeCount;
8570  m_SumFreeSize += suballoc.size;
8571 
8572  // Merge with previous and/or next suballocation if it's also free.
8573  bool mergeWithNext = false;
8574  bool mergeWithPrev = false;
8575 
8576  VmaSuballocationList::iterator nextItem = suballocItem;
8577  ++nextItem;
8578  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
8579  {
8580  mergeWithNext = true;
8581  }
8582 
8583  VmaSuballocationList::iterator prevItem = suballocItem;
8584  if(suballocItem != m_Suballocations.begin())
8585  {
8586  --prevItem;
8587  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8588  {
8589  mergeWithPrev = true;
8590  }
8591  }
8592 
8593  if(mergeWithNext)
8594  {
8595  UnregisterFreeSuballocation(nextItem);
8596  MergeFreeWithNext(suballocItem);
8597  }
8598 
8599  if(mergeWithPrev)
8600  {
8601  UnregisterFreeSuballocation(prevItem);
8602  MergeFreeWithNext(prevItem);
8603  RegisterFreeSuballocation(prevItem);
8604  return prevItem;
8605  }
8606  else
8607  {
8608  RegisterFreeSuballocation(suballocItem);
8609  return suballocItem;
8610  }
8611 }
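// Note: when merging with the previous item, the freed suballocation itself is
// erased and absorbed into prevItem, so prevItem is the iterator that stays
// valid and must be returned to the caller.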
8612 
8613 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
8614 {
8615  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
8616  VMA_ASSERT(item->size > 0);
8617 
8618  // You may want to enable this validation at the beginning or at the end of
8619  // this function, depending on what you want to check.
8620  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
8621 
8622  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
8623  {
8624  if(m_FreeSuballocationsBySize.empty())
8625  {
8626  m_FreeSuballocationsBySize.push_back(item);
8627  }
8628  else
8629  {
8630  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
8631  }
8632  }
8633 
8634  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
8635 }
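// Illustrative invariant maintained by RegisterFreeSuballocation (sizes only):
//
//   m_FreeSuballocationsBySize = [ 256, 1024, 1024, 4096 ]  // ascending by size
//
// Only free ranges of at least VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER bytes
// are tracked here, so best-fit searches can skip tiny fragments.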
8636 
8637 
8638 void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
8639 {
8640  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
8641  VMA_ASSERT(item->size > 0);
8642 
8643  // You may want to enable this validation at the beginning or at the end of
8644  // this function, depending on what you want to check.
8645  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
8646 
8647  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
8648  {
8649  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
8650  m_FreeSuballocationsBySize.data(),
8651  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
8652  item,
8653  VmaSuballocationItemSizeLess());
8654  for(size_t index = it - m_FreeSuballocationsBySize.data();
8655  index < m_FreeSuballocationsBySize.size();
8656  ++index)
8657  {
8658  if(m_FreeSuballocationsBySize[index] == item)
8659  {
8660  VmaVectorRemove(m_FreeSuballocationsBySize, index);
8661  return;
8662  }
8663  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
8664  }
8665  VMA_ASSERT(0 && "Not found.");
8666  }
8667 
8668  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
8669 }
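// Note: VmaBinaryFindFirstNotLess only positions the search at the first entry
// with size >= item->size. The linear scan that follows is still needed because
// several free ranges may share the same size and only iterator equality
// identifies the exact one to remove.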
8670 
8671 bool VmaBlockMetadata_Generic::IsBufferImageGranularityConflictPossible(
8672  VkDeviceSize bufferImageGranularity,
8673  VmaSuballocationType& inOutPrevSuballocType) const
8674 {
8675  if(bufferImageGranularity == 1 || IsEmpty())
8676  {
8677  return false;
8678  }
8679 
8680  VkDeviceSize minAlignment = VK_WHOLE_SIZE;
8681  bool typeConflictFound = false;
8682  for(VmaSuballocationList::const_iterator it = m_Suballocations.cbegin();
8683  it != m_Suballocations.cend();
8684  ++it)
8685  {
8686  const VmaSuballocationType suballocType = it->type;
8687  if(suballocType != VMA_SUBALLOCATION_TYPE_FREE)
8688  {
8689  minAlignment = VMA_MIN(minAlignment, it->hAllocation->GetAlignment());
8690  if(VmaIsBufferImageGranularityConflict(inOutPrevSuballocType, suballocType))
8691  {
8692  typeConflictFound = true;
8693  }
8694  inOutPrevSuballocType = suballocType;
8695  }
8696  }
8697 
8698  return typeConflictFound || minAlignment >= bufferImageGranularity;
8699 }
8700 
8701 ////////////////////////////////////////////////////////////////////////////////
8702 // class VmaBlockMetadata_Linear
8703 
8704 VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
8705  VmaBlockMetadata(hAllocator),
8706  m_SumFreeSize(0),
8707  m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
8708  m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
8709  m_1stVectorIndex(0),
8710  m_2ndVectorMode(SECOND_VECTOR_EMPTY),
8711  m_1stNullItemsBeginCount(0),
8712  m_1stNullItemsMiddleCount(0),
8713  m_2ndNullItemsCount(0)
8714 {
8715 }
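// Simplified sketch of the three m_2ndVectorMode layouts handled by this class:
//
//   SECOND_VECTOR_EMPTY:        | 1st ------> | free ................... |
//   SECOND_VECTOR_RING_BUFFER:  | 2nd --> | free | 1st ------>           |
//   SECOND_VECTOR_DOUBLE_STACK: | 1st ------> | free | <------ 2nd       |
//
// 1st always grows toward higher addresses. In ring-buffer mode 2nd restarts at
// offset 0 behind the beginning of 1st; in double-stack mode 2nd grows downward
// from the end of the block.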
8716 
8717 VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
8718 {
8719 }
8720 
8721 void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
8722 {
8723  VmaBlockMetadata::Init(size);
8724  m_SumFreeSize = size;
8725 }
8726 
8727 bool VmaBlockMetadata_Linear::Validate() const
8728 {
8729  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8730  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8731 
8732  VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
8733  VMA_VALIDATE(!suballocations1st.empty() ||
8734  suballocations2nd.empty() ||
8735  m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);
8736 
8737  if(!suballocations1st.empty())
8738  {
8739  // A null item at the beginning should be accounted for in m_1stNullItemsBeginCount.
8740  VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
8741  // A null item at the end should have been removed by pop_back().
8742  VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
8743  }
8744  if(!suballocations2nd.empty())
8745  {
8746  // A null item at the end should have been removed by pop_back().
8747  VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
8748  }
8749 
8750  VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
8751  VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());
8752 
8753  VkDeviceSize sumUsedSize = 0;
8754  const size_t suballoc1stCount = suballocations1st.size();
8755  VkDeviceSize offset = VMA_DEBUG_MARGIN;
8756 
8757  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8758  {
8759  const size_t suballoc2ndCount = suballocations2nd.size();
8760  size_t nullItem2ndCount = 0;
8761  for(size_t i = 0; i < suballoc2ndCount; ++i)
8762  {
8763  const VmaSuballocation& suballoc = suballocations2nd[i];
8764  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8765 
8766  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
8767  VMA_VALIDATE(suballoc.offset >= offset);
8768 
8769  if(!currFree)
8770  {
8771  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
8772  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
8773  sumUsedSize += suballoc.size;
8774  }
8775  else
8776  {
8777  ++nullItem2ndCount;
8778  }
8779 
8780  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
8781  }
8782 
8783  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
8784  }
8785 
8786  for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
8787  {
8788  const VmaSuballocation& suballoc = suballocations1st[i];
8789  VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
8790  suballoc.hAllocation == VK_NULL_HANDLE);
8791  }
8792 
8793  size_t nullItem1stCount = m_1stNullItemsBeginCount;
8794 
8795  for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
8796  {
8797  const VmaSuballocation& suballoc = suballocations1st[i];
8798  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8799 
8800  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
8801  VMA_VALIDATE(suballoc.offset >= offset);
8802  VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);
8803 
8804  if(!currFree)
8805  {
8806  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
8807  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
8808  sumUsedSize += suballoc.size;
8809  }
8810  else
8811  {
8812  ++nullItem1stCount;
8813  }
8814 
8815  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
8816  }
8817  VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);
8818 
8819  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8820  {
8821  const size_t suballoc2ndCount = suballocations2nd.size();
8822  size_t nullItem2ndCount = 0;
8823  for(size_t i = suballoc2ndCount; i--; )
8824  {
8825  const VmaSuballocation& suballoc = suballocations2nd[i];
8826  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8827 
8828  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
8829  VMA_VALIDATE(suballoc.offset >= offset);
8830 
8831  if(!currFree)
8832  {
8833  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
8834  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
8835  sumUsedSize += suballoc.size;
8836  }
8837  else
8838  {
8839  ++nullItem2ndCount;
8840  }
8841 
8842  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
8843  }
8844 
8845  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
8846  }
8847 
8848  VMA_VALIDATE(offset <= GetSize());
8849  VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);
8850 
8851  return true;
8852 }
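// Note: Validate() recomputes the null-item counts and the used size from
// scratch and cross-checks them against the cached members, so any drift in
// m_SumFreeSize, m_1stNullItemsBeginCount, m_1stNullItemsMiddleCount or
// m_2ndNullItemsCount is caught here.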
8853 
8854 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
8855 {
8856  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
8857  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
8858 }
8859 
8860 VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
8861 {
8862  const VkDeviceSize size = GetSize();
8863 
8864  /*
8865  We don't consider gaps inside allocation vectors with freed allocations because
8866  they are not suitable for reuse in a linear allocator. We consider only space that
8867  is available for new allocations.
8868  */
8869  if(IsEmpty())
8870  {
8871  return size;
8872  }
8873 
8874  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8875 
8876  switch(m_2ndVectorMode)
8877  {
8878  case SECOND_VECTOR_EMPTY:
8879  /*
8880  Available space is after end of 1st, as well as before beginning of 1st (which
8881  would make it a ring buffer).
8882  */
8883  {
8884  const size_t suballocations1stCount = suballocations1st.size();
8885  VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
8886  const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
8887  const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
8888  return VMA_MAX(
8889  firstSuballoc.offset,
8890  size - (lastSuballoc.offset + lastSuballoc.size));
8891  }
8892  break;
8893 
8894  case SECOND_VECTOR_RING_BUFFER:
8895  /*
8896  Available space is only between end of 2nd and beginning of 1st.
8897  */
8898  {
8899  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8900  const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
8901  const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
8902  return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
8903  }
8904  break;
8905 
8906  case SECOND_VECTOR_DOUBLE_STACK:
8907  /*
8908  Available space is only between end of 1st and top of 2nd.
8909  */
8910  {
8911  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8912  const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
8913  const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
8914  return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
8915  }
8916  break;
8917 
8918  default:
8919  VMA_ASSERT(0);
8920  return 0;
8921  }
8922 }
8923 
8924 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
8925 {
8926  const VkDeviceSize size = GetSize();
8927  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8928  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8929  const size_t suballoc1stCount = suballocations1st.size();
8930  const size_t suballoc2ndCount = suballocations2nd.size();
8931 
8932  outInfo.blockCount = 1;
8933  outInfo.allocationCount = (uint32_t)GetAllocationCount();
8934  outInfo.unusedRangeCount = 0;
8935  outInfo.usedBytes = 0;
8936  outInfo.allocationSizeMin = UINT64_MAX;
8937  outInfo.allocationSizeMax = 0;
8938  outInfo.unusedRangeSizeMin = UINT64_MAX;
8939  outInfo.unusedRangeSizeMax = 0;
8940 
8941  VkDeviceSize lastOffset = 0;
8942 
8943  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8944  {
8945  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
8946  size_t nextAlloc2ndIndex = 0;
8947  while(lastOffset < freeSpace2ndTo1stEnd)
8948  {
8949  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8950  while(nextAlloc2ndIndex < suballoc2ndCount &&
8951  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8952  {
8953  ++nextAlloc2ndIndex;
8954  }
8955 
8956  // Found non-null allocation.
8957  if(nextAlloc2ndIndex < suballoc2ndCount)
8958  {
8959  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8960 
8961  // 1. Process free space before this allocation.
8962  if(lastOffset < suballoc.offset)
8963  {
8964  // There is free space from lastOffset to suballoc.offset.
8965  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8966  ++outInfo.unusedRangeCount;
8967  outInfo.unusedBytes += unusedRangeSize;
8968  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
8969  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
8970  }
8971 
8972  // 2. Process this allocation.
8973  // There is allocation with suballoc.offset, suballoc.size.
8974  outInfo.usedBytes += suballoc.size;
8975  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
8976  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
8977 
8978  // 3. Prepare for next iteration.
8979  lastOffset = suballoc.offset + suballoc.size;
8980  ++nextAlloc2ndIndex;
8981  }
8982  // We are at the end.
8983  else
8984  {
8985  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
8986  if(lastOffset < freeSpace2ndTo1stEnd)
8987  {
8988  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
8989  ++outInfo.unusedRangeCount;
8990  outInfo.unusedBytes += unusedRangeSize;
8991  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
8992  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
8993  }
8994 
8995  // End of loop.
8996  lastOffset = freeSpace2ndTo1stEnd;
8997  }
8998  }
8999  }
9000 
9001  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
9002  const VkDeviceSize freeSpace1stTo2ndEnd =
9003  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
9004  while(lastOffset < freeSpace1stTo2ndEnd)
9005  {
9006  // Find next non-null allocation or move nextAlloc1stIndex to the end.
9007  while(nextAlloc1stIndex < suballoc1stCount &&
9008  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9009  {
9010  ++nextAlloc1stIndex;
9011  }
9012 
9013  // Found non-null allocation.
9014  if(nextAlloc1stIndex < suballoc1stCount)
9015  {
9016  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9017 
9018  // 1. Process free space before this allocation.
9019  if(lastOffset < suballoc.offset)
9020  {
9021  // There is free space from lastOffset to suballoc.offset.
9022  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9023  ++outInfo.unusedRangeCount;
9024  outInfo.unusedBytes += unusedRangeSize;
9025  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9026  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9027  }
9028 
9029  // 2. Process this allocation.
9030  // There is allocation with suballoc.offset, suballoc.size.
9031  outInfo.usedBytes += suballoc.size;
9032  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
9033  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
9034 
9035  // 3. Prepare for next iteration.
9036  lastOffset = suballoc.offset + suballoc.size;
9037  ++nextAlloc1stIndex;
9038  }
9039  // We are at the end.
9040  else
9041  {
9042  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9043  if(lastOffset < freeSpace1stTo2ndEnd)
9044  {
9045  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
9046  ++outInfo.unusedRangeCount;
9047  outInfo.unusedBytes += unusedRangeSize;
9048  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9049  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9050  }
9051 
9052  // End of loop.
9053  lastOffset = freeSpace1stTo2ndEnd;
9054  }
9055  }
9056 
9057  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9058  {
9059  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9060  while(lastOffset < size)
9061  {
9062  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9063  while(nextAlloc2ndIndex != SIZE_MAX &&
9064  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9065  {
9066  --nextAlloc2ndIndex;
9067  }
9068 
9069  // Found non-null allocation.
9070  if(nextAlloc2ndIndex != SIZE_MAX)
9071  {
9072  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9073 
9074  // 1. Process free space before this allocation.
9075  if(lastOffset < suballoc.offset)
9076  {
9077  // There is free space from lastOffset to suballoc.offset.
9078  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9079  ++outInfo.unusedRangeCount;
9080  outInfo.unusedBytes += unusedRangeSize;
9081  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9082  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9083  }
9084 
9085  // 2. Process this allocation.
9086  // There is allocation with suballoc.offset, suballoc.size.
9087  outInfo.usedBytes += suballoc.size;
9088  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
9089  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
9090 
9091  // 3. Prepare for next iteration.
9092  lastOffset = suballoc.offset + suballoc.size;
9093  --nextAlloc2ndIndex;
9094  }
9095  // We are at the end.
9096  else
9097  {
9098  // There is free space from lastOffset to size.
9099  if(lastOffset < size)
9100  {
9101  const VkDeviceSize unusedRangeSize = size - lastOffset;
9102  ++outInfo.unusedRangeCount;
9103  outInfo.unusedBytes += unusedRangeSize;
9104  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9105  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9106  }
9107 
9108  // End of loop.
9109  lastOffset = size;
9110  }
9111  }
9112  }
9113 
9114  outInfo.unusedBytes = size - outInfo.usedBytes;
9115 }
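// Note: AddPoolStats and PrintDetailedMap below walk the block with the same
// three-phase pattern as CalcAllocationStatInfo above: (1) the 2nd vector while
// it acts as a ring buffer (lowest offsets), (2) the 1st vector, (3) the 2nd
// vector while it acts as the upper stack, scanned backwards, i.e. by
// increasing offset.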
9116 
9117 void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
9118 {
9119  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9120  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9121  const VkDeviceSize size = GetSize();
9122  const size_t suballoc1stCount = suballocations1st.size();
9123  const size_t suballoc2ndCount = suballocations2nd.size();
9124 
9125  inoutStats.size += size;
9126 
9127  VkDeviceSize lastOffset = 0;
9128 
9129  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9130  {
9131  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9132  size_t nextAlloc2ndIndex = 0; // Index into suballocations2nd, so it starts at 0, as in CalcAllocationStatInfo and PrintDetailedMap.
9133  while(lastOffset < freeSpace2ndTo1stEnd)
9134  {
9135  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9136  while(nextAlloc2ndIndex < suballoc2ndCount &&
9137  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9138  {
9139  ++nextAlloc2ndIndex;
9140  }
9141 
9142  // Found non-null allocation.
9143  if(nextAlloc2ndIndex < suballoc2ndCount)
9144  {
9145  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9146 
9147  // 1. Process free space before this allocation.
9148  if(lastOffset < suballoc.offset)
9149  {
9150  // There is free space from lastOffset to suballoc.offset.
9151  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9152  inoutStats.unusedSize += unusedRangeSize;
9153  ++inoutStats.unusedRangeCount;
9154  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9155  }
9156 
9157  // 2. Process this allocation.
9158  // There is allocation with suballoc.offset, suballoc.size.
9159  ++inoutStats.allocationCount;
9160 
9161  // 3. Prepare for next iteration.
9162  lastOffset = suballoc.offset + suballoc.size;
9163  ++nextAlloc2ndIndex;
9164  }
9165  // We are at the end.
9166  else
9167  {
9168  if(lastOffset < freeSpace2ndTo1stEnd)
9169  {
9170  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9171  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
9172  inoutStats.unusedSize += unusedRangeSize;
9173  ++inoutStats.unusedRangeCount;
9174  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9175  }
9176 
9177  // End of loop.
9178  lastOffset = freeSpace2ndTo1stEnd;
9179  }
9180  }
9181  }
9182 
9183  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
9184  const VkDeviceSize freeSpace1stTo2ndEnd =
9185  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
9186  while(lastOffset < freeSpace1stTo2ndEnd)
9187  {
9188  // Find next non-null allocation or move nextAlloc1stIndex to the end.
9189  while(nextAlloc1stIndex < suballoc1stCount &&
9190  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9191  {
9192  ++nextAlloc1stIndex;
9193  }
9194 
9195  // Found non-null allocation.
9196  if(nextAlloc1stIndex < suballoc1stCount)
9197  {
9198  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9199 
9200  // 1. Process free space before this allocation.
9201  if(lastOffset < suballoc.offset)
9202  {
9203  // There is free space from lastOffset to suballoc.offset.
9204  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9205  inoutStats.unusedSize += unusedRangeSize;
9206  ++inoutStats.unusedRangeCount;
9207  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9208  }
9209 
9210  // 2. Process this allocation.
9211  // There is allocation with suballoc.offset, suballoc.size.
9212  ++inoutStats.allocationCount;
9213 
9214  // 3. Prepare for next iteration.
9215  lastOffset = suballoc.offset + suballoc.size;
9216  ++nextAlloc1stIndex;
9217  }
9218  // We are at the end.
9219  else
9220  {
9221  if(lastOffset < freeSpace1stTo2ndEnd)
9222  {
9223  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9224  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
9225  inoutStats.unusedSize += unusedRangeSize;
9226  ++inoutStats.unusedRangeCount;
9227  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9228  }
9229 
9230  // End of loop.
9231  lastOffset = freeSpace1stTo2ndEnd;
9232  }
9233  }
9234 
9235  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9236  {
9237  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9238  while(lastOffset < size)
9239  {
9240  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9241  while(nextAlloc2ndIndex != SIZE_MAX &&
9242  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9243  {
9244  --nextAlloc2ndIndex;
9245  }
9246 
9247  // Found non-null allocation.
9248  if(nextAlloc2ndIndex != SIZE_MAX)
9249  {
9250  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9251 
9252  // 1. Process free space before this allocation.
9253  if(lastOffset < suballoc.offset)
9254  {
9255  // There is free space from lastOffset to suballoc.offset.
9256  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9257  inoutStats.unusedSize += unusedRangeSize;
9258  ++inoutStats.unusedRangeCount;
9259  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9260  }
9261 
9262  // 2. Process this allocation.
9263  // There is allocation with suballoc.offset, suballoc.size.
9264  ++inoutStats.allocationCount;
9265 
9266  // 3. Prepare for next iteration.
9267  lastOffset = suballoc.offset + suballoc.size;
9268  --nextAlloc2ndIndex;
9269  }
9270  // We are at the end.
9271  else
9272  {
9273  if(lastOffset < size)
9274  {
9275  // There is free space from lastOffset to size.
9276  const VkDeviceSize unusedRangeSize = size - lastOffset;
9277  inoutStats.unusedSize += unusedRangeSize;
9278  ++inoutStats.unusedRangeCount;
9279  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9280  }
9281 
9282  // End of loop.
9283  lastOffset = size;
9284  }
9285  }
9286  }
9287 }
9288 
9289 #if VMA_STATS_STRING_ENABLED
9290 void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
9291 {
9292  const VkDeviceSize size = GetSize();
9293  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9294  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9295  const size_t suballoc1stCount = suballocations1st.size();
9296  const size_t suballoc2ndCount = suballocations2nd.size();
9297 
9298  // FIRST PASS
9299 
9300  size_t unusedRangeCount = 0;
9301  VkDeviceSize usedBytes = 0;
9302 
9303  VkDeviceSize lastOffset = 0;
9304 
9305  size_t alloc2ndCount = 0;
9306  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9307  {
9308  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9309  size_t nextAlloc2ndIndex = 0;
9310  while(lastOffset < freeSpace2ndTo1stEnd)
9311  {
9312  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9313  while(nextAlloc2ndIndex < suballoc2ndCount &&
9314  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9315  {
9316  ++nextAlloc2ndIndex;
9317  }
9318 
9319  // Found non-null allocation.
9320  if(nextAlloc2ndIndex < suballoc2ndCount)
9321  {
9322  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9323 
9324  // 1. Process free space before this allocation.
9325  if(lastOffset < suballoc.offset)
9326  {
9327  // There is free space from lastOffset to suballoc.offset.
9328  ++unusedRangeCount;
9329  }
9330 
9331  // 2. Process this allocation.
9332  // There is allocation with suballoc.offset, suballoc.size.
9333  ++alloc2ndCount;
9334  usedBytes += suballoc.size;
9335 
9336  // 3. Prepare for next iteration.
9337  lastOffset = suballoc.offset + suballoc.size;
9338  ++nextAlloc2ndIndex;
9339  }
9340  // We are at the end.
9341  else
9342  {
9343  if(lastOffset < freeSpace2ndTo1stEnd)
9344  {
9345  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9346  ++unusedRangeCount;
9347  }
9348 
9349  // End of loop.
9350  lastOffset = freeSpace2ndTo1stEnd;
9351  }
9352  }
9353  }
9354 
9355  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
9356  size_t alloc1stCount = 0;
9357  const VkDeviceSize freeSpace1stTo2ndEnd =
9358  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
9359  while(lastOffset < freeSpace1stTo2ndEnd)
9360  {
9361  // Find next non-null allocation or move nextAlloc1stIndex to the end.
9362  while(nextAlloc1stIndex < suballoc1stCount &&
9363  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9364  {
9365  ++nextAlloc1stIndex;
9366  }
9367 
9368  // Found non-null allocation.
9369  if(nextAlloc1stIndex < suballoc1stCount)
9370  {
9371  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9372 
9373  // 1. Process free space before this allocation.
9374  if(lastOffset < suballoc.offset)
9375  {
9376  // There is free space from lastOffset to suballoc.offset.
9377  ++unusedRangeCount;
9378  }
9379 
9380  // 2. Process this allocation.
9381  // There is allocation with suballoc.offset, suballoc.size.
9382  ++alloc1stCount;
9383  usedBytes += suballoc.size;
9384 
9385  // 3. Prepare for next iteration.
9386  lastOffset = suballoc.offset + suballoc.size;
9387  ++nextAlloc1stIndex;
9388  }
9389  // We are at the end.
9390  else
9391  {
9392  if(lastOffset < freeSpace1stTo2ndEnd)
9393  {
9394  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9395  ++unusedRangeCount;
9396  }
9397 
9398  // End of loop.
9399  lastOffset = freeSpace1stTo2ndEnd;
9400  }
9401  }
9402 
9403  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9404  {
9405  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9406  while(lastOffset < size)
9407  {
9408  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9409  while(nextAlloc2ndIndex != SIZE_MAX &&
9410  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9411  {
9412  --nextAlloc2ndIndex;
9413  }
9414 
9415  // Found non-null allocation.
9416  if(nextAlloc2ndIndex != SIZE_MAX)
9417  {
9418  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9419 
9420  // 1. Process free space before this allocation.
9421  if(lastOffset < suballoc.offset)
9422  {
9423  // There is free space from lastOffset to suballoc.offset.
9424  ++unusedRangeCount;
9425  }
9426 
9427  // 2. Process this allocation.
9428  // There is allocation with suballoc.offset, suballoc.size.
9429  ++alloc2ndCount;
9430  usedBytes += suballoc.size;
9431 
9432  // 3. Prepare for next iteration.
9433  lastOffset = suballoc.offset + suballoc.size;
9434  --nextAlloc2ndIndex;
9435  }
9436  // We are at the end.
9437  else
9438  {
9439  if(lastOffset < size)
9440  {
9441  // There is free space from lastOffset to size.
9442  ++unusedRangeCount;
9443  }
9444 
9445  // End of loop.
9446  lastOffset = size;
9447  }
9448  }
9449  }
9450 
9451  const VkDeviceSize unusedBytes = size - usedBytes;
9452  PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);
9453 
9454  // SECOND PASS
9455  lastOffset = 0;
9456 
9457  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9458  {
9459  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9460  size_t nextAlloc2ndIndex = 0;
9461  while(lastOffset < freeSpace2ndTo1stEnd)
9462  {
9463  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9464  while(nextAlloc2ndIndex < suballoc2ndCount &&
9465  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9466  {
9467  ++nextAlloc2ndIndex;
9468  }
9469 
9470  // Found non-null allocation.
9471  if(nextAlloc2ndIndex < suballoc2ndCount)
9472  {
9473  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9474 
9475  // 1. Process free space before this allocation.
9476  if(lastOffset < suballoc.offset)
9477  {
9478  // There is free space from lastOffset to suballoc.offset.
9479  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9480  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9481  }
9482 
9483  // 2. Process this allocation.
9484  // There is allocation with suballoc.offset, suballoc.size.
9485  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
9486 
9487  // 3. Prepare for next iteration.
9488  lastOffset = suballoc.offset + suballoc.size;
9489  ++nextAlloc2ndIndex;
9490  }
9491  // We are at the end.
9492  else
9493  {
9494  if(lastOffset < freeSpace2ndTo1stEnd)
9495  {
9496  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9497  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
9498  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9499  }
9500 
9501  // End of loop.
9502  lastOffset = freeSpace2ndTo1stEnd;
9503  }
9504  }
9505  }
9506 
9507  nextAlloc1stIndex = m_1stNullItemsBeginCount;
9508  while(lastOffset < freeSpace1stTo2ndEnd)
9509  {
9510  // Find next non-null allocation or move nextAlloc1stIndex to the end.
9511  while(nextAlloc1stIndex < suballoc1stCount &&
9512  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9513  {
9514  ++nextAlloc1stIndex;
9515  }
9516 
9517  // Found non-null allocation.
9518  if(nextAlloc1stIndex < suballoc1stCount)
9519  {
9520  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9521 
9522  // 1. Process free space before this allocation.
9523  if(lastOffset < suballoc.offset)
9524  {
9525  // There is free space from lastOffset to suballoc.offset.
9526  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9527  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9528  }
9529 
9530  // 2. Process this allocation.
9531  // There is allocation with suballoc.offset, suballoc.size.
9532  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
9533 
9534  // 3. Prepare for next iteration.
9535  lastOffset = suballoc.offset + suballoc.size;
9536  ++nextAlloc1stIndex;
9537  }
9538  // We are at the end.
9539  else
9540  {
9541  if(lastOffset < freeSpace1stTo2ndEnd)
9542  {
9543  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9544  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
9545  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9546  }
9547 
9548  // End of loop.
9549  lastOffset = freeSpace1stTo2ndEnd;
9550  }
9551  }
9552 
9553  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9554  {
9555  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9556  while(lastOffset < size)
9557  {
9558  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9559  while(nextAlloc2ndIndex != SIZE_MAX &&
9560  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9561  {
9562  --nextAlloc2ndIndex;
9563  }
9564 
9565  // Found non-null allocation.
9566  if(nextAlloc2ndIndex != SIZE_MAX)
9567  {
9568  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9569 
9570  // 1. Process free space before this allocation.
9571  if(lastOffset < suballoc.offset)
9572  {
9573  // There is free space from lastOffset to suballoc.offset.
9574  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9575  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9576  }
9577 
9578  // 2. Process this allocation.
9579  // There is allocation with suballoc.offset, suballoc.size.
9580  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
9581 
9582  // 3. Prepare for next iteration.
9583  lastOffset = suballoc.offset + suballoc.size;
9584  --nextAlloc2ndIndex;
9585  }
9586  // We are at the end.
9587  else
9588  {
9589  if(lastOffset < size)
9590  {
9591  // There is free space from lastOffset to size.
9592  const VkDeviceSize unusedRangeSize = size - lastOffset;
9593  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9594  }
9595 
9596  // End of loop.
9597  lastOffset = size;
9598  }
9599  }
9600  }
9601 
9602  PrintDetailedMap_End(json);
9603 }
9604 #endif // #if VMA_STATS_STRING_ENABLED
9605 
9606 bool VmaBlockMetadata_Linear::CreateAllocationRequest(
9607  uint32_t currentFrameIndex,
9608  uint32_t frameInUseCount,
9609  VkDeviceSize bufferImageGranularity,
9610  VkDeviceSize allocSize,
9611  VkDeviceSize allocAlignment,
9612  bool upperAddress,
9613  VmaSuballocationType allocType,
9614  bool canMakeOtherLost,
9615  uint32_t strategy,
9616  VmaAllocationRequest* pAllocationRequest)
9617 {
9618  VMA_ASSERT(allocSize > 0);
9619  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
9620  VMA_ASSERT(pAllocationRequest != VMA_NULL);
9621  VMA_HEAVY_ASSERT(Validate());
9622 
9623  const VkDeviceSize size = GetSize();
9624  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9625  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9626 
9627  if(upperAddress)
9628  {
9629  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9630  {
9631  VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
9632  return false;
9633  }
9634 
9635  // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
9636  if(allocSize > size)
9637  {
9638  return false;
9639  }
9640  VkDeviceSize resultBaseOffset = size - allocSize;
9641  if(!suballocations2nd.empty())
9642  {
9643  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
9644  resultBaseOffset = lastSuballoc.offset - allocSize;
9645  if(allocSize > lastSuballoc.offset)
9646  {
9647  return false;
9648  }
9649  }
9650 
9651  // Start from offset equal to end of free space.
9652  VkDeviceSize resultOffset = resultBaseOffset;
9653 
9654  // Apply VMA_DEBUG_MARGIN at the end.
9655  if(VMA_DEBUG_MARGIN > 0)
9656  {
9657  if(resultOffset < VMA_DEBUG_MARGIN)
9658  {
9659  return false;
9660  }
9661  resultOffset -= VMA_DEBUG_MARGIN;
9662  }
9663 
9664  // Apply alignment.
9665  resultOffset = VmaAlignDown(resultOffset, allocAlignment);
9666 
9667  // Check next suballocations from 2nd for BufferImageGranularity conflicts.
9668  // Make bigger alignment if necessary.
9669  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
9670  {
9671  bool bufferImageGranularityConflict = false;
9672  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
9673  {
9674  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
9675  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
9676  {
9677  if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
9678  {
9679  bufferImageGranularityConflict = true;
9680  break;
9681  }
9682  }
9683  else
9684  // Already on next page.
9685  break;
9686  }
9687  if(bufferImageGranularityConflict)
9688  {
9689  resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
9690  }
9691  }
9692 
9693  // There is enough free space.
9694  const VkDeviceSize endOf1st = !suballocations1st.empty() ?
9695  suballocations1st.back().offset + suballocations1st.back().size :
9696  0;
9697  if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
9698  {
9699  // Check previous suballocations for BufferImageGranularity conflicts.
9700  // If conflict exists, allocation cannot be made here.
9701  if(bufferImageGranularity > 1)
9702  {
9703  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
9704  {
9705  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
9706  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
9707  {
9708  if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
9709  {
9710  return false;
9711  }
9712  }
9713  else
9714  {
9715  // Already on previous page.
9716  break;
9717  }
9718  }
9719  }
9720 
9721  // All tests passed: Success.
9722  pAllocationRequest->offset = resultOffset;
9723  pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
9724  pAllocationRequest->sumItemSize = 0;
9725  // pAllocationRequest->item unused.
9726  pAllocationRequest->itemsToMakeLostCount = 0;
9727  return true;
9728  }
9729  }
9730  else // !upperAddress
9731  {
9732  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9733  {
9734  // Try to allocate at the end of 1st vector.
9735 
9736  VkDeviceSize resultBaseOffset = 0;
9737  if(!suballocations1st.empty())
9738  {
9739  const VmaSuballocation& lastSuballoc = suballocations1st.back();
9740  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
9741  }
9742 
9743  // Start from offset equal to beginning of free space.
9744  VkDeviceSize resultOffset = resultBaseOffset;
9745 
9746  // Apply VMA_DEBUG_MARGIN at the beginning.
9747  if(VMA_DEBUG_MARGIN > 0)
9748  {
9749  resultOffset += VMA_DEBUG_MARGIN;
9750  }
9751 
9752  // Apply alignment.
9753  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
9754 
9755  // Check previous suballocations for BufferImageGranularity conflicts.
9756  // Make bigger alignment if necessary.
9757  if(bufferImageGranularity > 1 && !suballocations1st.empty())
9758  {
9759  bool bufferImageGranularityConflict = false;
9760  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
9761  {
9762  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
9763  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
9764  {
9765  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
9766  {
9767  bufferImageGranularityConflict = true;
9768  break;
9769  }
9770  }
9771  else
9772  // Already on previous page.
9773  break;
9774  }
9775  if(bufferImageGranularityConflict)
9776  {
9777  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
9778  }
9779  }
9780 
9781  const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
9782  suballocations2nd.back().offset : size;
9783 
9784  // There is enough free space at the end after alignment.
9785  if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
9786  {
9787  // Check next suballocations for BufferImageGranularity conflicts.
9788  // If conflict exists, allocation cannot be made here.
9789  if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9790  {
9791  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
9792  {
9793  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
9794  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
9795  {
9796  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
9797  {
9798  return false;
9799  }
9800  }
9801  else
9802  {
9803  // Already on next page.
9804  break;
9805  }
9806  }
9807  }
9808 
9809  // All tests passed: Success.
9810  pAllocationRequest->offset = resultOffset;
9811  pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
9812  pAllocationRequest->sumItemSize = 0;
9813  // pAllocationRequest->item unused.
9814  pAllocationRequest->itemsToMakeLostCount = 0;
9815  return true;
9816  }
9817  }
9818 
9819  // Wrap around to the end of the 2nd vector: try to allocate there, treating the
9820  // beginning of the 1st vector as the end of the free space.
9821  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9822  {
9823  VMA_ASSERT(!suballocations1st.empty());
9824 
9825  VkDeviceSize resultBaseOffset = 0;
9826  if(!suballocations2nd.empty())
9827  {
9828  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
9829  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
9830  }
9831 
9832  // Start from offset equal to beginning of free space.
9833  VkDeviceSize resultOffset = resultBaseOffset;
9834 
9835  // Apply VMA_DEBUG_MARGIN at the beginning.
9836  if(VMA_DEBUG_MARGIN > 0)
9837  {
9838  resultOffset += VMA_DEBUG_MARGIN;
9839  }
9840 
9841  // Apply alignment.
9842  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
9843 
9844  // Check previous suballocations for BufferImageGranularity conflicts.
9845  // Make bigger alignment if necessary.
9846  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
9847  {
9848  bool bufferImageGranularityConflict = false;
9849  for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
9850  {
9851  const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
9852  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
9853  {
9854  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
9855  {
9856  bufferImageGranularityConflict = true;
9857  break;
9858  }
9859  }
9860  else
9861  // Already on previous page.
9862  break;
9863  }
9864  if(bufferImageGranularityConflict)
9865  {
9866  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
9867  }
9868  }
9869 
9870  pAllocationRequest->itemsToMakeLostCount = 0;
9871  pAllocationRequest->sumItemSize = 0;
9872  size_t index1st = m_1stNullItemsBeginCount;
9873 
9874  if(canMakeOtherLost)
9875  {
9876  while(index1st < suballocations1st.size() &&
9877  resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
9878  {
9879  // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
9880  const VmaSuballocation& suballoc = suballocations1st[index1st];
9881  if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
9882  {
9883  // No problem.
9884  }
9885  else
9886  {
9887  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
9888  if(suballoc.hAllocation->CanBecomeLost() &&
9889  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
9890  {
9891  ++pAllocationRequest->itemsToMakeLostCount;
9892  pAllocationRequest->sumItemSize += suballoc.size;
9893  }
9894  else
9895  {
9896  return false;
9897  }
9898  }
9899  ++index1st;
9900  }
9901 
9902  // Check next suballocations for BufferImageGranularity conflicts.
9903  // If conflict exists, we must mark more allocations lost or fail.
9904  if(bufferImageGranularity > 1)
9905  {
9906  while(index1st < suballocations1st.size())
9907  {
9908  const VmaSuballocation& suballoc = suballocations1st[index1st];
9909  if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
9910  {
9911  if(suballoc.hAllocation != VK_NULL_HANDLE)
9912  {
9913  // Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
9914  if(suballoc.hAllocation->CanBecomeLost() &&
9915  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
9916  {
9917  ++pAllocationRequest->itemsToMakeLostCount;
9918  pAllocationRequest->sumItemSize += suballoc.size;
9919  }
9920  else
9921  {
9922  return false;
9923  }
9924  }
9925  }
9926  else
9927  {
9928  // Already on next page.
9929  break;
9930  }
9931  ++index1st;
9932  }
9933  }
9934  }
9935 
9936  // There is enough free space at the end after alignment.
9937  if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN < size) ||
9938  (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
9939  {
9940  // Check next suballocations for BufferImageGranularity conflicts.
9941  // If conflict exists, allocation cannot be made here.
9942  if(bufferImageGranularity > 1)
9943  {
9944  for(size_t nextSuballocIndex = index1st;
9945  nextSuballocIndex < suballocations1st.size();
9946  nextSuballocIndex++)
9947  {
9948  const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
9949  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
9950  {
9951  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
9952  {
9953  return false;
9954  }
9955  }
9956  else
9957  {
9958  // Already on next page.
9959  break;
9960  }
9961  }
9962  }
9963 
9964  // All tests passed: Success.
9965  pAllocationRequest->offset = resultOffset;
9966  pAllocationRequest->sumFreeSize =
9967  (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
9968  - resultBaseOffset
9969  - pAllocationRequest->sumItemSize;
9970  // pAllocationRequest->item unused.
9971  return true;
9972  }
9973  }
9974  }
9975 
9976  return false;
9977 }
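// Illustrative caller (a sketch, assuming a hypothetical pMetadata pointing to
// this class and locals with the obvious meanings; in the library these calls
// are made internally, e.g. from the block vector):
//
//   VmaAllocationRequest request = {};
//   if(pMetadata->CreateAllocationRequest(
//       currentFrameIndex, frameInUseCount, bufferImageGranularity,
//       allocSize, allocAlignment,
//       false, // upperAddress: true would allocate from the top (double stack)
//       allocType,
//       false, // canMakeOtherLost
//       0,     // strategy
//       &request))
//   {
//       pMetadata->Alloc(request, allocType, allocSize, false, hAllocation);
//   }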
9978 
9979 bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
9980  uint32_t currentFrameIndex,
9981  uint32_t frameInUseCount,
9982  VmaAllocationRequest* pAllocationRequest)
9983 {
9984  if(pAllocationRequest->itemsToMakeLostCount == 0)
9985  {
9986  return true;
9987  }
9988 
9989  VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);
9990 
9991  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9992  size_t index1st = m_1stNullItemsBeginCount;
9993  size_t madeLostCount = 0;
9994  while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
9995  {
9996  VMA_ASSERT(index1st < suballocations1st.size());
9997  VmaSuballocation& suballoc = suballocations1st[index1st];
9998  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
9999  {
10000  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
10001  VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
10002  if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
10003  {
10004  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10005  suballoc.hAllocation = VK_NULL_HANDLE;
10006  m_SumFreeSize += suballoc.size;
10007  ++m_1stNullItemsMiddleCount;
10008  ++madeLostCount;
10009  }
10010  else
10011  {
10012  return false;
10013  }
10014  }
10015  ++index1st;
10016  }
10017 
10018  CleanupAfterFree();
10019  //VMA_HEAVY_ASSERT(Validate()); // Already called by CleanupAfterFree().
10020 
10021  return true;
10022 }
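// Note: allocations are made lost only from the beginning of the 1st vector,
// which is the only region a wrapping ring-buffer allocation can collide with.
// The freed slots become null items and CleanupAfterFree() compacts them.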
10023 
10024 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
10025 {
10026  uint32_t lostAllocationCount = 0;
10027 
10028  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10029  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
10030  {
10031  VmaSuballocation& suballoc = suballocations1st[i];
10032  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
10033  suballoc.hAllocation->CanBecomeLost() &&
10034  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
10035  {
10036  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10037  suballoc.hAllocation = VK_NULL_HANDLE;
10038  ++m_1stNullItemsMiddleCount;
10039  m_SumFreeSize += suballoc.size;
10040  ++lostAllocationCount;
10041  }
10042  }
10043 
10044  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10045  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
10046  {
10047  VmaSuballocation& suballoc = suballocations2nd[i];
10048  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
10049  suballoc.hAllocation->CanBecomeLost() &&
10050  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
10051  {
10052  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10053  suballoc.hAllocation = VK_NULL_HANDLE;
10054  ++m_2ndNullItemsCount;
10055  ++lostAllocationCount;
10056  }
10057  }
10058 
10059  if(lostAllocationCount)
10060  {
10061  CleanupAfterFree();
10062  }
10063 
10064  return lostAllocationCount;
10065 }
10066 
10067 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
10068 {
10069  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10070  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
10071  {
10072  const VmaSuballocation& suballoc = suballocations1st[i];
10073  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
10074  {
10075  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
10076  {
10077  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
10078  return VK_ERROR_VALIDATION_FAILED_EXT;
10079  }
10080  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
10081  {
10082  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
10083  return VK_ERROR_VALIDATION_FAILED_EXT;
10084  }
10085  }
10086  }
10087 
10088  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10089  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
10090  {
10091  const VmaSuballocation& suballoc = suballocations2nd[i];
10092  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
10093  {
10094  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
10095  {
10096  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
10097  return VK_ERROR_VALIDATION_FAILED_EXT;
10098  }
10099  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
10100  {
10101  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
10102  return VK_ERROR_VALIDATION_FAILED_EXT;
10103  }
10104  }
10105  }
10106 
10107  return VK_SUCCESS;
10108 }
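// Note: corruption detection relies on VMA_DEBUG_MARGIN bytes of magic values
// written before and after every allocation, which is why both loops validate
// the bytes at suballoc.offset - VMA_DEBUG_MARGIN and at
// suballoc.offset + suballoc.size.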
10109 
10110 void VmaBlockMetadata_Linear::Alloc(
10111  const VmaAllocationRequest& request,
10112  VmaSuballocationType type,
10113  VkDeviceSize allocSize,
10114  bool upperAddress,
10115  VmaAllocation hAllocation)
10116 {
10117  const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };
10118 
10119  if(upperAddress)
10120  {
10121  VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
10122  "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
10123  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10124  suballocations2nd.push_back(newSuballoc);
10125  m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
10126  }
10127  else
10128  {
10129  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10130 
10131  // First allocation.
10132  if(suballocations1st.empty())
10133  {
10134  suballocations1st.push_back(newSuballoc);
10135  }
10136  else
10137  {
10138  // New allocation at the end of 1st vector.
10139  if(request.offset >= suballocations1st.back().offset + suballocations1st.back().size)
10140  {
10141  // Check if it fits before the end of the block.
10142  VMA_ASSERT(request.offset + allocSize <= GetSize());
10143  suballocations1st.push_back(newSuballoc);
10144  }
10145  // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
10146  else if(request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset)
10147  {
10148  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10149 
10150  switch(m_2ndVectorMode)
10151  {
10152  case SECOND_VECTOR_EMPTY:
10153  // First allocation from second part ring buffer.
10154  VMA_ASSERT(suballocations2nd.empty());
10155  m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
10156  break;
10157  case SECOND_VECTOR_RING_BUFFER:
10158  // 2-part ring buffer is already started.
10159  VMA_ASSERT(!suballocations2nd.empty());
10160  break;
10161  case SECOND_VECTOR_DOUBLE_STACK:
10162  VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
10163  break;
10164  default:
10165  VMA_ASSERT(0);
10166  }
10167 
10168  suballocations2nd.push_back(newSuballoc);
10169  }
10170  else
10171  {
10172  VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
10173  }
10174  }
10175  }
10176 
10177  m_SumFreeSize -= newSuballoc.size;
10178 }
10179 
10180 void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
10181 {
10182  FreeAtOffset(allocation->GetOffset());
10183 }
10184 
10185 void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
10186 {
10187  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10188  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10189 
10190  if(!suballocations1st.empty())
10191  {
10192  // First allocation: Mark it as next empty at the beginning.
10193  VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
10194  if(firstSuballoc.offset == offset)
10195  {
10196  firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10197  firstSuballoc.hAllocation = VK_NULL_HANDLE;
10198  m_SumFreeSize += firstSuballoc.size;
10199  ++m_1stNullItemsBeginCount;
10200  CleanupAfterFree();
10201  return;
10202  }
10203  }
10204 
10205  // Last allocation in 2-part ring buffer or top of upper stack (same logic).
10206  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
10207  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
10208  {
10209  VmaSuballocation& lastSuballoc = suballocations2nd.back();
10210  if(lastSuballoc.offset == offset)
10211  {
10212  m_SumFreeSize += lastSuballoc.size;
10213  suballocations2nd.pop_back();
10214  CleanupAfterFree();
10215  return;
10216  }
10217  }
10218  // Last allocation in 1st vector.
10219  else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
10220  {
10221  VmaSuballocation& lastSuballoc = suballocations1st.back();
10222  if(lastSuballoc.offset == offset)
10223  {
10224  m_SumFreeSize += lastSuballoc.size;
10225  suballocations1st.pop_back();
10226  CleanupAfterFree();
10227  return;
10228  }
10229  }
10230 
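// The fast paths above handle the O(1) cases: the oldest item of the 1st
// vector and the newest item of either vector. Anything else is located by
// binary search, which works because both vectors are sorted by offset
// (descending in the 2nd vector when it is used as an upper stack).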
10231  // Item from the middle of 1st vector.
10232  {
10233  VmaSuballocation refSuballoc;
10234  refSuballoc.offset = offset;
10235  // The remaining members intentionally stay uninitialized for better performance.
10236  SuballocationVectorType::iterator it = VmaVectorFindSorted<VmaSuballocationOffsetLess>(
10237  suballocations1st.begin() + m_1stNullItemsBeginCount,
10238  suballocations1st.end(),
10239  refSuballoc);
10240  if(it != suballocations1st.end())
10241  {
10242  it->type = VMA_SUBALLOCATION_TYPE_FREE;
10243  it->hAllocation = VK_NULL_HANDLE;
10244  ++m_1stNullItemsMiddleCount;
10245  m_SumFreeSize += it->size;
10246  CleanupAfterFree();
10247  return;
10248  }
10249  }
10250 
10251  if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
10252  {
10253  // Item from the middle of 2nd vector.
10254  VmaSuballocation refSuballoc;
10255  refSuballoc.offset = offset;
10256  // The remaining members intentionally stay uninitialized for better performance.
10257  SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
10258  VmaVectorFindSorted<VmaSuballocationOffsetLess>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc) :
10259  VmaVectorFindSorted<VmaSuballocationOffsetGreater>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc);
10260  if(it != suballocations2nd.end())
10261  {
10262  it->type = VMA_SUBALLOCATION_TYPE_FREE;
10263  it->hAllocation = VK_NULL_HANDLE;
10264  ++m_2ndNullItemsCount;
10265  m_SumFreeSize += it->size;
10266  CleanupAfterFree();
10267  return;
10268  }
10269  }
10270 
10271  VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
10272 }
10273 
10274 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
10275 {
10276  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
10277  const size_t suballocCount = AccessSuballocations1st().size();
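// Heuristic: compact only when the vector is non-trivial (more than 32 items)
// and null items dominate it - the condition below is equivalent to
// nullItemCount >= 1.5 * nonNullItemCount, i.e. at least 60% null items.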
10278  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
10279 }
10280 
10281 void VmaBlockMetadata_Linear::CleanupAfterFree()
10282 {
10283  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10284  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10285 
10286  if(IsEmpty())
10287  {
10288  suballocations1st.clear();
10289  suballocations2nd.clear();
10290  m_1stNullItemsBeginCount = 0;
10291  m_1stNullItemsMiddleCount = 0;
10292  m_2ndNullItemsCount = 0;
10293  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
10294  }
10295  else
10296  {
10297  const size_t suballoc1stCount = suballocations1st.size();
10298  const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
10299  VMA_ASSERT(nullItem1stCount <= suballoc1stCount);
10300 
10301  // Find more null items at the beginning of 1st vector.
10302  while(m_1stNullItemsBeginCount < suballoc1stCount &&
10303  suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
10304  {
10305  ++m_1stNullItemsBeginCount;
10306  --m_1stNullItemsMiddleCount;
10307  }
10308 
10309  // Find more null items at the end of 1st vector.
10310  while(m_1stNullItemsMiddleCount > 0 &&
10311  suballocations1st.back().hAllocation == VK_NULL_HANDLE)
10312  {
10313  --m_1stNullItemsMiddleCount;
10314  suballocations1st.pop_back();
10315  }
10316 
10317  // Find more null items at the end of 2nd vector.
10318  while(m_2ndNullItemsCount > 0 &&
10319  suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
10320  {
10321  --m_2ndNullItemsCount;
10322  suballocations2nd.pop_back();
10323  }
10324 
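// Compaction moves all remaining non-null items to the front, preserving
// their order, then shrinks the vector. For example, [null, A, null, B]
// becomes [A, B] with both null-item counters reset to zero.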
10325  if(ShouldCompact1st())
10326  {
10327  const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
10328  size_t srcIndex = m_1stNullItemsBeginCount;
10329  for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
10330  {
10331  while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
10332  {
10333  ++srcIndex;
10334  }
10335  if(dstIndex != srcIndex)
10336  {
10337  suballocations1st[dstIndex] = suballocations1st[srcIndex];
10338  }
10339  ++srcIndex;
10340  }
10341  suballocations1st.resize(nonNullItemCount);
10342  m_1stNullItemsBeginCount = 0;
10343  m_1stNullItemsMiddleCount = 0;
10344  }
10345 
10346  // 2nd vector became empty.
10347  if(suballocations2nd.empty())
10348  {
10349  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
10350  }
10351 
10352  // 1st vector became empty.
10353  if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
10354  {
10355  suballocations1st.clear();
10356  m_1stNullItemsBeginCount = 0;
10357 
10358  if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10359  {
10360  // Swap 1st with 2nd. Now 2nd is empty.
10361  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
10362  m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
10363  while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
10364  suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
10365  {
10366  ++m_1stNullItemsBeginCount;
10367  --m_1stNullItemsMiddleCount;
10368  }
10369  m_2ndNullItemsCount = 0;
10370  m_1stVectorIndex ^= 1;
10371  }
10372  }
10373  }
10374 
10375  VMA_HEAVY_ASSERT(Validate());
10376 }
10377 
10378 
10379 ////////////////////////////////////////////////////////////////////////////////
10380 // class VmaBlockMetadata_Buddy
10381 
10382 VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
10383  VmaBlockMetadata(hAllocator),
10384  m_Root(VMA_NULL),
10385  m_AllocationCount(0),
10386  m_FreeCount(1),
10387  m_SumFreeSize(0)
10388 {
10389  memset(m_FreeList, 0, sizeof(m_FreeList));
10390 }
10391 
10392 VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
10393 {
10394  DeleteNode(m_Root);
10395 }
10396 
10397 void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
10398 {
10399  VmaBlockMetadata::Init(size);
10400 
10401  m_UsableSize = VmaPrevPow2(size);
10402  m_SumFreeSize = m_UsableSize;
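// Note: the buddy algorithm manages only the largest power-of-2 prefix of
// the block. Any tail beyond m_UsableSize is never allocated from and is
// reported separately via GetUnusableSize().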
10403 
10404  // Calculate m_LevelCount.
10405  m_LevelCount = 1;
10406  while(m_LevelCount < MAX_LEVELS &&
10407  LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
10408  {
10409  ++m_LevelCount;
10410  }
10411 
10412  Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
10413  rootNode->offset = 0;
10414  rootNode->type = Node::TYPE_FREE;
10415  rootNode->parent = VMA_NULL;
10416  rootNode->buddy = VMA_NULL;
10417 
10418  m_Root = rootNode;
10419  AddToFreeListFront(0, rootNode);
10420 }
10421 
10422 bool VmaBlockMetadata_Buddy::Validate() const
10423 {
10424  // Validate tree.
10425  ValidationContext ctx;
10426  if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
10427  {
10428  VMA_VALIDATE(false && "ValidateNode failed.");
10429  }
10430  VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
10431  VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);
10432 
10433  // Validate free node lists.
10434  for(uint32_t level = 0; level < m_LevelCount; ++level)
10435  {
10436  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
10437  m_FreeList[level].front->free.prev == VMA_NULL);
10438 
10439  for(Node* node = m_FreeList[level].front;
10440  node != VMA_NULL;
10441  node = node->free.next)
10442  {
10443  VMA_VALIDATE(node->type == Node::TYPE_FREE);
10444 
10445  if(node->free.next == VMA_NULL)
10446  {
10447  VMA_VALIDATE(m_FreeList[level].back == node);
10448  }
10449  else
10450  {
10451  VMA_VALIDATE(node->free.next->free.prev == node);
10452  }
10453  }
10454  }
10455 
10456  // Validate that free lists at higher levels are empty.
10457  for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
10458  {
10459  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
10460  }
10461 
10462  return true;
10463 }
10464 
10465 VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
10466 {
10467  for(uint32_t level = 0; level < m_LevelCount; ++level)
10468  {
10469  if(m_FreeList[level].front != VMA_NULL)
10470  {
10471  return LevelToNodeSize(level);
10472  }
10473  }
10474  return 0;
10475 }
10476 
10477 void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
10478 {
10479  const VkDeviceSize unusableSize = GetUnusableSize();
10480 
10481  outInfo.blockCount = 1;
10482 
10483  outInfo.allocationCount = outInfo.unusedRangeCount = 0;
10484  outInfo.usedBytes = outInfo.unusedBytes = 0;
10485 
10486  outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
10487  outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
10488  outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.
10489 
10490  CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));
10491 
10492  if(unusableSize > 0)
10493  {
10494  ++outInfo.unusedRangeCount;
10495  outInfo.unusedBytes += unusableSize;
10496  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
10497  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
10498  }
10499 }
10500 
10501 void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
10502 {
10503  const VkDeviceSize unusableSize = GetUnusableSize();
10504 
10505  inoutStats.size += GetSize();
10506  inoutStats.unusedSize += m_SumFreeSize + unusableSize;
10507  inoutStats.allocationCount += m_AllocationCount;
10508  inoutStats.unusedRangeCount += m_FreeCount;
10509  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
10510 
10511  if(unusableSize > 0)
10512  {
10513  ++inoutStats.unusedRangeCount;
10514  // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations.
10515  }
10516 }
10517 
10518 #if VMA_STATS_STRING_ENABLED
10519 
10520 void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
10521 {
10522  // TODO optimize
10523  VmaStatInfo stat;
10524  CalcAllocationStatInfo(stat);
10525 
10526  PrintDetailedMap_Begin(
10527  json,
10528  stat.unusedBytes,
10529  stat.allocationCount,
10530  stat.unusedRangeCount);
10531 
10532  PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));
10533 
10534  const VkDeviceSize unusableSize = GetUnusableSize();
10535  if(unusableSize > 0)
10536  {
10537  PrintDetailedMap_UnusedRange(json,
10538  m_UsableSize, // offset
10539  unusableSize); // size
10540  }
10541 
10542  PrintDetailedMap_End(json);
10543 }
10544 
10545 #endif // #if VMA_STATS_STRING_ENABLED
10546 
10547 bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
10548  uint32_t currentFrameIndex,
10549  uint32_t frameInUseCount,
10550  VkDeviceSize bufferImageGranularity,
10551  VkDeviceSize allocSize,
10552  VkDeviceSize allocAlignment,
10553  bool upperAddress,
10554  VmaSuballocationType allocType,
10555  bool canMakeOtherLost,
10556  uint32_t strategy,
10557  VmaAllocationRequest* pAllocationRequest)
10558 {
10559  VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
10560 
10561  // Simple way to respect bufferImageGranularity. May be optimized some day.
10562  // Whenever it might be an OPTIMAL image...
10563  if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
10564  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
10565  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
10566  {
10567  allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
10568  allocSize = VMA_MAX(allocSize, bufferImageGranularity);
10569  }
10570 
10571  if(allocSize > m_UsableSize)
10572  {
10573  return false;
10574  }
10575 
10576  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
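// Scan free lists starting at the best-fitting level and moving toward the
// root (note that `level--` iterates targetLevel, targetLevel-1, ..., 0), taking
// the first free node whose offset satisfies the alignment. A node found at a
// larger size is split down to targetLevel later, in Alloc().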
10577  for(uint32_t level = targetLevel + 1; level--; )
10578  {
10579  for(Node* freeNode = m_FreeList[level].front;
10580  freeNode != VMA_NULL;
10581  freeNode = freeNode->free.next)
10582  {
10583  if(freeNode->offset % allocAlignment == 0)
10584  {
10585  pAllocationRequest->offset = freeNode->offset;
10586  pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
10587  pAllocationRequest->sumItemSize = 0;
10588  pAllocationRequest->itemsToMakeLostCount = 0;
10589  pAllocationRequest->customData = (void*)(uintptr_t)level;
10590  return true;
10591  }
10592  }
10593  }
10594 
10595  return false;
10596 }
10597 
10598 bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
10599  uint32_t currentFrameIndex,
10600  uint32_t frameInUseCount,
10601  VmaAllocationRequest* pAllocationRequest)
10602 {
10603  /*
10604  Lost allocations are not supported in buddy allocator at the moment.
10605  Support might be added in the future.
10606  */
10607  return pAllocationRequest->itemsToMakeLostCount == 0;
10608 }
10609 
10610 uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
10611 {
10612  /*
10613  Lost allocations are not supported in buddy allocator at the moment.
10614  Support might be added in the future.
10615  */
10616  return 0;
10617 }
10618 
10619 void VmaBlockMetadata_Buddy::Alloc(
10620  const VmaAllocationRequest& request,
10621  VmaSuballocationType type,
10622  VkDeviceSize allocSize,
10623  bool upperAddress,
10624  VmaAllocation hAllocation)
10625 {
10626  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
10627  uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;
10628 
10629  Node* currNode = m_FreeList[currLevel].front;
10630  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
10631  while(currNode->offset != request.offset)
10632  {
10633  currNode = currNode->free.next;
10634  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
10635  }
10636 
10637  // Go down, splitting free nodes.
10638  while(currLevel < targetLevel)
10639  {
10640  // currNode is already first free node at currLevel.
10641  // Remove it from list of free nodes at this currLevel.
10642  RemoveFromFreeList(currLevel, currNode);
10643 
10644  const uint32_t childrenLevel = currLevel + 1;
10645 
10646  // Create two free sub-nodes.
10647  Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
10648  Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();
10649 
10650  leftChild->offset = currNode->offset;
10651  leftChild->type = Node::TYPE_FREE;
10652  leftChild->parent = currNode;
10653  leftChild->buddy = rightChild;
10654 
10655  rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
10656  rightChild->type = Node::TYPE_FREE;
10657  rightChild->parent = currNode;
10658  rightChild->buddy = leftChild;
10659 
10660  // Convert currNode to split type.
10661  currNode->type = Node::TYPE_SPLIT;
10662  currNode->split.leftChild = leftChild;
10663 
10664  // Add child nodes to free list. Order is important!
10665  AddToFreeListFront(childrenLevel, rightChild);
10666  AddToFreeListFront(childrenLevel, leftChild);
10667 
10668  ++m_FreeCount;
10669  //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2.
10670  ++currLevel;
10671  currNode = m_FreeList[currLevel].front;
10672 
10673  /*
10674  We can be sure that currNode, as left child of node previously split,
10675  also fulfills the alignment requirement.
10676  */
10677  }
10678 
10679  // Remove from free list.
10680  VMA_ASSERT(currLevel == targetLevel &&
10681  currNode != VMA_NULL &&
10682  currNode->type == Node::TYPE_FREE);
10683  RemoveFromFreeList(currLevel, currNode);
10684 
10685  // Convert to allocation node.
10686  currNode->type = Node::TYPE_ALLOCATION;
10687  currNode->allocation.alloc = hAllocation;
10688 
10689  ++m_AllocationCount;
10690  --m_FreeCount;
10691  m_SumFreeSize -= allocSize;
10692 }
10693 
10694 void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
10695 {
10696  if(node->type == Node::TYPE_SPLIT)
10697  {
10698  DeleteNode(node->split.leftChild->buddy);
10699  DeleteNode(node->split.leftChild);
10700  }
10701 
10702  vma_delete(GetAllocationCallbacks(), node);
10703 }
10704 
10705 bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
10706 {
10707  VMA_VALIDATE(level < m_LevelCount);
10708  VMA_VALIDATE(curr->parent == parent);
10709  VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
10710  VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
10711  switch(curr->type)
10712  {
10713  case Node::TYPE_FREE:
10714  // curr->free.prev, next are validated separately.
10715  ctx.calculatedSumFreeSize += levelNodeSize;
10716  ++ctx.calculatedFreeCount;
10717  break;
10718  case Node::TYPE_ALLOCATION:
10719  ++ctx.calculatedAllocationCount;
10720  VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE);
10721  ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
10722  break;
10723  case Node::TYPE_SPLIT:
10724  {
10725  const uint32_t childrenLevel = level + 1;
10726  const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
10727  const Node* const leftChild = curr->split.leftChild;
10728  VMA_VALIDATE(leftChild != VMA_NULL);
10729  VMA_VALIDATE(leftChild->offset == curr->offset);
10730  if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
10731  {
10732  VMA_VALIDATE(false && "ValidateNode for left child failed.");
10733  }
10734  const Node* const rightChild = leftChild->buddy;
10735  VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
10736  if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
10737  {
10738  VMA_VALIDATE(false && "ValidateNode for right child failed.");
10739  }
10740  }
10741  break;
10742  default:
10743  return false;
10744  }
10745 
10746  return true;
10747 }
10748 
10749 uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
10750 {
10751  // I know this could be optimized somehow e.g. by using std::log2p1 from C++20.
10752  uint32_t level = 0;
10753  VkDeviceSize currLevelNodeSize = m_UsableSize;
10754  VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
10755  while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
10756  {
10757  ++level;
10758  currLevelNodeSize = nextLevelNodeSize;
10759  nextLevelNodeSize = currLevelNodeSize >> 1;
10760  }
10761  return level;
10762 }
10763 
10764 void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
10765 {
10766  // Find node and level.
10767  Node* node = m_Root;
10768  VkDeviceSize nodeOffset = 0;
10769  uint32_t level = 0;
10770  VkDeviceSize levelNodeSize = LevelToNodeSize(0);
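// Walk down from the root using the offset: at each split node the left
// child covers the lower half of the node's range and the right child the
// upper half. E.g. in a 256-byte block, an allocation at offset 192 on the
// 64-byte level descends right (bytes 128..255), then right again (192..255).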
10771  while(node->type == Node::TYPE_SPLIT)
10772  {
10773  const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
10774  if(offset < nodeOffset + nextLevelSize)
10775  {
10776  node = node->split.leftChild;
10777  }
10778  else
10779  {
10780  node = node->split.leftChild->buddy;
10781  nodeOffset += nextLevelSize;
10782  }
10783  ++level;
10784  levelNodeSize = nextLevelSize;
10785  }
10786 
10787  VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
10788  VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);
10789 
10790  ++m_FreeCount;
10791  --m_AllocationCount;
10792  m_SumFreeSize += alloc->GetSize();
10793 
10794  node->type = Node::TYPE_FREE;
10795 
10796  // Join free nodes if possible.
10797  while(level > 0 && node->buddy->type == Node::TYPE_FREE)
10798  {
10799  RemoveFromFreeList(level, node->buddy);
10800  Node* const parent = node->parent;
10801 
10802  vma_delete(GetAllocationCallbacks(), node->buddy);
10803  vma_delete(GetAllocationCallbacks(), node);
10804  parent->type = Node::TYPE_FREE;
10805 
10806  node = parent;
10807  --level;
10808  //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
10809  --m_FreeCount;
10810  }
10811 
10812  AddToFreeListFront(level, node);
10813 }
10814 
10815 void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
10816 {
10817  switch(node->type)
10818  {
10819  case Node::TYPE_FREE:
10820  ++outInfo.unusedRangeCount;
10821  outInfo.unusedBytes += levelNodeSize;
10822  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
10823  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, levelNodeSize);
10824  break;
10825  case Node::TYPE_ALLOCATION:
10826  {
10827  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
10828  ++outInfo.allocationCount;
10829  outInfo.usedBytes += allocSize;
10830  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
10831  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, allocSize);
10832 
10833  const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
10834  if(unusedRangeSize > 0)
10835  {
10836  ++outInfo.unusedRangeCount;
10837  outInfo.unusedBytes += unusedRangeSize;
10838  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
10839  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
10840  }
10841  }
10842  break;
10843  case Node::TYPE_SPLIT:
10844  {
10845  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
10846  const Node* const leftChild = node->split.leftChild;
10847  CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
10848  const Node* const rightChild = leftChild->buddy;
10849  CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
10850  }
10851  break;
10852  default:
10853  VMA_ASSERT(0);
10854  }
10855 }
10856 
10857 void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
10858 {
10859  VMA_ASSERT(node->type == Node::TYPE_FREE);
10860 
10861  // List is empty.
10862  Node* const frontNode = m_FreeList[level].front;
10863  if(frontNode == VMA_NULL)
10864  {
10865  VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
10866  node->free.prev = node->free.next = VMA_NULL;
10867  m_FreeList[level].front = m_FreeList[level].back = node;
10868  }
10869  else
10870  {
10871  VMA_ASSERT(frontNode->free.prev == VMA_NULL);
10872  node->free.prev = VMA_NULL;
10873  node->free.next = frontNode;
10874  frontNode->free.prev = node;
10875  m_FreeList[level].front = node;
10876  }
10877 }
10878 
10879 void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
10880 {
10881  VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
10882 
10883  // It is at the front.
10884  if(node->free.prev == VMA_NULL)
10885  {
10886  VMA_ASSERT(m_FreeList[level].front == node);
10887  m_FreeList[level].front = node->free.next;
10888  }
10889  else
10890  {
10891  Node* const prevFreeNode = node->free.prev;
10892  VMA_ASSERT(prevFreeNode->free.next == node);
10893  prevFreeNode->free.next = node->free.next;
10894  }
10895 
10896  // It is at the back.
10897  if(node->free.next == VMA_NULL)
10898  {
10899  VMA_ASSERT(m_FreeList[level].back == node);
10900  m_FreeList[level].back = node->free.prev;
10901  }
10902  else
10903  {
10904  Node* const nextFreeNode = node->free.next;
10905  VMA_ASSERT(nextFreeNode->free.prev == node);
10906  nextFreeNode->free.prev = node->free.prev;
10907  }
10908 }
10909 
10910 #if VMA_STATS_STRING_ENABLED
10911 void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
10912 {
10913  switch(node->type)
10914  {
10915  case Node::TYPE_FREE:
10916  PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
10917  break;
10918  case Node::TYPE_ALLOCATION:
10919  {
10920  PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
10921  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
10922  if(allocSize < levelNodeSize)
10923  {
10924  PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize);
10925  }
10926  }
10927  break;
10928  case Node::TYPE_SPLIT:
10929  {
10930  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
10931  const Node* const leftChild = node->split.leftChild;
10932  PrintDetailedMapNode(json, leftChild, childrenNodeSize);
10933  const Node* const rightChild = leftChild->buddy;
10934  PrintDetailedMapNode(json, rightChild, childrenNodeSize);
10935  }
10936  break;
10937  default:
10938  VMA_ASSERT(0);
10939  }
10940 }
10941 #endif // #if VMA_STATS_STRING_ENABLED
10942 
10943 
10944 ////////////////////////////////////////////////////////////////////////////////
10945 // class VmaDeviceMemoryBlock
10946 
10947 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
10948  m_pMetadata(VMA_NULL),
10949  m_MemoryTypeIndex(UINT32_MAX),
10950  m_Id(0),
10951  m_hMemory(VK_NULL_HANDLE),
10952  m_MapCount(0),
10953  m_pMappedData(VMA_NULL)
10954 {
10955 }
10956 
10957 void VmaDeviceMemoryBlock::Init(
10958  VmaAllocator hAllocator,
10959  uint32_t newMemoryTypeIndex,
10960  VkDeviceMemory newMemory,
10961  VkDeviceSize newSize,
10962  uint32_t id,
10963  uint32_t algorithm)
10964 {
10965  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
10966 
10967  m_MemoryTypeIndex = newMemoryTypeIndex;
10968  m_Id = id;
10969  m_hMemory = newMemory;
10970 
10971  switch(algorithm)
10972  {
10973  case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
10974  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
10975  break;
10976  case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
10977  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
10978  break;
10979  default:
10980  VMA_ASSERT(0);
10981  // Fall-through.
10982  case 0:
10983  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
10984  }
10985  m_pMetadata->Init(newSize);
10986 }
10987 
10988 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
10989 {
10990  // This is the most important assert in the entire library.
10991  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
10992  VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
10993 
10994  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
10995  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
10996  m_hMemory = VK_NULL_HANDLE;
10997 
10998  vma_delete(allocator, m_pMetadata);
10999  m_pMetadata = VMA_NULL;
11000 }
11001 
11002 bool VmaDeviceMemoryBlock::Validate() const
11003 {
11004  VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
11005  (m_pMetadata->GetSize() != 0));
11006 
11007  return m_pMetadata->Validate();
11008 }
11009 
11010 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
11011 {
11012  void* pData = nullptr;
11013  VkResult res = Map(hAllocator, 1, &pData);
11014  if(res != VK_SUCCESS)
11015  {
11016  return res;
11017  }
11018 
11019  res = m_pMetadata->CheckCorruption(pData);
11020 
11021  Unmap(hAllocator, 1);
11022 
11023  return res;
11024 }
11025 
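// Mapping is reference-counted: vkMapMemory is called on the first reference
// and vkUnmapMemory only when the count returns to zero in Unmap(). A usage
// sketch (hypothetical caller):
//     void* ptr = VMA_NULL;
//     block->Map(hAllocator, 1, &ptr); // maps on first reference
//     // ... use ptr ...
//     block->Unmap(hAllocator, 1);     // unmaps when the count drops to zero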
11026 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
11027 {
11028  if(count == 0)
11029  {
11030  return VK_SUCCESS;
11031  }
11032 
11033  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11034  if(m_MapCount != 0)
11035  {
11036  m_MapCount += count;
11037  VMA_ASSERT(m_pMappedData != VMA_NULL);
11038  if(ppData != VMA_NULL)
11039  {
11040  *ppData = m_pMappedData;
11041  }
11042  return VK_SUCCESS;
11043  }
11044  else
11045  {
11046  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
11047  hAllocator->m_hDevice,
11048  m_hMemory,
11049  0, // offset
11050  VK_WHOLE_SIZE,
11051  0, // flags
11052  &m_pMappedData);
11053  if(result == VK_SUCCESS)
11054  {
11055  if(ppData != VMA_NULL)
11056  {
11057  *ppData = m_pMappedData;
11058  }
11059  m_MapCount = count;
11060  }
11061  return result;
11062  }
11063 }
11064 
11065 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
11066 {
11067  if(count == 0)
11068  {
11069  return;
11070  }
11071 
11072  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11073  if(m_MapCount >= count)
11074  {
11075  m_MapCount -= count;
11076  if(m_MapCount == 0)
11077  {
11078  m_pMappedData = VMA_NULL;
11079  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
11080  }
11081  }
11082  else
11083  {
11084  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
11085  }
11086 }
11087 
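// Corruption-detection sketch: each allocation is surrounded by margins of
// VMA_DEBUG_MARGIN bytes. A magic value is written at the start of the margin
// preceding the allocation (allocOffset - VMA_DEBUG_MARGIN) and right after
// its end (allocOffset + allocSize); ValidateMagicValueAroundAllocation()
// re-checks both spots when the allocation is freed.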
11088 VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
11089 {
11090  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
11091  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
11092 
11093  void* pData;
11094  VkResult res = Map(hAllocator, 1, &pData);
11095  if(res != VK_SUCCESS)
11096  {
11097  return res;
11098  }
11099 
11100  VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
11101  VmaWriteMagicValue(pData, allocOffset + allocSize);
11102 
11103  Unmap(hAllocator, 1);
11104 
11105  return VK_SUCCESS;
11106 }
11107 
11108 VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
11109 {
11110  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
11111  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
11112 
11113  void* pData;
11114  VkResult res = Map(hAllocator, 1, &pData);
11115  if(res != VK_SUCCESS)
11116  {
11117  return res;
11118  }
11119 
11120  if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
11121  {
11122  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
11123  }
11124  else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
11125  {
11126  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
11127  }
11128 
11129  Unmap(hAllocator, 1);
11130 
11131  return VK_SUCCESS;
11132 }
11133 
11134 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
11135  const VmaAllocator hAllocator,
11136  const VmaAllocation hAllocation,
11137  VkBuffer hBuffer)
11138 {
11139  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
11140  hAllocation->GetBlock() == this);
11141  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
11142  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11143  return hAllocator->GetVulkanFunctions().vkBindBufferMemory(
11144  hAllocator->m_hDevice,
11145  hBuffer,
11146  m_hMemory,
11147  hAllocation->GetOffset());
11148 }
11149 
11150 VkResult VmaDeviceMemoryBlock::BindImageMemory(
11151  const VmaAllocator hAllocator,
11152  const VmaAllocation hAllocation,
11153  VkImage hImage)
11154 {
11155  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
11156  hAllocation->GetBlock() == this);
11157  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
11158  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11159  return hAllocator->GetVulkanFunctions().vkBindImageMemory(
11160  hAllocator->m_hDevice,
11161  hImage,
11162  m_hMemory,
11163  hAllocation->GetOffset());
11164 }
11165 
11166 static void InitStatInfo(VmaStatInfo& outInfo)
11167 {
11168  memset(&outInfo, 0, sizeof(outInfo));
11169  outInfo.allocationSizeMin = UINT64_MAX;
11170  outInfo.unusedRangeSizeMin = UINT64_MAX;
11171 }
11172 
11173 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
11174 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
11175 {
11176  inoutInfo.blockCount += srcInfo.blockCount;
11177  inoutInfo.allocationCount += srcInfo.allocationCount;
11178  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
11179  inoutInfo.usedBytes += srcInfo.usedBytes;
11180  inoutInfo.unusedBytes += srcInfo.unusedBytes;
11181  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
11182  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
11183  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
11184  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
11185 }
11186 
11187 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
11188 {
11189  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
11190  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
11191  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
11192  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
11193 }
11194 
11195 VmaPool_T::VmaPool_T(
11196  VmaAllocator hAllocator,
11197  const VmaPoolCreateInfo& createInfo,
11198  VkDeviceSize preferredBlockSize) :
11199  m_BlockVector(
11200  hAllocator,
11201  createInfo.memoryTypeIndex,
11202  createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
11203  createInfo.minBlockCount,
11204  createInfo.maxBlockCount,
11205  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
11206  createInfo.frameInUseCount,
11207  true, // isCustomPool
11208  createInfo.blockSize != 0, // explicitBlockSize
11209  createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm
11210  m_Id(0)
11211 {
11212 }
11213 
11214 VmaPool_T::~VmaPool_T()
11215 {
11216 }
11217 
11218 #if VMA_STATS_STRING_ENABLED
11219 
11220 #endif // #if VMA_STATS_STRING_ENABLED
11221 
11222 VmaBlockVector::VmaBlockVector(
11223  VmaAllocator hAllocator,
11224  uint32_t memoryTypeIndex,
11225  VkDeviceSize preferredBlockSize,
11226  size_t minBlockCount,
11227  size_t maxBlockCount,
11228  VkDeviceSize bufferImageGranularity,
11229  uint32_t frameInUseCount,
11230  bool isCustomPool,
11231  bool explicitBlockSize,
11232  uint32_t algorithm) :
11233  m_hAllocator(hAllocator),
11234  m_MemoryTypeIndex(memoryTypeIndex),
11235  m_PreferredBlockSize(preferredBlockSize),
11236  m_MinBlockCount(minBlockCount),
11237  m_MaxBlockCount(maxBlockCount),
11238  m_BufferImageGranularity(bufferImageGranularity),
11239  m_FrameInUseCount(frameInUseCount),
11240  m_IsCustomPool(isCustomPool),
11241  m_ExplicitBlockSize(explicitBlockSize),
11242  m_Algorithm(algorithm),
11243  m_HasEmptyBlock(false),
11244  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
11245  m_NextBlockId(0)
11246 {
11247 }
11248 
11249 VmaBlockVector::~VmaBlockVector()
11250 {
11251  for(size_t i = m_Blocks.size(); i--; )
11252  {
11253  m_Blocks[i]->Destroy(m_hAllocator);
11254  vma_delete(m_hAllocator, m_Blocks[i]);
11255  }
11256 }
11257 
11258 VkResult VmaBlockVector::CreateMinBlocks()
11259 {
11260  for(size_t i = 0; i < m_MinBlockCount; ++i)
11261  {
11262  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
11263  if(res != VK_SUCCESS)
11264  {
11265  return res;
11266  }
11267  }
11268  return VK_SUCCESS;
11269 }
11270 
11271 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
11272 {
11273  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
11274 
11275  const size_t blockCount = m_Blocks.size();
11276 
11277  pStats->size = 0;
11278  pStats->unusedSize = 0;
11279  pStats->allocationCount = 0;
11280  pStats->unusedRangeCount = 0;
11281  pStats->unusedRangeSizeMax = 0;
11282  pStats->blockCount = blockCount;
11283 
11284  for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
11285  {
11286  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
11287  VMA_ASSERT(pBlock);
11288  VMA_HEAVY_ASSERT(pBlock->Validate());
11289  pBlock->m_pMetadata->AddPoolStats(*pStats);
11290  }
11291 }
11292 
11293 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
11294 {
11295  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
11296  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
11297  (VMA_DEBUG_MARGIN > 0) &&
11298  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
11299 }
11300 
11301 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
11302 
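// Allocates `allocationCount` pages under a single write lock. The operation
// is all-or-nothing: on the first failure, every page created so far is
// freed and the output array is zeroed.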
11303 VkResult VmaBlockVector::Allocate(
11304  VmaPool hCurrentPool,
11305  uint32_t currentFrameIndex,
11306  VkDeviceSize size,
11307  VkDeviceSize alignment,
11308  const VmaAllocationCreateInfo& createInfo,
11309  VmaSuballocationType suballocType,
11310  size_t allocationCount,
11311  VmaAllocation* pAllocations)
11312 {
11313  size_t allocIndex;
11314  VkResult res = VK_SUCCESS;
11315 
11316  {
11317  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
11318  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
11319  {
11320  res = AllocatePage(
11321  hCurrentPool,
11322  currentFrameIndex,
11323  size,
11324  alignment,
11325  createInfo,
11326  suballocType,
11327  pAllocations + allocIndex);
11328  if(res != VK_SUCCESS)
11329  {
11330  break;
11331  }
11332  }
11333  }
11334 
11335  if(res != VK_SUCCESS)
11336  {
11337  // Free all already created allocations.
11338  while(allocIndex--)
11339  {
11340  Free(pAllocations[allocIndex]);
11341  }
11342  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
11343  }
11344 
11345  return res;
11346 }
11347 
11348 VkResult VmaBlockVector::AllocatePage(
11349  VmaPool hCurrentPool,
11350  uint32_t currentFrameIndex,
11351  VkDeviceSize size,
11352  VkDeviceSize alignment,
11353  const VmaAllocationCreateInfo& createInfo,
11354  VmaSuballocationType suballocType,
11355  VmaAllocation* pAllocation)
11356 {
11357  const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
11358  bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
11359  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
11360  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
11361  const bool canCreateNewBlock =
11362  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
11363  (m_Blocks.size() < m_MaxBlockCount);
11364  uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
11365 
11366  // If linearAlgorithm is used, canMakeOtherLost is available only when used as ring buffer.
11367  // Which in turn is available only when maxBlockCount = 1.
11368  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
11369  {
11370  canMakeOtherLost = false;
11371  }
11372 
11373  // Upper address can only be used with linear allocator and within single memory block.
11374  if(isUpperAddress &&
11375  (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
11376  {
11377  return VK_ERROR_FEATURE_NOT_PRESENT;
11378  }
11379 
11380  // Validate strategy.
11381  switch(strategy)
11382  {
11383  case 0:
11384  strategy = VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT;
11385  break;
11386  case VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT:
11387  case VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT:
11388  case VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT:
11389  break;
11390  default:
11391  return VK_ERROR_FEATURE_NOT_PRESENT;
11392  }
11393 
11394  // Early reject: requested allocation size is larger than the maximum block size for this block vector.
11395  if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
11396  {
11397  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11398  }
11399 
11400  /*
11401  Under certain conditions, this whole section can be skipped for optimization, so
11402  we move on directly to trying to allocate with canMakeOtherLost. That's the case
11403  e.g. for custom pools with linear algorithm.
11404  */
11405  if(!canMakeOtherLost || canCreateNewBlock)
11406  {
11407  // 1. Search existing allocations. Try to allocate without making other allocations lost.
11408  VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
11409  allocFlagsCopy &= ~VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT;
11410 
11411  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
11412  {
11413  // Use only last block.
11414  if(!m_Blocks.empty())
11415  {
11416  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
11417  VMA_ASSERT(pCurrBlock);
11418  VkResult res = AllocateFromBlock(
11419  pCurrBlock,
11420  hCurrentPool,
11421  currentFrameIndex,
11422  size,
11423  alignment,
11424  allocFlagsCopy,
11425  createInfo.pUserData,
11426  suballocType,
11427  strategy,
11428  pAllocation);
11429  if(res == VK_SUCCESS)
11430  {
11431  VMA_DEBUG_LOG(" Returned from last block #%u", (uint32_t)(m_Blocks.size() - 1));
11432  return VK_SUCCESS;
11433  }
11434  }
11435  }
11436  else
11437  {
11438  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
11439  {
11440  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
11441  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
11442  {
11443  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11444  VMA_ASSERT(pCurrBlock);
11445  VkResult res = AllocateFromBlock(
11446  pCurrBlock,
11447  hCurrentPool,
11448  currentFrameIndex,
11449  size,
11450  alignment,
11451  allocFlagsCopy,
11452  createInfo.pUserData,
11453  suballocType,
11454  strategy,
11455  pAllocation);
11456  if(res == VK_SUCCESS)
11457  {
11458  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
11459  return VK_SUCCESS;
11460  }
11461  }
11462  }
11463  else // WORST_FIT, FIRST_FIT
11464  {
11465  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
11466  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
11467  {
11468  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11469  VMA_ASSERT(pCurrBlock);
11470  VkResult res = AllocateFromBlock(
11471  pCurrBlock,
11472  hCurrentPool,
11473  currentFrameIndex,
11474  size,
11475  alignment,
11476  allocFlagsCopy,
11477  createInfo.pUserData,
11478  suballocType,
11479  strategy,
11480  pAllocation);
11481  if(res == VK_SUCCESS)
11482  {
11483  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
11484  return VK_SUCCESS;
11485  }
11486  }
11487  }
11488  }
11489 
11490  // 2. Try to create new block.
11491  if(canCreateNewBlock)
11492  {
11493  // Calculate optimal size for new block.
11494  VkDeviceSize newBlockSize = m_PreferredBlockSize;
11495  uint32_t newBlockSizeShift = 0;
11496  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
11497 
11498  if(!m_ExplicitBlockSize)
11499  {
11500  // Allocate 1/8, 1/4, 1/2 as first blocks.
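// E.g. with a 256 MiB preferred block size, the loop halves toward 128, 64,
// then 32 MiB; each halving is accepted only while the smaller size still
// exceeds every existing block and can hold the requested size twice over.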
11501  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
11502  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
11503  {
11504  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
11505  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
11506  {
11507  newBlockSize = smallerNewBlockSize;
11508  ++newBlockSizeShift;
11509  }
11510  else
11511  {
11512  break;
11513  }
11514  }
11515  }
11516 
11517  size_t newBlockIndex = 0;
11518  VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
11519  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
11520  if(!m_ExplicitBlockSize)
11521  {
11522  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
11523  {
11524  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
11525  if(smallerNewBlockSize >= size)
11526  {
11527  newBlockSize = smallerNewBlockSize;
11528  ++newBlockSizeShift;
11529  res = CreateBlock(newBlockSize, &newBlockIndex);
11530  }
11531  else
11532  {
11533  break;
11534  }
11535  }
11536  }
11537 
11538  if(res == VK_SUCCESS)
11539  {
11540  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
11541  VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
11542 
11543  res = AllocateFromBlock(
11544  pBlock,
11545  hCurrentPool,
11546  currentFrameIndex,
11547  size,
11548  alignment,
11549  allocFlagsCopy,
11550  createInfo.pUserData,
11551  suballocType,
11552  strategy,
11553  pAllocation);
11554  if(res == VK_SUCCESS)
11555  {
11556  VMA_DEBUG_LOG(" Created new block Size=%llu", newBlockSize);
11557  return VK_SUCCESS;
11558  }
11559  else
11560  {
11561  // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
11562  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11563  }
11564  }
11565  }
11566  }
11567 
11568  // 3. Try to allocate from existing blocks with making other allocations lost.
11569  if(canMakeOtherLost)
11570  {
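// Each try scans all blocks for the cheapest candidate request (roughly,
// the one that sacrifices the least of other allocations), then attempts to
// actually make those allocations lost. Another thread may touch them in
// the meantime, so the loop retries up to VMA_ALLOCATION_TRY_COUNT times.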
11571  uint32_t tryIndex = 0;
11572  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
11573  {
11574  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
11575  VmaAllocationRequest bestRequest = {};
11576  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
11577 
11578  // 1. Search existing allocations.
11579  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
11580  {
11581  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
11582  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
11583  {
11584  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11585  VMA_ASSERT(pCurrBlock);
11586  VmaAllocationRequest currRequest = {};
11587  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
11588  currentFrameIndex,
11589  m_FrameInUseCount,
11590  m_BufferImageGranularity,
11591  size,
11592  alignment,
11593  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
11594  suballocType,
11595  canMakeOtherLost,
11596  strategy,
11597  &currRequest))
11598  {
11599  const VkDeviceSize currRequestCost = currRequest.CalcCost();
11600  if(pBestRequestBlock == VMA_NULL ||
11601  currRequestCost < bestRequestCost)
11602  {
11603  pBestRequestBlock = pCurrBlock;
11604  bestRequest = currRequest;
11605  bestRequestCost = currRequestCost;
11606 
11607  if(bestRequestCost == 0)
11608  {
11609  break;
11610  }
11611  }
11612  }
11613  }
11614  }
11615  else // WORST_FIT, FIRST_FIT
11616  {
11617  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
11618  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
11619  {
11620  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11621  VMA_ASSERT(pCurrBlock);
11622  VmaAllocationRequest currRequest = {};
11623  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
11624  currentFrameIndex,
11625  m_FrameInUseCount,
11626  m_BufferImageGranularity,
11627  size,
11628  alignment,
11629  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
11630  suballocType,
11631  canMakeOtherLost,
11632  strategy,
11633  &currRequest))
11634  {
11635  const VkDeviceSize currRequestCost = currRequest.CalcCost();
11636  if(pBestRequestBlock == VMA_NULL ||
11637  currRequestCost < bestRequestCost ||
11638  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
11639  {
11640  pBestRequestBlock = pCurrBlock;
11641  bestRequest = currRequest;
11642  bestRequestCost = currRequestCost;
11643 
11644  if(bestRequestCost == 0 ||
11645  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
11646  {
11647  break;
11648  }
11649  }
11650  }
11651  }
11652  }
11653 
11654  if(pBestRequestBlock != VMA_NULL)
11655  {
11656  if(mapped)
11657  {
11658  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
11659  if(res != VK_SUCCESS)
11660  {
11661  return res;
11662  }
11663  }
11664 
11665  if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
11666  currentFrameIndex,
11667  m_FrameInUseCount,
11668  &bestRequest))
11669  {
11670  // We no longer have an empty Allocation.
11671  if(pBestRequestBlock->m_pMetadata->IsEmpty())
11672  {
11673  m_HasEmptyBlock = false;
11674  }
11675  // Allocate from this pBlock.
11676  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
11677  pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, isUpperAddress, *pAllocation);
11678  (*pAllocation)->InitBlockAllocation(
11679  hCurrentPool,
11680  pBestRequestBlock,
11681  bestRequest.offset,
11682  alignment,
11683  size,
11684  suballocType,
11685  mapped,
11686  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
11687  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
11688  VMA_DEBUG_LOG(" Returned from existing allocation #%u", (uint32_t)blockIndex);
11689  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
11690  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
11691  {
11692  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
11693  }
11694  if(IsCorruptionDetectionEnabled())
11695  {
11696  VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
11697  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
11698  }
11699  return VK_SUCCESS;
11700  }
11701  // else: Some allocations must have been touched while we are here. Next try.
11702  }
11703  else
11704  {
11705  // Could not find place in any of the blocks - break outer loop.
11706  break;
11707  }
11708  }
11709  /* Maximum number of tries exceeded - a very unlikely event when many other
11710  threads are simultaneously touching allocations, making it impossible to make
11711  them lost at the same time as we try to allocate. */
11712  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
11713  {
11714  return VK_ERROR_TOO_MANY_OBJECTS;
11715  }
11716  }
11717 
11718  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11719 }
11720 
11721 void VmaBlockVector::Free(
11722  VmaAllocation hAllocation)
11723 {
11724  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
11725 
11726  // Scope for lock.
11727  {
11728  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
11729 
11730  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
11731 
11732  if(IsCorruptionDetectionEnabled())
11733  {
11734  VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
11735  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
11736  }
11737 
11738  if(hAllocation->IsPersistentMap())
11739  {
11740  pBlock->Unmap(m_hAllocator, 1);
11741  }
11742 
11743  pBlock->m_pMetadata->Free(hAllocation);
11744  VMA_HEAVY_ASSERT(pBlock->Validate());
11745 
11746  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", memTypeIndex);
11747 
11748  // pBlock became empty after this deallocation.
11749  if(pBlock->m_pMetadata->IsEmpty())
11750  {
11751  // Already has empty Allocation. We don't want to have two, so delete this one.
11752  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
11753  {
11754  pBlockToDelete = pBlock;
11755  Remove(pBlock);
11756  }
11757  // We now have the first empty block.
11758  else
11759  {
11760  m_HasEmptyBlock = true;
11761  }
11762  }
11763  // pBlock didn't become empty, but we have another empty block - find and free that one.
11764  // (This is optional; a heuristic.)
11765  else if(m_HasEmptyBlock)
11766  {
11767  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
11768  if(pLastBlock->m_pMetadata->IsEmpty() && m_Blocks.size() > m_MinBlockCount)
11769  {
11770  pBlockToDelete = pLastBlock;
11771  m_Blocks.pop_back();
11772  m_HasEmptyBlock = false;
11773  }
11774  }
11775 
11776  IncrementallySortBlocks();
11777  }
11778 
11779  // Destruction of a free Allocation. Deferred until this point, outside of mutex
11780  // lock, for performance reasons.
11781  if(pBlockToDelete != VMA_NULL)
11782  {
11783  VMA_DEBUG_LOG(" Deleted empty allocation");
11784  pBlockToDelete->Destroy(m_hAllocator);
11785  vma_delete(m_hAllocator, pBlockToDelete);
11786  }
11787 }
11788 
11789 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
11790 {
11791  VkDeviceSize result = 0;
11792  for(size_t i = m_Blocks.size(); i--; )
11793  {
11794  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
11795  if(result >= m_PreferredBlockSize)
11796  {
11797  break;
11798  }
11799  }
11800  return result;
11801 }
11802 
11803 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
11804 {
11805  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
11806  {
11807  if(m_Blocks[blockIndex] == pBlock)
11808  {
11809  VmaVectorRemove(m_Blocks, blockIndex);
11810  return;
11811  }
11812  }
11813  VMA_ASSERT(0);
11814 }
11815 
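// Keeps m_Blocks approximately sorted by ascending free space, doing at most
// one swap per call, so the forward best-fit scan in AllocatePage() visits
// the fullest blocks first.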
11816 void VmaBlockVector::IncrementallySortBlocks()
11817 {
11818  if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
11819  {
11820  // Bubble sort only until first swap.
11821  for(size_t i = 1; i < m_Blocks.size(); ++i)
11822  {
11823  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
11824  {
11825  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
11826  return;
11827  }
11828  }
11829  }
11830 }
11831 
VkResult VmaBlockVector::AllocateFromBlock(
    VmaDeviceMemoryBlock* pBlock,
    VmaPool hCurrentPool,
    uint32_t currentFrameIndex,
    VkDeviceSize size,
    VkDeviceSize alignment,
    VmaAllocationCreateFlags allocFlags,
    void* pUserData,
    VmaSuballocationType suballocType,
    uint32_t strategy,
    VmaAllocation* pAllocation)
{
    VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
    const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
    const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;

    VmaAllocationRequest currRequest = {};
    if(pBlock->m_pMetadata->CreateAllocationRequest(
        currentFrameIndex,
        m_FrameInUseCount,
        m_BufferImageGranularity,
        size,
        alignment,
        isUpperAddress,
        suballocType,
        false, // canMakeOtherLost
        strategy,
        &currRequest))
    {
        // Allocate from pBlock.
        VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);

        if(mapped)
        {
            VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
            if(res != VK_SUCCESS)
            {
                return res;
            }
        }

        // We no longer have an empty block.
        if(pBlock->m_pMetadata->IsEmpty())
        {
            m_HasEmptyBlock = false;
        }

        *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
        pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, isUpperAddress, *pAllocation);
        (*pAllocation)->InitBlockAllocation(
            hCurrentPool,
            pBlock,
            currRequest.offset,
            alignment,
            size,
            suballocType,
            mapped,
            (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
        VMA_HEAVY_ASSERT(pBlock->Validate());
        (*pAllocation)->SetUserData(m_hAllocator, pUserData);
        if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
        {
            m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
        }
        if(IsCorruptionDetectionEnabled())
        {
            VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
            VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
        }
        return VK_SUCCESS;
    }
    return VK_ERROR_OUT_OF_DEVICE_MEMORY;
}

VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
{
    VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
    allocInfo.allocationSize = blockSize;
    VkDeviceMemory mem = VK_NULL_HANDLE;
    VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
    if(res < 0)
    {
        return res;
    }

    // New VkDeviceMemory successfully created.

    // Create new block object for it.
    VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
    pBlock->Init(
        m_hAllocator,
        m_MemoryTypeIndex,
        mem,
        allocInfo.allocationSize,
        m_NextBlockId++,
        m_Algorithm);

    m_Blocks.push_back(pBlock);
    if(pNewBlockIndex != VMA_NULL)
    {
        *pNewBlockIndex = m_Blocks.size() - 1;
    }

    return VK_SUCCESS;
}

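// Note: The CPU path copies data through host-visible mappings. The flow below is:
// (1) mark blocks touched by any move, (2) map those blocks or reuse an existing
// mapping, (3) memmove each region, invalidating/flushing the relevant ranges when
// the memory type is non-coherent, (4) unmap the blocks that were mapped only for
// defragmentation.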
void VmaBlockVector::ApplyDefragmentationMovesCpu(
    class VmaBlockVectorDefragmentationContext* pDefragCtx,
    const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves)
{
    const size_t blockCount = m_Blocks.size();
    const bool isNonCoherent = m_hAllocator->IsMemoryTypeNonCoherent(m_MemoryTypeIndex);

    enum BLOCK_FLAG
    {
        BLOCK_FLAG_USED = 0x00000001,
        BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION = 0x00000002,
    };

    struct BlockInfo
    {
        uint32_t flags;
        void* pMappedData;
    };
    VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> >
        blockInfo(blockCount, VmaStlAllocator<BlockInfo>(m_hAllocator->GetAllocationCallbacks()));
    memset(blockInfo.data(), 0, blockCount * sizeof(BlockInfo));

    // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
    const size_t moveCount = moves.size();
    for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
    {
        const VmaDefragmentationMove& move = moves[moveIndex];
        blockInfo[move.srcBlockIndex].flags |= BLOCK_FLAG_USED;
        blockInfo[move.dstBlockIndex].flags |= BLOCK_FLAG_USED;
    }

    VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);

    // Go over all blocks. Get mapped pointer or map if necessary.
    for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo& currBlockInfo = blockInfo[blockIndex];
        VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
        if((currBlockInfo.flags & BLOCK_FLAG_USED) != 0)
        {
            currBlockInfo.pMappedData = pBlock->GetMappedData();
            // If it is not originally mapped, map it.
            if(currBlockInfo.pMappedData == VMA_NULL)
            {
                pDefragCtx->res = pBlock->Map(m_hAllocator, 1, &currBlockInfo.pMappedData);
                if(pDefragCtx->res == VK_SUCCESS)
                {
                    currBlockInfo.flags |= BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION;
                }
            }
        }
    }

    // Go over all moves. Do actual data transfer.
    if(pDefragCtx->res == VK_SUCCESS)
    {
        const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
        VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };

        for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
        {
            const VmaDefragmentationMove& move = moves[moveIndex];

            const BlockInfo& srcBlockInfo = blockInfo[move.srcBlockIndex];
            const BlockInfo& dstBlockInfo = blockInfo[move.dstBlockIndex];

            VMA_ASSERT(srcBlockInfo.pMappedData && dstBlockInfo.pMappedData);

            // Invalidate source.
            if(isNonCoherent)
            {
                VmaDeviceMemoryBlock* const pSrcBlock = m_Blocks[move.srcBlockIndex];
                memRange.memory = pSrcBlock->GetDeviceMemory();
                memRange.offset = VmaAlignDown(move.srcOffset, nonCoherentAtomSize);
                memRange.size = VMA_MIN(
                    VmaAlignUp(move.size + (move.srcOffset - memRange.offset), nonCoherentAtomSize),
                    pSrcBlock->m_pMetadata->GetSize() - memRange.offset);
                (*m_hAllocator->GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
            }

            // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
            memmove(
                reinterpret_cast<char*>(dstBlockInfo.pMappedData) + move.dstOffset,
                reinterpret_cast<char*>(srcBlockInfo.pMappedData) + move.srcOffset,
                static_cast<size_t>(move.size));

            if(IsCorruptionDetectionEnabled())
            {
                VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset - VMA_DEBUG_MARGIN);
                VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset + move.size);
            }

            // Flush destination.
            if(isNonCoherent)
            {
                VmaDeviceMemoryBlock* const pDstBlock = m_Blocks[move.dstBlockIndex];
                memRange.memory = pDstBlock->GetDeviceMemory();
                memRange.offset = VmaAlignDown(move.dstOffset, nonCoherentAtomSize);
                memRange.size = VMA_MIN(
                    VmaAlignUp(move.size + (move.dstOffset - memRange.offset), nonCoherentAtomSize),
                    pDstBlock->m_pMetadata->GetSize() - memRange.offset);
                (*m_hAllocator->GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
            }
        }
    }

    // Go over all blocks in reverse order. Unmap those that were mapped just for defragmentation.
    // Regardless of pCtx->res == VK_SUCCESS.
    for(size_t blockIndex = blockCount; blockIndex--; )
    {
        const BlockInfo& currBlockInfo = blockInfo[blockIndex];
        if((currBlockInfo.flags & BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION) != 0)
        {
            VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
            pBlock->Unmap(m_hAllocator, 1);
        }
    }
}

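// Note: The GPU path records vkCmdCopyBuffer commands instead of touching memory
// from the host. A temporary whole-block buffer with TRANSFER_SRC|DST usage is
// created and bound to each participating VkDeviceMemory, regions are copied
// buffer-to-buffer, and pDefragCtx->res is set to VK_NOT_READY to signal that the
// command buffer still has to be submitted and completed before cleanup.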
void VmaBlockVector::ApplyDefragmentationMovesGpu(
    class VmaBlockVectorDefragmentationContext* pDefragCtx,
    const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    VkCommandBuffer commandBuffer)
{
    const size_t blockCount = m_Blocks.size();

    pDefragCtx->blockContexts.resize(blockCount);
    memset(pDefragCtx->blockContexts.data(), 0, blockCount * sizeof(VmaBlockDefragmentationContext));

    // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
    const size_t moveCount = moves.size();
    for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
    {
        const VmaDefragmentationMove& move = moves[moveIndex];
        pDefragCtx->blockContexts[move.srcBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
        pDefragCtx->blockContexts[move.dstBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
    }

    VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);

    // Go over all blocks. Create and bind buffer for whole block if necessary.
    {
        VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
        bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT |
            VK_BUFFER_USAGE_TRANSFER_DST_BIT;

        for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
        {
            VmaBlockDefragmentationContext& currBlockCtx = pDefragCtx->blockContexts[blockIndex];
            VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
            if((currBlockCtx.flags & VmaBlockDefragmentationContext::BLOCK_FLAG_USED) != 0)
            {
                bufCreateInfo.size = pBlock->m_pMetadata->GetSize();
                pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkCreateBuffer)(
                    m_hAllocator->m_hDevice, &bufCreateInfo, m_hAllocator->GetAllocationCallbacks(), &currBlockCtx.hBuffer);
                if(pDefragCtx->res == VK_SUCCESS)
                {
                    pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkBindBufferMemory)(
                        m_hAllocator->m_hDevice, currBlockCtx.hBuffer, pBlock->GetDeviceMemory(), 0);
                }
            }
        }
    }

    // Go over all moves. Post data transfer commands to command buffer.
    if(pDefragCtx->res == VK_SUCCESS)
    {
        const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
        VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };

        for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
        {
            const VmaDefragmentationMove& move = moves[moveIndex];

            const VmaBlockDefragmentationContext& srcBlockCtx = pDefragCtx->blockContexts[move.srcBlockIndex];
            const VmaBlockDefragmentationContext& dstBlockCtx = pDefragCtx->blockContexts[move.dstBlockIndex];

            VMA_ASSERT(srcBlockCtx.hBuffer && dstBlockCtx.hBuffer);

            VkBufferCopy region = {
                move.srcOffset,
                move.dstOffset,
                move.size };
            (*m_hAllocator->GetVulkanFunctions().vkCmdCopyBuffer)(
                commandBuffer, srcBlockCtx.hBuffer, dstBlockCtx.hBuffer, 1, &region);
        }
    }

    // The buffers are kept in pDefragCtx->blockContexts for later destruction.
    // Mark the context as not ready: the recorded command buffer must finish executing first.
    if(pDefragCtx->res == VK_SUCCESS && moveCount > 0)
    {
        pDefragCtx->res = VK_NOT_READY;
    }
}

void VmaBlockVector::FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats)
{
    m_HasEmptyBlock = false;
    for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    {
        VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
        if(pBlock->m_pMetadata->IsEmpty())
        {
            if(m_Blocks.size() > m_MinBlockCount)
            {
                if(pDefragmentationStats != VMA_NULL)
                {
                    ++pDefragmentationStats->deviceMemoryBlocksFreed;
                    pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
                }

                VmaVectorRemove(m_Blocks, blockIndex);
                pBlock->Destroy(m_hAllocator);
                vma_delete(m_hAllocator, pBlock);
            }
            else
            {
                m_HasEmptyBlock = true;
            }
        }
    }
}

#if VMA_STATS_STRING_ENABLED

void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
{
    VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);

    json.BeginObject();

    if(m_IsCustomPool)
    {
        json.WriteString("MemoryTypeIndex");
        json.WriteNumber(m_MemoryTypeIndex);

        json.WriteString("BlockSize");
        json.WriteNumber(m_PreferredBlockSize);

        json.WriteString("BlockCount");
        json.BeginObject(true);
        if(m_MinBlockCount > 0)
        {
            json.WriteString("Min");
            json.WriteNumber((uint64_t)m_MinBlockCount);
        }
        if(m_MaxBlockCount < SIZE_MAX)
        {
            json.WriteString("Max");
            json.WriteNumber((uint64_t)m_MaxBlockCount);
        }
        json.WriteString("Cur");
        json.WriteNumber((uint64_t)m_Blocks.size());
        json.EndObject();

        if(m_FrameInUseCount > 0)
        {
            json.WriteString("FrameInUseCount");
            json.WriteNumber(m_FrameInUseCount);
        }

        if(m_Algorithm != 0)
        {
            json.WriteString("Algorithm");
            json.WriteString(VmaAlgorithmToStr(m_Algorithm));
        }
    }
    else
    {
        json.WriteString("PreferredBlockSize");
        json.WriteNumber(m_PreferredBlockSize);
    }

    json.WriteString("Blocks");
    json.BeginObject();
    for(size_t i = 0; i < m_Blocks.size(); ++i)
    {
        json.BeginString();
        json.ContinueString(m_Blocks[i]->GetId());
        json.EndString();

        m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
    }
    json.EndObject();

    json.EndObject();
}

#endif // #if VMA_STATS_STRING_ENABLED

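// Note: Host-visible memory can be defragmented on the CPU (memmove through a
// mapping); other memory can be defragmented on the GPU when the caller provided a
// nonzero GPU budget. When both paths are possible, DEVICE_LOCAL memory and
// integrated GPUs prefer the GPU path. Overlapping same-block moves are only
// allowed on the CPU path, where memmove handles them safely.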
void VmaBlockVector::Defragment(
    class VmaBlockVectorDefragmentationContext* pCtx,
    VmaDefragmentationStats* pStats,
    VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
    VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
    VkCommandBuffer commandBuffer)
{
    pCtx->res = VK_SUCCESS;

    const VkMemoryPropertyFlags memPropFlags =
        m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags;
    const bool isHostVisible = (memPropFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0;
    const bool isHostCoherent = (memPropFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0;

    const bool canDefragmentOnCpu = maxCpuBytesToMove > 0 && maxCpuAllocationsToMove > 0 &&
        isHostVisible;
    const bool canDefragmentOnGpu = maxGpuBytesToMove > 0 && maxGpuAllocationsToMove > 0 &&
        (VMA_DEBUG_DETECT_CORRUPTION == 0 || !(isHostVisible && isHostCoherent));

    // There is at least one option to defragment this memory type.
    if(canDefragmentOnCpu || canDefragmentOnGpu)
    {
        bool defragmentOnGpu;
        // There is only one option to defragment this memory type.
        if(canDefragmentOnGpu != canDefragmentOnCpu)
        {
            defragmentOnGpu = canDefragmentOnGpu;
        }
        // Both options are available: use heuristics to choose the better one.
        else
        {
            defragmentOnGpu = (memPropFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0 ||
                m_hAllocator->IsIntegratedGpu();
        }

        bool overlappingMoveSupported = !defragmentOnGpu;

        if(m_hAllocator->m_UseMutex)
        {
            m_Mutex.LockWrite();
            pCtx->mutexLocked = true;
        }

        pCtx->Begin(overlappingMoveSupported);

        // Defragment.

        const VkDeviceSize maxBytesToMove = defragmentOnGpu ? maxGpuBytesToMove : maxCpuBytesToMove;
        const uint32_t maxAllocationsToMove = defragmentOnGpu ? maxGpuAllocationsToMove : maxCpuAllocationsToMove;
        VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> > moves =
            VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >(VmaStlAllocator<VmaDefragmentationMove>(m_hAllocator->GetAllocationCallbacks()));
        pCtx->res = pCtx->GetAlgorithm()->Defragment(moves, maxBytesToMove, maxAllocationsToMove);

        // Accumulate statistics.
        if(pStats != VMA_NULL)
        {
            const VkDeviceSize bytesMoved = pCtx->GetAlgorithm()->GetBytesMoved();
            const uint32_t allocationsMoved = pCtx->GetAlgorithm()->GetAllocationsMoved();
            pStats->bytesMoved += bytesMoved;
            pStats->allocationsMoved += allocationsMoved;
            VMA_ASSERT(bytesMoved <= maxBytesToMove);
            VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
            if(defragmentOnGpu)
            {
                maxGpuBytesToMove -= bytesMoved;
                maxGpuAllocationsToMove -= allocationsMoved;
            }
            else
            {
                maxCpuBytesToMove -= bytesMoved;
                maxCpuAllocationsToMove -= allocationsMoved;
            }
        }

        if(pCtx->res >= VK_SUCCESS)
        {
            if(defragmentOnGpu)
            {
                ApplyDefragmentationMovesGpu(pCtx, moves, commandBuffer);
            }
            else
            {
                ApplyDefragmentationMovesCpu(pCtx, moves);
            }
        }
    }
}

void VmaBlockVector::DefragmentationEnd(
    class VmaBlockVectorDefragmentationContext* pCtx,
    VmaDefragmentationStats* pStats)
{
    // Destroy buffers.
    for(size_t blockIndex = pCtx->blockContexts.size(); blockIndex--; )
    {
        VmaBlockDefragmentationContext& blockCtx = pCtx->blockContexts[blockIndex];
        if(blockCtx.hBuffer)
        {
            (*m_hAllocator->GetVulkanFunctions().vkDestroyBuffer)(
                m_hAllocator->m_hDevice, blockCtx.hBuffer, m_hAllocator->GetAllocationCallbacks());
        }
    }

    if(pCtx->res >= VK_SUCCESS)
    {
        FreeEmptyBlocks(pStats);
    }

    if(pCtx->mutexLocked)
    {
        VMA_ASSERT(m_hAllocator->m_UseMutex);
        m_Mutex.UnlockWrite();
    }
}

size_t VmaBlockVector::CalcAllocationCount() const
{
    size_t result = 0;
    for(size_t i = 0; i < m_Blocks.size(); ++i)
    {
        result += m_Blocks[i]->m_pMetadata->GetAllocationCount();
    }
    return result;
}

bool VmaBlockVector::IsBufferImageGranularityConflictPossible() const
{
    if(m_BufferImageGranularity == 1)
    {
        return false;
    }
    VmaSuballocationType lastSuballocType = VMA_SUBALLOCATION_TYPE_FREE;
    for(size_t i = 0, count = m_Blocks.size(); i < count; ++i)
    {
        VmaDeviceMemoryBlock* const pBlock = m_Blocks[i];
        VMA_ASSERT(m_Algorithm == 0);
        VmaBlockMetadata_Generic* const pMetadata = (VmaBlockMetadata_Generic*)pBlock->m_pMetadata;
        if(pMetadata->IsBufferImageGranularityConflictPossible(m_BufferImageGranularity, lastSuballocType))
        {
            return true;
        }
    }
    return false;
}

void VmaBlockVector::MakePoolAllocationsLost(
    uint32_t currentFrameIndex,
    size_t* pLostAllocationCount)
{
    VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
    size_t lostAllocationCount = 0;
    for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    {
        VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
        VMA_ASSERT(pBlock);
        lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
    }
    if(pLostAllocationCount != VMA_NULL)
    {
        *pLostAllocationCount = lostAllocationCount;
    }
}

VkResult VmaBlockVector::CheckCorruption()
{
    if(!IsCorruptionDetectionEnabled())
    {
        return VK_ERROR_FEATURE_NOT_PRESENT;
    }

    VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
    for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    {
        VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
        VMA_ASSERT(pBlock);
        VkResult res = pBlock->CheckCorruption(m_hAllocator);
        if(res != VK_SUCCESS)
        {
            return res;
        }
    }
    return VK_SUCCESS;
}

void VmaBlockVector::AddStats(VmaStats* pStats)
{
    const uint32_t memTypeIndex = m_MemoryTypeIndex;
    const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);

    VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);

    for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    {
        const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
        VMA_ASSERT(pBlock);
        VMA_HEAVY_ASSERT(pBlock->Validate());
        VmaStatInfo allocationStatInfo;
        pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
        VmaAddStatInfo(pStats->total, allocationStatInfo);
        VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
        VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    }
}

////////////////////////////////////////////////////////////////////////////////
// VmaDefragmentationAlgorithm_Generic members definition

VmaDefragmentationAlgorithm_Generic::VmaDefragmentationAlgorithm_Generic(
    VmaAllocator hAllocator,
    VmaBlockVector* pBlockVector,
    uint32_t currentFrameIndex,
    bool overlappingMoveSupported) :
    VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
    m_AllAllocations(false),
    m_AllocationCount(0),
    m_BytesMoved(0),
    m_AllocationsMoved(0),
    m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
{
    // Create block info for each block.
    const size_t blockCount = m_pBlockVector->m_Blocks.size();
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
        pBlockInfo->m_OriginalBlockIndex = blockIndex;
        pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
        m_Blocks.push_back(pBlockInfo);
    }

    // Sort them by m_pBlock pointer value.
    VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
}

VmaDefragmentationAlgorithm_Generic::~VmaDefragmentationAlgorithm_Generic()
{
    for(size_t i = m_Blocks.size(); i--; )
    {
        vma_delete(m_hAllocator, m_Blocks[i]);
    }
}

void VmaDefragmentationAlgorithm_Generic::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
{
    // Now that we are inside VmaBlockVector::m_Mutex, we can make the final check whether this allocation was lost.
    if(hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
    {
        VmaDeviceMemoryBlock* pBlock = hAlloc->GetBlock();
        BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
        if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
        {
            AllocationInfo allocInfo = AllocationInfo(hAlloc, pChanged);
            (*it)->m_Allocations.push_back(allocInfo);
        }
        else
        {
            VMA_ASSERT(0);
        }

        ++m_AllocationCount;
    }
}

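// Note: One round walks candidate allocations from the most "source" block (back
// of m_Blocks) toward the front and, for each, looks for a destination in a
// preceding block or at a lower offset in the same block. MoveMakesSense() rejects
// moves that would not bring the allocation closer to the front, which prevents
// allocations from ping-ponging between rounds.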
VkResult VmaDefragmentationAlgorithm_Generic::DefragmentRound(
    VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    VkDeviceSize maxBytesToMove,
    uint32_t maxAllocationsToMove)
{
    if(m_Blocks.empty())
    {
        return VK_SUCCESS;
    }

    // This is a choice based on research.
    // Option 1:
    uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT;
    // Option 2:
    //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT;
    // Option 3:
    //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT;

    size_t srcBlockMinIndex = 0;
    // With FAST_ALGORITHM, move allocations only out of the last of the blocks that contain non-movable allocations, and out of the blocks after it.
    /*
    if(m_AlgorithmFlags & VMA_DEFRAGMENTATION_FAST_ALGORITHM_BIT)
    {
        const size_t blocksWithNonMovableCount = CalcBlocksWithNonMovableCount();
        if(blocksWithNonMovableCount > 0)
        {
            srcBlockMinIndex = blocksWithNonMovableCount - 1;
        }
    }
    */

    size_t srcBlockIndex = m_Blocks.size() - 1;
    size_t srcAllocIndex = SIZE_MAX;
    for(;;)
    {
        // 1. Find next allocation to move.
        // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
        // 1.2. Then start from last to first m_Allocations.
        while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
        {
            if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
            {
                // Finished: no more allocations to process.
                if(srcBlockIndex == srcBlockMinIndex)
                {
                    return VK_SUCCESS;
                }
                else
                {
                    --srcBlockIndex;
                    srcAllocIndex = SIZE_MAX;
                }
            }
            else
            {
                srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
            }
        }

        BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
        AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];

        const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
        const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
        const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
        const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();

        // 2. Try to find new place for this allocation in preceding or current block.
        for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
        {
            BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
            VmaAllocationRequest dstAllocRequest;
            if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
                m_CurrentFrameIndex,
                m_pBlockVector->GetFrameInUseCount(),
                m_pBlockVector->GetBufferImageGranularity(),
                size,
                alignment,
                false, // upperAddress
                suballocType,
                false, // canMakeOtherLost
                strategy,
                &dstAllocRequest) &&
            MoveMakesSense(
                dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
            {
                VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);

                // Reached limit on number of allocations or bytes to move.
                if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
                    (m_BytesMoved + size > maxBytesToMove))
                {
                    return VK_SUCCESS;
                }

                VmaDefragmentationMove move;
                move.srcBlockIndex = pSrcBlockInfo->m_OriginalBlockIndex;
                move.dstBlockIndex = pDstBlockInfo->m_OriginalBlockIndex;
                move.srcOffset = srcOffset;
                move.dstOffset = dstAllocRequest.offset;
                move.size = size;
                moves.push_back(move);

                pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
                    dstAllocRequest,
                    suballocType,
                    size,
                    false, // upperAddress
                    allocInfo.m_hAllocation);
                pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);

                allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);

                if(allocInfo.m_pChanged != VMA_NULL)
                {
                    *allocInfo.m_pChanged = VK_TRUE;
                }

                ++m_AllocationsMoved;
                m_BytesMoved += size;

                VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);

                break;
            }
        }

        // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round.

        if(srcAllocIndex > 0)
        {
            --srcAllocIndex;
        }
        else
        {
            if(srcBlockIndex > 0)
            {
                --srcBlockIndex;
                srcAllocIndex = SIZE_MAX;
            }
            else
            {
                return VK_SUCCESS;
            }
        }
    }
}

size_t VmaDefragmentationAlgorithm_Generic::CalcBlocksWithNonMovableCount() const
{
    size_t result = 0;
    for(size_t i = 0; i < m_Blocks.size(); ++i)
    {
        if(m_Blocks[i]->m_HasNonMovableAllocations)
        {
            ++result;
        }
    }
    return result;
}

VkResult VmaDefragmentationAlgorithm_Generic::Defragment(
    VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    VkDeviceSize maxBytesToMove,
    uint32_t maxAllocationsToMove)
{
    if(!m_AllAllocations && m_AllocationCount == 0)
    {
        return VK_SUCCESS;
    }

    const size_t blockCount = m_Blocks.size();
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo* pBlockInfo = m_Blocks[blockIndex];

        if(m_AllAllocations)
        {
            VmaBlockMetadata_Generic* pMetadata = (VmaBlockMetadata_Generic*)pBlockInfo->m_pBlock->m_pMetadata;
            for(VmaSuballocationList::const_iterator it = pMetadata->m_Suballocations.begin();
                it != pMetadata->m_Suballocations.end();
                ++it)
            {
                if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
                {
                    AllocationInfo allocInfo = AllocationInfo(it->hAllocation, VMA_NULL);
                    pBlockInfo->m_Allocations.push_back(allocInfo);
                }
            }
        }

        pBlockInfo->CalcHasNonMovableAllocations();

        // This is a choice based on research.
        // Option 1:
        pBlockInfo->SortAllocationsByOffsetDescending();
        // Option 2:
        //pBlockInfo->SortAllocationsBySizeDescending();
    }

    // Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
    VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());

    // This is a choice based on research.
    const uint32_t roundCount = 2;

    // Execute defragmentation rounds (the main part).
    VkResult result = VK_SUCCESS;
    for(uint32_t round = 0; (round < roundCount) && (result == VK_SUCCESS); ++round)
    {
        result = DefragmentRound(moves, maxBytesToMove, maxAllocationsToMove);
    }

    return result;
}

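// Returns true when the destination position precedes the source position, i.e.
// (dstBlockIndex, dstOffset) compares less than (srcBlockIndex, srcOffset)
// lexicographically, so the move compacts allocations toward the front.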
bool VmaDefragmentationAlgorithm_Generic::MoveMakesSense(
    size_t dstBlockIndex, VkDeviceSize dstOffset,
    size_t srcBlockIndex, VkDeviceSize srcOffset)
{
    if(dstBlockIndex < srcBlockIndex)
    {
        return true;
    }
    if(dstBlockIndex > srcBlockIndex)
    {
        return false;
    }
    if(dstOffset < srcOffset)
    {
        return true;
    }
    return false;
}

////////////////////////////////////////////////////////////////////////////////
// VmaDefragmentationAlgorithm_Fast

VmaDefragmentationAlgorithm_Fast::VmaDefragmentationAlgorithm_Fast(
    VmaAllocator hAllocator,
    VmaBlockVector* pBlockVector,
    uint32_t currentFrameIndex,
    bool overlappingMoveSupported) :
    VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
    m_OverlappingMoveSupported(overlappingMoveSupported),
    m_AllocationCount(0),
    m_AllAllocations(false),
    m_BytesMoved(0),
    m_AllocationsMoved(0),
    m_BlockInfos(VmaStlAllocator<BlockInfo>(hAllocator->GetAllocationCallbacks()))
{
    VMA_ASSERT(VMA_DEBUG_MARGIN == 0);
}

VmaDefragmentationAlgorithm_Fast::~VmaDefragmentationAlgorithm_Fast()
{
}

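// Note: The fast algorithm is a single compacting sweep. Blocks are sorted from
// most "destination" (least free space) to most "source", then every allocation is
// relocated to the earliest position that fits: either a previously registered gap
// from the free-space database or the current write cursor (dstOffset) in the
// current destination block. It requires all allocations to be movable and the
// suballocation metadata to be rebuilt afterwards, which is why it is only chosen
// under the criteria listed in VmaBlockVectorDefragmentationContext::Begin().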
VkResult VmaDefragmentationAlgorithm_Fast::Defragment(
    VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    VkDeviceSize maxBytesToMove,
    uint32_t maxAllocationsToMove)
{
    VMA_ASSERT(m_AllAllocations || m_pBlockVector->CalcAllocationCount() == m_AllocationCount);

    const size_t blockCount = m_pBlockVector->GetBlockCount();
    if(blockCount == 0 || maxBytesToMove == 0 || maxAllocationsToMove == 0)
    {
        return VK_SUCCESS;
    }

    PreprocessMetadata();

    // Sort blocks in order from most destination.

    m_BlockInfos.resize(blockCount);
    for(size_t i = 0; i < blockCount; ++i)
    {
        m_BlockInfos[i].origBlockIndex = i;
    }

    VMA_SORT(m_BlockInfos.begin(), m_BlockInfos.end(), [this](const BlockInfo& lhs, const BlockInfo& rhs) -> bool {
        return m_pBlockVector->GetBlock(lhs.origBlockIndex)->m_pMetadata->GetSumFreeSize() <
            m_pBlockVector->GetBlock(rhs.origBlockIndex)->m_pMetadata->GetSumFreeSize();
    });

    // THE MAIN ALGORITHM

    FreeSpaceDatabase freeSpaceDb;

    size_t dstBlockInfoIndex = 0;
    size_t dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
    VmaDeviceMemoryBlock* pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
    VmaBlockMetadata_Generic* pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
    VkDeviceSize dstBlockSize = pDstMetadata->GetSize();
    VkDeviceSize dstOffset = 0;

    bool end = false;
    for(size_t srcBlockInfoIndex = 0; !end && srcBlockInfoIndex < blockCount; ++srcBlockInfoIndex)
    {
        const size_t srcOrigBlockIndex = m_BlockInfos[srcBlockInfoIndex].origBlockIndex;
        VmaDeviceMemoryBlock* const pSrcBlock = m_pBlockVector->GetBlock(srcOrigBlockIndex);
        VmaBlockMetadata_Generic* const pSrcMetadata = (VmaBlockMetadata_Generic*)pSrcBlock->m_pMetadata;
        for(VmaSuballocationList::iterator srcSuballocIt = pSrcMetadata->m_Suballocations.begin();
            !end && srcSuballocIt != pSrcMetadata->m_Suballocations.end(); )
        {
            VmaAllocation_T* const pAlloc = srcSuballocIt->hAllocation;
            const VkDeviceSize srcAllocAlignment = pAlloc->GetAlignment();
            const VkDeviceSize srcAllocSize = srcSuballocIt->size;
            if(m_AllocationsMoved == maxAllocationsToMove ||
                m_BytesMoved + srcAllocSize > maxBytesToMove)
            {
                end = true;
                break;
            }
            const VkDeviceSize srcAllocOffset = srcSuballocIt->offset;

            // Try to place it in one of free spaces from the database.
            size_t freeSpaceInfoIndex;
            VkDeviceSize dstAllocOffset;
            if(freeSpaceDb.Fetch(srcAllocAlignment, srcAllocSize,
                freeSpaceInfoIndex, dstAllocOffset))
            {
                size_t freeSpaceOrigBlockIndex = m_BlockInfos[freeSpaceInfoIndex].origBlockIndex;
                VmaDeviceMemoryBlock* pFreeSpaceBlock = m_pBlockVector->GetBlock(freeSpaceOrigBlockIndex);
                VmaBlockMetadata_Generic* pFreeSpaceMetadata = (VmaBlockMetadata_Generic*)pFreeSpaceBlock->m_pMetadata;
                VkDeviceSize freeSpaceBlockSize = pFreeSpaceMetadata->GetSize();

                // Same block
                if(freeSpaceInfoIndex == srcBlockInfoIndex)
                {
                    VMA_ASSERT(dstAllocOffset <= srcAllocOffset);

                    // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.

                    VmaSuballocation suballoc = *srcSuballocIt;
                    suballoc.offset = dstAllocOffset;
                    suballoc.hAllocation->ChangeOffset(dstAllocOffset);
                    m_BytesMoved += srcAllocSize;
                    ++m_AllocationsMoved;

                    VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
                    ++nextSuballocIt;
                    pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
                    srcSuballocIt = nextSuballocIt;

                    InsertSuballoc(pFreeSpaceMetadata, suballoc);

                    VmaDefragmentationMove move = {
                        srcOrigBlockIndex, freeSpaceOrigBlockIndex,
                        srcAllocOffset, dstAllocOffset,
                        srcAllocSize };
                    moves.push_back(move);
                }
                // Different block
                else
                {
                    // MOVE OPTION 2: Move the allocation to a different block.

                    VMA_ASSERT(freeSpaceInfoIndex < srcBlockInfoIndex);

                    VmaSuballocation suballoc = *srcSuballocIt;
                    suballoc.offset = dstAllocOffset;
                    suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pFreeSpaceBlock, dstAllocOffset);
                    m_BytesMoved += srcAllocSize;
                    ++m_AllocationsMoved;

                    VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
                    ++nextSuballocIt;
                    pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
                    srcSuballocIt = nextSuballocIt;

                    InsertSuballoc(pFreeSpaceMetadata, suballoc);

                    VmaDefragmentationMove move = {
                        srcOrigBlockIndex, freeSpaceOrigBlockIndex,
                        srcAllocOffset, dstAllocOffset,
                        srcAllocSize };
                    moves.push_back(move);
                }
            }
            else
            {
                dstAllocOffset = VmaAlignUp(dstOffset, srcAllocAlignment);

                // If the allocation doesn't fit before the end of dstBlock, advance to the next block.
                while(dstBlockInfoIndex < srcBlockInfoIndex &&
                    dstAllocOffset + srcAllocSize > dstBlockSize)
                {
                    // But before that, register remaining free space at the end of dst block.
                    freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, dstBlockSize - dstOffset);

                    ++dstBlockInfoIndex;
                    dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
                    pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
                    pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
                    dstBlockSize = pDstMetadata->GetSize();
                    dstOffset = 0;
                    dstAllocOffset = 0;
                }

                // Same block
                if(dstBlockInfoIndex == srcBlockInfoIndex)
                {
                    VMA_ASSERT(dstAllocOffset <= srcAllocOffset);

                    const bool overlap = dstAllocOffset + srcAllocSize > srcAllocOffset;

                    bool skipOver = overlap;
                    if(overlap && m_OverlappingMoveSupported && dstAllocOffset < srcAllocOffset)
                    {
                        // If destination and source regions overlap, skip the move when it would
                        // shift the allocation by less than 1/64 of its size.
                        skipOver = (srcAllocOffset - dstAllocOffset) * 64 < srcAllocSize;
                    }

                    if(skipOver)
                    {
                        freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, srcAllocOffset - dstOffset);

                        dstOffset = srcAllocOffset + srcAllocSize;
                        ++srcSuballocIt;
                    }
                    // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
                    else
                    {
                        srcSuballocIt->offset = dstAllocOffset;
                        srcSuballocIt->hAllocation->ChangeOffset(dstAllocOffset);
                        dstOffset = dstAllocOffset + srcAllocSize;
                        m_BytesMoved += srcAllocSize;
                        ++m_AllocationsMoved;
                        ++srcSuballocIt;
                        VmaDefragmentationMove move = {
                            srcOrigBlockIndex, dstOrigBlockIndex,
                            srcAllocOffset, dstAllocOffset,
                            srcAllocSize };
                        moves.push_back(move);
                    }
                }
                // Different block
                else
                {
                    // MOVE OPTION 2: Move the allocation to a different block.

                    VMA_ASSERT(dstBlockInfoIndex < srcBlockInfoIndex);
                    VMA_ASSERT(dstAllocOffset + srcAllocSize <= dstBlockSize);

                    VmaSuballocation suballoc = *srcSuballocIt;
                    suballoc.offset = dstAllocOffset;
                    suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlock, dstAllocOffset);
                    dstOffset = dstAllocOffset + srcAllocSize;
                    m_BytesMoved += srcAllocSize;
                    ++m_AllocationsMoved;

                    VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
                    ++nextSuballocIt;
                    pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
                    srcSuballocIt = nextSuballocIt;

                    pDstMetadata->m_Suballocations.push_back(suballoc);

                    VmaDefragmentationMove move = {
                        srcOrigBlockIndex, dstOrigBlockIndex,
                        srcAllocOffset, dstAllocOffset,
                        srcAllocSize };
                    moves.push_back(move);
                }
            }
        }
    }

    m_BlockInfos.clear();

    PostprocessMetadata();

    return VK_SUCCESS;
}

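// Note: Preprocess/Postprocess bracket the sweep above. PreprocessMetadata()
// strips all FREE suballocations and clears the by-size free list, so during the
// sweep each block's m_Suballocations holds only real allocations sorted by offset.
// PostprocessMetadata() then reconstructs the free suballocations, free counters,
// and the sorted free list from the allocations' final offsets.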
void VmaDefragmentationAlgorithm_Fast::PreprocessMetadata()
{
    const size_t blockCount = m_pBlockVector->GetBlockCount();
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        VmaBlockMetadata_Generic* const pMetadata =
            (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
        pMetadata->m_FreeCount = 0;
        pMetadata->m_SumFreeSize = pMetadata->GetSize();
        pMetadata->m_FreeSuballocationsBySize.clear();
        for(VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
            it != pMetadata->m_Suballocations.end(); )
        {
            if(it->type == VMA_SUBALLOCATION_TYPE_FREE)
            {
                VmaSuballocationList::iterator nextIt = it;
                ++nextIt;
                pMetadata->m_Suballocations.erase(it);
                it = nextIt;
            }
            else
            {
                ++it;
            }
        }
    }
}

void VmaDefragmentationAlgorithm_Fast::PostprocessMetadata()
{
    const size_t blockCount = m_pBlockVector->GetBlockCount();
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        VmaBlockMetadata_Generic* const pMetadata =
            (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
        const VkDeviceSize blockSize = pMetadata->GetSize();

        // No allocations in this block - entire area is free.
        if(pMetadata->m_Suballocations.empty())
        {
            pMetadata->m_FreeCount = 1;
            //pMetadata->m_SumFreeSize is already set to blockSize.
            VmaSuballocation suballoc = {
                0, // offset
                blockSize, // size
                VMA_NULL, // hAllocation
                VMA_SUBALLOCATION_TYPE_FREE };
            pMetadata->m_Suballocations.push_back(suballoc);
            pMetadata->RegisterFreeSuballocation(pMetadata->m_Suballocations.begin());
        }
        // There are some allocations in this block.
        else
        {
            VkDeviceSize offset = 0;
            VmaSuballocationList::iterator it;
            for(it = pMetadata->m_Suballocations.begin();
                it != pMetadata->m_Suballocations.end();
                ++it)
            {
                VMA_ASSERT(it->type != VMA_SUBALLOCATION_TYPE_FREE);
                VMA_ASSERT(it->offset >= offset);

                // Need to insert preceding free space.
                if(it->offset > offset)
                {
                    ++pMetadata->m_FreeCount;
                    const VkDeviceSize freeSize = it->offset - offset;
                    VmaSuballocation suballoc = {
                        offset, // offset
                        freeSize, // size
                        VMA_NULL, // hAllocation
                        VMA_SUBALLOCATION_TYPE_FREE };
                    VmaSuballocationList::iterator precedingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
                    if(freeSize >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
                    {
                        pMetadata->m_FreeSuballocationsBySize.push_back(precedingFreeIt);
                    }
                }

                pMetadata->m_SumFreeSize -= it->size;
                offset = it->offset + it->size;
            }

            // Need to insert trailing free space.
            if(offset < blockSize)
            {
                ++pMetadata->m_FreeCount;
                const VkDeviceSize freeSize = blockSize - offset;
                VmaSuballocation suballoc = {
                    offset, // offset
                    freeSize, // size
                    VMA_NULL, // hAllocation
                    VMA_SUBALLOCATION_TYPE_FREE };
                VMA_ASSERT(it == pMetadata->m_Suballocations.end());
                VmaSuballocationList::iterator trailingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
                // Use >= for consistency with the preceding-free-space case above, so a free
                // range exactly equal to the minimum is also registered.
                if(freeSize >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
                {
                    pMetadata->m_FreeSuballocationsBySize.push_back(trailingFreeIt);
                }
            }

            VMA_SORT(
                pMetadata->m_FreeSuballocationsBySize.begin(),
                pMetadata->m_FreeSuballocationsBySize.end(),
                VmaSuballocationItemSizeLess());
        }

        VMA_HEAVY_ASSERT(pMetadata->Validate());
    }
}

void VmaDefragmentationAlgorithm_Fast::InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc)
{
    // TODO: Optimize somehow. Remember iterator instead of searching for it linearly.
    // Keep the list sorted by offset: advance past entries with a smaller offset,
    // then insert before the first entry with an offset >= suballoc.offset.
    VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
    while(it != pMetadata->m_Suballocations.end() &&
        it->offset < suballoc.offset)
    {
        ++it;
    }
    pMetadata->m_Suballocations.insert(it, suballoc);
}

////////////////////////////////////////////////////////////////////////////////
// VmaBlockVectorDefragmentationContext

VmaBlockVectorDefragmentationContext::VmaBlockVectorDefragmentationContext(
    VmaAllocator hAllocator,
    VmaPool hCustomPool,
    VmaBlockVector* pBlockVector,
    uint32_t currFrameIndex,
    uint32_t algorithmFlags) :
    res(VK_SUCCESS),
    mutexLocked(false),
    blockContexts(VmaStlAllocator<VmaBlockDefragmentationContext>(hAllocator->GetAllocationCallbacks())),
    m_hAllocator(hAllocator),
    m_hCustomPool(hCustomPool),
    m_pBlockVector(pBlockVector),
    m_CurrFrameIndex(currFrameIndex),
    m_AlgorithmFlags(algorithmFlags),
    m_pAlgorithm(VMA_NULL),
    m_Allocations(VmaStlAllocator<AllocInfo>(hAllocator->GetAllocationCallbacks())),
    m_AllAllocations(false)
{
}

VmaBlockVectorDefragmentationContext::~VmaBlockVectorDefragmentationContext()
{
    vma_delete(m_hAllocator, m_pAlgorithm);
}

void VmaBlockVectorDefragmentationContext::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
{
    AllocInfo info = { hAlloc, pChanged };
    m_Allocations.push_back(info);
}

void VmaBlockVectorDefragmentationContext::Begin(bool overlappingMoveSupported)
{
    const bool allAllocations = m_AllAllocations ||
        m_Allocations.size() == m_pBlockVector->CalcAllocationCount();

    /********************************
    HERE IS THE CHOICE OF DEFRAGMENTATION ALGORITHM.
    ********************************/

    /*
    The fast algorithm is supported only when certain criteria are met:
    - VMA_DEBUG_MARGIN is 0.
    - All allocations in this block vector are movable.
    - There is no possibility of image/buffer granularity conflict.
    */
    if(VMA_DEBUG_MARGIN == 0 &&
        allAllocations &&
        !m_pBlockVector->IsBufferImageGranularityConflictPossible())
    {
        m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Fast)(
            m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
    }
    else
    {
        m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Generic)(
            m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
    }

    if(allAllocations)
    {
        m_pAlgorithm->AddAll();
    }
    else
    {
        for(size_t i = 0, count = m_Allocations.size(); i < count; ++i)
        {
            m_pAlgorithm->AddAllocation(m_Allocations[i].hAlloc, m_Allocations[i].pChanged);
        }
    }
}

////////////////////////////////////////////////////////////////////////////////
// VmaDefragmentationContext

VmaDefragmentationContext_T::VmaDefragmentationContext_T(
    VmaAllocator hAllocator,
    uint32_t currFrameIndex,
    uint32_t flags,
    VmaDefragmentationStats* pStats) :
    m_hAllocator(hAllocator),
    m_CurrFrameIndex(currFrameIndex),
    m_Flags(flags),
    m_pStats(pStats),
    m_CustomPoolContexts(VmaStlAllocator<VmaBlockVectorDefragmentationContext*>(hAllocator->GetAllocationCallbacks()))
{
    memset(m_DefaultPoolContexts, 0, sizeof(m_DefaultPoolContexts));
}

VmaDefragmentationContext_T::~VmaDefragmentationContext_T()
{
    for(size_t i = m_CustomPoolContexts.size(); i--; )
    {
        VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[i];
        pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
        vma_delete(m_hAllocator, pBlockVectorCtx);
    }
    for(size_t i = m_hAllocator->m_MemProps.memoryTypeCount; i--; )
    {
        VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[i];
        if(pBlockVectorCtx)
        {
            pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
            vma_delete(m_hAllocator, pBlockVectorCtx);
        }
    }
}

void VmaDefragmentationContext_T::AddPools(uint32_t poolCount, VmaPool* pPools)
{
    for(uint32_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
    {
        VmaPool pool = pPools[poolIndex];
        VMA_ASSERT(pool);
        // Pools with algorithm other than default are not defragmented.
        if(pool->m_BlockVector.GetAlgorithm() == 0)
        {
            VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;

            for(size_t i = m_CustomPoolContexts.size(); i--; )
            {
                if(m_CustomPoolContexts[i]->GetCustomPool() == pool)
                {
                    pBlockVectorDefragCtx = m_CustomPoolContexts[i];
                    break;
                }
            }

            if(!pBlockVectorDefragCtx)
            {
                pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
                    m_hAllocator,
                    pool,
                    &pool->m_BlockVector,
                    m_CurrFrameIndex,
                    m_Flags);
                m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
            }

            pBlockVectorDefragCtx->AddAll();
        }
    }
}

void VmaDefragmentationContext_T::AddAllocations(
    uint32_t allocationCount,
    VmaAllocation* pAllocations,
    VkBool32* pAllocationsChanged)
{
    // Dispatch pAllocations among the per-block-vector contexts. Create them when necessary.
    for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
    {
        const VmaAllocation hAlloc = pAllocations[allocIndex];
        VMA_ASSERT(hAlloc);
        // A dedicated allocation cannot be defragmented.
        if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
            // A lost allocation cannot be defragmented.
            (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
        {
            VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;

            const VmaPool hAllocPool = hAlloc->GetPool();
            // This allocation belongs to a custom pool.
            if(hAllocPool != VK_NULL_HANDLE)
            {
                // Pools with algorithm other than default are not defragmented.
                if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
                {
                    for(size_t i = m_CustomPoolContexts.size(); i--; )
                    {
                        if(m_CustomPoolContexts[i]->GetCustomPool() == hAllocPool)
                        {
                            pBlockVectorDefragCtx = m_CustomPoolContexts[i];
                            break;
                        }
                    }
                    if(!pBlockVectorDefragCtx)
                    {
                        pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
                            m_hAllocator,
                            hAllocPool,
                            &hAllocPool->m_BlockVector,
                            m_CurrFrameIndex,
                            m_Flags);
                        m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
                    }
                }
            }
            // This allocation belongs to a default pool.
            else
            {
                const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
                pBlockVectorDefragCtx = m_DefaultPoolContexts[memTypeIndex];
                if(!pBlockVectorDefragCtx)
                {
                    pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
                        m_hAllocator,
                        VMA_NULL, // hCustomPool
                        m_hAllocator->m_pBlockVectors[memTypeIndex],
                        m_CurrFrameIndex,
                        m_Flags);
                    m_DefaultPoolContexts[memTypeIndex] = pBlockVectorDefragCtx;
                }
            }

            if(pBlockVectorDefragCtx)
            {
                VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
                    &pAllocationsChanged[allocIndex] : VMA_NULL;
                pBlockVectorDefragCtx->AddAllocation(hAlloc, pChanged);
            }
        }
    }
}

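// Note: Without a command buffer the GPU budget is zeroed below, which restricts
// defragmentation to the CPU path. Default pools are processed before custom pools;
// the byte/allocation budgets are passed by reference and consumed across all block
// vectors. Only an error result stops further processing - VK_NOT_READY (GPU moves
// recorded, awaiting execution) does not, because it compares >= VK_SUCCESS.
//
// A minimal usage sketch of this path through the v2.2.0 public API, CPU-only for
// brevity (error handling omitted; `allocator`, `allocs`, and `allocCount` are
// assumed to exist in the caller's code):
//
//     VmaDefragmentationInfo2 defragInfo = {};
//     defragInfo.allocationCount = allocCount;
//     defragInfo.pAllocations = allocs;
//     defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;     // no limit
//     defragInfo.maxCpuAllocationsToMove = UINT32_MAX;  // no limit
//     // defragInfo.commandBuffer left null => GPU budget is zeroed below.
//
//     VmaDefragmentationContext defragCtx;
//     vmaDefragmentationBegin(allocator, &defragInfo, VMA_NULL, &defragCtx);
//     vmaDefragmentationEnd(allocator, defragCtx);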
VkResult VmaDefragmentationContext_T::Defragment(
    VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
    VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
    VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats)
{
    if(pStats)
    {
        memset(pStats, 0, sizeof(VmaDefragmentationStats));
    }

    if(commandBuffer == VK_NULL_HANDLE)
    {
        maxGpuBytesToMove = 0;
        maxGpuAllocationsToMove = 0;
    }

    VkResult res = VK_SUCCESS;

    // Process default pools.
    for(uint32_t memTypeIndex = 0;
        memTypeIndex < m_hAllocator->GetMemoryTypeCount() && res >= VK_SUCCESS;
        ++memTypeIndex)
    {
        VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
        if(pBlockVectorCtx)
        {
            VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
            pBlockVectorCtx->GetBlockVector()->Defragment(
                pBlockVectorCtx,
                pStats,
                maxCpuBytesToMove, maxCpuAllocationsToMove,
                maxGpuBytesToMove, maxGpuAllocationsToMove,
                commandBuffer);
            if(pBlockVectorCtx->res != VK_SUCCESS)
            {
                res = pBlockVectorCtx->res;
            }
        }
    }

    // Process custom pools.
    for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
        customCtxIndex < customCtxCount && res >= VK_SUCCESS;
        ++customCtxIndex)
    {
        VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
        VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
        pBlockVectorCtx->GetBlockVector()->Defragment(
            pBlockVectorCtx,
            pStats,
            maxCpuBytesToMove, maxCpuAllocationsToMove,
            maxGpuBytesToMove, maxGpuAllocationsToMove,
            commandBuffer);
        if(pBlockVectorCtx->res != VK_SUCCESS)
        {
            res = pBlockVectorCtx->res;
        }
    }

    return res;
}

////////////////////////////////////////////////////////////////////////////////
// VmaRecorder

#if VMA_RECORDING_ENABLED

VmaRecorder::VmaRecorder() :
    m_UseMutex(true),
    m_Flags(0),
    m_File(VMA_NULL),
    m_Freq(INT64_MAX),
    m_StartCounter(INT64_MAX)
{
}

VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
{
    m_UseMutex = useMutex;
    m_Flags = settings.flags;

    QueryPerformanceFrequency((LARGE_INTEGER*)&m_Freq);
    QueryPerformanceCounter((LARGE_INTEGER*)&m_StartCounter);

    // Open file for writing.
    errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
    if(err != 0)
    {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    // Write header.
    fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
    fprintf(m_File, "%s\n", "1,5");

    return VK_SUCCESS;
}

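// Note on the format written above: after the two header lines (file signature and
// format version "1,5"), each Record* method below appends one CSV line of the form
// ThreadId,TimeSinceStartInSeconds,FrameIndex,FunctionName followed by
// function-specific parameters.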
VmaRecorder::~VmaRecorder()
{
    if(m_File != VMA_NULL)
    {
        fclose(m_File);
    }
}

void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
    Flush();
}

void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
    Flush();
}

void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
        createInfo.memoryTypeIndex,
        createInfo.flags,
        createInfo.blockSize,
        (uint64_t)createInfo.minBlockCount,
        (uint64_t)createInfo.maxBlockCount,
        createInfo.frameInUseCount,
        pool);
    Flush();
}

void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
        pool);
    Flush();
}

void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
    const VkMemoryRequirements& vkMemReq,
    const VmaAllocationCreateInfo& createInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        vkMemReq.size,
        vkMemReq.alignment,
        vkMemReq.memoryTypeBits,
        createInfo.flags,
        createInfo.usage,
        createInfo.requiredFlags,
        createInfo.preferredFlags,
        createInfo.memoryTypeBits,
        createInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}

void VmaRecorder::RecordAllocateMemoryPages(uint32_t frameIndex,
    const VkMemoryRequirements& vkMemReq,
    const VmaAllocationCreateInfo& createInfo,
    uint64_t allocationCount,
    const VmaAllocation* pAllocations)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryPages,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,", callParams.threadId, callParams.time, frameIndex,
        vkMemReq.size,
        vkMemReq.alignment,
        vkMemReq.memoryTypeBits,
        createInfo.flags,
        createInfo.usage,
        createInfo.requiredFlags,
        createInfo.preferredFlags,
        createInfo.memoryTypeBits,
        createInfo.pool);
    PrintPointerList(allocationCount, pAllocations);
    fprintf(m_File, ",%s\n", userDataStr.GetString());
    Flush();
}

void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
    const VkMemoryRequirements& vkMemReq,
    bool requiresDedicatedAllocation,
    bool prefersDedicatedAllocation,
    const VmaAllocationCreateInfo& createInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        vkMemReq.size,
        vkMemReq.alignment,
        vkMemReq.memoryTypeBits,
        requiresDedicatedAllocation ? 1 : 0,
        prefersDedicatedAllocation ? 1 : 0,
        createInfo.flags,
        createInfo.usage,
        createInfo.requiredFlags,
        createInfo.preferredFlags,
        createInfo.memoryTypeBits,
        createInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}

void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
    const VkMemoryRequirements& vkMemReq,
    bool requiresDedicatedAllocation,
    bool prefersDedicatedAllocation,
    const VmaAllocationCreateInfo& createInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        vkMemReq.size,
        vkMemReq.alignment,
        vkMemReq.memoryTypeBits,
        requiresDedicatedAllocation ? 1 : 0,
        prefersDedicatedAllocation ? 1 : 0,
        createInfo.flags,
        createInfo.usage,
        createInfo.requiredFlags,
        createInfo.preferredFlags,
        createInfo.memoryTypeBits,
        createInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}

void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
        allocation);
    Flush();
}

void VmaRecorder::RecordFreeMemoryPages(uint32_t frameIndex,
    uint64_t allocationCount,
    const VmaAllocation* pAllocations)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaFreeMemoryPages,", callParams.threadId, callParams.time, frameIndex);
    PrintPointerList(allocationCount, pAllocations);
    fprintf(m_File, "\n");
    Flush();
}

void VmaRecorder::RecordResizeAllocation(
    uint32_t frameIndex,
    VmaAllocation allocation,
    VkDeviceSize newSize)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaResizeAllocation,%p,%llu\n", callParams.threadId, callParams.time, frameIndex,
        allocation, newSize);
    Flush();
}

void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
    VmaAllocation allocation,
    const void* pUserData)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
13625  UserDataString userDataStr(
13626  allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
13627  pUserData);
13628  fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13629  allocation,
13630  userDataStr.GetString());
13631  Flush();
13632 }
13633 
13634 void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
13635  VmaAllocation allocation)
13636 {
13637  CallParams callParams;
13638  GetBasicParams(callParams);
13639 
13640  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13641  fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
13642  allocation);
13643  Flush();
13644 }
13645 
13646 void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
13647  VmaAllocation allocation)
13648 {
13649  CallParams callParams;
13650  GetBasicParams(callParams);
13651 
13652  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13653  fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
13654  allocation);
13655  Flush();
13656 }
13657 
13658 void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
13659  VmaAllocation allocation)
13660 {
13661  CallParams callParams;
13662  GetBasicParams(callParams);
13663 
13664  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13665  fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
13666  allocation);
13667  Flush();
13668 }
13669 
13670 void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
13671  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
13672 {
13673  CallParams callParams;
13674  GetBasicParams(callParams);
13675 
13676  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13677  fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
13678  allocation,
13679  offset,
13680  size);
13681  Flush();
13682 }
13683 
13684 void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
13685  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
13686 {
13687  CallParams callParams;
13688  GetBasicParams(callParams);
13689 
13690  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13691  fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
13692  allocation,
13693  offset,
13694  size);
13695  Flush();
13696 }
13697 
13698 void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
13699  const VkBufferCreateInfo& bufCreateInfo,
13700  const VmaAllocationCreateInfo& allocCreateInfo,
13701  VmaAllocation allocation)
13702 {
13703  CallParams callParams;
13704  GetBasicParams(callParams);
13705 
13706  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13707  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
13708  fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13709  bufCreateInfo.flags,
13710  bufCreateInfo.size,
13711  bufCreateInfo.usage,
13712  bufCreateInfo.sharingMode,
13713  allocCreateInfo.flags,
13714  allocCreateInfo.usage,
13715  allocCreateInfo.requiredFlags,
13716  allocCreateInfo.preferredFlags,
13717  allocCreateInfo.memoryTypeBits,
13718  allocCreateInfo.pool,
13719  allocation,
13720  userDataStr.GetString());
13721  Flush();
13722 }
13723 
13724 void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
13725  const VkImageCreateInfo& imageCreateInfo,
13726  const VmaAllocationCreateInfo& allocCreateInfo,
13727  VmaAllocation allocation)
13728 {
13729  CallParams callParams;
13730  GetBasicParams(callParams);
13731 
13732  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13733  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
13734  fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13735  imageCreateInfo.flags,
13736  imageCreateInfo.imageType,
13737  imageCreateInfo.format,
13738  imageCreateInfo.extent.width,
13739  imageCreateInfo.extent.height,
13740  imageCreateInfo.extent.depth,
13741  imageCreateInfo.mipLevels,
13742  imageCreateInfo.arrayLayers,
13743  imageCreateInfo.samples,
13744  imageCreateInfo.tiling,
13745  imageCreateInfo.usage,
13746  imageCreateInfo.sharingMode,
13747  imageCreateInfo.initialLayout,
13748  allocCreateInfo.flags,
13749  allocCreateInfo.usage,
13750  allocCreateInfo.requiredFlags,
13751  allocCreateInfo.preferredFlags,
13752  allocCreateInfo.memoryTypeBits,
13753  allocCreateInfo.pool,
13754  allocation,
13755  userDataStr.GetString());
13756  Flush();
13757 }
13758 
13759 void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
13760  VmaAllocation allocation)
13761 {
13762  CallParams callParams;
13763  GetBasicParams(callParams);
13764 
13765  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13766  fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
13767  allocation);
13768  Flush();
13769 }
13770 
13771 void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
13772  VmaAllocation allocation)
13773 {
13774  CallParams callParams;
13775  GetBasicParams(callParams);
13776 
13777  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13778  fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
13779  allocation);
13780  Flush();
13781 }
13782 
13783 void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
13784  VmaAllocation allocation)
13785 {
13786  CallParams callParams;
13787  GetBasicParams(callParams);
13788 
13789  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13790  fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
13791  allocation);
13792  Flush();
13793 }
13794 
13795 void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
13796  VmaAllocation allocation)
13797 {
13798  CallParams callParams;
13799  GetBasicParams(callParams);
13800 
13801  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13802  fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
13803  allocation);
13804  Flush();
13805 }
13806 
13807 void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
13808  VmaPool pool)
13809 {
13810  CallParams callParams;
13811  GetBasicParams(callParams);
13812 
13813  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13814  fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
13815  pool);
13816  Flush();
13817 }
13818 
13819 void VmaRecorder::RecordDefragmentationBegin(uint32_t frameIndex,
13820  const VmaDefragmentationInfo2& info,
13821  VmaDefragmentationContext ctx)
13822 {
13823  CallParams callParams;
13824  GetBasicParams(callParams);
13825 
13826  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13827  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationBegin,%u,", callParams.threadId, callParams.time, frameIndex,
13828  info.flags);
13829  PrintPointerList(info.allocationCount, info.pAllocations);
13830  fprintf(m_File, ",");
13831  PrintPointerList(info.poolCount, info.pPools);
13832  fprintf(m_File, ",%llu,%u,%llu,%u,%p,%p\n",
13833  info.maxCpuBytesToMove,
13834  info.maxCpuAllocationsToMove,
13835  info.maxGpuBytesToMove,
13836  info.maxGpuAllocationsToMove,
13837  info.commandBuffer,
13838  ctx);
13839  Flush();
13840 }
13841 
13842 void VmaRecorder::RecordDefragmentationEnd(uint32_t frameIndex,
13843  VmaDefragmentationContext ctx)
13844 {
13845  CallParams callParams;
13846  GetBasicParams(callParams);
13847 
13848  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13849  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationEnd,%p\n", callParams.threadId, callParams.time, frameIndex,
13850  ctx);
13851  Flush();
13852 }
13853 
13854 VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
13855 {
13856  if(pUserData != VMA_NULL)
13857  {
13858  if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
13859  {
13860  m_Str = (const char*)pUserData;
13861  }
13862  else
13863  {
13864  sprintf_s(m_PtrStr, "%p", pUserData);
13865  m_Str = m_PtrStr;
13866  }
13867  }
13868  else
13869  {
13870  m_Str = "";
13871  }
13872 }
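// Note: pUserData is recorded in one of two ways. When
// VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT is set, it is treated as a
// null-terminated string and written verbatim; otherwise only the pointer
// value itself is printed. A tool replaying the trace therefore sees either
// the string payload or an opaque address, never the pointed-to bytes.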
13873 
13874 void VmaRecorder::WriteConfiguration(
13875  const VkPhysicalDeviceProperties& devProps,
13876  const VkPhysicalDeviceMemoryProperties& memProps,
13877  bool dedicatedAllocationExtensionEnabled)
13878 {
13879  fprintf(m_File, "Config,Begin\n");
13880 
13881  fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
13882  fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
13883  fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
13884  fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
13885  fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
13886  fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);
13887 
13888  fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
13889  fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
13890  fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);
13891 
13892  fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
13893  for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
13894  {
13895  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
13896  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
13897  }
13898  fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
13899  for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
13900  {
13901  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
13902  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
13903  }
13904 
13905  fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);
13906 
13907  fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
13908  fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
13909  fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
13910  fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
13911  fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
13912  fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
13913  fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
13914  fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
13915  fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
13916 
13917  fprintf(m_File, "Config,End\n");
13918 }
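// The recording file thus starts with a "Config,Begin" ... "Config,End"
// block capturing device properties, memory heaps/types, enabled extensions,
// and compile-time macros. Illustrative excerpt (values depend on the device
// and build):
//
//   Config,Begin
//   PhysicalDevice,vendorID,4098
//   PhysicalDeviceLimits,nonCoherentAtomSize,64
//   Macro,VMA_DEBUG_MARGIN,0
//   Config,End
//
// This lets a replay tool detect when a trace is played back on a different
// GPU or with different compile-time settings than it was captured with.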
13919 
13920 void VmaRecorder::GetBasicParams(CallParams& outParams)
13921 {
13922  outParams.threadId = GetCurrentThreadId();
13923 
13924  LARGE_INTEGER counter;
13925  QueryPerformanceCounter(&counter);
13926  outParams.time = (double)(counter.QuadPart - m_StartCounter) / (double)m_Freq;
13927 }
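// Note: timestamps are seconds elapsed since the recorder captured
// m_StartCounter, measured with QueryPerformanceCounter (a Win32 API, like
// GetCurrentThreadId above), and printed with "%.3f", i.e. millisecond
// resolution in the CSV output.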
13928 
13929 void VmaRecorder::PrintPointerList(uint64_t count, const VmaAllocation* pItems)
13930 {
13931  if(count)
13932  {
13933  fprintf(m_File, "%p", pItems[0]);
13934  for(uint64_t i = 1; i < count; ++i)
13935  {
13936  fprintf(m_File, " %p", pItems[i]);
13937  }
13938  }
13939 }
13940 
13941 void VmaRecorder::Flush()
13942 {
13943  if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
13944  {
13945  fflush(m_File);
13946  }
13947 }
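// Note: with VMA_RECORD_FLUSH_AFTER_CALL_BIT set, every recorded call is
// fflush()ed immediately, so the trace stays usable even if the application
// crashes, at the cost of extra I/O per call.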
13948 
13949 #endif // #if VMA_RECORDING_ENABLED
13950 
13951 ////////////////////////////////////////////////////////////////////////////////
13952 // VmaAllocator_T
13953 
13954 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
13955  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
13956  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
13957  m_hDevice(pCreateInfo->device),
13958  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
13959  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
13960  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
13961  m_PreferredLargeHeapBlockSize(0),
13962  m_PhysicalDevice(pCreateInfo->physicalDevice),
13963  m_CurrentFrameIndex(0),
13964  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
13965  m_NextPoolId(0)
13966 #if VMA_RECORDING_ENABLED
13967  ,m_pRecorder(VMA_NULL)
13968 #endif
13969 {
13970  if(VMA_DEBUG_DETECT_CORRUPTION)
13971  {
13972  // VMA_DEBUG_MARGIN needs to be a multiple of sizeof(uint32_t) because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
13973  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
13974  }
13975 
13976  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
13977 
13978 #if !(VMA_DEDICATED_ALLOCATION)
13979  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0)
13980  {
13981  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
13982  }
13983 #endif
13984 
13985  memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks));
13986  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
13987  memset(&m_MemProps, 0, sizeof(m_MemProps));
13988 
13989  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
13990  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
13991 
13992  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
13993  {
13994  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
13995  }
13996 
13997  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
13998  {
13999  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
14000  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
14001  }
14002 
14003  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
14004 
14005  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
14006  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
14007 
14008  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
14009  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
14010  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
14011  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
14012 
14013  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
14014  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
14015 
14016  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
14017  {
14018  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
14019  {
14020  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
14021  if(limit != VK_WHOLE_SIZE)
14022  {
14023  m_HeapSizeLimit[heapIndex] = limit;
14024  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
14025  {
14026  m_MemProps.memoryHeaps[heapIndex].size = limit;
14027  }
14028  }
14029  }
14030  }
14031 
14032  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
14033  {
14034  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
14035 
14036  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
14037  this,
14038  memTypeIndex,
14039  preferredBlockSize,
14040  0,
14041  SIZE_MAX,
14042  GetBufferImageGranularity(),
14043  pCreateInfo->frameInUseCount,
14044  false, // isCustomPool
14045  false, // explicitBlockSize
14046  false); // linearAlgorithm
14047  // No need to call m_pBlockVectors[memTypeIndex]->CreateMinBlocks here,
14048  // because minBlockCount is 0.
14049  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
14050 
14051  }
14052 }
14053 
14054 VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
14055 {
14056  VkResult res = VK_SUCCESS;
14057 
14058  if(pCreateInfo->pRecordSettings != VMA_NULL &&
14059  !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
14060  {
14061 #if VMA_RECORDING_ENABLED
14062  m_pRecorder = vma_new(this, VmaRecorder)();
14063  res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
14064  if(res != VK_SUCCESS)
14065  {
14066  return res;
14067  }
14068  m_pRecorder->WriteConfiguration(
14069  m_PhysicalDeviceProperties,
14070  m_MemProps,
14071  m_UseKhrDedicatedAllocation);
14072  m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
14073 #else
14074  VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
14075  return VK_ERROR_FEATURE_NOT_PRESENT;
14076 #endif
14077  }
14078 
14079  return res;
14080 }
14081 
14082 VmaAllocator_T::~VmaAllocator_T()
14083 {
14084 #if VMA_RECORDING_ENABLED
14085  if(m_pRecorder != VMA_NULL)
14086  {
14087  m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
14088  vma_delete(this, m_pRecorder);
14089  }
14090 #endif
14091 
14092  VMA_ASSERT(m_Pools.empty());
14093 
14094  for(size_t i = GetMemoryTypeCount(); i--; )
14095  {
14096  vma_delete(this, m_pDedicatedAllocations[i]);
14097  vma_delete(this, m_pBlockVectors[i]);
14098  }
14099 }
14100 
14101 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
14102 {
14103 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
14104  m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
14105  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
14106  m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
14107  m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
14108  m_VulkanFunctions.vkMapMemory = &vkMapMemory;
14109  m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
14110  m_VulkanFunctions.vkFlushMappedMemoryRanges = &vkFlushMappedMemoryRanges;
14111  m_VulkanFunctions.vkInvalidateMappedMemoryRanges = &vkInvalidateMappedMemoryRanges;
14112  m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
14113  m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
14114  m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
14115  m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
14116  m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
14117  m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
14118  m_VulkanFunctions.vkCreateImage = &vkCreateImage;
14119  m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
14120  m_VulkanFunctions.vkCmdCopyBuffer = &vkCmdCopyBuffer;
14121 #if VMA_DEDICATED_ALLOCATION
14122  if(m_UseKhrDedicatedAllocation)
14123  {
14124  m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
14125  (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
14126  m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
14127  (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
14128  }
14129 #endif // #if VMA_DEDICATED_ALLOCATION
14130 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
14131 
14132 #define VMA_COPY_IF_NOT_NULL(funcName) \
14133  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
14134 
14135  if(pVulkanFunctions != VMA_NULL)
14136  {
14137  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
14138  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
14139  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
14140  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
14141  VMA_COPY_IF_NOT_NULL(vkMapMemory);
14142  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
14143  VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
14144  VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
14145  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
14146  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
14147  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
14148  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
14149  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
14150  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
14151  VMA_COPY_IF_NOT_NULL(vkCreateImage);
14152  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
14153  VMA_COPY_IF_NOT_NULL(vkCmdCopyBuffer);
14154 #if VMA_DEDICATED_ALLOCATION
14155  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
14156  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
14157 #endif
14158  }
14159 
14160 #undef VMA_COPY_IF_NOT_NULL
14161 
14162  // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
14163  // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
14164  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
14165  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
14166  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
14167  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
14168  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
14169  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
14170  VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
14171  VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
14172  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
14173  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
14174  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
14175  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
14176  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
14177  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
14178  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
14179  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
14180  VMA_ASSERT(m_VulkanFunctions.vkCmdCopyBuffer != VMA_NULL);
14181 #if VMA_DEDICATED_ALLOCATION
14182  if(m_UseKhrDedicatedAllocation)
14183  {
14184  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
14185  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
14186  }
14187 #endif
14188 }
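// Note: function pointers are resolved in two passes: statically linked
// entry points first (when VMA_STATIC_VULKAN_FUNCTIONS == 1), then any
// non-null pointers from VmaAllocatorCreateInfo::pVulkanFunctions override
// them. The asserts at the end of the function guarantee the table is
// complete afterwards.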
14189 
14190 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
14191 {
14192  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
14193  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
14194  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
14195  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
14196 }
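// Worked example (assuming the library's default macro values of
// VMA_SMALL_HEAP_MAX_SIZE = 1 GiB and VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE =
// 256 MiB): a 256 MiB heap counts as "small", so its blocks are
// 256 / 8 = 32 MiB; an 8 GiB heap uses the preferred large-heap block size
// of 256 MiB.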
14197 
14198 VkResult VmaAllocator_T::AllocateMemoryOfType(
14199  VkDeviceSize size,
14200  VkDeviceSize alignment,
14201  bool dedicatedAllocation,
14202  VkBuffer dedicatedBuffer,
14203  VkImage dedicatedImage,
14204  const VmaAllocationCreateInfo& createInfo,
14205  uint32_t memTypeIndex,
14206  VmaSuballocationType suballocType,
14207  size_t allocationCount,
14208  VmaAllocation* pAllocations)
14209 {
14210  VMA_ASSERT(pAllocations != VMA_NULL);
14211  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, AllocationCount=%zu, Size=%llu", memTypeIndex, allocationCount, size);
14212 
14213  VmaAllocationCreateInfo finalCreateInfo = createInfo;
14214 
14215  // If memory type is not HOST_VISIBLE, disable MAPPED.
14216  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
14217  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
14218  {
14219  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
14220  }
14221 
14222  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
14223  VMA_ASSERT(blockVector);
14224 
14225  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
14226  bool preferDedicatedMemory =
14227  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
14228  dedicatedAllocation ||
14229  // Heuristics: Allocate dedicated memory if requested size is greater than half of preferred block size.
14230  size > preferredBlockSize / 2;
14231 
14232  if(preferDedicatedMemory &&
14233  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
14234  finalCreateInfo.pool == VK_NULL_HANDLE)
14235  {
14236  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
14237  }
14238 
14239  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
14240  {
14241  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14242  {
14243  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14244  }
14245  else
14246  {
14247  return AllocateDedicatedMemory(
14248  size,
14249  suballocType,
14250  memTypeIndex,
14251  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
14252  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
14253  finalCreateInfo.pUserData,
14254  dedicatedBuffer,
14255  dedicatedImage,
14256  allocationCount,
14257  pAllocations);
14258  }
14259  }
14260  else
14261  {
14262  VkResult res = blockVector->Allocate(
14263  VK_NULL_HANDLE, // hCurrentPool
14264  m_CurrentFrameIndex.load(),
14265  size,
14266  alignment,
14267  finalCreateInfo,
14268  suballocType,
14269  allocationCount,
14270  pAllocations);
14271  if(res == VK_SUCCESS)
14272  {
14273  return res;
14274  }
14275 
14276  // Block allocation failed: try dedicated memory as a fallback.
14277  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14278  {
14279  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14280  }
14281  else
14282  {
14283  res = AllocateDedicatedMemory(
14284  size,
14285  suballocType,
14286  memTypeIndex,
14287  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
14288  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
14289  finalCreateInfo.pUserData,
14290  dedicatedBuffer,
14291  dedicatedImage,
14292  allocationCount,
14293  pAllocations);
14294  if(res == VK_SUCCESS)
14295  {
14296  // Succeeded: AllocateDedicatedMemory already filled pAllocations, nothing more to do here.
14297  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
14298  return VK_SUCCESS;
14299  }
14300  else
14301  {
14302  // Everything failed: Return error code.
14303  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
14304  return res;
14305  }
14306  }
14307  }
14308 }
14309 
14310 VkResult VmaAllocator_T::AllocateDedicatedMemory(
14311  VkDeviceSize size,
14312  VmaSuballocationType suballocType,
14313  uint32_t memTypeIndex,
14314  bool map,
14315  bool isUserDataString,
14316  void* pUserData,
14317  VkBuffer dedicatedBuffer,
14318  VkImage dedicatedImage,
14319  size_t allocationCount,
14320  VmaAllocation* pAllocations)
14321 {
14322  VMA_ASSERT(allocationCount > 0 && pAllocations);
14323 
14324  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
14325  allocInfo.memoryTypeIndex = memTypeIndex;
14326  allocInfo.allocationSize = size;
14327 
14328 #if VMA_DEDICATED_ALLOCATION
14329  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
14330  if(m_UseKhrDedicatedAllocation)
14331  {
14332  if(dedicatedBuffer != VK_NULL_HANDLE)
14333  {
14334  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
14335  dedicatedAllocInfo.buffer = dedicatedBuffer;
14336  allocInfo.pNext = &dedicatedAllocInfo;
14337  }
14338  else if(dedicatedImage != VK_NULL_HANDLE)
14339  {
14340  dedicatedAllocInfo.image = dedicatedImage;
14341  allocInfo.pNext = &dedicatedAllocInfo;
14342  }
14343  }
14344 #endif // #if VMA_DEDICATED_ALLOCATION
14345 
14346  size_t allocIndex;
14347  VkResult res;
14348  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
14349  {
14350  res = AllocateDedicatedMemoryPage(
14351  size,
14352  suballocType,
14353  memTypeIndex,
14354  allocInfo,
14355  map,
14356  isUserDataString,
14357  pUserData,
14358  pAllocations + allocIndex);
14359  if(res != VK_SUCCESS)
14360  {
14361  break;
14362  }
14363  }
14364 
14365  if(res == VK_SUCCESS)
14366  {
14367  // Register them in m_pDedicatedAllocations.
14368  {
14369  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
14370  AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
14371  VMA_ASSERT(pDedicatedAllocations);
14372  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
14373  {
14374  VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, pAllocations[allocIndex]);
14375  }
14376  }
14377 
14378  VMA_DEBUG_LOG(" Allocated DedicatedMemory Count=%zu, MemoryTypeIndex=#%u", allocationCount, memTypeIndex);
14379  }
14380  else
14381  {
14382  // Free all already created allocations.
14383  while(allocIndex--)
14384  {
14385  VmaAllocation currAlloc = pAllocations[allocIndex];
14386  VkDeviceMemory hMemory = currAlloc->GetMemory();
14387 
14388  /*
14389  There is no need to call this, because the Vulkan spec allows skipping vkUnmapMemory
14390  before vkFreeMemory.
14391 
14392  if(currAlloc->GetMappedData() != VMA_NULL)
14393  {
14394  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
14395  }
14396  */
14397 
14398  FreeVulkanMemory(memTypeIndex, currAlloc->GetSize(), hMemory);
14399 
14400  currAlloc->SetUserData(this, VMA_NULL);
14401  vma_delete(this, currAlloc);
14402  }
14403 
14404  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
14405  }
14406 
14407  return res;
14408 }
14409 
14410 VkResult VmaAllocator_T::AllocateDedicatedMemoryPage(
14411  VkDeviceSize size,
14412  VmaSuballocationType suballocType,
14413  uint32_t memTypeIndex,
14414  const VkMemoryAllocateInfo& allocInfo,
14415  bool map,
14416  bool isUserDataString,
14417  void* pUserData,
14418  VmaAllocation* pAllocation)
14419 {
14420  VkDeviceMemory hMemory = VK_NULL_HANDLE;
14421  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
14422  if(res < 0)
14423  {
14424  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
14425  return res;
14426  }
14427 
14428  void* pMappedData = VMA_NULL;
14429  if(map)
14430  {
14431  res = (*m_VulkanFunctions.vkMapMemory)(
14432  m_hDevice,
14433  hMemory,
14434  0,
14435  VK_WHOLE_SIZE,
14436  0,
14437  &pMappedData);
14438  if(res < 0)
14439  {
14440  VMA_DEBUG_LOG(" vkMapMemory FAILED");
14441  FreeVulkanMemory(memTypeIndex, size, hMemory);
14442  return res;
14443  }
14444  }
14445 
14446  *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load(), isUserDataString);
14447  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
14448  (*pAllocation)->SetUserData(this, pUserData);
14449  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
14450  {
14451  FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
14452  }
14453 
14454  return VK_SUCCESS;
14455 }
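// Note: one "page" corresponds to exactly one vkAllocateMemory call, plus an
// optional persistent vkMapMemory when VMA_ALLOCATION_CREATE_MAPPED_BIT was
// requested. If the mapping fails, the freshly allocated memory is released
// again, so the function either fully succeeds or leaves no residue.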
14456 
14457 void VmaAllocator_T::GetBufferMemoryRequirements(
14458  VkBuffer hBuffer,
14459  VkMemoryRequirements& memReq,
14460  bool& requiresDedicatedAllocation,
14461  bool& prefersDedicatedAllocation) const
14462 {
14463 #if VMA_DEDICATED_ALLOCATION
14464  if(m_UseKhrDedicatedAllocation)
14465  {
14466  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
14467  memReqInfo.buffer = hBuffer;
14468 
14469  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
14470 
14471  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
14472  memReq2.pNext = &memDedicatedReq;
14473 
14474  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
14475 
14476  memReq = memReq2.memoryRequirements;
14477  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
14478  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
14479  }
14480  else
14481 #endif // #if VMA_DEDICATED_ALLOCATION
14482  {
14483  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
14484  requiresDedicatedAllocation = false;
14485  prefersDedicatedAllocation = false;
14486  }
14487 }
14488 
14489 void VmaAllocator_T::GetImageMemoryRequirements(
14490  VkImage hImage,
14491  VkMemoryRequirements& memReq,
14492  bool& requiresDedicatedAllocation,
14493  bool& prefersDedicatedAllocation) const
14494 {
14495 #if VMA_DEDICATED_ALLOCATION
14496  if(m_UseKhrDedicatedAllocation)
14497  {
14498  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
14499  memReqInfo.image = hImage;
14500 
14501  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
14502 
14503  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
14504  memReq2.pNext = &memDedicatedReq;
14505 
14506  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
14507 
14508  memReq = memReq2.memoryRequirements;
14509  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
14510  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
14511  }
14512  else
14513 #endif // #if VMA_DEDICATED_ALLOCATION
14514  {
14515  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
14516  requiresDedicatedAllocation = false;
14517  prefersDedicatedAllocation = false;
14518  }
14519 }
14520 
14521 VkResult VmaAllocator_T::AllocateMemory(
14522  const VkMemoryRequirements& vkMemReq,
14523  bool requiresDedicatedAllocation,
14524  bool prefersDedicatedAllocation,
14525  VkBuffer dedicatedBuffer,
14526  VkImage dedicatedImage,
14527  const VmaAllocationCreateInfo& createInfo,
14528  VmaSuballocationType suballocType,
14529  size_t allocationCount,
14530  VmaAllocation* pAllocations)
14531 {
14532  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
14533 
14534  VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
14535 
14536  if(vkMemReq.size == 0)
14537  {
14538  return VK_ERROR_VALIDATION_FAILED_EXT;
14539  }
14540  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
14541  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14542  {
14543  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
14544  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14545  }
14546  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
14547  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0)
14548  {
14549  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
14550  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14551  }
14552  if(requiresDedicatedAllocation)
14553  {
14554  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14555  {
14556  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
14557  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14558  }
14559  if(createInfo.pool != VK_NULL_HANDLE)
14560  {
14561  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
14562  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14563  }
14564  }
14565  if((createInfo.pool != VK_NULL_HANDLE) &&
14566  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
14567  {
14568  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
14569  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14570  }
14571 
14572  if(createInfo.pool != VK_NULL_HANDLE)
14573  {
14574  const VkDeviceSize alignmentForPool = VMA_MAX(
14575  vkMemReq.alignment,
14576  GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
14577  return createInfo.pool->m_BlockVector.Allocate(
14578  createInfo.pool,
14579  m_CurrentFrameIndex.load(),
14580  vkMemReq.size,
14581  alignmentForPool,
14582  createInfo,
14583  suballocType,
14584  allocationCount,
14585  pAllocations);
14586  }
14587  else
14588  {
14589  // Bit mask of Vulkan memory types acceptable for this allocation.
14590  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
14591  uint32_t memTypeIndex = UINT32_MAX;
14592  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
14593  if(res == VK_SUCCESS)
14594  {
14595  VkDeviceSize alignmentForMemType = VMA_MAX(
14596  vkMemReq.alignment,
14597  GetMemoryTypeMinAlignment(memTypeIndex));
14598 
14599  res = AllocateMemoryOfType(
14600  vkMemReq.size,
14601  alignmentForMemType,
14602  requiresDedicatedAllocation || prefersDedicatedAllocation,
14603  dedicatedBuffer,
14604  dedicatedImage,
14605  createInfo,
14606  memTypeIndex,
14607  suballocType,
14608  allocationCount,
14609  pAllocations);
14610  // Succeeded on first try.
14611  if(res == VK_SUCCESS)
14612  {
14613  return res;
14614  }
14615  // Allocation from this memory type failed. Try other compatible memory types.
14616  else
14617  {
14618  for(;;)
14619  {
14620  // Remove old memTypeIndex from list of possibilities.
14621  memoryTypeBits &= ~(1u << memTypeIndex);
14622  // Find alternative memTypeIndex.
14623  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
14624  if(res == VK_SUCCESS)
14625  {
14626  alignmentForMemType = VMA_MAX(
14627  vkMemReq.alignment,
14628  GetMemoryTypeMinAlignment(memTypeIndex));
14629 
14630  res = AllocateMemoryOfType(
14631  vkMemReq.size,
14632  alignmentForMemType,
14633  requiresDedicatedAllocation || prefersDedicatedAllocation,
14634  dedicatedBuffer,
14635  dedicatedImage,
14636  createInfo,
14637  memTypeIndex,
14638  suballocType,
14639  allocationCount,
14640  pAllocations);
14641  // Allocation from this alternative memory type succeeded.
14642  if(res == VK_SUCCESS)
14643  {
14644  return res;
14645  }
14646  // else: Allocation from this memory type failed. Try next one - next loop iteration.
14647  }
14648  // No other matching memory type index could be found.
14649  else
14650  {
14651  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
14652  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14653  }
14654  }
14655  }
14656  }
14657  // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
14658  else
14659  return res;
14660  }
14661 }
14662 
14663 void VmaAllocator_T::FreeMemory(
14664  size_t allocationCount,
14665  const VmaAllocation* pAllocations)
14666 {
14667  VMA_ASSERT(pAllocations);
14668 
14669  for(size_t allocIndex = allocationCount; allocIndex--; )
14670  {
14671  VmaAllocation allocation = pAllocations[allocIndex];
14672 
14673  if(allocation != VK_NULL_HANDLE)
14674  {
14675  if(TouchAllocation(allocation))
14676  {
14677  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
14678  {
14679  FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
14680  }
14681 
14682  switch(allocation->GetType())
14683  {
14684  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
14685  {
14686  VmaBlockVector* pBlockVector = VMA_NULL;
14687  VmaPool hPool = allocation->GetPool();
14688  if(hPool != VK_NULL_HANDLE)
14689  {
14690  pBlockVector = &hPool->m_BlockVector;
14691  }
14692  else
14693  {
14694  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
14695  pBlockVector = m_pBlockVectors[memTypeIndex];
14696  }
14697  pBlockVector->Free(allocation);
14698  }
14699  break;
14700  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
14701  FreeDedicatedMemory(allocation);
14702  break;
14703  default:
14704  VMA_ASSERT(0);
14705  }
14706  }
14707 
14708  allocation->SetUserData(this, VMA_NULL);
14709  vma_delete(this, allocation);
14710  }
14711  }
14712 }
14713 
14714 VkResult VmaAllocator_T::ResizeAllocation(
14715  const VmaAllocation alloc,
14716  VkDeviceSize newSize)
14717 {
14718  if(newSize == 0 || alloc->GetLastUseFrameIndex() == VMA_FRAME_INDEX_LOST)
14719  {
14720  return VK_ERROR_VALIDATION_FAILED_EXT;
14721  }
14722  if(newSize == alloc->GetSize())
14723  {
14724  return VK_SUCCESS;
14725  }
14726 
14727  switch(alloc->GetType())
14728  {
14729  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
14730  return VK_ERROR_FEATURE_NOT_PRESENT;
14731  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
14732  if(alloc->GetBlock()->m_pMetadata->ResizeAllocation(alloc, newSize))
14733  {
14734  alloc->ChangeSize(newSize);
14735  VMA_HEAVY_ASSERT(alloc->GetBlock()->m_pMetadata->Validate());
14736  return VK_SUCCESS;
14737  }
14738  else
14739  {
14740  return VK_ERROR_OUT_OF_POOL_MEMORY;
14741  }
14742  default:
14743  VMA_ASSERT(0);
14744  return VK_ERROR_VALIDATION_FAILED_EXT;
14745  }
14746 }
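// Note: resizing is strictly in-place. It succeeds only for block-based
// allocations whose metadata can grow or shrink the suballocation without
// moving it; dedicated allocations report VK_ERROR_FEATURE_NOT_PRESENT, and
// a resize to the current size is a no-op returning VK_SUCCESS.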
14747 
14748 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
14749 {
14750  // Initialize.
14751  InitStatInfo(pStats->total);
14752  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
14753  InitStatInfo(pStats->memoryType[i]);
14754  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
14755  InitStatInfo(pStats->memoryHeap[i]);
14756 
14757  // Process default pools.
14758  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
14759  {
14760  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
14761  VMA_ASSERT(pBlockVector);
14762  pBlockVector->AddStats(pStats);
14763  }
14764 
14765  // Process custom pools.
14766  {
14767  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
14768  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
14769  {
14770  m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
14771  }
14772  }
14773 
14774  // Process dedicated allocations.
14775  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
14776  {
14777  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
14778  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
14779  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
14780  VMA_ASSERT(pDedicatedAllocVector);
14781  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
14782  {
14783  VmaStatInfo allocationStatInfo;
14784  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
14785  VmaAddStatInfo(pStats->total, allocationStatInfo);
14786  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
14787  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
14788  }
14789  }
14790 
14791  // Postprocess.
14792  VmaPostprocessCalcStatInfo(pStats->total);
14793  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
14794  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
14795  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
14796  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
14797 }
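// Note: statistics are accumulated from three sources in order - default
// per-memory-type block vectors, custom pools, and dedicated allocations -
// and VmaPostprocessCalcStatInfo then derives the aggregate fields (such as
// averages) from the raw sums.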
14798 
14799 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
14800 
14801 VkResult VmaAllocator_T::DefragmentationBegin(
14802  const VmaDefragmentationInfo2& info,
14803  VmaDefragmentationStats* pStats,
14804  VmaDefragmentationContext* pContext)
14805 {
14806  if(info.pAllocationsChanged != VMA_NULL)
14807  {
14808  memset(info.pAllocationsChanged, 0, info.allocationCount * sizeof(VkBool32));
14809  }
14810 
14811  *pContext = vma_new(this, VmaDefragmentationContext_T)(
14812  this, m_CurrentFrameIndex.load(), info.flags, pStats);
14813 
14814  (*pContext)->AddPools(info.poolCount, info.pPools);
14815  (*pContext)->AddAllocations(
14816  info.allocationCount, info.pAllocations, info.pAllocationsChanged);
14817 
14818  VkResult res = (*pContext)->Defragment(
14819  info.maxCpuBytesToMove, info.maxCpuAllocationsToMove,
14820  info.maxGpuBytesToMove, info.maxGpuAllocationsToMove,
14821  info.commandBuffer, pStats);
14822 
14823  if(res != VK_NOT_READY)
14824  {
14825  vma_delete(this, *pContext);
14826  *pContext = VMA_NULL;
14827  }
14828 
14829  return res;
14830 }
14831 
14832 VkResult VmaAllocator_T::DefragmentationEnd(
14833  VmaDefragmentationContext context)
14834 {
14835  vma_delete(this, context);
14836  return VK_SUCCESS;
14837 }
14838 
14839 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
14840 {
14841  if(hAllocation->CanBecomeLost())
14842  {
14843  /*
14844  Warning: This is a carefully designed algorithm.
14845  Do not modify unless you really know what you're doing :)
14846  */
14847  const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
14848  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
14849  for(;;)
14850  {
14851  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
14852  {
14853  pAllocationInfo->memoryType = UINT32_MAX;
14854  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
14855  pAllocationInfo->offset = 0;
14856  pAllocationInfo->size = hAllocation->GetSize();
14857  pAllocationInfo->pMappedData = VMA_NULL;
14858  pAllocationInfo->pUserData = hAllocation->GetUserData();
14859  return;
14860  }
14861  else if(localLastUseFrameIndex == localCurrFrameIndex)
14862  {
14863  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
14864  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
14865  pAllocationInfo->offset = hAllocation->GetOffset();
14866  pAllocationInfo->size = hAllocation->GetSize();
14867  pAllocationInfo->pMappedData = VMA_NULL;
14868  pAllocationInfo->pUserData = hAllocation->GetUserData();
14869  return;
14870  }
14871  else // Last use time earlier than current time.
14872  {
14873  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
14874  {
14875  localLastUseFrameIndex = localCurrFrameIndex;
14876  }
14877  }
14878  }
14879  }
14880  else
14881  {
14882 #if VMA_STATS_STRING_ENABLED
14883  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
14884  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
14885  for(;;)
14886  {
14887  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
14888  if(localLastUseFrameIndex == localCurrFrameIndex)
14889  {
14890  break;
14891  }
14892  else // Last use time earlier than current time.
14893  {
14894  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
14895  {
14896  localLastUseFrameIndex = localCurrFrameIndex;
14897  }
14898  }
14899  }
14900 #endif
14901 
14902  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
14903  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
14904  pAllocationInfo->offset = hAllocation->GetOffset();
14905  pAllocationInfo->size = hAllocation->GetSize();
14906  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
14907  pAllocationInfo->pUserData = hAllocation->GetUserData();
14908  }
14909 }
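// Note on the loop above: for allocations that can become lost, the last-use
// frame index is advanced to the current frame with a compare-exchange; the
// loop retries until the CAS succeeds or the allocation is observed as lost,
// so concurrent callers never move the frame index backwards.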
14910 
14911 bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
14912 {
14913  // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
14914  if(hAllocation->CanBecomeLost())
14915  {
14916  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
14917  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
14918  for(;;)
14919  {
14920  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
14921  {
14922  return false;
14923  }
14924  else if(localLastUseFrameIndex == localCurrFrameIndex)
14925  {
14926  return true;
14927  }
14928  else // Last use time earlier than current time.
14929  {
14930  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
14931  {
14932  localLastUseFrameIndex = localCurrFrameIndex;
14933  }
14934  }
14935  }
14936  }
14937  else
14938  {
14939 #if VMA_STATS_STRING_ENABLED
14940  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
14941  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
14942  for(;;)
14943  {
14944  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
14945  if(localLastUseFrameIndex == localCurrFrameIndex)
14946  {
14947  break;
14948  }
14949  else // Last use time earlier than current time.
14950  {
14951  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
14952  {
14953  localLastUseFrameIndex = localCurrFrameIndex;
14954  }
14955  }
14956  }
14957 #endif
14958 
14959  return true;
14960  }
14961 }
14962 
14963 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
14964 {
14965  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
14966 
14967  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
14968 
14969  if(newCreateInfo.maxBlockCount == 0)
14970  {
14971  newCreateInfo.maxBlockCount = SIZE_MAX;
14972  }
14973  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
14974  {
14975  return VK_ERROR_INITIALIZATION_FAILED;
14976  }
14977 
14978  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
14979 
14980  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
14981 
14982  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
14983  if(res != VK_SUCCESS)
14984  {
14985  vma_delete(this, *pPool);
14986  *pPool = VMA_NULL;
14987  return res;
14988  }
14989 
14990  // Add to m_Pools.
14991  {
14992  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
14993  (*pPool)->SetId(m_NextPoolId++);
14994  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
14995  }
14996 
14997  return VK_SUCCESS;
14998 }
14999 
15000 void VmaAllocator_T::DestroyPool(VmaPool pool)
15001 {
15002  // Remove from m_Pools.
15003  {
15004  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
15005  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
15006  VMA_ASSERT(success && "Pool not found in Allocator.");
15007  }
15008 
15009  vma_delete(this, pool);
15010 }
15011 
15012 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
15013 {
15014  pool->m_BlockVector.GetPoolStats(pPoolStats);
15015 }
15016 
15017 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
15018 {
15019  m_CurrentFrameIndex.store(frameIndex);
15020 }
15021 
15022 void VmaAllocator_T::MakePoolAllocationsLost(
15023  VmaPool hPool,
15024  size_t* pLostAllocationCount)
15025 {
15026  hPool->m_BlockVector.MakePoolAllocationsLost(
15027  m_CurrentFrameIndex.load(),
15028  pLostAllocationCount);
15029 }
15030 
15031 VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
15032 {
15033  return hPool->m_BlockVector.CheckCorruption();
15034 }
15035 
15036 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
15037 {
15038  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
15039 
15040  // Process default pools.
15041  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15042  {
15043  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
15044  {
15045  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
15046  VMA_ASSERT(pBlockVector);
15047  VkResult localRes = pBlockVector->CheckCorruption();
15048  switch(localRes)
15049  {
15050  case VK_ERROR_FEATURE_NOT_PRESENT:
15051  break;
15052  case VK_SUCCESS:
15053  finalRes = VK_SUCCESS;
15054  break;
15055  default:
15056  return localRes;
15057  }
15058  }
15059  }
15060 
15061  // Process custom pools.
15062  {
15063  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
15064  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
15065  {
15066  if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
15067  {
15068  VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
15069  switch(localRes)
15070  {
15071  case VK_ERROR_FEATURE_NOT_PRESENT:
15072  break;
15073  case VK_SUCCESS:
15074  finalRes = VK_SUCCESS;
15075  break;
15076  default:
15077  return localRes;
15078  }
15079  }
15080  }
15081  }
15082 
15083  return finalRes;
15084 }
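// Note: the aggregate result is VK_SUCCESS if at least one checked block
// vector supports corruption detection and passed, VK_ERROR_FEATURE_NOT_PRESENT
// if none of them could be checked (e.g. because margins are disabled), and
// the first hard error encountered otherwise.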
15085 
15086 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
15087 {
15088  *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST, false);
15089  (*pAllocation)->InitLost();
15090 }
15091 
15092 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
15093 {
15094  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
15095 
15096  VkResult res;
15097  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
15098  {
15099  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
15100  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
15101  {
15102  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
15103  if(res == VK_SUCCESS)
15104  {
15105  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
15106  }
15107  }
15108  else
15109  {
15110  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
15111  }
15112  }
15113  else
15114  {
15115  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
15116  }
15117 
15118  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
15119  {
15120  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
15121  }
15122 
15123  return res;
15124 }
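// Illustrative sketch (user-side, not part of the library): the heap size
// limit consulted above is opt-in, configured at allocator creation through
// VmaAllocatorCreateInfo::pHeapSizeLimit. The 256 MiB cap and heap index 0
// below are example assumptions; VK_WHOLE_SIZE means "no limit" for a heap.
/*
    VkDeviceSize heapSizeLimit[VK_MAX_MEMORY_HEAPS];
    for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    {
        heapSizeLimit[i] = VK_WHOLE_SIZE; // No limit by default.
    }
    heapSizeLimit[0] = 256ull * 1024 * 1024; // Cap heap 0 at 256 MiB.

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice; // Assumed to exist.
    allocatorInfo.device = device;                 // Assumed to exist.
    allocatorInfo.pHeapSizeLimit = heapSizeLimit;

    VmaAllocator allocator = VK_NULL_HANDLE;
    VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
*/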
15125 
15126 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
15127 {
15128  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
15129  {
15130  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
15131  }
15132 
15133  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
15134 
15135  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
15136  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
15137  {
15138  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
15139  m_HeapSizeLimit[heapIndex] += size;
15140  }
15141 }
15142 
15143 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
15144 {
15145  if(hAllocation->CanBecomeLost())
15146  {
15147  return VK_ERROR_MEMORY_MAP_FAILED;
15148  }
15149 
15150  switch(hAllocation->GetType())
15151  {
15152  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15153  {
15154  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
15155  char *pBytes = VMA_NULL;
15156  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
15157  if(res == VK_SUCCESS)
15158  {
15159  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
15160  hAllocation->BlockAllocMap();
15161  }
15162  return res;
15163  }
15164  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15165  return hAllocation->DedicatedAllocMap(this, ppData);
15166  default:
15167  VMA_ASSERT(0);
15168  return VK_ERROR_MEMORY_MAP_FAILED;
15169  }
15170 }
15171 
15172 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
15173 {
15174  switch(hAllocation->GetType())
15175  {
15176  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15177  {
15178  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
15179  hAllocation->BlockAllocUnmap();
15180  pBlock->Unmap(this, 1);
15181  }
15182  break;
15183  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15184  hAllocation->DedicatedAllocUnmap(this);
15185  break;
15186  default:
15187  VMA_ASSERT(0);
15188  }
15189 }
15190 
15191 VkResult VmaAllocator_T::BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer)
15192 {
15193  VkResult res = VK_SUCCESS;
15194  switch(hAllocation->GetType())
15195  {
15196  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15197  res = GetVulkanFunctions().vkBindBufferMemory(
15198  m_hDevice,
15199  hBuffer,
15200  hAllocation->GetMemory(),
15201  0); //memoryOffset
15202  break;
15203  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15204  {
15205  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
15206  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
15207  res = pBlock->BindBufferMemory(this, hAllocation, hBuffer);
15208  break;
15209  }
15210  default:
15211  VMA_ASSERT(0);
15212  }
15213  return res;
15214 }
15215 
15216 VkResult VmaAllocator_T::BindImageMemory(VmaAllocation hAllocation, VkImage hImage)
15217 {
15218  VkResult res = VK_SUCCESS;
15219  switch(hAllocation->GetType())
15220  {
15221  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15222  res = GetVulkanFunctions().vkBindImageMemory(
15223  m_hDevice,
15224  hImage,
15225  hAllocation->GetMemory(),
15226  0); //memoryOffset
15227  break;
15228  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15229  {
15230  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
15231  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
15232  res = pBlock->BindImageMemory(this, hAllocation, hImage);
15233  break;
15234  }
15235  default:
15236  VMA_ASSERT(0);
15237  }
15238  return res;
15239 }
15240 
15241 void VmaAllocator_T::FlushOrInvalidateAllocation(
15242  VmaAllocation hAllocation,
15243  VkDeviceSize offset, VkDeviceSize size,
15244  VMA_CACHE_OPERATION op)
15245 {
15246  const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex();
15247  if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
15248  {
15249  const VkDeviceSize allocationSize = hAllocation->GetSize();
15250  VMA_ASSERT(offset <= allocationSize);
15251 
15252  const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
15253 
15254  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
15255  memRange.memory = hAllocation->GetMemory();
15256 
15257  switch(hAllocation->GetType())
15258  {
15259  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15260  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
15261  if(size == VK_WHOLE_SIZE)
15262  {
15263  memRange.size = allocationSize - memRange.offset;
15264  }
15265  else
15266  {
15267  VMA_ASSERT(offset + size <= allocationSize);
15268  memRange.size = VMA_MIN(
15269  VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize),
15270  allocationSize - memRange.offset);
15271  }
15272  break;
15273 
15274  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15275  {
15276  // 1. Still within this allocation.
15277  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
15278  if(size == VK_WHOLE_SIZE)
15279  {
15280  size = allocationSize - offset;
15281  }
15282  else
15283  {
15284  VMA_ASSERT(offset + size <= allocationSize);
15285  }
15286  memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize);
15287 
15288  // 2. Adjust to whole block.
15289  const VkDeviceSize allocationOffset = hAllocation->GetOffset();
15290  VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
15291  const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();
15292  memRange.offset += allocationOffset;
15293  memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);
15294 
15295  break;
15296  }
15297 
15298  default:
15299  VMA_ASSERT(0);
15300  }
15301 
15302  switch(op)
15303  {
15304  case VMA_CACHE_FLUSH:
15305  (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
15306  break;
15307  case VMA_CACHE_INVALIDATE:
15308  (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
15309  break;
15310  default:
15311  VMA_ASSERT(0);
15312  }
15313  }
15314  // else: Just ignore this call.
15315 }
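// Worked example of the alignment performed above (all values assumed):
// with nonCoherentAtomSize = 64, offset = 100, size = 8:
//   memRange.offset = VmaAlignDown(100, 64) = 64
//   memRange.size   = VmaAlignUp(8 + (100 - 64), 64) = VmaAlignUp(44, 64) = 64
// The flushed/invalidated range [64, 128) covers the requested bytes
// [100, 108) while satisfying the VkMappedMemoryRange alignment rules.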
15316 
15317 void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
15318 {
15319  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
15320 
15321  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
15322  {
15323  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
15324  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
15325  VMA_ASSERT(pDedicatedAllocations);
15326  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
15327  VMA_ASSERT(success);
15328  }
15329 
15330  VkDeviceMemory hMemory = allocation->GetMemory();
15331 
15332  /*
15333  There is no need to call this, because the Vulkan spec allows skipping
15334  vkUnmapMemory before vkFreeMemory.
15335 
15336  if(allocation->GetMappedData() != VMA_NULL)
15337  {
15338  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
15339  }
15340  */
15341 
15342  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
15343 
15344  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
15345 }
15346 
15347 void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
15348 {
15349  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
15350  !hAllocation->CanBecomeLost() &&
15351  (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
15352  {
15353  void* pData = VMA_NULL;
15354  VkResult res = Map(hAllocation, &pData);
15355  if(res == VK_SUCCESS)
15356  {
15357  memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
15358  FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
15359  Unmap(hAllocation);
15360  }
15361  else
15362  {
15363  VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
15364  }
15365  }
15366 }
15367 
15368 #if VMA_STATS_STRING_ENABLED
15369 
15370 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
15371 {
15372  bool dedicatedAllocationsStarted = false;
15373  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15374  {
15375  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
15376  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
15377  VMA_ASSERT(pDedicatedAllocVector);
15378  if(pDedicatedAllocVector->empty() == false)
15379  {
15380  if(dedicatedAllocationsStarted == false)
15381  {
15382  dedicatedAllocationsStarted = true;
15383  json.WriteString("DedicatedAllocations");
15384  json.BeginObject();
15385  }
15386 
15387  json.BeginString("Type ");
15388  json.ContinueString(memTypeIndex);
15389  json.EndString();
15390 
15391  json.BeginArray();
15392 
15393  for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
15394  {
15395  json.BeginObject(true);
15396  const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
15397  hAlloc->PrintParameters(json);
15398  json.EndObject();
15399  }
15400 
15401  json.EndArray();
15402  }
15403  }
15404  if(dedicatedAllocationsStarted)
15405  {
15406  json.EndObject();
15407  }
15408 
15409  {
15410  bool allocationsStarted = false;
15411  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15412  {
15413  if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
15414  {
15415  if(allocationsStarted == false)
15416  {
15417  allocationsStarted = true;
15418  json.WriteString("DefaultPools");
15419  json.BeginObject();
15420  }
15421 
15422  json.BeginString("Type ");
15423  json.ContinueString(memTypeIndex);
15424  json.EndString();
15425 
15426  m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
15427  }
15428  }
15429  if(allocationsStarted)
15430  {
15431  json.EndObject();
15432  }
15433  }
15434 
15435  // Custom pools
15436  {
15437  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
15438  const size_t poolCount = m_Pools.size();
15439  if(poolCount > 0)
15440  {
15441  json.WriteString("Pools");
15442  json.BeginObject();
15443  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
15444  {
15445  json.BeginString();
15446  json.ContinueString(m_Pools[poolIndex]->GetId());
15447  json.EndString();
15448 
15449  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
15450  }
15451  json.EndObject();
15452  }
15453  }
15454 }
15455 
15456 #endif // #if VMA_STATS_STRING_ENABLED
15457 
15458 ////////////////////////////////////////////////////////////////////////////////
15459 // Public interface
15460 
15461 VkResult vmaCreateAllocator(
15462  const VmaAllocatorCreateInfo* pCreateInfo,
15463  VmaAllocator* pAllocator)
15464 {
15465  VMA_ASSERT(pCreateInfo && pAllocator);
15466  VMA_DEBUG_LOG("vmaCreateAllocator");
15467  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
15468  return (*pAllocator)->Init(pCreateInfo);
15469 }
15470 
15471 void vmaDestroyAllocator(
15472  VmaAllocator allocator)
15473 {
15474  if(allocator != VK_NULL_HANDLE)
15475  {
15476  VMA_DEBUG_LOG("vmaDestroyAllocator");
15477  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
15478  vma_delete(&allocationCallbacks, allocator);
15479  }
15480 }
15481 
15482 void vmaGetPhysicalDeviceProperties(
15483  VmaAllocator allocator,
15484  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
15485 {
15486  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
15487  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
15488 }
15489 
15490 void vmaGetMemoryProperties(
15491  VmaAllocator allocator,
15492  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
15493 {
15494  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
15495  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
15496 }
15497 
15498 void vmaGetMemoryTypeProperties(
15499  VmaAllocator allocator,
15500  uint32_t memoryTypeIndex,
15501  VkMemoryPropertyFlags* pFlags)
15502 {
15503  VMA_ASSERT(allocator && pFlags);
15504  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
15505  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
15506 }
15507 
15508 void vmaSetCurrentFrameIndex(
15509  VmaAllocator allocator,
15510  uint32_t frameIndex)
15511 {
15512  VMA_ASSERT(allocator);
15513  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
15514 
15515  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15516 
15517  allocator->SetCurrentFrameIndex(frameIndex);
15518 }
15519 
15520 void vmaCalculateStats(
15521  VmaAllocator allocator,
15522  VmaStats* pStats)
15523 {
15524  VMA_ASSERT(allocator && pStats);
15525  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15526  allocator->CalculateStats(pStats);
15527 }
15528 
15529 #if VMA_STATS_STRING_ENABLED
15530 
15531 void vmaBuildStatsString(
15532  VmaAllocator allocator,
15533  char** ppStatsString,
15534  VkBool32 detailedMap)
15535 {
15536  VMA_ASSERT(allocator && ppStatsString);
15537  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15538 
15539  VmaStringBuilder sb(allocator);
15540  {
15541  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
15542  json.BeginObject();
15543 
15544  VmaStats stats;
15545  allocator->CalculateStats(&stats);
15546 
15547  json.WriteString("Total");
15548  VmaPrintStatInfo(json, stats.total);
15549 
15550  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
15551  {
15552  json.BeginString("Heap ");
15553  json.ContinueString(heapIndex);
15554  json.EndString();
15555  json.BeginObject();
15556 
15557  json.WriteString("Size");
15558  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
15559 
15560  json.WriteString("Flags");
15561  json.BeginArray(true);
15562  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
15563  {
15564  json.WriteString("DEVICE_LOCAL");
15565  }
15566  json.EndArray();
15567 
15568  if(stats.memoryHeap[heapIndex].blockCount > 0)
15569  {
15570  json.WriteString("Stats");
15571  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
15572  }
15573 
15574  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
15575  {
15576  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
15577  {
15578  json.BeginString("Type ");
15579  json.ContinueString(typeIndex);
15580  json.EndString();
15581 
15582  json.BeginObject();
15583 
15584  json.WriteString("Flags");
15585  json.BeginArray(true);
15586  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
15587  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
15588  {
15589  json.WriteString("DEVICE_LOCAL");
15590  }
15591  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
15592  {
15593  json.WriteString("HOST_VISIBLE");
15594  }
15595  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
15596  {
15597  json.WriteString("HOST_COHERENT");
15598  }
15599  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
15600  {
15601  json.WriteString("HOST_CACHED");
15602  }
15603  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
15604  {
15605  json.WriteString("LAZILY_ALLOCATED");
15606  }
15607  json.EndArray();
15608 
15609  if(stats.memoryType[typeIndex].blockCount > 0)
15610  {
15611  json.WriteString("Stats");
15612  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
15613  }
15614 
15615  json.EndObject();
15616  }
15617  }
15618 
15619  json.EndObject();
15620  }
15621  if(detailedMap == VK_TRUE)
15622  {
15623  allocator->PrintDetailedMap(json);
15624  }
15625 
15626  json.EndObject();
15627  }
15628 
15629  const size_t len = sb.GetLength();
15630  char* const pChars = vma_new_array(allocator, char, len + 1);
15631  if(len > 0)
15632  {
15633  memcpy(pChars, sb.GetData(), len);
15634  }
15635  pChars[len] = '\0';
15636  *ppStatsString = pChars;
15637 }
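// Usage sketch (illustrative, user-side): building, printing, and releasing
// the statistics string. 'allocator' is assumed to be a valid VmaAllocator;
// <cstdio> provides printf.
/*
    char* statsString = nullptr;
    vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE = include detailed map.
    printf("%s\n", statsString);
    vmaFreeStatsString(allocator, statsString);
*/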
15638 
15639 void vmaFreeStatsString(
15640  VmaAllocator allocator,
15641  char* pStatsString)
15642 {
15643  if(pStatsString != VMA_NULL)
15644  {
15645  VMA_ASSERT(allocator);
15646  size_t len = strlen(pStatsString);
15647  vma_delete_array(allocator, pStatsString, len + 1);
15648  }
15649 }
15650 
15651 #endif // #if VMA_STATS_STRING_ENABLED
15652 
15653 /*
15654 This function is not protected by any mutex because it just reads immutable data.
15655 */
15656 VkResult vmaFindMemoryTypeIndex(
15657  VmaAllocator allocator,
15658  uint32_t memoryTypeBits,
15659  const VmaAllocationCreateInfo* pAllocationCreateInfo,
15660  uint32_t* pMemoryTypeIndex)
15661 {
15662  VMA_ASSERT(allocator != VK_NULL_HANDLE);
15663  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
15664  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
15665 
15666  if(pAllocationCreateInfo->memoryTypeBits != 0)
15667  {
15668  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
15669  }
15670 
15671  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
15672  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
15673 
15674  const bool mapped = (pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
15675  if(mapped)
15676  {
15677  preferredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
15678  }
15679 
15680  // Convert usage to requiredFlags and preferredFlags.
15681  switch(pAllocationCreateInfo->usage)
15682  {
15683  case VMA_MEMORY_USAGE_UNKNOWN:
15684  break;
15685  case VMA_MEMORY_USAGE_GPU_ONLY:
15686  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
15687  {
15688  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
15689  }
15690  break;
15691  case VMA_MEMORY_USAGE_CPU_ONLY:
15692  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
15693  break;
15694  case VMA_MEMORY_USAGE_CPU_TO_GPU:
15695  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
15696  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
15697  {
15698  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
15699  }
15700  break;
15701  case VMA_MEMORY_USAGE_GPU_TO_CPU:
15702  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
15703  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
15704  break;
15705  default:
15706  break;
15707  }
15708 
15709  *pMemoryTypeIndex = UINT32_MAX;
15710  uint32_t minCost = UINT32_MAX;
15711  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
15712  memTypeIndex < allocator->GetMemoryTypeCount();
15713  ++memTypeIndex, memTypeBit <<= 1)
15714  {
15715  // This memory type is acceptable according to memoryTypeBits bitmask.
15716  if((memTypeBit & memoryTypeBits) != 0)
15717  {
15718  const VkMemoryPropertyFlags currFlags =
15719  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
15720  // This memory type contains requiredFlags.
15721  if((requiredFlags & ~currFlags) == 0)
15722  {
15723  // Calculate cost as number of bits from preferredFlags not present in this memory type.
15724  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
15725  // Remember memory type with lowest cost.
15726  if(currCost < minCost)
15727  {
15728  *pMemoryTypeIndex = memTypeIndex;
15729  if(currCost == 0)
15730  {
15731  return VK_SUCCESS;
15732  }
15733  minCost = currCost;
15734  }
15735  }
15736  }
15737  }
15738  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
15739 }
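// Usage sketch (illustrative, user-side): picking a memory type for a
// host-visible staging allocation. 'allocator' and 'memReq' (from e.g.
// vkGetBufferMemoryRequirements) are assumed to exist.
/*
    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;

    uint32_t memTypeIndex = UINT32_MAX;
    VkResult res = vmaFindMemoryTypeIndex(
        allocator, memReq.memoryTypeBits, &allocCreateInfo, &memTypeIndex);
    // On VK_SUCCESS, memTypeIndex is HOST_VISIBLE + HOST_COHERENT and misses
    // the fewest preferred flags among the types permitted by memoryTypeBits.
*/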
15740 
15741 VkResult vmaFindMemoryTypeIndexForBufferInfo(
15742  VmaAllocator allocator,
15743  const VkBufferCreateInfo* pBufferCreateInfo,
15744  const VmaAllocationCreateInfo* pAllocationCreateInfo,
15745  uint32_t* pMemoryTypeIndex)
15746 {
15747  VMA_ASSERT(allocator != VK_NULL_HANDLE);
15748  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
15749  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
15750  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
15751 
15752  const VkDevice hDev = allocator->m_hDevice;
15753  VkBuffer hBuffer = VK_NULL_HANDLE;
15754  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
15755  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
15756  if(res == VK_SUCCESS)
15757  {
15758  VkMemoryRequirements memReq = {};
15759  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
15760  hDev, hBuffer, &memReq);
15761 
15762  res = vmaFindMemoryTypeIndex(
15763  allocator,
15764  memReq.memoryTypeBits,
15765  pAllocationCreateInfo,
15766  pMemoryTypeIndex);
15767 
15768  allocator->GetVulkanFunctions().vkDestroyBuffer(
15769  hDev, hBuffer, allocator->GetAllocationCallbacks());
15770  }
15771  return res;
15772 }
15773 
15774 VkResult vmaFindMemoryTypeIndexForImageInfo(
15775  VmaAllocator allocator,
15776  const VkImageCreateInfo* pImageCreateInfo,
15777  const VmaAllocationCreateInfo* pAllocationCreateInfo,
15778  uint32_t* pMemoryTypeIndex)
15779 {
15780  VMA_ASSERT(allocator != VK_NULL_HANDLE);
15781  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
15782  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
15783  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
15784 
15785  const VkDevice hDev = allocator->m_hDevice;
15786  VkImage hImage = VK_NULL_HANDLE;
15787  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
15788  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
15789  if(res == VK_SUCCESS)
15790  {
15791  VkMemoryRequirements memReq = {};
15792  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
15793  hDev, hImage, &memReq);
15794 
15795  res = vmaFindMemoryTypeIndex(
15796  allocator,
15797  memReq.memoryTypeBits,
15798  pAllocationCreateInfo,
15799  pMemoryTypeIndex);
15800 
15801  allocator->GetVulkanFunctions().vkDestroyImage(
15802  hDev, hImage, allocator->GetAllocationCallbacks());
15803  }
15804  return res;
15805 }
15806 
15807 VkResult vmaCreatePool(
15808  VmaAllocator allocator,
15809  const VmaPoolCreateInfo* pCreateInfo,
15810  VmaPool* pPool)
15811 {
15812  VMA_ASSERT(allocator && pCreateInfo && pPool);
15813 
15814  VMA_DEBUG_LOG("vmaCreatePool");
15815 
15816  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15817 
15818  VkResult res = allocator->CreatePool(pCreateInfo, pPool);
15819 
15820 #if VMA_RECORDING_ENABLED
15821  if(allocator->GetRecorder() != VMA_NULL)
15822  {
15823  allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
15824  }
15825 #endif
15826 
15827  return res;
15828 }
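// Usage sketch (illustrative, user-side): creating a custom pool.
// 'allocator' and 'memTypeIndex' (e.g. from vmaFindMemoryTypeIndex) are
// assumed; the 16 MiB block size is an arbitrary example value.
/*
    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memTypeIndex;
    poolCreateInfo.blockSize = 16ull * 1024 * 1024;

    VmaPool pool = VK_NULL_HANDLE;
    VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
    // ... allocate with VmaAllocationCreateInfo::pool = pool ...
    vmaDestroyPool(allocator, pool);
*/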
15829 
15830 void vmaDestroyPool(
15831  VmaAllocator allocator,
15832  VmaPool pool)
15833 {
15834  VMA_ASSERT(allocator);
15835 
15836  if(pool == VK_NULL_HANDLE)
15837  {
15838  return;
15839  }
15840 
15841  VMA_DEBUG_LOG("vmaDestroyPool");
15842 
15843  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15844 
15845 #if VMA_RECORDING_ENABLED
15846  if(allocator->GetRecorder() != VMA_NULL)
15847  {
15848  allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
15849  }
15850 #endif
15851 
15852  allocator->DestroyPool(pool);
15853 }
15854 
15855 void vmaGetPoolStats(
15856  VmaAllocator allocator,
15857  VmaPool pool,
15858  VmaPoolStats* pPoolStats)
15859 {
15860  VMA_ASSERT(allocator && pool && pPoolStats);
15861 
15862  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15863 
15864  allocator->GetPoolStats(pool, pPoolStats);
15865 }
15866 
15867 void vmaMakePoolAllocationsLost(
15868  VmaAllocator allocator,
15869  VmaPool pool,
15870  size_t* pLostAllocationCount)
15871 {
15872  VMA_ASSERT(allocator && pool);
15873 
15874  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15875 
15876 #if VMA_RECORDING_ENABLED
15877  if(allocator->GetRecorder() != VMA_NULL)
15878  {
15879  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
15880  }
15881 #endif
15882 
15883  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
15884 }
15885 
15886 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
15887 {
15888  VMA_ASSERT(allocator && pool);
15889 
15890  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15891 
15892  VMA_DEBUG_LOG("vmaCheckPoolCorruption");
15893 
15894  return allocator->CheckPoolCorruption(pool);
15895 }
15896 
15897 VkResult vmaAllocateMemory(
15898  VmaAllocator allocator,
15899  const VkMemoryRequirements* pVkMemoryRequirements,
15900  const VmaAllocationCreateInfo* pCreateInfo,
15901  VmaAllocation* pAllocation,
15902  VmaAllocationInfo* pAllocationInfo)
15903 {
15904  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
15905 
15906  VMA_DEBUG_LOG("vmaAllocateMemory");
15907 
15908  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15909 
15910  VkResult result = allocator->AllocateMemory(
15911  *pVkMemoryRequirements,
15912  false, // requiresDedicatedAllocation
15913  false, // prefersDedicatedAllocation
15914  VK_NULL_HANDLE, // dedicatedBuffer
15915  VK_NULL_HANDLE, // dedicatedImage
15916  *pCreateInfo,
15917  VMA_SUBALLOCATION_TYPE_UNKNOWN,
15918  1, // allocationCount
15919  pAllocation);
15920 
15921 #if VMA_RECORDING_ENABLED
15922  if(allocator->GetRecorder() != VMA_NULL)
15923  {
15924  allocator->GetRecorder()->RecordAllocateMemory(
15925  allocator->GetCurrentFrameIndex(),
15926  *pVkMemoryRequirements,
15927  *pCreateInfo,
15928  *pAllocation);
15929  }
15930 #endif
15931 
15932  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
15933  {
15934  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
15935  }
15936 
15937  return result;
15938 }
15939 
15940 VkResult vmaAllocateMemoryPages(
15941  VmaAllocator allocator,
15942  const VkMemoryRequirements* pVkMemoryRequirements,
15943  const VmaAllocationCreateInfo* pCreateInfo,
15944  size_t allocationCount,
15945  VmaAllocation* pAllocations,
15946  VmaAllocationInfo* pAllocationInfo)
15947 {
15948  if(allocationCount == 0)
15949  {
15950  return VK_SUCCESS;
15951  }
15952 
15953  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocations);
15954 
15955  VMA_DEBUG_LOG("vmaAllocateMemoryPages");
15956 
15957  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15958 
15959  VkResult result = allocator->AllocateMemory(
15960  *pVkMemoryRequirements,
15961  false, // requiresDedicatedAllocation
15962  false, // prefersDedicatedAllocation
15963  VK_NULL_HANDLE, // dedicatedBuffer
15964  VK_NULL_HANDLE, // dedicatedImage
15965  *pCreateInfo,
15966  VMA_SUBALLOCATION_TYPE_UNKNOWN,
15967  allocationCount,
15968  pAllocations);
15969 
15970 #if VMA_RECORDING_ENABLED
15971  if(allocator->GetRecorder() != VMA_NULL)
15972  {
15973  allocator->GetRecorder()->RecordAllocateMemoryPages(
15974  allocator->GetCurrentFrameIndex(),
15975  *pVkMemoryRequirements,
15976  *pCreateInfo,
15977  (uint64_t)allocationCount,
15978  pAllocations);
15979  }
15980 #endif
15981 
15982  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
15983  {
15984  for(size_t i = 0; i < allocationCount; ++i)
15985  {
15986  allocator->GetAllocationInfo(pAllocations[i], pAllocationInfo + i);
15987  }
15988  }
15989 
15990  return result;
15991 }
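// Usage sketch (illustrative, user-side): allocating several allocations with
// identical requirements in one call; if any of them fails, none of them
// remain allocated. 'allocator' and 'memReq' are assumed to exist.
/*
    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VmaAllocation allocs[8] = {};
    VkResult res = vmaAllocateMemoryPages(
        allocator, &memReq, &allocCreateInfo, 8, allocs, nullptr);
    // ...
    vmaFreeMemoryPages(allocator, 8, allocs);
*/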
15992 
15993 VkResult vmaAllocateMemoryForBuffer(
15994  VmaAllocator allocator,
15995  VkBuffer buffer,
15996  const VmaAllocationCreateInfo* pCreateInfo,
15997  VmaAllocation* pAllocation,
15998  VmaAllocationInfo* pAllocationInfo)
15999 {
16000  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
16001 
16002  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
16003 
16004  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16005 
16006  VkMemoryRequirements vkMemReq = {};
16007  bool requiresDedicatedAllocation = false;
16008  bool prefersDedicatedAllocation = false;
16009  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
16010  requiresDedicatedAllocation,
16011  prefersDedicatedAllocation);
16012 
16013  VkResult result = allocator->AllocateMemory(
16014  vkMemReq,
16015  requiresDedicatedAllocation,
16016  prefersDedicatedAllocation,
16017  buffer, // dedicatedBuffer
16018  VK_NULL_HANDLE, // dedicatedImage
16019  *pCreateInfo,
16020  VMA_SUBALLOCATION_TYPE_BUFFER,
16021  1, // allocationCount
16022  pAllocation);
16023 
16024 #if VMA_RECORDING_ENABLED
16025  if(allocator->GetRecorder() != VMA_NULL)
16026  {
16027  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
16028  allocator->GetCurrentFrameIndex(),
16029  vkMemReq,
16030  requiresDedicatedAllocation,
16031  prefersDedicatedAllocation,
16032  *pCreateInfo,
16033  *pAllocation);
16034  }
16035 #endif
16036 
16037  if(pAllocationInfo && result == VK_SUCCESS)
16038  {
16039  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
16040  }
16041 
16042  return result;
16043 }
16044 
16045 VkResult vmaAllocateMemoryForImage(
16046  VmaAllocator allocator,
16047  VkImage image,
16048  const VmaAllocationCreateInfo* pCreateInfo,
16049  VmaAllocation* pAllocation,
16050  VmaAllocationInfo* pAllocationInfo)
16051 {
16052  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
16053 
16054  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
16055 
16056  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16057 
16058  VkMemoryRequirements vkMemReq = {};
16059  bool requiresDedicatedAllocation = false;
16060  bool prefersDedicatedAllocation = false;
16061  allocator->GetImageMemoryRequirements(image, vkMemReq,
16062  requiresDedicatedAllocation, prefersDedicatedAllocation);
16063 
16064  VkResult result = allocator->AllocateMemory(
16065  vkMemReq,
16066  requiresDedicatedAllocation,
16067  prefersDedicatedAllocation,
16068  VK_NULL_HANDLE, // dedicatedBuffer
16069  image, // dedicatedImage
16070  *pCreateInfo,
16071  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
16072  1, // allocationCount
16073  pAllocation);
16074 
16075 #if VMA_RECORDING_ENABLED
16076  if(allocator->GetRecorder() != VMA_NULL)
16077  {
16078  allocator->GetRecorder()->RecordAllocateMemoryForImage(
16079  allocator->GetCurrentFrameIndex(),
16080  vkMemReq,
16081  requiresDedicatedAllocation,
16082  prefersDedicatedAllocation,
16083  *pCreateInfo,
16084  *pAllocation);
16085  }
16086 #endif
16087 
16088  if(pAllocationInfo && result == VK_SUCCESS)
16089  {
16090  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
16091  }
16092 
16093  return result;
16094 }
16095 
16096 void vmaFreeMemory(
16097  VmaAllocator allocator,
16098  VmaAllocation allocation)
16099 {
16100  VMA_ASSERT(allocator);
16101 
16102  if(allocation == VK_NULL_HANDLE)
16103  {
16104  return;
16105  }
16106 
16107  VMA_DEBUG_LOG("vmaFreeMemory");
16108 
16109  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16110 
16111 #if VMA_RECORDING_ENABLED
16112  if(allocator->GetRecorder() != VMA_NULL)
16113  {
16114  allocator->GetRecorder()->RecordFreeMemory(
16115  allocator->GetCurrentFrameIndex(),
16116  allocation);
16117  }
16118 #endif
16119 
16120  allocator->FreeMemory(
16121  1, // allocationCount
16122  &allocation);
16123 }
16124 
16125 void vmaFreeMemoryPages(
16126  VmaAllocator allocator,
16127  size_t allocationCount,
16128  VmaAllocation* pAllocations)
16129 {
16130  if(allocationCount == 0)
16131  {
16132  return;
16133  }
16134 
16135  VMA_ASSERT(allocator);
16136 
16137  VMA_DEBUG_LOG("vmaFreeMemoryPages");
16138 
16139  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16140 
16141 #if VMA_RECORDING_ENABLED
16142  if(allocator->GetRecorder() != VMA_NULL)
16143  {
16144  allocator->GetRecorder()->RecordFreeMemoryPages(
16145  allocator->GetCurrentFrameIndex(),
16146  (uint64_t)allocationCount,
16147  pAllocations);
16148  }
16149 #endif
16150 
16151  allocator->FreeMemory(allocationCount, pAllocations);
16152 }
16153 
16154 VkResult vmaResizeAllocation(
16155  VmaAllocator allocator,
16156  VmaAllocation allocation,
16157  VkDeviceSize newSize)
16158 {
16159  VMA_ASSERT(allocator && allocation);
16160 
16161  VMA_DEBUG_LOG("vmaResizeAllocation");
16162 
16163  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16164 
16165 #if VMA_RECORDING_ENABLED
16166  if(allocator->GetRecorder() != VMA_NULL)
16167  {
16168  allocator->GetRecorder()->RecordResizeAllocation(
16169  allocator->GetCurrentFrameIndex(),
16170  allocation,
16171  newSize);
16172  }
16173 #endif
16174 
16175  return allocator->ResizeAllocation(allocation, newSize);
16176 }
16177 
16178 void vmaGetAllocationInfo(
16179  VmaAllocator allocator,
16180  VmaAllocation allocation,
16181  VmaAllocationInfo* pAllocationInfo)
16182 {
16183  VMA_ASSERT(allocator && allocation && pAllocationInfo);
16184 
16185  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16186 
16187 #if VMA_RECORDING_ENABLED
16188  if(allocator->GetRecorder() != VMA_NULL)
16189  {
16190  allocator->GetRecorder()->RecordGetAllocationInfo(
16191  allocator->GetCurrentFrameIndex(),
16192  allocation);
16193  }
16194 #endif
16195 
16196  allocator->GetAllocationInfo(allocation, pAllocationInfo);
16197 }
16198 
16199 VkBool32 vmaTouchAllocation(
16200  VmaAllocator allocator,
16201  VmaAllocation allocation)
16202 {
16203  VMA_ASSERT(allocator && allocation);
16204 
16205  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16206 
16207 #if VMA_RECORDING_ENABLED
16208  if(allocator->GetRecorder() != VMA_NULL)
16209  {
16210  allocator->GetRecorder()->RecordTouchAllocation(
16211  allocator->GetCurrentFrameIndex(),
16212  allocation);
16213  }
16214 #endif
16215 
16216  return allocator->TouchAllocation(allocation);
16217 }
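// Usage sketch (illustrative, user-side): the per-frame check for an
// allocation created with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT.
// 'allocator' and 'allocation' are assumed to exist.
/*
    if(vmaTouchAllocation(allocator, allocation) == VK_FALSE)
    {
        // Allocation became lost in a previous frame - recreate it.
    }
*/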
16218 
16219 void vmaSetAllocationUserData(
16220  VmaAllocator allocator,
16221  VmaAllocation allocation,
16222  void* pUserData)
16223 {
16224  VMA_ASSERT(allocator && allocation);
16225 
16226  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16227 
16228  allocation->SetUserData(allocator, pUserData);
16229 
16230 #if VMA_RECORDING_ENABLED
16231  if(allocator->GetRecorder() != VMA_NULL)
16232  {
16233  allocator->GetRecorder()->RecordSetAllocationUserData(
16234  allocator->GetCurrentFrameIndex(),
16235  allocation,
16236  pUserData);
16237  }
16238 #endif
16239 }
16240 
16241 void vmaCreateLostAllocation(
16242  VmaAllocator allocator,
16243  VmaAllocation* pAllocation)
16244 {
16245  VMA_ASSERT(allocator && pAllocation);
16246 
16247  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
16248 
16249  allocator->CreateLostAllocation(pAllocation);
16250 
16251 #if VMA_RECORDING_ENABLED
16252  if(allocator->GetRecorder() != VMA_NULL)
16253  {
16254  allocator->GetRecorder()->RecordCreateLostAllocation(
16255  allocator->GetCurrentFrameIndex(),
16256  *pAllocation);
16257  }
16258 #endif
16259 }
16260 
16261 VkResult vmaMapMemory(
16262  VmaAllocator allocator,
16263  VmaAllocation allocation,
16264  void** ppData)
16265 {
16266  VMA_ASSERT(allocator && allocation && ppData);
16267 
16268  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16269 
16270  VkResult res = allocator->Map(allocation, ppData);
16271 
16272 #if VMA_RECORDING_ENABLED
16273  if(allocator->GetRecorder() != VMA_NULL)
16274  {
16275  allocator->GetRecorder()->RecordMapMemory(
16276  allocator->GetCurrentFrameIndex(),
16277  allocation);
16278  }
16279 #endif
16280 
16281  return res;
16282 }
16283 
16284 void vmaUnmapMemory(
16285  VmaAllocator allocator,
16286  VmaAllocation allocation)
16287 {
16288  VMA_ASSERT(allocator && allocation);
16289 
16290  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16291 
16292 #if VMA_RECORDING_ENABLED
16293  if(allocator->GetRecorder() != VMA_NULL)
16294  {
16295  allocator->GetRecorder()->RecordUnmapMemory(
16296  allocator->GetCurrentFrameIndex(),
16297  allocation);
16298  }
16299 #endif
16300 
16301  allocator->Unmap(allocation);
16302 }
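// Usage sketch (illustrative, user-side): writing to a mappable allocation.
// Mapping is reference-counted, so nested Map/Unmap pairs are valid.
// 'allocator', 'allocation', 'myData' and 'myDataSize' are assumed to exist.
/*
    void* mappedData = nullptr;
    VkResult res = vmaMapMemory(allocator, allocation, &mappedData);
    if(res == VK_SUCCESS)
    {
        memcpy(mappedData, myData, myDataSize);
        vmaUnmapMemory(allocator, allocation);
        // Needed only for non-HOST_COHERENT memory types:
        vmaFlushAllocation(allocator, allocation, 0, VK_WHOLE_SIZE);
    }
*/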
16303 
16304 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
16305 {
16306  VMA_ASSERT(allocator && allocation);
16307 
16308  VMA_DEBUG_LOG("vmaFlushAllocation");
16309 
16310  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16311 
16312  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
16313 
16314 #if VMA_RECORDING_ENABLED
16315  if(allocator->GetRecorder() != VMA_NULL)
16316  {
16317  allocator->GetRecorder()->RecordFlushAllocation(
16318  allocator->GetCurrentFrameIndex(),
16319  allocation, offset, size);
16320  }
16321 #endif
16322 }
16323 
16324 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
16325 {
16326  VMA_ASSERT(allocator && allocation);
16327 
16328  VMA_DEBUG_LOG("vmaInvalidateAllocation");
16329 
16330  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16331 
16332  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
16333 
16334 #if VMA_RECORDING_ENABLED
16335  if(allocator->GetRecorder() != VMA_NULL)
16336  {
16337  allocator->GetRecorder()->RecordInvalidateAllocation(
16338  allocator->GetCurrentFrameIndex(),
16339  allocation, offset, size);
16340  }
16341 #endif
16342 }
16343 
16344 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
16345 {
16346  VMA_ASSERT(allocator);
16347 
16348  VMA_DEBUG_LOG("vmaCheckCorruption");
16349 
16350  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16351 
16352  return allocator->CheckCorruption(memoryTypeBits);
16353 }
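// Note: corruption detection requires compiling this library with nonzero
// VMA_DEBUG_MARGIN and VMA_DEBUG_DETECT_CORRUPTION; otherwise the functions
// above return VK_ERROR_FEATURE_NOT_PRESENT. Illustrative configuration
// (the margin value 16 is an example assumption):
/*
    #define VMA_DEBUG_MARGIN 16
    #define VMA_DEBUG_DETECT_CORRUPTION 1
    #define VMA_IMPLEMENTATION
    #include "vk_mem_alloc.h"
*/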
16354 
16355 VkResult vmaDefragment(
16356  VmaAllocator allocator,
16357  VmaAllocation* pAllocations,
16358  size_t allocationCount,
16359  VkBool32* pAllocationsChanged,
16360  const VmaDefragmentationInfo *pDefragmentationInfo,
16361  VmaDefragmentationStats* pDefragmentationStats)
16362 {
16363  // Deprecated interface, reimplemented using the new one.
16364 
16365  VmaDefragmentationInfo2 info2 = {};
16366  info2.allocationCount = (uint32_t)allocationCount;
16367  info2.pAllocations = pAllocations;
16368  info2.pAllocationsChanged = pAllocationsChanged;
16369  if(pDefragmentationInfo != VMA_NULL)
16370  {
16371  info2.maxCpuAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
16372  info2.maxCpuBytesToMove = pDefragmentationInfo->maxBytesToMove;
16373  }
16374  else
16375  {
16376  info2.maxCpuAllocationsToMove = UINT32_MAX;
16377  info2.maxCpuBytesToMove = VK_WHOLE_SIZE;
16378  }
16379  // info2.flags, maxGpuAllocationsToMove, maxGpuBytesToMove, commandBuffer deliberately left zero.
16380 
16381  VmaDefragmentationContext ctx = VK_NULL_HANDLE;
16382  VkResult res = vmaDefragmentationBegin(allocator, &info2, pDefragmentationStats, &ctx);
16383  if(res == VK_NOT_READY)
16384  {
16385  res = vmaDefragmentationEnd(allocator, ctx);
16386  }
16387  return res;
16388 }
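// Usage sketch (illustrative, user-side) of the newer API that the deprecated
// wrapper above forwards to, restricted to CPU-side defragmentation.
// 'allocator', 'allocs' and 'allocCount' are assumed to exist; buffers or
// images bound to moved allocations must be recreated and rebound afterwards.
/*
    VmaDefragmentationInfo2 defragInfo = {};
    defragInfo.allocationCount = allocCount;
    defragInfo.pAllocations = allocs;
    defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
    defragInfo.maxCpuAllocationsToMove = UINT32_MAX;

    VmaDefragmentationContext defragCtx = VK_NULL_HANDLE;
    VkResult res = vmaDefragmentationBegin(allocator, &defragInfo, nullptr, &defragCtx);
    if(res == VK_NOT_READY)
    {
        res = vmaDefragmentationEnd(allocator, defragCtx); // Completes the CPU-side moves.
    }
*/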
16389 
16390 VkResult vmaDefragmentationBegin(
16391  VmaAllocator allocator,
16392  const VmaDefragmentationInfo2* pInfo,
16393  VmaDefragmentationStats* pStats,
16394  VmaDefragmentationContext *pContext)
16395 {
16396  VMA_ASSERT(allocator && pInfo && pContext);
16397 
16398  // Degenerate case: Nothing to defragment.
16399  if(pInfo->allocationCount == 0 && pInfo->poolCount == 0)
16400  {
16401  return VK_SUCCESS;
16402  }
16403 
16404  VMA_ASSERT(pInfo->allocationCount == 0 || pInfo->pAllocations != VMA_NULL);
16405  VMA_ASSERT(pInfo->poolCount == 0 || pInfo->pPools != VMA_NULL);
16406  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->allocationCount, pInfo->pAllocations));
16407  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->poolCount, pInfo->pPools));
16408 
16409  VMA_DEBUG_LOG("vmaDefragmentationBegin");
16410 
16411  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16412 
16413  VkResult res = allocator->DefragmentationBegin(*pInfo, pStats, pContext);
16414 
16415 #if VMA_RECORDING_ENABLED
16416  if(allocator->GetRecorder() != VMA_NULL)
16417  {
16418  allocator->GetRecorder()->RecordDefragmentationBegin(
16419  allocator->GetCurrentFrameIndex(), *pInfo, *pContext);
16420  }
16421 #endif
16422 
16423  return res;
16424 }
16425 
16426 VkResult vmaDefragmentationEnd(
16427  VmaAllocator allocator,
16428  VmaDefragmentationContext context)
16429 {
16430  VMA_ASSERT(allocator);
16431 
16432  VMA_DEBUG_LOG("vmaDefragmentationEnd");
16433 
16434  if(context != VK_NULL_HANDLE)
16435  {
16436  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16437 
16438 #if VMA_RECORDING_ENABLED
16439  if(allocator->GetRecorder() != VMA_NULL)
16440  {
16441  allocator->GetRecorder()->RecordDefragmentationEnd(
16442  allocator->GetCurrentFrameIndex(), context);
16443  }
16444 #endif
16445 
16446  return allocator->DefragmentationEnd(context);
16447  }
16448  else
16449  {
16450  return VK_SUCCESS;
16451  }
16452 }
16453 
16454 VkResult vmaBindBufferMemory(
16455  VmaAllocator allocator,
16456  VmaAllocation allocation,
16457  VkBuffer buffer)
16458 {
16459  VMA_ASSERT(allocator && allocation && buffer);
16460 
16461  VMA_DEBUG_LOG("vmaBindBufferMemory");
16462 
16463  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16464 
16465  return allocator->BindBufferMemory(allocation, buffer);
16466 }
16467 
16468 VkResult vmaBindImageMemory(
16469  VmaAllocator allocator,
16470  VmaAllocation allocation,
16471  VkImage image)
16472 {
16473  VMA_ASSERT(allocator && allocation && image);
16474 
16475  VMA_DEBUG_LOG("vmaBindImageMemory");
16476 
16477  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16478 
16479  return allocator->BindImageMemory(allocation, image);
16480 }
16481 
16482 VkResult vmaCreateBuffer(
16483  VmaAllocator allocator,
16484  const VkBufferCreateInfo* pBufferCreateInfo,
16485  const VmaAllocationCreateInfo* pAllocationCreateInfo,
16486  VkBuffer* pBuffer,
16487  VmaAllocation* pAllocation,
16488  VmaAllocationInfo* pAllocationInfo)
16489 {
16490  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
16491 
16492  if(pBufferCreateInfo->size == 0)
16493  {
16494  return VK_ERROR_VALIDATION_FAILED_EXT;
16495  }
16496 
16497  VMA_DEBUG_LOG("vmaCreateBuffer");
16498 
16499  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16500 
16501  *pBuffer = VK_NULL_HANDLE;
16502  *pAllocation = VK_NULL_HANDLE;
16503 
16504  // 1. Create VkBuffer.
16505  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
16506  allocator->m_hDevice,
16507  pBufferCreateInfo,
16508  allocator->GetAllocationCallbacks(),
16509  pBuffer);
16510  if(res >= 0)
16511  {
16512  // 2. vkGetBufferMemoryRequirements.
16513  VkMemoryRequirements vkMemReq = {};
16514  bool requiresDedicatedAllocation = false;
16515  bool prefersDedicatedAllocation = false;
16516  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
16517  requiresDedicatedAllocation, prefersDedicatedAllocation);
16518 
16519  // Make sure alignment requirements for specific buffer usages reported
16520  // in Physical Device Properties are included in alignment reported by memory requirements.
16521  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
16522  {
16523  VMA_ASSERT(vkMemReq.alignment %
16524  allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
16525  }
16526  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
16527  {
16528  VMA_ASSERT(vkMemReq.alignment %
16529  allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
16530  }
16531  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
16532  {
16533  VMA_ASSERT(vkMemReq.alignment %
16534  allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
16535  }
16536 
16537  // 3. Allocate memory using allocator.
16538  res = allocator->AllocateMemory(
16539  vkMemReq,
16540  requiresDedicatedAllocation,
16541  prefersDedicatedAllocation,
16542  *pBuffer, // dedicatedBuffer
16543  VK_NULL_HANDLE, // dedicatedImage
16544  *pAllocationCreateInfo,
16545  VMA_SUBALLOCATION_TYPE_BUFFER,
16546  1, // allocationCount
16547  pAllocation);
16548 
16549 #if VMA_RECORDING_ENABLED
16550  if(allocator->GetRecorder() != VMA_NULL)
16551  {
16552  allocator->GetRecorder()->RecordCreateBuffer(
16553  allocator->GetCurrentFrameIndex(),
16554  *pBufferCreateInfo,
16555  *pAllocationCreateInfo,
16556  *pAllocation);
16557  }
16558 #endif
16559 
16560  if(res >= 0)
16561  {
16562  // 4. Bind buffer with memory.
16563  res = allocator->BindBufferMemory(*pAllocation, *pBuffer);
16564  if(res >= 0)
16565  {
16566  // All steps succeeded.
16567  #if VMA_STATS_STRING_ENABLED
16568  (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
16569  #endif
16570  if(pAllocationInfo != VMA_NULL)
16571  {
16572  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
16573  }
16574 
16575  return VK_SUCCESS;
16576  }
16577  allocator->FreeMemory(
16578  1, // allocationCount
16579  pAllocation);
16580  *pAllocation = VK_NULL_HANDLE;
16581  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
16582  *pBuffer = VK_NULL_HANDLE;
16583  return res;
16584  }
16585  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
16586  *pBuffer = VK_NULL_HANDLE;
16587  return res;
16588  }
16589  return res;
16590 }
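// Usage sketch (illustrative, user-side): the typical way to create a buffer
// together with its memory in one call. 'allocator' is assumed to be a valid
// VmaAllocator; the size and usage flags are example values.
/*
    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufCreateInfo.size = 65536;
    bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkBuffer buf = VK_NULL_HANDLE;
    VmaAllocation alloc = VK_NULL_HANDLE;
    VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, nullptr);
    // ...
    vmaDestroyBuffer(allocator, buf, alloc);
*/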
16591 
16592 void vmaDestroyBuffer(
16593  VmaAllocator allocator,
16594  VkBuffer buffer,
16595  VmaAllocation allocation)
16596 {
16597  VMA_ASSERT(allocator);
16598 
16599  if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
16600  {
16601  return;
16602  }
16603 
16604  VMA_DEBUG_LOG("vmaDestroyBuffer");
16605 
16606  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16607 
16608 #if VMA_RECORDING_ENABLED
16609  if(allocator->GetRecorder() != VMA_NULL)
16610  {
16611  allocator->GetRecorder()->RecordDestroyBuffer(
16612  allocator->GetCurrentFrameIndex(),
16613  allocation);
16614  }
16615 #endif
16616 
16617  if(buffer != VK_NULL_HANDLE)
16618  {
16619  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
16620  }
16621 
16622  if(allocation != VK_NULL_HANDLE)
16623  {
16624  allocator->FreeMemory(
16625  1, // allocationCount
16626  &allocation);
16627  }
16628 }
16629 
16630 VkResult vmaCreateImage(
16631  VmaAllocator allocator,
16632  const VkImageCreateInfo* pImageCreateInfo,
16633  const VmaAllocationCreateInfo* pAllocationCreateInfo,
16634  VkImage* pImage,
16635  VmaAllocation* pAllocation,
16636  VmaAllocationInfo* pAllocationInfo)
16637 {
16638  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
16639 
16640  if(pImageCreateInfo->extent.width == 0 ||
16641  pImageCreateInfo->extent.height == 0 ||
16642  pImageCreateInfo->extent.depth == 0 ||
16643  pImageCreateInfo->mipLevels == 0 ||
16644  pImageCreateInfo->arrayLayers == 0)
16645  {
16646  return VK_ERROR_VALIDATION_FAILED_EXT;
16647  }
16648 
16649  VMA_DEBUG_LOG("vmaCreateImage");
16650 
16651  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16652 
16653  *pImage = VK_NULL_HANDLE;
16654  *pAllocation = VK_NULL_HANDLE;
16655 
16656  // 1. Create VkImage.
16657  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
16658  allocator->m_hDevice,
16659  pImageCreateInfo,
16660  allocator->GetAllocationCallbacks(),
16661  pImage);
16662  if(res >= 0)
16663  {
16664  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
16665  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
16666  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
16667 
16668  // 2. Allocate memory using allocator.
16669  VkMemoryRequirements vkMemReq = {};
16670  bool requiresDedicatedAllocation = false;
16671  bool prefersDedicatedAllocation = false;
16672  allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
16673  requiresDedicatedAllocation, prefersDedicatedAllocation);
16674 
16675  res = allocator->AllocateMemory(
16676  vkMemReq,
16677  requiresDedicatedAllocation,
16678  prefersDedicatedAllocation,
16679  VK_NULL_HANDLE, // dedicatedBuffer
16680  *pImage, // dedicatedImage
16681  *pAllocationCreateInfo,
16682  suballocType,
16683  1, // allocationCount
16684  pAllocation);
16685 
16686 #if VMA_RECORDING_ENABLED
16687  if(allocator->GetRecorder() != VMA_NULL)
16688  {
16689  allocator->GetRecorder()->RecordCreateImage(
16690  allocator->GetCurrentFrameIndex(),
16691  *pImageCreateInfo,
16692  *pAllocationCreateInfo,
16693  *pAllocation);
16694  }
16695 #endif
16696 
16697  if(res >= 0)
16698  {
16699  // 3. Bind image with memory.
16700  res = allocator->BindImageMemory(*pAllocation, *pImage);
16701  if(res >= 0)
16702  {
16703  // All steps succeeded.
16704  #if VMA_STATS_STRING_ENABLED
16705  (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
16706  #endif
16707  if(pAllocationInfo != VMA_NULL)
16708  {
16709  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
16710  }
16711 
16712  return VK_SUCCESS;
16713  }
16714  allocator->FreeMemory(
16715  1, // allocationCount
16716  pAllocation);
16717  *pAllocation = VK_NULL_HANDLE;
16718  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
16719  *pImage = VK_NULL_HANDLE;
16720  return res;
16721  }
16722  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
16723  *pImage = VK_NULL_HANDLE;
16724  return res;
16725  }
16726  return res;
16727 }
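// Usage sketch (illustrative, user-side), analogous to vmaCreateBuffer above;
// all image parameters are example values.
/*
    VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
    imgCreateInfo.imageType = VK_IMAGE_TYPE_2D;
    imgCreateInfo.extent = { 1024, 1024, 1 };
    imgCreateInfo.mipLevels = 1;
    imgCreateInfo.arrayLayers = 1;
    imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
    imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
    imgCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkImage img = VK_NULL_HANDLE;
    VmaAllocation alloc = VK_NULL_HANDLE;
    VkResult res = vmaCreateImage(allocator, &imgCreateInfo, &allocCreateInfo, &img, &alloc, nullptr);
    // ...
    vmaDestroyImage(allocator, img, alloc);
*/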
16728 
16729 void vmaDestroyImage(
16730  VmaAllocator allocator,
16731  VkImage image,
16732  VmaAllocation allocation)
16733 {
16734  VMA_ASSERT(allocator);
16735 
16736  if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
16737  {
16738  return;
16739  }
16740 
16741  VMA_DEBUG_LOG("vmaDestroyImage");
16742 
16743  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16744 
16745 #if VMA_RECORDING_ENABLED
16746  if(allocator->GetRecorder() != VMA_NULL)
16747  {
16748  allocator->GetRecorder()->RecordDestroyImage(
16749  allocator->GetCurrentFrameIndex(),
16750  allocation);
16751  }
16752 #endif
16753 
16754  if(image != VK_NULL_HANDLE)
16755  {
16756  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
16757  }
16758  if(allocation != VK_NULL_HANDLE)
16759  {
16760  allocator->FreeMemory(
16761  1, // allocationCount
16762  &allocation);
16763  }
16764 }
16765 
16766 #endif // #ifdef VMA_IMPLEMENTATION
1 //
2 // Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved.
3 //
4 // Permission is hereby granted, free of charge, to any person obtaining a copy
5 // of this software and associated documentation files (the "Software"), to deal
6 // in the Software without restriction, including without limitation the rights
7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 // copies of the Software, and to permit persons to whom the Software is
9 // furnished to do so, subject to the following conditions:
10 //
11 // The above copyright notice and this permission notice shall be included in
12 // all copies or substantial portions of the Software.
13 //
14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
20 // THE SOFTWARE.
21 //
22 
23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
25 
26 #ifdef __cplusplus
27 extern "C" {
28 #endif
29 
1614 /*
1615 Define this macro to 0/1 to disable/enable support for recording functionality,
1616 available through VmaAllocatorCreateInfo::pRecordSettings.
1617 */
1618 #ifndef VMA_RECORDING_ENABLED
1619  #ifdef _WIN32
1620  #define VMA_RECORDING_ENABLED 1
1621  #else
1622  #define VMA_RECORDING_ENABLED 0
1623  #endif
1624 #endif
1625 
1626 #ifndef NOMINMAX
1627  #define NOMINMAX // For windows.h
1628 #endif
1629 
1630 #ifndef VULKAN_H_
1631  #include <vulkan/vulkan.h>
1632 #endif
1633 
1634 #if VMA_RECORDING_ENABLED
1635  #include <windows.h>
1636 #endif
1637 
1638 #if !defined(VMA_DEDICATED_ALLOCATION)
1639  #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
1640  #define VMA_DEDICATED_ALLOCATION 1
1641  #else
1642  #define VMA_DEDICATED_ALLOCATION 0
1643  #endif
1644 #endif
1645 
1655 VK_DEFINE_HANDLE(VmaAllocator)
1656 
1657 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
1659  VmaAllocator allocator,
1660  uint32_t memoryType,
1661  VkDeviceMemory memory,
1662  VkDeviceSize size);
1664 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
1665  VmaAllocator allocator,
1666  uint32_t memoryType,
1667  VkDeviceMemory memory,
1668  VkDeviceSize size);
1669 
1683 
1713 
1716 typedef VkFlags VmaAllocatorCreateFlags;
1717 
1722 typedef struct VmaVulkanFunctions {
1723  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
1724  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
1725  PFN_vkAllocateMemory vkAllocateMemory;
1726  PFN_vkFreeMemory vkFreeMemory;
1727  PFN_vkMapMemory vkMapMemory;
1728  PFN_vkUnmapMemory vkUnmapMemory;
1729  PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
1730  PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
1731  PFN_vkBindBufferMemory vkBindBufferMemory;
1732  PFN_vkBindImageMemory vkBindImageMemory;
1733  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
1734  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
1735  PFN_vkCreateBuffer vkCreateBuffer;
1736  PFN_vkDestroyBuffer vkDestroyBuffer;
1737  PFN_vkCreateImage vkCreateImage;
1738  PFN_vkDestroyImage vkDestroyImage;
1739  PFN_vkCmdCopyBuffer vkCmdCopyBuffer;
1740 #if VMA_DEDICATED_ALLOCATION
1741  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
1742  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
1743 #endif
1744 } VmaVulkanFunctions;
1745 
1747 typedef enum VmaRecordFlagBits {
1753 } VmaRecordFlagBits;
1754 
1757 typedef VkFlags VmaRecordFlags;
1758 
1760 typedef struct VmaRecordSettings
1761 {
1771  const char* pFilePath;
1772 } VmaRecordSettings;
1773 
1775 typedef struct VmaAllocatorCreateInfo
1776 {
1780 
1781  VkPhysicalDevice physicalDevice;
1783 
1784  VkDevice device;
1786 
1789 
1790  const VkAllocationCallbacks* pAllocationCallbacks;
1792 
1832  const VkDeviceSize* pHeapSizeLimit;
1852 } VmaAllocatorCreateInfo;
1853 
1855 VkResult vmaCreateAllocator(
1856  const VmaAllocatorCreateInfo* pCreateInfo,
1857  VmaAllocator* pAllocator);
1858 
1860 void vmaDestroyAllocator(
1861  VmaAllocator allocator);
1862 
1867 void vmaGetPhysicalDeviceProperties(
1868  VmaAllocator allocator,
1869  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
1870 
1875 void vmaGetMemoryProperties(
1876  VmaAllocator allocator,
1877  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
1878 
1885 void vmaGetMemoryTypeProperties(
1886  VmaAllocator allocator,
1887  uint32_t memoryTypeIndex,
1888  VkMemoryPropertyFlags* pFlags);
1889 
1898 void vmaSetCurrentFrameIndex(
1899  VmaAllocator allocator,
1900  uint32_t frameIndex);
1901 
1904 typedef struct VmaStatInfo
1905 {
1907  uint32_t blockCount;
1913  VkDeviceSize usedBytes;
1915  VkDeviceSize unusedBytes;
1918 } VmaStatInfo;
1919 
1921 typedef struct VmaStats
1922 {
1923  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
1924  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
1926 } VmaStats;
1927 
1929 void vmaCalculateStats(
1930  VmaAllocator allocator,
1931  VmaStats* pStats);
1932 
1933 #define VMA_STATS_STRING_ENABLED 1
1934 
1935 #if VMA_STATS_STRING_ENABLED
1936 
1938 
1940 void vmaBuildStatsString(
1941  VmaAllocator allocator,
1942  char** ppStatsString,
1943  VkBool32 detailedMap);
1944 
1945 void vmaFreeStatsString(
1946  VmaAllocator allocator,
1947  char* pStatsString);
1948 
1949 #endif // #if VMA_STATS_STRING_ENABLED
1950 
1959 VK_DEFINE_HANDLE(VmaPool)
1960 
1961 typedef enum VmaMemoryUsage
1962 {
2011 } VmaMemoryUsage;
2012 
2027 
2082 
2098 
2108 
2115 
2119 
2120 typedef struct VmaAllocationCreateInfo
2121 {
2134  VkMemoryPropertyFlags requiredFlags;
2139  VkMemoryPropertyFlags preferredFlags;
2147  uint32_t memoryTypeBits;
2160  void* pUserData;
2161 } VmaAllocationCreateInfo;
2162 
2179 VkResult vmaFindMemoryTypeIndex(
2180  VmaAllocator allocator,
2181  uint32_t memoryTypeBits,
2182  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2183  uint32_t* pMemoryTypeIndex);
2184 
2197 VkResult vmaFindMemoryTypeIndexForBufferInfo(
2198  VmaAllocator allocator,
2199  const VkBufferCreateInfo* pBufferCreateInfo,
2200  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2201  uint32_t* pMemoryTypeIndex);
2202 
2215 VkResult vmaFindMemoryTypeIndexForImageInfo(
2216  VmaAllocator allocator,
2217  const VkImageCreateInfo* pImageCreateInfo,
2218  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2219  uint32_t* pMemoryTypeIndex);
2220 
2241 
2258 
2269 
2275 
2278 typedef VkFlags VmaPoolCreateFlags;
2279 
2282 typedef struct VmaPoolCreateInfo {
2297  VkDeviceSize blockSize;
2325 } VmaPoolCreateInfo;
2326 
2329 typedef struct VmaPoolStats {
2332  VkDeviceSize size;
2335  VkDeviceSize unusedSize;
2348  VkDeviceSize unusedRangeSizeMax;
2351  size_t blockCount;
2352 } VmaPoolStats;
2353 
2360 VkResult vmaCreatePool(
2361  VmaAllocator allocator,
2362  const VmaPoolCreateInfo* pCreateInfo,
2363  VmaPool* pPool);
2364 
2367 void vmaDestroyPool(
2368  VmaAllocator allocator,
2369  VmaPool pool);
2370 
2377 void vmaGetPoolStats(
2378  VmaAllocator allocator,
2379  VmaPool pool,
2380  VmaPoolStats* pPoolStats);
2381 
2388 void vmaMakePoolAllocationsLost(
2389  VmaAllocator allocator,
2390  VmaPool pool,
2391  size_t* pLostAllocationCount);
2392 
2407 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool);
2408 
2433 VK_DEFINE_HANDLE(VmaAllocation)
2434 
2435 
2437 typedef struct VmaAllocationInfo {
2442  uint32_t memoryType;
2451  VkDeviceMemory deviceMemory;
2456  VkDeviceSize offset;
2461  VkDeviceSize size;
2475  void* pUserData;
2476 } VmaAllocationInfo;
2477 
2488 VkResult vmaAllocateMemory(
2489  VmaAllocator allocator,
2490  const VkMemoryRequirements* pVkMemoryRequirements,
2491  const VmaAllocationCreateInfo* pCreateInfo,
2492  VmaAllocation* pAllocation,
2493  VmaAllocationInfo* pAllocationInfo);
2494 
2514 VkResult vmaAllocateMemoryPages(
2515  VmaAllocator allocator,
2516  const VkMemoryRequirements* pVkMemoryRequirements,
2517  const VmaAllocationCreateInfo* pCreateInfo,
2518  size_t allocationCount,
2519  VmaAllocation* pAllocations,
2520  VmaAllocationInfo* pAllocationInfo);
2521 
2528 VkResult vmaAllocateMemoryForBuffer(
2529  VmaAllocator allocator,
2530  VkBuffer buffer,
2531  const VmaAllocationCreateInfo* pCreateInfo,
2532  VmaAllocation* pAllocation,
2533  VmaAllocationInfo* pAllocationInfo);
2534 
2536 VkResult vmaAllocateMemoryForImage(
2537  VmaAllocator allocator,
2538  VkImage image,
2539  const VmaAllocationCreateInfo* pCreateInfo,
2540  VmaAllocation* pAllocation,
2541  VmaAllocationInfo* pAllocationInfo);
2542 
2547 void vmaFreeMemory(
2548  VmaAllocator allocator,
2549  VmaAllocation allocation);
2550 
2561 void vmaFreeMemoryPages(
2562  VmaAllocator allocator,
2563  size_t allocationCount,
2564  VmaAllocation* pAllocations);
2565 
2586 VkResult vmaResizeAllocation(
2587  VmaAllocator allocator,
2588  VmaAllocation allocation,
2589  VkDeviceSize newSize);
2590 
2607 void vmaGetAllocationInfo(
2608  VmaAllocator allocator,
2609  VmaAllocation allocation,
2610  VmaAllocationInfo* pAllocationInfo);
2611 
2626 VkBool32 vmaTouchAllocation(
2627  VmaAllocator allocator,
2628  VmaAllocation allocation);
2629 
2643 void vmaSetAllocationUserData(
2644  VmaAllocator allocator,
2645  VmaAllocation allocation,
2646  void* pUserData);
2647 
2658 void vmaCreateLostAllocation(
2659  VmaAllocator allocator,
2660  VmaAllocation* pAllocation);
2661 
2696 VkResult vmaMapMemory(
2697  VmaAllocator allocator,
2698  VmaAllocation allocation,
2699  void** ppData);
2700 
2705 void vmaUnmapMemory(
2706  VmaAllocator allocator,
2707  VmaAllocation allocation);
2708 
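/* Example usage (a minimal sketch; `allocation` must be created in a
host-visible memory type, e.g. with VMA_MEMORY_USAGE_CPU_ONLY):

    void* mappedData;
    if(vmaMapMemory(allocator, allocation, &mappedData) == VK_SUCCESS)
    {
        memcpy(mappedData, &constantBufferData, sizeof(constantBufferData));
        vmaUnmapMemory(allocator, allocation);
    }
*/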
2721 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
2722 
2735 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
2736 
2753 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits);
2754 
2761 VK_DEFINE_HANDLE(VmaDefragmentationContext)
2762 
2763 typedef enum VmaDefragmentationFlagBits {
2764  VMA_DEFRAGMENTATION_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
2765 } VmaDefragmentationFlagBits;
2767 typedef VkFlags VmaDefragmentationFlags;
2768 
2773 typedef struct VmaDefragmentationInfo2 {
2776  /// Reserved for future use. Should be 0.
2777  VmaDefragmentationFlags flags;
2781  /// Number of allocations in pAllocations array.
2782  uint32_t allocationCount;
2787  VmaAllocation* pAllocations;
2793  /// Optional, can be null. If not null, must point to an array of allocationCount VkBool32 values.
2794  VkBool32* pAllocationsChanged;
2797  uint32_t poolCount;
2806  VmaPool* pPools;
2818  VkDeviceSize maxCpuBytesToMove;
2823  uint32_t maxCpuAllocationsToMove;
2828  VkDeviceSize maxGpuBytesToMove;
2833  uint32_t maxGpuAllocationsToMove;
2842  VkCommandBuffer commandBuffer;
2843 } VmaDefragmentationInfo2;
2844 
2849 typedef struct VmaDefragmentationInfo {
2854  VkDeviceSize maxBytesToMove;
2857  /// Maximum number of allocations that can be moved to a different place.
2858  uint32_t maxAllocationsToMove;
2860 } VmaDefragmentationInfo;
2861 
2863 typedef struct VmaDefragmentationStats {
2865  VkDeviceSize bytesMoved;
2867  VkDeviceSize bytesFreed;
2868  /// Number of allocations that have been moved to different places.
2869  uint32_t allocationsMoved;
2870  /// Number of empty VkDeviceMemory objects that have been released to the system.
2871  uint32_t deviceMemoryBlocksFreed;
2872 } VmaDefragmentationStats;
2873 
2900 VkResult vmaDefragmentationBegin(
2901  VmaAllocator allocator,
2902  const VmaDefragmentationInfo2* pInfo,
2903  VmaDefragmentationStats* pStats,
2904  VmaDefragmentationContext *pContext);
2905 
2911 VkResult vmaDefragmentationEnd(
2912  VmaAllocator allocator,
2913  VmaDefragmentationContext context);
2914 
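/* Example usage (a minimal sketch; `allocations` is an application-provided
array of `allocationCount` VmaAllocation handles that are candidates for moving):

    VmaDefragmentationInfo2 defragInfo = {};
    defragInfo.allocationCount = allocationCount;
    defragInfo.pAllocations = allocations;
    defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
    defragInfo.maxCpuAllocationsToMove = UINT32_MAX;

    VmaDefragmentationContext defragCtx;
    vmaDefragmentationBegin(allocator, &defragInfo, nullptr, &defragCtx);
    vmaDefragmentationEnd(allocator, defragCtx);
    // Buffers/images bound to moved allocations must then be recreated
    // and rebound by the application.
*/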
2955 VkResult vmaDefragment(
2956  VmaAllocator allocator,
2957  VmaAllocation* pAllocations,
2958  size_t allocationCount,
2959  VkBool32* pAllocationsChanged,
2960  const VmaDefragmentationInfo *pDefragmentationInfo,
2961  VmaDefragmentationStats* pDefragmentationStats);
2962 
2975 VkResult vmaBindBufferMemory(
2976  VmaAllocator allocator,
2977  VmaAllocation allocation,
2978  VkBuffer buffer);
2979 
2992 VkResult vmaBindImageMemory(
2993  VmaAllocator allocator,
2994  VmaAllocation allocation,
2995  VkImage image);
2996 
3023 VkResult vmaCreateBuffer(
3024  VmaAllocator allocator,
3025  const VkBufferCreateInfo* pBufferCreateInfo,
3026  const VmaAllocationCreateInfo* pAllocationCreateInfo,
3027  VkBuffer* pBuffer,
3028  VmaAllocation* pAllocation,
3029  VmaAllocationInfo* pAllocationInfo);
3030 
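/* Example usage (a minimal sketch):

    VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufferInfo.size = 65536;
    bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkBuffer buffer;
    VmaAllocation allocation;
    VkResult res = vmaCreateBuffer(
        allocator, &bufferInfo, &allocCreateInfo, &buffer, &allocation, nullptr);
    // ...
    vmaDestroyBuffer(allocator, buffer, allocation);
*/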
3042 void vmaDestroyBuffer(
3043  VmaAllocator allocator,
3044  VkBuffer buffer,
3045  VmaAllocation allocation);
3046 
3048 VkResult vmaCreateImage(
3049  VmaAllocator allocator,
3050  const VkImageCreateInfo* pImageCreateInfo,
3051  const VmaAllocationCreateInfo* pAllocationCreateInfo,
3052  VkImage* pImage,
3053  VmaAllocation* pAllocation,
3054  VmaAllocationInfo* pAllocationInfo);
3055 
3067 void vmaDestroyImage(
3068  VmaAllocator allocator,
3069  VkImage image,
3070  VmaAllocation allocation);
3071 
3072 #ifdef __cplusplus
3073 }
3074 #endif
3075 
3076 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
3077 
3078 // For Visual Studio IntelliSense.
3079 #if defined(__cplusplus) && defined(__INTELLISENSE__)
3080 #define VMA_IMPLEMENTATION
3081 #endif
3082 
3083 #ifdef VMA_IMPLEMENTATION
3084 #undef VMA_IMPLEMENTATION
3085 
3086 #include <cstdint>
3087 #include <cstdlib>
3088 #include <cstring>
3089 #include <cstdio> // for snprintf, used by the VMA_STATS_STRING_ENABLED helpers
3090 /*******************************************************************************
3091 CONFIGURATION SECTION
3092 
3093 Define some of these macros before each #include of this header, or change them
3094 here if you need behavior other than the default, depending on your environment.
3095 */
3096 
3097 /*
3098 Define this macro to 1 to make the library fetch pointers to Vulkan functions
3099 internally, like:
3100 
3101  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
3102 
3103 Define to 0 if you are going to provide your own pointers to Vulkan functions via
3104 VmaAllocatorCreateInfo::pVulkanFunctions.
3105 */
3106 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
3107 #define VMA_STATIC_VULKAN_FUNCTIONS 1
3108 #endif
3109 
3110 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
3111 //#define VMA_USE_STL_CONTAINERS 1
3112 
3113 /* Set this macro to 1 to make the library include and use STL containers:
3114 std::pair, std::vector, std::list, std::unordered_map.
3115 
3116 Set it to 0 or leave it undefined to make the library use its own implementation of
3117 the containers.
3118 */
3119 #if VMA_USE_STL_CONTAINERS
3120  #define VMA_USE_STL_VECTOR 1
3121  #define VMA_USE_STL_UNORDERED_MAP 1
3122  #define VMA_USE_STL_LIST 1
3123 #endif
3124 
3125 #ifndef VMA_USE_STL_SHARED_MUTEX
3126  // Minimum Visual Studio 2015 Update 2
3127  #if defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023918
3128  #define VMA_USE_STL_SHARED_MUTEX 1
3129  #endif
3130 #endif
3131 
3132 #if VMA_USE_STL_VECTOR
3133  #include <vector>
3134 #endif
3135 
3136 #if VMA_USE_STL_UNORDERED_MAP
3137  #include <unordered_map>
3138 #endif
3139 
3140 #if VMA_USE_STL_LIST
3141  #include <list>
3142 #endif
3143 
3144 /*
3145 Following headers are used in this CONFIGURATION section only, so feel free to
3146 remove them if not needed.
3147 */
3148 #include <cassert> // for assert
3149 #include <algorithm> // for min, max
3150 #include <mutex>
3151 #include <atomic> // for std::atomic
3152 
3153 #ifndef VMA_NULL
3154  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
3155  #define VMA_NULL nullptr
3156 #endif
3157 
3158 #if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
3159 #include <cstdlib>
#include <malloc.h> // for memalign() on older Android API levels
3160 void *aligned_alloc(size_t alignment, size_t size)
3161 {
3162  // alignment must be >= sizeof(void*)
3163  if(alignment < sizeof(void*))
3164  {
3165  alignment = sizeof(void*);
3166  }
3167 
3168  return memalign(alignment, size);
3169 }
3170 #elif defined(__APPLE__) || defined(__ANDROID__)
3171 #include <cstdlib>
3172 void *aligned_alloc(size_t alignment, size_t size)
3173 {
3174  // alignment must be >= sizeof(void*)
3175  if(alignment < sizeof(void*))
3176  {
3177  alignment = sizeof(void*);
3178  }
3179 
3180  void *pointer;
3181  if(posix_memalign(&pointer, alignment, size) == 0)
3182  return pointer;
3183  return VMA_NULL;
3184 }
3185 #endif
3186 
3187 // If your compiler is not compatible with C++11 and the definition of the
3188 // aligned_alloc() function is missing, uncommenting the following line may help:
3189 
3190 //#include <malloc.h>
3191 
3192 // Normal assert to check for programmer's errors, especially in Debug configuration.
3193 #ifndef VMA_ASSERT
3194  #ifdef _DEBUG
3195  #define VMA_ASSERT(expr) assert(expr)
3196  #else
3197  #define VMA_ASSERT(expr)
3198  #endif
3199 #endif
3200 
3201 // Assert that will be called very often, e.g. inside data structures such as operator[].
3202 // Making it non-empty can make the program slow.
3203 #ifndef VMA_HEAVY_ASSERT
3204  #ifdef _DEBUG
3205  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
3206  #else
3207  #define VMA_HEAVY_ASSERT(expr)
3208  #endif
3209 #endif
3210 
3211 #ifndef VMA_ALIGN_OF
3212  #define VMA_ALIGN_OF(type) (__alignof(type))
3213 #endif
3214 
3215 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
3216  #if defined(_WIN32)
3217  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
3218  #else
3219  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
3220  #endif
3221 #endif
3222 
3223 #ifndef VMA_SYSTEM_FREE
3224  #if defined(_WIN32)
3225  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
3226  #else
3227  #define VMA_SYSTEM_FREE(ptr) free(ptr)
3228  #endif
3229 #endif
3230 
3231 #ifndef VMA_MIN
3232  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
3233 #endif
3234 
3235 #ifndef VMA_MAX
3236  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
3237 #endif
3238 
3239 #ifndef VMA_SWAP
3240  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
3241 #endif
3242 
3243 #ifndef VMA_SORT
3244  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
3245 #endif
3246 
3247 #ifndef VMA_DEBUG_LOG
3248  #define VMA_DEBUG_LOG(format, ...)
3249  /*
3250  #define VMA_DEBUG_LOG(format, ...) do { \
3251  printf(format, __VA_ARGS__); \
3252  printf("\n"); \
3253  } while(false)
3254  */
3255 #endif
3256 
3257 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
3258 #if VMA_STATS_STRING_ENABLED
3259  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
3260  {
3261  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
3262  }
3263  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
3264  {
3265  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
3266  }
3267  static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
3268  {
3269  snprintf(outStr, strLen, "%p", ptr);
3270  }
3271 #endif
3272 
3273 #ifndef VMA_MUTEX
3274  class VmaMutex
3275  {
3276  public:
3277  void Lock() { m_Mutex.lock(); }
3278  void Unlock() { m_Mutex.unlock(); }
3279  private:
3280  std::mutex m_Mutex;
3281  };
3282  #define VMA_MUTEX VmaMutex
3283 #endif
3284 
3285 // Read-write mutex, where "read" is shared access, "write" is exclusive access.
3286 #ifndef VMA_RW_MUTEX
3287  #if VMA_USE_STL_SHARED_MUTEX
3288  // Use std::shared_mutex from C++17.
3289  #include <shared_mutex>
3290  class VmaRWMutex
3291  {
3292  public:
3293  void LockRead() { m_Mutex.lock_shared(); }
3294  void UnlockRead() { m_Mutex.unlock_shared(); }
3295  void LockWrite() { m_Mutex.lock(); }
3296  void UnlockWrite() { m_Mutex.unlock(); }
3297  private:
3298  std::shared_mutex m_Mutex;
3299  };
3300  #define VMA_RW_MUTEX VmaRWMutex
3301  #elif defined(_WIN32)
3302  // Use SRWLOCK from WinAPI.
3303  class VmaRWMutex
3304  {
3305  public:
3306  VmaRWMutex() { InitializeSRWLock(&m_Lock); }
3307  void LockRead() { AcquireSRWLockShared(&m_Lock); }
3308  void UnlockRead() { ReleaseSRWLockShared(&m_Lock); }
3309  void LockWrite() { AcquireSRWLockExclusive(&m_Lock); }
3310  void UnlockWrite() { ReleaseSRWLockExclusive(&m_Lock); }
3311  private:
3312  SRWLOCK m_Lock;
3313  };
3314  #define VMA_RW_MUTEX VmaRWMutex
3315  #else
3316  // Less efficient fallback: Use normal mutex.
3317  class VmaRWMutex
3318  {
3319  public:
3320  void LockRead() { m_Mutex.Lock(); }
3321  void UnlockRead() { m_Mutex.Unlock(); }
3322  void LockWrite() { m_Mutex.Lock(); }
3323  void UnlockWrite() { m_Mutex.Unlock(); }
3324  private:
3325  VMA_MUTEX m_Mutex;
3326  };
3327  #define VMA_RW_MUTEX VmaRWMutex
3328  #endif // #if VMA_USE_STL_SHARED_MUTEX
3329 #endif // #ifndef VMA_RW_MUTEX
3330 
3331 /*
3332 If providing your own implementation, you need to implement a subset of std::atomic:
3333 
3334 - Constructor(uint32_t desired)
3335 - uint32_t load() const
3336 - void store(uint32_t desired)
3337 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
3338 */
3339 #ifndef VMA_ATOMIC_UINT32
3340  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
3341 #endif
3342 
3343 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
3344  /*
3345  Every allocation will have its own memory block.
3346  Define to 1 for debugging purposes only.
3347  */
3348  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
3349 #endif
3350 
3351 #ifndef VMA_DEBUG_ALIGNMENT
3352  /*
3353  Minimum alignment of all allocations, in bytes.
3354  Set to more than 1 for debugging purposes only. Must be a power of two.
3355  */
3356  #define VMA_DEBUG_ALIGNMENT (1)
3357 #endif
3358 
3359 #ifndef VMA_DEBUG_MARGIN
3360  /*
3361  Minimum margin before and after every allocation, in bytes.
3362  Set nonzero for debugging purposes only.
3363  */
3364  #define VMA_DEBUG_MARGIN (0)
3365 #endif
3366 
3367 #ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
3368  /*
3369  Define this macro to 1 to automatically fill new allocations and destroyed
3370  allocations with some bit pattern.
3371  */
3372  #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
3373 #endif
3374 
3375 #ifndef VMA_DEBUG_DETECT_CORRUPTION
3376  /*
3377  Define this macro to 1 to enable functions: vmaCheckCorruption, vmaCheckPoolCorruption.
3378  Requires VMA_DEBUG_MARGIN to be defined to a nonzero value that is a multiple of sizeof(uint32_t).
3379  */
3381  #define VMA_DEBUG_DETECT_CORRUPTION (0)
3382 #endif
3383 
3384 #ifndef VMA_DEBUG_GLOBAL_MUTEX
3385  /*
3386  Set this to 1 for debugging purposes only, to enable a single mutex protecting all
3387  entry calls to the library. Can be useful for debugging multithreading issues.
3388  */
3389  #define VMA_DEBUG_GLOBAL_MUTEX (0)
3390 #endif
3391 
3392 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
3393  /*
3394  Minimum value for VkPhysicalDeviceLimits::bufferImageGranularity.
3395  Set to more than 1 for debugging purposes only. Must be a power of two.
3396  */
3397  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
3398 #endif
3399 
3400 #ifndef VMA_SMALL_HEAP_MAX_SIZE
3401  /// Maximum size of a memory heap in Vulkan to consider it "small".
3402  #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
3403 #endif
3404 
3405 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
3406  /// Default size of a block allocated as single VkDeviceMemory from a "large" heap.
3407  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
3408 #endif
3409 
3410 #ifndef VMA_CLASS_NO_COPY
3411  #define VMA_CLASS_NO_COPY(className) \
3412  private: \
3413  className(const className&) = delete; \
3414  className& operator=(const className&) = delete;
3415 #endif
3416 
3417 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
3418 
3419 // Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
3420 static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;
3421 
3422 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
3423 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
3424 
3425 /*******************************************************************************
3426 END OF CONFIGURATION
3427 */
3428 
3429 static const uint32_t VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET = 0x10000000u;
3430 
3431 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
3432  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
3433 
3434 // Returns number of bits set to 1 in (v).
3435 static inline uint32_t VmaCountBitsSet(uint32_t v)
3436 {
3437  uint32_t c = v - ((v >> 1) & 0x55555555);
3438  c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
3439  c = ((c >> 4) + c) & 0x0F0F0F0F;
3440  c = ((c >> 8) + c) & 0x00FF00FF;
3441  c = ((c >> 16) + c) & 0x0000FFFF;
3442  return c;
3443 }
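// For example, VmaCountBitsSet(0xB) == 3, since 0xB is binary 1011.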
3444 
3445 // Aligns given value up to the nearest multiple of align value. For example: VmaAlignUp(11, 8) = 16.
3446 // Use types like uint32_t, uint64_t as T.
3447 template <typename T>
3448 static inline T VmaAlignUp(T val, T align)
3449 {
3450  return (val + align - 1) / align * align;
3451 }
3452 // Aligns given value down to the nearest multiple of align value. For example: VmaAlignDown(11, 8) = 8.
3453 // Use types like uint32_t, uint64_t as T.
3454 template <typename T>
3455 static inline T VmaAlignDown(T val, T align)
3456 {
3457  return val / align * align;
3458 }
3459 
3460 // Division with mathematical rounding to the nearest integer.
3461 template <typename T>
3462 static inline T VmaRoundDiv(T x, T y)
3463 {
3464  return (x + (y / (T)2)) / y;
3465 }
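// For example, VmaRoundDiv<uint32_t>(10, 4) == 3 and VmaRoundDiv<uint32_t>(9, 4) == 2.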
3466 
3467 /*
3468 Returns true if given number is a power of two.
3469 T must be an unsigned integer, or a signed integer that is always nonnegative.
3470 For 0 returns true.
3471 */
3472 template <typename T>
3473 inline bool VmaIsPow2(T x)
3474 {
3475  return (x & (x-1)) == 0;
3476 }
3477 
3478 // Returns smallest power of 2 greater or equal to v.
3479 static inline uint32_t VmaNextPow2(uint32_t v)
3480 {
3481  v--;
3482  v |= v >> 1;
3483  v |= v >> 2;
3484  v |= v >> 4;
3485  v |= v >> 8;
3486  v |= v >> 16;
3487  v++;
3488  return v;
3489 }
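// For example, VmaNextPow2(17u) == 32 and VmaNextPow2(32u) == 32.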
3490 static inline uint64_t VmaNextPow2(uint64_t v)
3491 {
3492  v--;
3493  v |= v >> 1;
3494  v |= v >> 2;
3495  v |= v >> 4;
3496  v |= v >> 8;
3497  v |= v >> 16;
3498  v |= v >> 32;
3499  v++;
3500  return v;
3501 }
3502 
3503 // Returns largest power of 2 less or equal to v.
3504 static inline uint32_t VmaPrevPow2(uint32_t v)
3505 {
3506  v |= v >> 1;
3507  v |= v >> 2;
3508  v |= v >> 4;
3509  v |= v >> 8;
3510  v |= v >> 16;
3511  v = v ^ (v >> 1);
3512  return v;
3513 }
3514 static inline uint64_t VmaPrevPow2(uint64_t v)
3515 {
3516  v |= v >> 1;
3517  v |= v >> 2;
3518  v |= v >> 4;
3519  v |= v >> 8;
3520  v |= v >> 16;
3521  v |= v >> 32;
3522  v = v ^ (v >> 1);
3523  return v;
3524 }
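// For example, VmaPrevPow2(17u) == 16 and VmaPrevPow2(16u) == 16.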
3525 
3526 static inline bool VmaStrIsEmpty(const char* pStr)
3527 {
3528  return pStr == VMA_NULL || *pStr == '\0';
3529 }
3530 
3531 static const char* VmaAlgorithmToStr(uint32_t algorithm)
3532 {
3533  switch(algorithm)
3534  {
3535  case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
3536  return "Linear";
3537  case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
3538  return "Buddy";
3539  case 0:
3540  return "Default";
3541  default:
3542  VMA_ASSERT(0);
3543  return "";
3544  }
3545 }
3546 
3547 #ifndef VMA_SORT
3548 
3549 template<typename Iterator, typename Compare>
3550 Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
3551 {
3552  Iterator centerValue = end; --centerValue;
3553  Iterator insertIndex = beg;
3554  for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
3555  {
3556  if(cmp(*memTypeIndex, *centerValue))
3557  {
3558  if(insertIndex != memTypeIndex)
3559  {
3560  VMA_SWAP(*memTypeIndex, *insertIndex);
3561  }
3562  ++insertIndex;
3563  }
3564  }
3565  if(insertIndex != centerValue)
3566  {
3567  VMA_SWAP(*insertIndex, *centerValue);
3568  }
3569  return insertIndex;
3570 }
3571 
3572 template<typename Iterator, typename Compare>
3573 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
3574 {
3575  if(beg < end)
3576  {
3577  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
3578  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
3579  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
3580  }
3581 }
3582 
3583 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
3584 
3585 #endif // #ifndef VMA_SORT
3586 
3587 /*
3588 Returns true if two memory blocks occupy overlapping pages.
3589 ResourceA must be at a smaller memory offset than ResourceB.
3590 
3591 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
3592 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
3593 */
3594 static inline bool VmaBlocksOnSamePage(
3595  VkDeviceSize resourceAOffset,
3596  VkDeviceSize resourceASize,
3597  VkDeviceSize resourceBOffset,
3598  VkDeviceSize pageSize)
3599 {
3600  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
3601  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
3602  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
3603  VkDeviceSize resourceBStart = resourceBOffset;
3604  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
3605  return resourceAEndPage == resourceBStartPage;
3606 }
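// For example, with pageSize = 4096: a resource at offset 0 with size 4000 ends on
// page 0, and a resource starting at offset 4050 also begins on page 0, so this
// returns true; if the second resource started at offset 4096, it would return false.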
3607 
3608 enum VmaSuballocationType
3609 {
3610  VMA_SUBALLOCATION_TYPE_FREE = 0,
3611  VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
3612  VMA_SUBALLOCATION_TYPE_BUFFER = 2,
3613  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
3614  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
3615  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
3616  VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
3617 };
3618 
3619 /*
3620 Returns true if given suballocation types could conflict and must respect
3621 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
3622 or linear image and another one is optimal image. If type is unknown, behave
3623 conservatively.
3624 */
3625 static inline bool VmaIsBufferImageGranularityConflict(
3626  VmaSuballocationType suballocType1,
3627  VmaSuballocationType suballocType2)
3628 {
3629  if(suballocType1 > suballocType2)
3630  {
3631  VMA_SWAP(suballocType1, suballocType2);
3632  }
3633 
3634  switch(suballocType1)
3635  {
3636  case VMA_SUBALLOCATION_TYPE_FREE:
3637  return false;
3638  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
3639  return true;
3640  case VMA_SUBALLOCATION_TYPE_BUFFER:
3641  return
3642  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
3643  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3644  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
3645  return
3646  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
3647  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
3648  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3649  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
3650  return
3651  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3652  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
3653  return false;
3654  default:
3655  VMA_ASSERT(0);
3656  return true;
3657  }
3658 }
3659 
3660 static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
3661 {
3662  uint32_t* pDst = (uint32_t*)((char*)pData + offset);
3663  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
3664  for(size_t i = 0; i < numberCount; ++i, ++pDst)
3665  {
3666  *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
3667  }
3668 }
3669 
3670 static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
3671 {
3672  const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
3673  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
3674  for(size_t i = 0; i < numberCount; ++i, ++pSrc)
3675  {
3676  if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
3677  {
3678  return false;
3679  }
3680  }
3681  return true;
3682 }
3683 
3684 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
3685 struct VmaMutexLock
3686 {
3687  VMA_CLASS_NO_COPY(VmaMutexLock)
3688 public:
3689  VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
3690  m_pMutex(useMutex ? &mutex : VMA_NULL)
3691  { if(m_pMutex) { m_pMutex->Lock(); } }
3692  ~VmaMutexLock()
3693  { if(m_pMutex) { m_pMutex->Unlock(); } }
3694 private:
3695  VMA_MUTEX* m_pMutex;
3696 };
3697 
3698 // Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for reading.
3699 struct VmaMutexLockRead
3700 {
3701  VMA_CLASS_NO_COPY(VmaMutexLockRead)
3702 public:
3703  VmaMutexLockRead(VMA_RW_MUTEX& mutex, bool useMutex) :
3704  m_pMutex(useMutex ? &mutex : VMA_NULL)
3705  { if(m_pMutex) { m_pMutex->LockRead(); } }
3706  ~VmaMutexLockRead() { if(m_pMutex) { m_pMutex->UnlockRead(); } }
3707 private:
3708  VMA_RW_MUTEX* m_pMutex;
3709 };
3710 
3711 // Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for writing.
3712 struct VmaMutexLockWrite
3713 {
3714  VMA_CLASS_NO_COPY(VmaMutexLockWrite)
3715 public:
3716  VmaMutexLockWrite(VMA_RW_MUTEX& mutex, bool useMutex) :
3717  m_pMutex(useMutex ? &mutex : VMA_NULL)
3718  { if(m_pMutex) { m_pMutex->LockWrite(); } }
3719  ~VmaMutexLockWrite() { if(m_pMutex) { m_pMutex->UnlockWrite(); } }
3720 private:
3721  VMA_RW_MUTEX* m_pMutex;
3722 };
3723 
3724 #if VMA_DEBUG_GLOBAL_MUTEX
3725  static VMA_MUTEX gDebugGlobalMutex;
3726  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
3727 #else
3728  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
3729 #endif
3730 
3731 // Minimum size of a free suballocation to register it in the free suballocation collection.
3732 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
3733 
3734 /*
3735 Performs binary search and returns iterator to the first element that is greater
3736 or equal to (key), according to comparison (cmp).
3737 
3738 Cmp should return true if the first argument is less than the second argument.
3739 
3740 The returned value is the found element, if present in the collection, or the
3741 place where a new element with value (key) should be inserted.
3742 */
3743 template <typename CmpLess, typename IterT, typename KeyT>
3744 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpLess cmp)
3745 {
3746  size_t down = 0, up = (end - beg);
3747  while(down < up)
3748  {
3749  const size_t mid = (down + up) / 2;
3750  if(cmp(*(beg+mid), key))
3751  {
3752  down = mid + 1;
3753  }
3754  else
3755  {
3756  up = mid;
3757  }
3758  }
3759  return beg + down;
3760 }
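// Behaves like std::lower_bound. For example, searching {1, 3, 5} for key 4
// with a less-than comparison returns an iterator to the element 5.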
3761 
3762 /*
3763 Returns true if all pointers in the array are non-null and unique.
3764 Warning! O(n^2) complexity. Use only inside VMA_HEAVY_ASSERT.
3765 T must be pointer type, e.g. VmaAllocation, VmaPool.
3766 */
3767 template<typename T>
3768 static bool VmaValidatePointerArray(uint32_t count, const T* arr)
3769 {
3770  for(uint32_t i = 0; i < count; ++i)
3771  {
3772  const T iPtr = arr[i];
3773  if(iPtr == VMA_NULL)
3774  {
3775  return false;
3776  }
3777  for(uint32_t j = i + 1; j < count; ++j)
3778  {
3779  if(iPtr == arr[j])
3780  {
3781  return false;
3782  }
3783  }
3784  }
3785  return true;
3786 }
3787 
3789 // Memory allocation
3790 
3791 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
3792 {
3793  if((pAllocationCallbacks != VMA_NULL) &&
3794  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
3795  {
3796  return (*pAllocationCallbacks->pfnAllocation)(
3797  pAllocationCallbacks->pUserData,
3798  size,
3799  alignment,
3800  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
3801  }
3802  else
3803  {
3804  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
3805  }
3806 }
3807 
3808 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
3809 {
3810  if((pAllocationCallbacks != VMA_NULL) &&
3811  (pAllocationCallbacks->pfnFree != VMA_NULL))
3812  {
3813  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
3814  }
3815  else
3816  {
3817  VMA_SYSTEM_FREE(ptr);
3818  }
3819 }
3820 
3821 template<typename T>
3822 static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
3823 {
3824  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
3825 }
3826 
3827 template<typename T>
3828 static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
3829 {
3830  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
3831 }
3832 
3833 #define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
3834 
3835 #define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
3836 
3837 template<typename T>
3838 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
3839 {
3840  ptr->~T();
3841  VmaFree(pAllocationCallbacks, ptr);
3842 }
3843 
3844 template<typename T>
3845 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
3846 {
3847  if(ptr != VMA_NULL)
3848  {
3849  for(size_t i = count; i--; )
3850  {
3851  ptr[i].~T();
3852  }
3853  VmaFree(pAllocationCallbacks, ptr);
3854  }
3855 }
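// Usage sketch for the helpers above (`pCallbacks` assumed to be a
// const VkAllocationCallbacks*, `MyType`/`arg` hypothetical):
//
//     MyType* obj = vma_new(pCallbacks, MyType)(arg);
//     vma_delete(pCallbacks, obj);
//
//     MyType* arr = vma_new_array(pCallbacks, MyType, 16);
//     vma_delete_array(pCallbacks, arr, 16);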
3856 
3857 // STL-compatible allocator.
3858 template<typename T>
3859 class VmaStlAllocator
3860 {
3861 public:
3862  const VkAllocationCallbacks* const m_pCallbacks;
3863  typedef T value_type;
3864 
3865  VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
3866  template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }
3867 
3868  T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
3869  void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
3870 
3871  template<typename U>
3872  bool operator==(const VmaStlAllocator<U>& rhs) const
3873  {
3874  return m_pCallbacks == rhs.m_pCallbacks;
3875  }
3876  template<typename U>
3877  bool operator!=(const VmaStlAllocator<U>& rhs) const
3878  {
3879  return m_pCallbacks != rhs.m_pCallbacks;
3880  }
3881 
3882  VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
3883 };
3884 
3885 #if VMA_USE_STL_VECTOR
3886 
3887 #define VmaVector std::vector
3888 
3889 template<typename T, typename allocatorT>
3890 static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
3891 {
3892  vec.insert(vec.begin() + index, item);
3893 }
3894 
3895 template<typename T, typename allocatorT>
3896 static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
3897 {
3898  vec.erase(vec.begin() + index);
3899 }
3900 
3901 #else // #if VMA_USE_STL_VECTOR
3902 
3903 /* Class with an interface compatible with a subset of std::vector.
3904 T must be POD, because constructors and destructors are not called and memcpy is
3905 used to copy these objects. */
3906 template<typename T, typename AllocatorT>
3907 class VmaVector
3908 {
3909 public:
3910  typedef T value_type;
3911 
3912  VmaVector(const AllocatorT& allocator) :
3913  m_Allocator(allocator),
3914  m_pArray(VMA_NULL),
3915  m_Count(0),
3916  m_Capacity(0)
3917  {
3918  }
3919 
3920  VmaVector(size_t count, const AllocatorT& allocator) :
3921  m_Allocator(allocator),
3922  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
3923  m_Count(count),
3924  m_Capacity(count)
3925  {
3926  }
3927 
3928  VmaVector(const VmaVector<T, AllocatorT>& src) :
3929  m_Allocator(src.m_Allocator),
3930  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
3931  m_Count(src.m_Count),
3932  m_Capacity(src.m_Count)
3933  {
3934  if(m_Count != 0)
3935  {
3936  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
3937  }
3938  }
3939 
3940  ~VmaVector()
3941  {
3942  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
3943  }
3944 
3945  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
3946  {
3947  if(&rhs != this)
3948  {
3949  resize(rhs.m_Count);
3950  if(m_Count != 0)
3951  {
3952  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
3953  }
3954  }
3955  return *this;
3956  }
3957 
3958  bool empty() const { return m_Count == 0; }
3959  size_t size() const { return m_Count; }
3960  T* data() { return m_pArray; }
3961  const T* data() const { return m_pArray; }
3962 
3963  T& operator[](size_t index)
3964  {
3965  VMA_HEAVY_ASSERT(index < m_Count);
3966  return m_pArray[index];
3967  }
3968  const T& operator[](size_t index) const
3969  {
3970  VMA_HEAVY_ASSERT(index < m_Count);
3971  return m_pArray[index];
3972  }
3973 
3974  T& front()
3975  {
3976  VMA_HEAVY_ASSERT(m_Count > 0);
3977  return m_pArray[0];
3978  }
3979  const T& front() const
3980  {
3981  VMA_HEAVY_ASSERT(m_Count > 0);
3982  return m_pArray[0];
3983  }
3984  T& back()
3985  {
3986  VMA_HEAVY_ASSERT(m_Count > 0);
3987  return m_pArray[m_Count - 1];
3988  }
3989  const T& back() const
3990  {
3991  VMA_HEAVY_ASSERT(m_Count > 0);
3992  return m_pArray[m_Count - 1];
3993  }
3994 
3995  void reserve(size_t newCapacity, bool freeMemory = false)
3996  {
3997  newCapacity = VMA_MAX(newCapacity, m_Count);
3998 
3999  if((newCapacity < m_Capacity) && !freeMemory)
4000  {
4001  newCapacity = m_Capacity;
4002  }
4003 
4004  if(newCapacity != m_Capacity)
4005  {
4006  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
4007  if(m_Count != 0)
4008  {
4009  memcpy(newArray, m_pArray, m_Count * sizeof(T));
4010  }
4011  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
4012  m_Capacity = newCapacity;
4013  m_pArray = newArray;
4014  }
4015  }
4016 
4017  void resize(size_t newCount, bool freeMemory = false)
4018  {
4019  size_t newCapacity = m_Capacity;
4020  if(newCount > m_Capacity)
4021  {
4022  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
4023  }
4024  else if(freeMemory)
4025  {
4026  newCapacity = newCount;
4027  }
4028 
4029  if(newCapacity != m_Capacity)
4030  {
4031  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
4032  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
4033  if(elementsToCopy != 0)
4034  {
4035  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
4036  }
4037  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
4038  m_Capacity = newCapacity;
4039  m_pArray = newArray;
4040  }
4041 
4042  m_Count = newCount;
4043  }
4044 
4045  void clear(bool freeMemory = false)
4046  {
4047  resize(0, freeMemory);
4048  }
4049 
4050  void insert(size_t index, const T& src)
4051  {
4052  VMA_HEAVY_ASSERT(index <= m_Count);
4053  const size_t oldCount = size();
4054  resize(oldCount + 1);
4055  if(index < oldCount)
4056  {
4057  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
4058  }
4059  m_pArray[index] = src;
4060  }
4061 
4062  void remove(size_t index)
4063  {
4064  VMA_HEAVY_ASSERT(index < m_Count);
4065  const size_t oldCount = size();
4066  if(index < oldCount - 1)
4067  {
4068  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
4069  }
4070  resize(oldCount - 1);
4071  }
4072 
4073  void push_back(const T& src)
4074  {
4075  const size_t newIndex = size();
4076  resize(newIndex + 1);
4077  m_pArray[newIndex] = src;
4078  }
4079 
4080  void pop_back()
4081  {
4082  VMA_HEAVY_ASSERT(m_Count > 0);
4083  resize(size() - 1);
4084  }
4085 
4086  void push_front(const T& src)
4087  {
4088  insert(0, src);
4089  }
4090 
4091  void pop_front()
4092  {
4093  VMA_HEAVY_ASSERT(m_Count > 0);
4094  remove(0);
4095  }
4096 
4097  typedef T* iterator;
4098 
4099  iterator begin() { return m_pArray; }
4100  iterator end() { return m_pArray + m_Count; }
4101 
4102 private:
4103  AllocatorT m_Allocator;
4104  T* m_pArray;
4105  size_t m_Count;
4106  size_t m_Capacity;
4107 };
4108 
4109 template<typename T, typename allocatorT>
4110 static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
4111 {
4112  vec.insert(index, item);
4113 }
4114 
4115 template<typename T, typename allocatorT>
4116 static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
4117 {
4118  vec.remove(index);
4119 }
4120 
4121 #endif // #if VMA_USE_STL_VECTOR
4122 
4123 template<typename CmpLess, typename VectorT>
4124 size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
4125 {
4126  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
4127  vector.data(),
4128  vector.data() + vector.size(),
4129  value,
4130  CmpLess()) - vector.data();
4131  VmaVectorInsert(vector, indexToInsert, value);
4132  return indexToInsert;
4133 }
4134 
4135 template<typename CmpLess, typename VectorT>
4136 bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
4137 {
4138  CmpLess comparator;
4139  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
4140  vector.begin(),
4141  vector.end(),
4142  value,
4143  comparator);
4144  if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
4145  {
4146  size_t indexToRemove = it - vector.begin();
4147  VmaVectorRemove(vector, indexToRemove);
4148  return true;
4149  }
4150  return false;
4151 }
4152 
4153 template<typename CmpLess, typename IterT, typename KeyT>
4154 IterT VmaVectorFindSorted(const IterT& beg, const IterT& end, const KeyT& value)
4155 {
4156  CmpLess comparator;
4157  IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
4158  beg, end, value, comparator);
4159  if(it == end ||
4160  (!comparator(*it, value) && !comparator(value, *it)))
4161  {
4162  return it;
4163  }
4164  return end;
4165 }
4166 
4168 // class VmaPoolAllocator
4169 
4170 /*
4171 Allocator for objects of type T, using a list of arrays (pools) to speed up
4172 allocation. The number of elements that can be allocated is not bounded, because
4173 the allocator can create multiple blocks.
4174 */
4175 template<typename T>
4176 class VmaPoolAllocator
4177 {
4178  VMA_CLASS_NO_COPY(VmaPoolAllocator)
4179 public:
4180  VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
4181  ~VmaPoolAllocator();
4182  void Clear();
4183  T* Alloc();
4184  void Free(T* ptr);
4185 
4186 private:
4187  union Item
4188  {
4189  uint32_t NextFreeIndex;
4190  T Value;
4191  };
4192 
4193  struct ItemBlock
4194  {
4195  Item* pItems;
4196  uint32_t FirstFreeIndex;
4197  };
4198 
4199  const VkAllocationCallbacks* m_pAllocationCallbacks;
4200  size_t m_ItemsPerBlock;
4201  VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;
4202 
4203  ItemBlock& CreateNewBlock();
4204 };
4205 
4206 template<typename T>
4207 VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
4208  m_pAllocationCallbacks(pAllocationCallbacks),
4209  m_ItemsPerBlock(itemsPerBlock),
4210  m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
4211 {
4212  VMA_ASSERT(itemsPerBlock > 0);
4213 }
4214 
4215 template<typename T>
4216 VmaPoolAllocator<T>::~VmaPoolAllocator()
4217 {
4218  Clear();
4219 }
4220 
4221 template<typename T>
4222 void VmaPoolAllocator<T>::Clear()
4223 {
4224  for(size_t i = m_ItemBlocks.size(); i--; )
4225  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
4226  m_ItemBlocks.clear();
4227 }
4228 
4229 template<typename T>
4230 T* VmaPoolAllocator<T>::Alloc()
4231 {
4232  for(size_t i = m_ItemBlocks.size(); i--; )
4233  {
4234  ItemBlock& block = m_ItemBlocks[i];
4235  // This block has some free items: Use first one.
4236  if(block.FirstFreeIndex != UINT32_MAX)
4237  {
4238  Item* const pItem = &block.pItems[block.FirstFreeIndex];
4239  block.FirstFreeIndex = pItem->NextFreeIndex;
4240  return &pItem->Value;
4241  }
4242  }
4243 
4244  // No block has free item: Create new one and use it.
4245  ItemBlock& newBlock = CreateNewBlock();
4246  Item* const pItem = &newBlock.pItems[0];
4247  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
4248  return &pItem->Value;
4249 }
4250 
4251 template<typename T>
4252 void VmaPoolAllocator<T>::Free(T* ptr)
4253 {
4254  // Search all memory blocks to find ptr.
4255  for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
4256  {
4257  ItemBlock& block = m_ItemBlocks[i];
4258 
4259  // Casting to union.
4260  Item* pItemPtr;
4261  memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
4262 
4263  // Check if pItemPtr is in address range of this block.
4264  if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
4265  {
4266  const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
4267  pItemPtr->NextFreeIndex = block.FirstFreeIndex;
4268  block.FirstFreeIndex = index;
4269  return;
4270  }
4271  }
4272  VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
4273 }
4274 
4275 template<typename T>
4276 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
4277 {
4278  ItemBlock newBlock = {
4279  vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };
4280 
4281  m_ItemBlocks.push_back(newBlock);
4282 
4283  // Setup singly-linked list of all free items in this block.
4284  for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
4285  newBlock.pItems[i].NextFreeIndex = i + 1;
4286  newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
4287  return m_ItemBlocks.back();
4288 }
4289 
4291 // class VmaRawList, VmaList
4292 
4293 #if VMA_USE_STL_LIST
4294 
4295 #define VmaList std::list
4296 
4297 #else // #if VMA_USE_STL_LIST
4298 
4299 template<typename T>
4300 struct VmaListItem
4301 {
4302  VmaListItem* pPrev;
4303  VmaListItem* pNext;
4304  T Value;
4305 };
4306 
4307 // Doubly linked list.
4308 template<typename T>
4309 class VmaRawList
4310 {
4311  VMA_CLASS_NO_COPY(VmaRawList)
4312 public:
4313  typedef VmaListItem<T> ItemType;
4314 
4315  VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
4316  ~VmaRawList();
4317  void Clear();
4318 
4319  size_t GetCount() const { return m_Count; }
4320  bool IsEmpty() const { return m_Count == 0; }
4321 
4322  ItemType* Front() { return m_pFront; }
4323  const ItemType* Front() const { return m_pFront; }
4324  ItemType* Back() { return m_pBack; }
4325  const ItemType* Back() const { return m_pBack; }
4326 
4327  ItemType* PushBack();
4328  ItemType* PushFront();
4329  ItemType* PushBack(const T& value);
4330  ItemType* PushFront(const T& value);
4331  void PopBack();
4332  void PopFront();
4333 
4334  // Item can be null - it means PushBack.
4335  ItemType* InsertBefore(ItemType* pItem);
4336  // Item can be null - it means PushFront.
4337  ItemType* InsertAfter(ItemType* pItem);
4338 
4339  ItemType* InsertBefore(ItemType* pItem, const T& value);
4340  ItemType* InsertAfter(ItemType* pItem, const T& value);
4341 
4342  void Remove(ItemType* pItem);
4343 
4344 private:
4345  const VkAllocationCallbacks* const m_pAllocationCallbacks;
4346  VmaPoolAllocator<ItemType> m_ItemAllocator;
4347  ItemType* m_pFront;
4348  ItemType* m_pBack;
4349  size_t m_Count;
4350 };
4351 
4352 template<typename T>
4353 VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
4354  m_pAllocationCallbacks(pAllocationCallbacks),
4355  m_ItemAllocator(pAllocationCallbacks, 128),
4356  m_pFront(VMA_NULL),
4357  m_pBack(VMA_NULL),
4358  m_Count(0)
4359 {
4360 }
4361 
4362 template<typename T>
4363 VmaRawList<T>::~VmaRawList()
4364 {
4365  // Intentionally not calling Clear, because that would spend unnecessary
4366  // computation returning all items to m_ItemAllocator as free.
4367 }
4368 
4369 template<typename T>
4370 void VmaRawList<T>::Clear()
4371 {
4372  if(IsEmpty() == false)
4373  {
4374  ItemType* pItem = m_pBack;
4375  while(pItem != VMA_NULL)
4376  {
4377  ItemType* const pPrevItem = pItem->pPrev;
4378  m_ItemAllocator.Free(pItem);
4379  pItem = pPrevItem;
4380  }
4381  m_pFront = VMA_NULL;
4382  m_pBack = VMA_NULL;
4383  m_Count = 0;
4384  }
4385 }
4386 
4387 template<typename T>
4388 VmaListItem<T>* VmaRawList<T>::PushBack()
4389 {
4390  ItemType* const pNewItem = m_ItemAllocator.Alloc();
4391  pNewItem->pNext = VMA_NULL;
4392  if(IsEmpty())
4393  {
4394  pNewItem->pPrev = VMA_NULL;
4395  m_pFront = pNewItem;
4396  m_pBack = pNewItem;
4397  m_Count = 1;
4398  }
4399  else
4400  {
4401  pNewItem->pPrev = m_pBack;
4402  m_pBack->pNext = pNewItem;
4403  m_pBack = pNewItem;
4404  ++m_Count;
4405  }
4406  return pNewItem;
4407 }
4408 
4409 template<typename T>
4410 VmaListItem<T>* VmaRawList<T>::PushFront()
4411 {
4412  ItemType* const pNewItem = m_ItemAllocator.Alloc();
4413  pNewItem->pPrev = VMA_NULL;
4414  if(IsEmpty())
4415  {
4416  pNewItem->pNext = VMA_NULL;
4417  m_pFront = pNewItem;
4418  m_pBack = pNewItem;
4419  m_Count = 1;
4420  }
4421  else
4422  {
4423  pNewItem->pNext = m_pFront;
4424  m_pFront->pPrev = pNewItem;
4425  m_pFront = pNewItem;
4426  ++m_Count;
4427  }
4428  return pNewItem;
4429 }
4430 
4431 template<typename T>
4432 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
4433 {
4434  ItemType* const pNewItem = PushBack();
4435  pNewItem->Value = value;
4436  return pNewItem;
4437 }
4438 
4439 template<typename T>
4440 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
4441 {
4442  ItemType* const pNewItem = PushFront();
4443  pNewItem->Value = value;
4444  return pNewItem;
4445 }
4446 
4447 template<typename T>
4448 void VmaRawList<T>::PopBack()
4449 {
4450  VMA_HEAVY_ASSERT(m_Count > 0);
4451  ItemType* const pBackItem = m_pBack;
4452  ItemType* const pPrevItem = pBackItem->pPrev;
4453  if(pPrevItem != VMA_NULL)
4454  {
4455  pPrevItem->pNext = VMA_NULL;
4456  }
4457  m_pBack = pPrevItem;
4458  m_ItemAllocator.Free(pBackItem);
4459  --m_Count;
4460 }
4461 
4462 template<typename T>
4463 void VmaRawList<T>::PopFront()
4464 {
4465  VMA_HEAVY_ASSERT(m_Count > 0);
4466  ItemType* const pFrontItem = m_pFront;
4467  ItemType* const pNextItem = pFrontItem->pNext;
4468  if(pNextItem != VMA_NULL)
4469  {
4470  pNextItem->pPrev = VMA_NULL;
4471  }
4472  m_pFront = pNextItem;
4473  m_ItemAllocator.Free(pFrontItem);
4474  --m_Count;
4475 }
4476 
4477 template<typename T>
4478 void VmaRawList<T>::Remove(ItemType* pItem)
4479 {
4480  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
4481  VMA_HEAVY_ASSERT(m_Count > 0);
4482 
4483  if(pItem->pPrev != VMA_NULL)
4484  {
4485  pItem->pPrev->pNext = pItem->pNext;
4486  }
4487  else
4488  {
4489  VMA_HEAVY_ASSERT(m_pFront == pItem);
4490  m_pFront = pItem->pNext;
4491  }
4492 
4493  if(pItem->pNext != VMA_NULL)
4494  {
4495  pItem->pNext->pPrev = pItem->pPrev;
4496  }
4497  else
4498  {
4499  VMA_HEAVY_ASSERT(m_pBack == pItem);
4500  m_pBack = pItem->pPrev;
4501  }
4502 
4503  m_ItemAllocator.Free(pItem);
4504  --m_Count;
4505 }
4506 
4507 template<typename T>
4508 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
4509 {
4510  if(pItem != VMA_NULL)
4511  {
4512  ItemType* const prevItem = pItem->pPrev;
4513  ItemType* const newItem = m_ItemAllocator.Alloc();
4514  newItem->pPrev = prevItem;
4515  newItem->pNext = pItem;
4516  pItem->pPrev = newItem;
4517  if(prevItem != VMA_NULL)
4518  {
4519  prevItem->pNext = newItem;
4520  }
4521  else
4522  {
4523  VMA_HEAVY_ASSERT(m_pFront == pItem);
4524  m_pFront = newItem;
4525  }
4526  ++m_Count;
4527  return newItem;
4528  }
4529  else
4530  return PushBack();
4531 }
4532 
4533 template<typename T>
4534 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
4535 {
4536  if(pItem != VMA_NULL)
4537  {
4538  ItemType* const nextItem = pItem->pNext;
4539  ItemType* const newItem = m_ItemAllocator.Alloc();
4540  newItem->pNext = nextItem;
4541  newItem->pPrev = pItem;
4542  pItem->pNext = newItem;
4543  if(nextItem != VMA_NULL)
4544  {
4545  nextItem->pPrev = newItem;
4546  }
4547  else
4548  {
4549  VMA_HEAVY_ASSERT(m_pBack == pItem);
4550  m_pBack = newItem;
4551  }
4552  ++m_Count;
4553  return newItem;
4554  }
4555  else
4556  return PushFront();
4557 }
4558 
4559 template<typename T>
4560 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
4561 {
4562  ItemType* const newItem = InsertBefore(pItem);
4563  newItem->Value = value;
4564  return newItem;
4565 }
4566 
4567 template<typename T>
4568 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
4569 {
4570  ItemType* const newItem = InsertAfter(pItem);
4571  newItem->Value = value;
4572  return newItem;
4573 }
4574 
4575 template<typename T, typename AllocatorT>
4576 class VmaList
4577 {
4578  VMA_CLASS_NO_COPY(VmaList)
4579 public:
4580  class iterator
4581  {
4582  public:
4583  iterator() :
4584  m_pList(VMA_NULL),
4585  m_pItem(VMA_NULL)
4586  {
4587  }
4588 
4589  T& operator*() const
4590  {
4591  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4592  return m_pItem->Value;
4593  }
4594  T* operator->() const
4595  {
4596  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4597  return &m_pItem->Value;
4598  }
4599 
4600  iterator& operator++()
4601  {
4602  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4603  m_pItem = m_pItem->pNext;
4604  return *this;
4605  }
4606  iterator& operator--()
4607  {
4608  if(m_pItem != VMA_NULL)
4609  {
4610  m_pItem = m_pItem->pPrev;
4611  }
4612  else
4613  {
4614  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
4615  m_pItem = m_pList->Back();
4616  }
4617  return *this;
4618  }
4619 
4620  iterator operator++(int)
4621  {
4622  iterator result = *this;
4623  ++*this;
4624  return result;
4625  }
4626  iterator operator--(int)
4627  {
4628  iterator result = *this;
4629  --*this;
4630  return result;
4631  }
4632 
4633  bool operator==(const iterator& rhs) const
4634  {
4635  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4636  return m_pItem == rhs.m_pItem;
4637  }
4638  bool operator!=(const iterator& rhs) const
4639  {
4640  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4641  return m_pItem != rhs.m_pItem;
4642  }
4643 
4644  private:
4645  VmaRawList<T>* m_pList;
4646  VmaListItem<T>* m_pItem;
4647 
4648  iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
4649  m_pList(pList),
4650  m_pItem(pItem)
4651  {
4652  }
4653 
4654  friend class VmaList<T, AllocatorT>;
4655  };
4656 
4657  class const_iterator
4658  {
4659  public:
4660  const_iterator() :
4661  m_pList(VMA_NULL),
4662  m_pItem(VMA_NULL)
4663  {
4664  }
4665 
4666  const_iterator(const iterator& src) :
4667  m_pList(src.m_pList),
4668  m_pItem(src.m_pItem)
4669  {
4670  }
4671 
4672  const T& operator*() const
4673  {
4674  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4675  return m_pItem->Value;
4676  }
4677  const T* operator->() const
4678  {
4679  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4680  return &m_pItem->Value;
4681  }
4682 
4683  const_iterator& operator++()
4684  {
4685  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4686  m_pItem = m_pItem->pNext;
4687  return *this;
4688  }
4689  const_iterator& operator--()
4690  {
4691  if(m_pItem != VMA_NULL)
4692  {
4693  m_pItem = m_pItem->pPrev;
4694  }
4695  else
4696  {
4697  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
4698  m_pItem = m_pList->Back();
4699  }
4700  return *this;
4701  }
4702 
4703  const_iterator operator++(int)
4704  {
4705  const_iterator result = *this;
4706  ++*this;
4707  return result;
4708  }
4709  const_iterator operator--(int)
4710  {
4711  const_iterator result = *this;
4712  --*this;
4713  return result;
4714  }
4715 
4716  bool operator==(const const_iterator& rhs) const
4717  {
4718  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4719  return m_pItem == rhs.m_pItem;
4720  }
4721  bool operator!=(const const_iterator& rhs) const
4722  {
4723  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4724  return m_pItem != rhs.m_pItem;
4725  }
4726 
4727  private:
4728  const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
4729  m_pList(pList),
4730  m_pItem(pItem)
4731  {
4732  }
4733 
4734  const VmaRawList<T>* m_pList;
4735  const VmaListItem<T>* m_pItem;
4736 
4737  friend class VmaList<T, AllocatorT>;
4738  };
4739 
4740  VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }
4741 
4742  bool empty() const { return m_RawList.IsEmpty(); }
4743  size_t size() const { return m_RawList.GetCount(); }
4744 
4745  iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
4746  iterator end() { return iterator(&m_RawList, VMA_NULL); }
4747 
4748  const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
4749  const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
4750 
4751  void clear() { m_RawList.Clear(); }
4752  void push_back(const T& value) { m_RawList.PushBack(value); }
4753  void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
4754  iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
4755 
4756 private:
4757  VmaRawList<T> m_RawList;
4758 };
4759 
4760 #endif // #if VMA_USE_STL_LIST
4761 
4763 // class VmaMap
4764 
4765 // Unused in this version.
4766 #if 0
4767 
4768 #if VMA_USE_STL_UNORDERED_MAP
4769 
4770 #define VmaPair std::pair
4771 
4772 #define VMA_MAP_TYPE(KeyT, ValueT) \
4773  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
4774 
4775 #else // #if VMA_USE_STL_UNORDERED_MAP
4776 
4777 template<typename T1, typename T2>
4778 struct VmaPair
4779 {
4780  T1 first;
4781  T2 second;
4782 
4783  VmaPair() : first(), second() { }
4784  VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
4785 };
4786 
4787 /* Class compatible with a subset of the interface of std::unordered_map.
4788 KeyT, ValueT must be POD, because they will be stored in VmaVector.
4789 */
4790 template<typename KeyT, typename ValueT>
4791 class VmaMap
4792 {
4793 public:
4794  typedef VmaPair<KeyT, ValueT> PairType;
4795  typedef PairType* iterator;
4796 
4797  VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }
4798 
4799  iterator begin() { return m_Vector.begin(); }
4800  iterator end() { return m_Vector.end(); }
4801 
4802  void insert(const PairType& pair);
4803  iterator find(const KeyT& key);
4804  void erase(iterator it);
4805 
4806 private:
4807  VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
4808 };
4809 
4810 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
4811 
4812 template<typename FirstT, typename SecondT>
4813 struct VmaPairFirstLess
4814 {
4815  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
4816  {
4817  return lhs.first < rhs.first;
4818  }
4819  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
4820  {
4821  return lhs.first < rhsFirst;
4822  }
4823 };
4824 
4825 template<typename KeyT, typename ValueT>
4826 void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
4827 {
4828  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
4829  m_Vector.data(),
4830  m_Vector.data() + m_Vector.size(),
4831  pair,
4832  VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
4833  VmaVectorInsert(m_Vector, indexToInsert, pair);
4834 }
4835 
4836 template<typename KeyT, typename ValueT>
4837 VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
4838 {
4839  PairType* it = VmaBinaryFindFirstNotLess(
4840  m_Vector.data(),
4841  m_Vector.data() + m_Vector.size(),
4842  key,
4843  VmaPairFirstLess<KeyT, ValueT>());
4844  if((it != m_Vector.end()) && (it->first == key))
4845  {
4846  return it;
4847  }
4848  else
4849  {
4850  return m_Vector.end();
4851  }
4852 }
4853 
4854 template<typename KeyT, typename ValueT>
4855 void VmaMap<KeyT, ValueT>::erase(iterator it)
4856 {
4857  VmaVectorRemove(m_Vector, it - m_Vector.begin());
4858 }
4859 
4860 #endif // #if VMA_USE_STL_UNORDERED_MAP
4861 
4862 #endif // #if 0
4863 
4865 
4866 class VmaDeviceMemoryBlock;
4867 
4868 enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
4869 
4870 struct VmaAllocation_T
4871 {
4872  VMA_CLASS_NO_COPY(VmaAllocation_T)
4873 private:
4874  static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;
4875 
4876  enum FLAGS
4877  {
4878  FLAG_USER_DATA_STRING = 0x01,
4879  };
4880 
4881 public:
4882  enum ALLOCATION_TYPE
4883  {
4884  ALLOCATION_TYPE_NONE,
4885  ALLOCATION_TYPE_BLOCK,
4886  ALLOCATION_TYPE_DEDICATED,
4887  };
4888 
4889  VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
4890  m_Alignment(1),
4891  m_Size(0),
4892  m_pUserData(VMA_NULL),
4893  m_LastUseFrameIndex(currentFrameIndex),
4894  m_Type((uint8_t)ALLOCATION_TYPE_NONE),
4895  m_SuballocationType((uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN),
4896  m_MapCount(0),
4897  m_Flags(userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0)
4898  {
4899 #if VMA_STATS_STRING_ENABLED
4900  m_CreationFrameIndex = currentFrameIndex;
4901  m_BufferImageUsage = 0;
4902 #endif
4903  }
4904 
4905  ~VmaAllocation_T()
4906  {
4907  VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");
4908 
4909  // Check if owned string was freed.
4910  VMA_ASSERT(m_pUserData == VMA_NULL);
4911  }
4912 
4913  void InitBlockAllocation(
4914  VmaPool hPool,
4915  VmaDeviceMemoryBlock* block,
4916  VkDeviceSize offset,
4917  VkDeviceSize alignment,
4918  VkDeviceSize size,
4919  VmaSuballocationType suballocationType,
4920  bool mapped,
4921  bool canBecomeLost)
4922  {
4923  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
4924  VMA_ASSERT(block != VMA_NULL);
4925  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
4926  m_Alignment = alignment;
4927  m_Size = size;
4928  m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
4929  m_SuballocationType = (uint8_t)suballocationType;
4930  m_BlockAllocation.m_hPool = hPool;
4931  m_BlockAllocation.m_Block = block;
4932  m_BlockAllocation.m_Offset = offset;
4933  m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
4934  }
4935 
4936  void InitLost()
4937  {
4938  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
4939  VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
4940  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
4941  m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
4942  m_BlockAllocation.m_Block = VMA_NULL;
4943  m_BlockAllocation.m_Offset = 0;
4944  m_BlockAllocation.m_CanBecomeLost = true;
4945  }
4946 
4947  void ChangeBlockAllocation(
4948  VmaAllocator hAllocator,
4949  VmaDeviceMemoryBlock* block,
4950  VkDeviceSize offset);
4951 
4952  void ChangeSize(VkDeviceSize newSize);
4953  void ChangeOffset(VkDeviceSize newOffset);
4954 
4955  // pMappedData not null means allocation is created with MAPPED flag.
4956  void InitDedicatedAllocation(
4957  uint32_t memoryTypeIndex,
4958  VkDeviceMemory hMemory,
4959  VmaSuballocationType suballocationType,
4960  void* pMappedData,
4961  VkDeviceSize size)
4962  {
4963  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
4964  VMA_ASSERT(hMemory != VK_NULL_HANDLE);
4965  m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
4966  m_Alignment = 0;
4967  m_Size = size;
4968  m_SuballocationType = (uint8_t)suballocationType;
4969  m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
4970  m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
4971  m_DedicatedAllocation.m_hMemory = hMemory;
4972  m_DedicatedAllocation.m_pMappedData = pMappedData;
4973  }
4974 
4975  ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
4976  VkDeviceSize GetAlignment() const { return m_Alignment; }
4977  VkDeviceSize GetSize() const { return m_Size; }
4978  bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
4979  void* GetUserData() const { return m_pUserData; }
4980  void SetUserData(VmaAllocator hAllocator, void* pUserData);
4981  VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }
4982 
4983  VmaDeviceMemoryBlock* GetBlock() const
4984  {
4985  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
4986  return m_BlockAllocation.m_Block;
4987  }
4988  VkDeviceSize GetOffset() const;
4989  VkDeviceMemory GetMemory() const;
4990  uint32_t GetMemoryTypeIndex() const;
4991  bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
4992  void* GetMappedData() const;
4993  bool CanBecomeLost() const;
4994  VmaPool GetPool() const;
4995 
4996  uint32_t GetLastUseFrameIndex() const
4997  {
4998  return m_LastUseFrameIndex.load();
4999  }
5000  bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
5001  {
5002  return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
5003  }
5004  /*
5005  - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
5006  makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
5007  - Else, returns false.
5008 
5009  If hAllocation is already lost, assert - you should not call it then.
5010  If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
5011  */
5012  bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
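 /* A hedged sketch, not part of this class: the frame-index test behind
 MakeLost() can be written in terms of the accessors above (variable names
 are illustrative):

     uint32_t lastUseFrameIndex = alloc->GetLastUseFrameIndex();
     while(lastUseFrameIndex + frameInUseCount < currentFrameIndex)
     {
         if(alloc->CompareExchangeLastUseFrameIndex(lastUseFrameIndex, VMA_FRAME_INDEX_LOST))
         {
             return true; // Allocation became lost.
         }
         // CAS failure reloaded lastUseFrameIndex - retry.
     }
     return false;
 */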
5013 
5014  void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
5015  {
5016  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
5017  outInfo.blockCount = 1;
5018  outInfo.allocationCount = 1;
5019  outInfo.unusedRangeCount = 0;
5020  outInfo.usedBytes = m_Size;
5021  outInfo.unusedBytes = 0;
5022  outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
5023  outInfo.unusedRangeSizeMin = UINT64_MAX;
5024  outInfo.unusedRangeSizeMax = 0;
5025  }
5026 
5027  void BlockAllocMap();
5028  void BlockAllocUnmap();
5029  VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
5030  void DedicatedAllocUnmap(VmaAllocator hAllocator);
5031 
5032 #if VMA_STATS_STRING_ENABLED
5033  uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
5034  uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }
5035 
5036  void InitBufferImageUsage(uint32_t bufferImageUsage)
5037  {
5038  VMA_ASSERT(m_BufferImageUsage == 0);
5039  m_BufferImageUsage = bufferImageUsage;
5040  }
5041 
5042  void PrintParameters(class VmaJsonWriter& json) const;
5043 #endif
5044 
5045 private:
5046  VkDeviceSize m_Alignment;
5047  VkDeviceSize m_Size;
5048  void* m_pUserData;
5049  VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
5050  uint8_t m_Type; // ALLOCATION_TYPE
5051  uint8_t m_SuballocationType; // VmaSuballocationType
5052  // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
5053  // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
5054  uint8_t m_MapCount;
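 // E.g. m_MapCount == 0x82 means the allocation was created persistently
 // mapped (bit 0x80 set) and is additionally mapped twice via vmaMapMemory()
 // (0x7F-masked count == 2).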
5055  uint8_t m_Flags; // enum FLAGS
5056 
5057  // Allocation out of VmaDeviceMemoryBlock.
5058  struct BlockAllocation
5059  {
5060  VmaPool m_hPool; // Null if belongs to general memory.
5061  VmaDeviceMemoryBlock* m_Block;
5062  VkDeviceSize m_Offset;
5063  bool m_CanBecomeLost;
5064  };
5065 
5066  // Allocation for an object that has its own private VkDeviceMemory.
5067  struct DedicatedAllocation
5068  {
5069  uint32_t m_MemoryTypeIndex;
5070  VkDeviceMemory m_hMemory;
5071  void* m_pMappedData; // Not null means memory is mapped.
5072  };
5073 
5074  union
5075  {
5076  // Allocation out of VmaDeviceMemoryBlock.
5077  BlockAllocation m_BlockAllocation;
5078  // Allocation for an object that has its own private VkDeviceMemory.
5079  DedicatedAllocation m_DedicatedAllocation;
5080  };
5081 
5082 #if VMA_STATS_STRING_ENABLED
5083  uint32_t m_CreationFrameIndex;
5084  uint32_t m_BufferImageUsage; // 0 if unknown.
5085 #endif
5086 
5087  void FreeUserDataString(VmaAllocator hAllocator);
5088 };
5089 
5090 /*
5091 Represents a region of a VmaDeviceMemoryBlock that is either assigned (returned
5092 to the user as an allocated memory block) or free.
5093 */
5094 struct VmaSuballocation
5095 {
5096  VkDeviceSize offset;
5097  VkDeviceSize size;
5098  VmaAllocation hAllocation;
5099  VmaSuballocationType type;
5100 };
5101 
5102 // Comparator for offsets.
5103 struct VmaSuballocationOffsetLess
5104 {
5105  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
5106  {
5107  return lhs.offset < rhs.offset;
5108  }
5109 };
5110 struct VmaSuballocationOffsetGreater
5111 {
5112  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
5113  {
5114  return lhs.offset > rhs.offset;
5115  }
5116 };
5117 
5118 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
5119 
5120 // Cost of making one additional allocation lost, expressed as an equivalent in bytes.
5121 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
5122 
5123 /*
5124 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
5125 
5126 If canMakeOtherLost was false:
5127 - item points to a FREE suballocation.
5128 - itemsToMakeLostCount is 0.
5129 
5130 If canMakeOtherLost was true:
5131 - item points to first of sequence of suballocations, which are either FREE,
5132  or point to VmaAllocations that can become lost.
5133 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
5134  the requested allocation to succeed.
5135 */
5136 struct VmaAllocationRequest
5137 {
5138  VkDeviceSize offset;
5139  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
5140  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
5141  VmaSuballocationList::iterator item;
5142  size_t itemsToMakeLostCount;
5143  void* customData;
5144 
5145  VkDeviceSize CalcCost() const
5146  {
5147  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
5148  }
5149 };
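// Worked example (illustrative): a request that overlaps 192 KiB of allocations
// to be made lost (sumItemSize == 196608) across 2 allocations
// (itemsToMakeLostCount == 2) has CalcCost() == 196608 + 2 * 1048576 == 2293760,
// so it loses the cost comparison against any request with fewer casualties.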
5150 
5151 /*
5152 Data structure used for bookkeeping of allocations and unused ranges of memory
5153 in a single VkDeviceMemory block.
5154 */
5155 class VmaBlockMetadata
5156 {
5157 public:
5158  VmaBlockMetadata(VmaAllocator hAllocator);
5159  virtual ~VmaBlockMetadata() { }
5160  virtual void Init(VkDeviceSize size) { m_Size = size; }
5161 
5162  // Validates all data structures inside this object. If not valid, returns false.
5163  virtual bool Validate() const = 0;
5164  VkDeviceSize GetSize() const { return m_Size; }
5165  virtual size_t GetAllocationCount() const = 0;
5166  virtual VkDeviceSize GetSumFreeSize() const = 0;
5167  virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
5168  // Returns true if this block is empty - contains only a single free suballocation.
5169  virtual bool IsEmpty() const = 0;
5170 
5171  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
5172  // Shouldn't modify blockCount.
5173  virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;
5174 
5175 #if VMA_STATS_STRING_ENABLED
5176  virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
5177 #endif
5178 
5179  // Tries to find a place for suballocation with given parameters inside this block.
5180  // If succeeded, fills pAllocationRequest and returns true.
5181  // If failed, returns false.
5182  virtual bool CreateAllocationRequest(
5183  uint32_t currentFrameIndex,
5184  uint32_t frameInUseCount,
5185  VkDeviceSize bufferImageGranularity,
5186  VkDeviceSize allocSize,
5187  VkDeviceSize allocAlignment,
5188  bool upperAddress,
5189  VmaSuballocationType allocType,
5190  bool canMakeOtherLost,
5191  // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* or VMA_ALLOCATION_INTERNAL_STRATEGY_* flags.
5192  uint32_t strategy,
5193  VmaAllocationRequest* pAllocationRequest) = 0;
5194 
5195  virtual bool MakeRequestedAllocationsLost(
5196  uint32_t currentFrameIndex,
5197  uint32_t frameInUseCount,
5198  VmaAllocationRequest* pAllocationRequest) = 0;
5199 
5200  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;
5201 
5202  virtual VkResult CheckCorruption(const void* pBlockData) = 0;
5203 
5204  // Makes actual allocation based on request. Request must already be checked and valid.
5205  virtual void Alloc(
5206  const VmaAllocationRequest& request,
5207  VmaSuballocationType type,
5208  VkDeviceSize allocSize,
5209  bool upperAddress,
5210  VmaAllocation hAllocation) = 0;
5211 
5212  // Frees suballocation assigned to given memory region.
5213  virtual void Free(const VmaAllocation allocation) = 0;
5214  virtual void FreeAtOffset(VkDeviceSize offset) = 0;
5215 
5216  // Tries to resize (grow or shrink) space for given allocation, in place.
5217  virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize) { return false; }
5218 
5219 protected:
5220  const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }
5221 
5222 #if VMA_STATS_STRING_ENABLED
5223  void PrintDetailedMap_Begin(class VmaJsonWriter& json,
5224  VkDeviceSize unusedBytes,
5225  size_t allocationCount,
5226  size_t unusedRangeCount) const;
5227  void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
5228  VkDeviceSize offset,
5229  VmaAllocation hAllocation) const;
5230  void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
5231  VkDeviceSize offset,
5232  VkDeviceSize size) const;
5233  void PrintDetailedMap_End(class VmaJsonWriter& json) const;
5234 #endif
5235 
5236 private:
5237  VkDeviceSize m_Size;
5238  const VkAllocationCallbacks* m_pAllocationCallbacks;
5239 };
5240 
5241 #define VMA_VALIDATE(cond) do { if(!(cond)) { \
5242  VMA_ASSERT(0 && "Validation failed: " #cond); \
5243  return false; \
5244  } } while(false)
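// Typical use inside a Validate() implementation - the invariants shown here
// are illustrative, not an exhaustive list:
//
//     VMA_VALIDATE(GetSumFreeSize() <= GetSize());
//     VMA_VALIDATE(m_FreeCount <= m_Suballocations.size());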
5245 
5246 class VmaBlockMetadata_Generic : public VmaBlockMetadata
5247 {
5248  VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
5249 public:
5250  VmaBlockMetadata_Generic(VmaAllocator hAllocator);
5251  virtual ~VmaBlockMetadata_Generic();
5252  virtual void Init(VkDeviceSize size);
5253 
5254  virtual bool Validate() const;
5255  virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
5256  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
5257  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
5258  virtual bool IsEmpty() const;
5259 
5260  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
5261  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
5262 
5263 #if VMA_STATS_STRING_ENABLED
5264  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
5265 #endif
5266 
5267  virtual bool CreateAllocationRequest(
5268  uint32_t currentFrameIndex,
5269  uint32_t frameInUseCount,
5270  VkDeviceSize bufferImageGranularity,
5271  VkDeviceSize allocSize,
5272  VkDeviceSize allocAlignment,
5273  bool upperAddress,
5274  VmaSuballocationType allocType,
5275  bool canMakeOtherLost,
5276  uint32_t strategy,
5277  VmaAllocationRequest* pAllocationRequest);
5278 
5279  virtual bool MakeRequestedAllocationsLost(
5280  uint32_t currentFrameIndex,
5281  uint32_t frameInUseCount,
5282  VmaAllocationRequest* pAllocationRequest);
5283 
5284  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5285 
5286  virtual VkResult CheckCorruption(const void* pBlockData);
5287 
5288  virtual void Alloc(
5289  const VmaAllocationRequest& request,
5290  VmaSuballocationType type,
5291  VkDeviceSize allocSize,
5292  bool upperAddress,
5293  VmaAllocation hAllocation);
5294 
5295  virtual void Free(const VmaAllocation allocation);
5296  virtual void FreeAtOffset(VkDeviceSize offset);
5297 
5298  virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize);
5299 
5300  ////////////////////////////////////////////////////////////////////////////////
5301  // For defragmentation
5302 
5303  bool IsBufferImageGranularityConflictPossible(
5304  VkDeviceSize bufferImageGranularity,
5305  VmaSuballocationType& inOutPrevSuballocType) const;
5306 
5307 private:
5308  friend class VmaDefragmentationAlgorithm_Generic;
5309  friend class VmaDefragmentationAlgorithm_Fast;
5310 
5311  uint32_t m_FreeCount;
5312  VkDeviceSize m_SumFreeSize;
5313  VmaSuballocationList m_Suballocations;
5314  // Suballocations that are free and have size greater than certain threshold.
5315  // Sorted by size, ascending.
5316  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
5317 
5318  bool ValidateFreeSuballocationList() const;
5319 
5320  // Checks if requested suballocation with given parameters can be placed in given pFreeSuballocItem.
5321  // If yes, fills pOffset and returns true. If no, returns false.
5322  bool CheckAllocation(
5323  uint32_t currentFrameIndex,
5324  uint32_t frameInUseCount,
5325  VkDeviceSize bufferImageGranularity,
5326  VkDeviceSize allocSize,
5327  VkDeviceSize allocAlignment,
5328  VmaSuballocationType allocType,
5329  VmaSuballocationList::const_iterator suballocItem,
5330  bool canMakeOtherLost,
5331  VkDeviceSize* pOffset,
5332  size_t* itemsToMakeLostCount,
5333  VkDeviceSize* pSumFreeSize,
5334  VkDeviceSize* pSumItemSize) const;
5335  // Given a free suballocation, merges it with the following one, which must also be free.
5336  void MergeFreeWithNext(VmaSuballocationList::iterator item);
5337  // Releases given suballocation, making it free.
5338  // Merges it with adjacent free suballocations if applicable.
5339  // Returns iterator to new free suballocation at this place.
5340  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
5341  // Given a free suballocation, inserts it into the sorted list
5342  // m_FreeSuballocationsBySize if it is large enough to qualify.
5343  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
5344  // Given a free suballocation, removes it from the sorted list
5345  // m_FreeSuballocationsBySize if it was registered there.
5346  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
5347 };
5348 
5349 /*
5350 Allocations and their references in internal data structure look like this:
5351 
5352 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
5353 
5354  0 +-------+
5355  | |
5356  | |
5357  | |
5358  +-------+
5359  | Alloc | 1st[m_1stNullItemsBeginCount]
5360  +-------+
5361  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
5362  +-------+
5363  | ... |
5364  +-------+
5365  | Alloc | 1st[1st.size() - 1]
5366  +-------+
5367  | |
5368  | |
5369  | |
5370 GetSize() +-------+
5371 
5372 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
5373 
5374  0 +-------+
5375  | Alloc | 2nd[0]
5376  +-------+
5377  | Alloc | 2nd[1]
5378  +-------+
5379  | ... |
5380  +-------+
5381  | Alloc | 2nd[2nd.size() - 1]
5382  +-------+
5383  | |
5384  | |
5385  | |
5386  +-------+
5387  | Alloc | 1st[m_1stNullItemsBeginCount]
5388  +-------+
5389  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
5390  +-------+
5391  | ... |
5392  +-------+
5393  | Alloc | 1st[1st.size() - 1]
5394  +-------+
5395  | |
5396 GetSize() +-------+
5397 
5398 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
5399 
5400  0 +-------+
5401  | |
5402  | |
5403  | |
5404  +-------+
5405  | Alloc | 1st[m_1stNullItemsBeginCount]
5406  +-------+
5407  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
5408  +-------+
5409  | ... |
5410  +-------+
5411  | Alloc | 1st[1st.size() - 1]
5412  +-------+
5413  | |
5414  | |
5415  | |
5416  +-------+
5417  | Alloc | 2nd[2nd.size() - 1]
5418  +-------+
5419  | ... |
5420  +-------+
5421  | Alloc | 2nd[1]
5422  +-------+
5423  | Alloc | 2nd[0]
5424 GetSize() +-------+
5425 
5426 */
5427 class VmaBlockMetadata_Linear : public VmaBlockMetadata
5428 {
5429  VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
5430 public:
5431  VmaBlockMetadata_Linear(VmaAllocator hAllocator);
5432  virtual ~VmaBlockMetadata_Linear();
5433  virtual void Init(VkDeviceSize size);
5434 
5435  virtual bool Validate() const;
5436  virtual size_t GetAllocationCount() const;
5437  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
5438  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
5439  virtual bool IsEmpty() const { return GetAllocationCount() == 0; }
5440 
5441  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
5442  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
5443 
5444 #if VMA_STATS_STRING_ENABLED
5445  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
5446 #endif
5447 
5448  virtual bool CreateAllocationRequest(
5449  uint32_t currentFrameIndex,
5450  uint32_t frameInUseCount,
5451  VkDeviceSize bufferImageGranularity,
5452  VkDeviceSize allocSize,
5453  VkDeviceSize allocAlignment,
5454  bool upperAddress,
5455  VmaSuballocationType allocType,
5456  bool canMakeOtherLost,
5457  uint32_t strategy,
5458  VmaAllocationRequest* pAllocationRequest);
5459 
5460  virtual bool MakeRequestedAllocationsLost(
5461  uint32_t currentFrameIndex,
5462  uint32_t frameInUseCount,
5463  VmaAllocationRequest* pAllocationRequest);
5464 
5465  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5466 
5467  virtual VkResult CheckCorruption(const void* pBlockData);
5468 
5469  virtual void Alloc(
5470  const VmaAllocationRequest& request,
5471  VmaSuballocationType type,
5472  VkDeviceSize allocSize,
5473  bool upperAddress,
5474  VmaAllocation hAllocation);
5475 
5476  virtual void Free(const VmaAllocation allocation);
5477  virtual void FreeAtOffset(VkDeviceSize offset);
5478 
5479 private:
5480  /*
5481  There are two suballocation vectors, used in ping-pong way.
5482  The one with index m_1stVectorIndex is called 1st.
5483  The one with index (m_1stVectorIndex ^ 1) is called 2nd.
5484  2nd can be non-empty only when 1st is not empty.
5485  When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
5486  */
5487  typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;
5488 
5489  enum SECOND_VECTOR_MODE
5490  {
5491  SECOND_VECTOR_EMPTY,
5492  /*
5493  Suballocations in the 2nd vector are created later than the ones in the 1st,
5494  but they all have smaller offsets.
5495  */
5496  SECOND_VECTOR_RING_BUFFER,
5497  /*
5498  Suballocations in 2nd vector are upper side of double stack.
5499  They all have offsets higher than those in 1st vector.
5500  Top of this stack means smaller offsets, but higher indices in this vector.
5501  */
5502  SECOND_VECTOR_DOUBLE_STACK,
5503  };
5504 
5505  VkDeviceSize m_SumFreeSize;
5506  SuballocationVectorType m_Suballocations0, m_Suballocations1;
5507  uint32_t m_1stVectorIndex;
5508  SECOND_VECTOR_MODE m_2ndVectorMode;
5509 
5510  SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
5511  SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
5512  const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
5513  const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
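 // E.g. with m_1stVectorIndex == 0, 1st is m_Suballocations0 and 2nd is
 // m_Suballocations1; swapping the roles of the two vectors amounts to
 // flipping m_1stVectorIndex.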
5514 
5515  // Number of items in 1st vector with hAllocation = null at the beginning.
5516  size_t m_1stNullItemsBeginCount;
5517  // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
5518  size_t m_1stNullItemsMiddleCount;
5519  // Number of items in 2nd vector with hAllocation = null.
5520  size_t m_2ndNullItemsCount;
5521 
5522  bool ShouldCompact1st() const;
5523  void CleanupAfterFree();
5524 };
5525 
5526 /*
5527 - GetSize() is the original size of allocated memory block.
5528 - m_UsableSize is this size aligned down to a power of two.
5529  All allocations and calculations happen relative to m_UsableSize.
5530 - GetUnusableSize() is the difference between them.
5531  It is reported as a separate, unused range, not available for allocations.
5532 
5533 The node at level 0 has size = m_UsableSize.
5534 Each subsequent level contains nodes half the size of the previous level.
5535 m_LevelCount is the maximum number of levels to use in the current object.
5536 */
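/*
Worked example (illustrative): for a block of 1000 bytes, m_UsableSize is 512
(1000 aligned down to a power of two), so GetUnusableSize() == 488. Level 0 then
holds a single 512-byte node, level 1 holds 256-byte nodes, level 2 128-byte
nodes, and so on down to MIN_NODE_SIZE.
*/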
5537 class VmaBlockMetadata_Buddy : public VmaBlockMetadata
5538 {
5539  VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
5540 public:
5541  VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
5542  virtual ~VmaBlockMetadata_Buddy();
5543  virtual void Init(VkDeviceSize size);
5544 
5545  virtual bool Validate() const;
5546  virtual size_t GetAllocationCount() const { return m_AllocationCount; }
5547  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
5548  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
5549  virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }
5550 
5551  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
5552  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
5553 
5554 #if VMA_STATS_STRING_ENABLED
5555  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
5556 #endif
5557 
5558  virtual bool CreateAllocationRequest(
5559  uint32_t currentFrameIndex,
5560  uint32_t frameInUseCount,
5561  VkDeviceSize bufferImageGranularity,
5562  VkDeviceSize allocSize,
5563  VkDeviceSize allocAlignment,
5564  bool upperAddress,
5565  VmaSuballocationType allocType,
5566  bool canMakeOtherLost,
5567  uint32_t strategy,
5568  VmaAllocationRequest* pAllocationRequest);
5569 
5570  virtual bool MakeRequestedAllocationsLost(
5571  uint32_t currentFrameIndex,
5572  uint32_t frameInUseCount,
5573  VmaAllocationRequest* pAllocationRequest);
5574 
5575  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5576 
5577  virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }
5578 
5579  virtual void Alloc(
5580  const VmaAllocationRequest& request,
5581  VmaSuballocationType type,
5582  VkDeviceSize allocSize,
5583  bool upperAddress,
5584  VmaAllocation hAllocation);
5585 
5586  virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
5587  virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }
5588 
5589 private:
5590  static const VkDeviceSize MIN_NODE_SIZE = 32;
5591  static const size_t MAX_LEVELS = 30;
5592 
5593  struct ValidationContext
5594  {
5595  size_t calculatedAllocationCount;
5596  size_t calculatedFreeCount;
5597  VkDeviceSize calculatedSumFreeSize;
5598 
5599  ValidationContext() :
5600  calculatedAllocationCount(0),
5601  calculatedFreeCount(0),
5602  calculatedSumFreeSize(0) { }
5603  };
5604 
5605  struct Node
5606  {
5607  VkDeviceSize offset;
5608  enum TYPE
5609  {
5610  TYPE_FREE,
5611  TYPE_ALLOCATION,
5612  TYPE_SPLIT,
5613  TYPE_COUNT
5614  } type;
5615  Node* parent;
5616  Node* buddy;
5617 
5618  union
5619  {
5620  struct
5621  {
5622  Node* prev;
5623  Node* next;
5624  } free;
5625  struct
5626  {
5627  VmaAllocation alloc;
5628  } allocation;
5629  struct
5630  {
5631  Node* leftChild;
5632  } split;
5633  };
5634  };
5635 
5636  // Size of the memory block aligned down to a power of two.
5637  VkDeviceSize m_UsableSize;
5638  uint32_t m_LevelCount;
5639 
5640  Node* m_Root;
5641  struct {
5642  Node* front;
5643  Node* back;
5644  } m_FreeList[MAX_LEVELS];
5645  // Number of nodes in the tree with type == TYPE_ALLOCATION.
5646  size_t m_AllocationCount;
5647  // Number of nodes in the tree with type == TYPE_FREE.
5648  size_t m_FreeCount;
5649  // This includes space wasted due to internal fragmentation. Doesn't include unusable size.
5650  VkDeviceSize m_SumFreeSize;
5651 
5652  VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
5653  void DeleteNode(Node* node);
5654  bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
5655  uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
5656  inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
5657  // Alloc passed just for validation. Can be null.
5658  void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
5659  void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
5660  // Adds node to the front of FreeList at given level.
5661  // node->type must be FREE.
5662  // node->free.prev, next can be undefined.
5663  void AddToFreeListFront(uint32_t level, Node* node);
5664  // Removes node from FreeList at given level.
5665  // node->type must be FREE.
5666  // node->free.prev, next stay untouched.
5667  void RemoveFromFreeList(uint32_t level, Node* node);
5668 
5669 #if VMA_STATS_STRING_ENABLED
5670  void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
5671 #endif
5672 };
5673 
5674 /*
5675 Represents a single block of device memory (`VkDeviceMemory`) with all the
5676 data about its regions (aka suballocations, #VmaAllocation), assigned and free.
5677 
5678 Thread-safety: This class must be externally synchronized.
5679 */
5680 class VmaDeviceMemoryBlock
5681 {
5682  VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
5683 public:
5684  VmaBlockMetadata* m_pMetadata;
5685 
5686  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
5687 
5688  ~VmaDeviceMemoryBlock()
5689  {
5690  VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
5691  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
5692  }
5693 
5694  // Always call after construction.
5695  void Init(
5696  VmaAllocator hAllocator,
5697  uint32_t newMemoryTypeIndex,
5698  VkDeviceMemory newMemory,
5699  VkDeviceSize newSize,
5700  uint32_t id,
5701  uint32_t algorithm);
5702  // Always call before destruction.
5703  void Destroy(VmaAllocator allocator);
5704 
5705  VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
5706  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
5707  uint32_t GetId() const { return m_Id; }
5708  void* GetMappedData() const { return m_pMappedData; }
5709 
5710  // Validates all data structures inside this object. If not valid, returns false.
5711  bool Validate() const;
5712 
5713  VkResult CheckCorruption(VmaAllocator hAllocator);
5714 
5715  // ppData can be null.
5716  VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
5717  void Unmap(VmaAllocator hAllocator, uint32_t count);
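 // A hedged usage sketch of the reference-counted mapping (names are
 // illustrative):
 //
 //     void* pData = VMA_NULL;
 //     if(block->Map(hAllocator, 1, &pData) == VK_SUCCESS)
 //     {
 //         // ... read or write through pData ...
 //         block->Unmap(hAllocator, 1); // vkUnmapMemory is called only when m_MapCount drops to 0.
 //     }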
5718 
5719  VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
5720  VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
5721 
5722  VkResult BindBufferMemory(
5723  const VmaAllocator hAllocator,
5724  const VmaAllocation hAllocation,
5725  VkBuffer hBuffer);
5726  VkResult BindImageMemory(
5727  const VmaAllocator hAllocator,
5728  const VmaAllocation hAllocation,
5729  VkImage hImage);
5730 
5731 private:
5732  uint32_t m_MemoryTypeIndex;
5733  uint32_t m_Id;
5734  VkDeviceMemory m_hMemory;
5735 
5736  /*
5737  Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
5738  Also protects m_MapCount, m_pMappedData.
5739  Allocations, deallocations, and any change to m_pMetadata are protected by the parent's VmaBlockVector::m_Mutex.
5740  */
5741  VMA_MUTEX m_Mutex;
5742  uint32_t m_MapCount;
5743  void* m_pMappedData;
5744 };
5745 
5746 struct VmaPointerLess
5747 {
5748  bool operator()(const void* lhs, const void* rhs) const
5749  {
5750  return lhs < rhs;
5751  }
5752 };
5753 
5754 struct VmaDefragmentationMove
5755 {
5756  size_t srcBlockIndex;
5757  size_t dstBlockIndex;
5758  VkDeviceSize srcOffset;
5759  VkDeviceSize dstOffset;
5760  VkDeviceSize size;
5761 };
5762 
5763 class VmaDefragmentationAlgorithm;
5764 
5765 /*
5766 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
5767 Vulkan memory type.
5768 
5769 Synchronized internally with a mutex.
5770 */
5771 struct VmaBlockVector
5772 {
5773  VMA_CLASS_NO_COPY(VmaBlockVector)
5774 public:
5775  VmaBlockVector(
5776  VmaAllocator hAllocator,
5777  uint32_t memoryTypeIndex,
5778  VkDeviceSize preferredBlockSize,
5779  size_t minBlockCount,
5780  size_t maxBlockCount,
5781  VkDeviceSize bufferImageGranularity,
5782  uint32_t frameInUseCount,
5783  bool isCustomPool,
5784  bool explicitBlockSize,
5785  uint32_t algorithm);
5786  ~VmaBlockVector();
5787 
5788  VkResult CreateMinBlocks();
5789 
5790  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
5791  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
5792  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
5793  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
5794  uint32_t GetAlgorithm() const { return m_Algorithm; }
5795 
5796  void GetPoolStats(VmaPoolStats* pStats);
5797 
5798  bool IsEmpty() const { return m_Blocks.empty(); }
5799  bool IsCorruptionDetectionEnabled() const;
5800 
5801  VkResult Allocate(
5802  VmaPool hCurrentPool,
5803  uint32_t currentFrameIndex,
5804  VkDeviceSize size,
5805  VkDeviceSize alignment,
5806  const VmaAllocationCreateInfo& createInfo,
5807  VmaSuballocationType suballocType,
5808  size_t allocationCount,
5809  VmaAllocation* pAllocations);
5810 
5811  void Free(
5812  VmaAllocation hAllocation);
5813 
5814  // Adds statistics of this BlockVector to pStats.
5815  void AddStats(VmaStats* pStats);
5816 
5817 #if VMA_STATS_STRING_ENABLED
5818  void PrintDetailedMap(class VmaJsonWriter& json);
5819 #endif
5820 
5821  void MakePoolAllocationsLost(
5822  uint32_t currentFrameIndex,
5823  size_t* pLostAllocationCount);
5824  VkResult CheckCorruption();
5825 
5826  // Saves results in pCtx->res.
5827  void Defragment(
5828  class VmaBlockVectorDefragmentationContext* pCtx,
5829  VmaDefragmentationStats* pStats,
5830  VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
5831  VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
5832  VkCommandBuffer commandBuffer);
5833  void DefragmentationEnd(
5834  class VmaBlockVectorDefragmentationContext* pCtx,
5835  VmaDefragmentationStats* pStats);
5836 
5837  ////////////////////////////////////////////////////////////////////////////////
5838  // To be used only while the m_Mutex is locked. Used during defragmentation.
5839 
5840  size_t GetBlockCount() const { return m_Blocks.size(); }
5841  VmaDeviceMemoryBlock* GetBlock(size_t index) const { return m_Blocks[index]; }
5842  size_t CalcAllocationCount() const;
5843  bool IsBufferImageGranularityConflictPossible() const;
5844 
5845 private:
5846  friend class VmaDefragmentationAlgorithm_Generic;
5847 
5848  const VmaAllocator m_hAllocator;
5849  const uint32_t m_MemoryTypeIndex;
5850  const VkDeviceSize m_PreferredBlockSize;
5851  const size_t m_MinBlockCount;
5852  const size_t m_MaxBlockCount;
5853  const VkDeviceSize m_BufferImageGranularity;
5854  const uint32_t m_FrameInUseCount;
5855  const bool m_IsCustomPool;
5856  const bool m_ExplicitBlockSize;
5857  const uint32_t m_Algorithm;
5858  /* There can be at most one block that is completely empty (see m_HasEmptyBlock) -
5859  a hysteresis to avoid the pessimistic case of alternating creation and destruction
5860  of a VkDeviceMemory object. */
5861  bool m_HasEmptyBlock;
5862  VMA_RW_MUTEX m_Mutex;
5863  // Incrementally sorted by sumFreeSize, ascending.
5864  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
5865  uint32_t m_NextBlockId;
5866 
5867  VkDeviceSize CalcMaxBlockSize() const;
5868 
5869  // Finds and removes given block from vector.
5870  void Remove(VmaDeviceMemoryBlock* pBlock);
5871 
5872  // Performs single step in sorting m_Blocks. They may not be fully sorted
5873  // after this call.
5874  void IncrementallySortBlocks();
5875 
5876  VkResult AllocatePage(
5877  VmaPool hCurrentPool,
5878  uint32_t currentFrameIndex,
5879  VkDeviceSize size,
5880  VkDeviceSize alignment,
5881  const VmaAllocationCreateInfo& createInfo,
5882  VmaSuballocationType suballocType,
5883  VmaAllocation* pAllocation);
5884 
5885  // To be used only without CAN_MAKE_OTHER_LOST flag.
5886  VkResult AllocateFromBlock(
5887  VmaDeviceMemoryBlock* pBlock,
5888  VmaPool hCurrentPool,
5889  uint32_t currentFrameIndex,
5890  VkDeviceSize size,
5891  VkDeviceSize alignment,
5892  VmaAllocationCreateFlags allocFlags,
5893  void* pUserData,
5894  VmaSuballocationType suballocType,
5895  uint32_t strategy,
5896  VmaAllocation* pAllocation);
5897 
5898  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
5899 
5900  // Saves result to pCtx->res.
5901  void ApplyDefragmentationMovesCpu(
5902  class VmaBlockVectorDefragmentationContext* pDefragCtx,
5903  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves);
5904  // Saves result to pCtx->res.
5905  void ApplyDefragmentationMovesGpu(
5906  class VmaBlockVectorDefragmentationContext* pDefragCtx,
5907  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
5908  VkCommandBuffer commandBuffer);
5909 
5910  /*
5911  Used during defragmentation. pDefragmentationStats is optional. It's in/out
5912  - updated with new data.
5913  */
5914  void FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats);
5915 };
5916 
5917 struct VmaPool_T
5918 {
5919  VMA_CLASS_NO_COPY(VmaPool_T)
5920 public:
5921  VmaBlockVector m_BlockVector;
5922 
5923  VmaPool_T(
5924  VmaAllocator hAllocator,
5925  const VmaPoolCreateInfo& createInfo,
5926  VkDeviceSize preferredBlockSize);
5927  ~VmaPool_T();
5928 
5929  uint32_t GetId() const { return m_Id; }
5930  void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }
5931 
5932 #if VMA_STATS_STRING_ENABLED
5933  //void PrintDetailedMap(class VmaStringBuilder& sb);
5934 #endif
5935 
5936 private:
5937  uint32_t m_Id;
5938 };
5939 
5940 /*
5941 Performs defragmentation:
5942 
5943 - Updates `pBlockVector->m_pMetadata`.
5944 - Updates allocations by calling ChangeBlockAllocation() or ChangeOffset().
5945 - Does not move actual data, only returns requested moves as `moves`.
5946 */
5947 class VmaDefragmentationAlgorithm
5948 {
5949  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm)
5950 public:
5951  VmaDefragmentationAlgorithm(
5952  VmaAllocator hAllocator,
5953  VmaBlockVector* pBlockVector,
5954  uint32_t currentFrameIndex) :
5955  m_hAllocator(hAllocator),
5956  m_pBlockVector(pBlockVector),
5957  m_CurrentFrameIndex(currentFrameIndex)
5958  {
5959  }
5960  virtual ~VmaDefragmentationAlgorithm()
5961  {
5962  }
5963 
5964  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) = 0;
5965  virtual void AddAll() = 0;
5966 
5967  virtual VkResult Defragment(
5968  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
5969  VkDeviceSize maxBytesToMove,
5970  uint32_t maxAllocationsToMove) = 0;
5971 
5972  virtual VkDeviceSize GetBytesMoved() const = 0;
5973  virtual uint32_t GetAllocationsMoved() const = 0;
5974 
5975 protected:
5976  VmaAllocator const m_hAllocator;
5977  VmaBlockVector* const m_pBlockVector;
5978  const uint32_t m_CurrentFrameIndex;
5979 
5980  struct AllocationInfo
5981  {
5982  VmaAllocation m_hAllocation;
5983  VkBool32* m_pChanged;
5984 
5985  AllocationInfo() :
5986  m_hAllocation(VK_NULL_HANDLE),
5987  m_pChanged(VMA_NULL)
5988  {
5989  }
5990  AllocationInfo(VmaAllocation hAlloc, VkBool32* pChanged) :
5991  m_hAllocation(hAlloc),
5992  m_pChanged(pChanged)
5993  {
5994  }
5995  };
5996 };
5997 
5998 class VmaDefragmentationAlgorithm_Generic : public VmaDefragmentationAlgorithm
5999 {
6000  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Generic)
6001 public:
6002  VmaDefragmentationAlgorithm_Generic(
6003  VmaAllocator hAllocator,
6004  VmaBlockVector* pBlockVector,
6005  uint32_t currentFrameIndex,
6006  bool overlappingMoveSupported);
6007  virtual ~VmaDefragmentationAlgorithm_Generic();
6008 
6009  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
6010  virtual void AddAll() { m_AllAllocations = true; }
6011 
6012  virtual VkResult Defragment(
6013  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6014  VkDeviceSize maxBytesToMove,
6015  uint32_t maxAllocationsToMove);
6016 
6017  virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
6018  virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
6019 
6020 private:
6021  uint32_t m_AllocationCount;
6022  bool m_AllAllocations;
6023 
6024  VkDeviceSize m_BytesMoved;
6025  uint32_t m_AllocationsMoved;
6026 
6027  struct AllocationInfoSizeGreater
6028  {
6029  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
6030  {
6031  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
6032  }
6033  };
6034 
6035  struct AllocationInfoOffsetGreater
6036  {
6037  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
6038  {
6039  return lhs.m_hAllocation->GetOffset() > rhs.m_hAllocation->GetOffset();
6040  }
6041  };
6042 
6043  struct BlockInfo
6044  {
6045  size_t m_OriginalBlockIndex;
6046  VmaDeviceMemoryBlock* m_pBlock;
6047  bool m_HasNonMovableAllocations;
6048  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
6049 
6050  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
6051  m_OriginalBlockIndex(SIZE_MAX),
6052  m_pBlock(VMA_NULL),
6053  m_HasNonMovableAllocations(true),
6054  m_Allocations(pAllocationCallbacks)
6055  {
6056  }
6057 
6058  void CalcHasNonMovableAllocations()
6059  {
6060  const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
6061  const size_t defragmentAllocCount = m_Allocations.size();
6062  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
6063  }
6064 
6065  void SortAllocationsBySizeDescending()
6066  {
6067  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
6068  }
6069 
6070  void SortAllocationsByOffsetDescending()
6071  {
6072  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoOffsetGreater());
6073  }
6074  };
6075 
6076  struct BlockPointerLess
6077  {
6078  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
6079  {
6080  return pLhsBlockInfo->m_pBlock < pRhsBlock;
6081  }
6082  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
6083  {
6084  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
6085  }
6086  };
6087 
6088  // 1. Blocks with some non-movable allocations go first.
6089  // 2. Blocks with smaller sumFreeSize go first.
6090  struct BlockInfoCompareMoveDestination
6091  {
6092  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
6093  {
6094  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
6095  {
6096  return true;
6097  }
6098  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
6099  {
6100  return false;
6101  }
6102  if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
6103  {
6104  return true;
6105  }
6106  return false;
6107  }
6108  };
6109 
6110  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
6111  BlockInfoVector m_Blocks;
6112 
6113  VkResult DefragmentRound(
6114  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6115  VkDeviceSize maxBytesToMove,
6116  uint32_t maxAllocationsToMove);
6117 
6118  size_t CalcBlocksWithNonMovableCount() const;
6119 
6120  static bool MoveMakesSense(
6121  size_t dstBlockIndex, VkDeviceSize dstOffset,
6122  size_t srcBlockIndex, VkDeviceSize srcOffset);
6123 };
6124 
6125 class VmaDefragmentationAlgorithm_Fast : public VmaDefragmentationAlgorithm
6126 {
6127  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Fast)
6128 public:
6129  VmaDefragmentationAlgorithm_Fast(
6130  VmaAllocator hAllocator,
6131  VmaBlockVector* pBlockVector,
6132  uint32_t currentFrameIndex,
6133  bool overlappingMoveSupported);
6134  virtual ~VmaDefragmentationAlgorithm_Fast();
6135 
6136  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) { ++m_AllocationCount; }
6137  virtual void AddAll() { m_AllAllocations = true; }
6138 
6139  virtual VkResult Defragment(
6140  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6141  VkDeviceSize maxBytesToMove,
6142  uint32_t maxAllocationsToMove);
6143 
6144  virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
6145  virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
6146 
6147 private:
6148  struct BlockInfo
6149  {
6150  size_t origBlockIndex;
6151  };
6152 
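 // Small fixed-size cache (MAX_COUNT entries) of free ranges observed in
 // destination blocks during fast defragmentation. Register() records a range
 // if it is big enough; Fetch() returns a range that can hold the given
 // size/alignment, preferring the candidate that leaves the most space free.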
6153  class FreeSpaceDatabase
6154  {
6155  public:
6156  FreeSpaceDatabase()
6157  {
6158  FreeSpace s = {};
6159  s.blockInfoIndex = SIZE_MAX;
6160  for(size_t i = 0; i < MAX_COUNT; ++i)
6161  {
6162  m_FreeSpaces[i] = s;
6163  }
6164  }
6165 
6166  void Register(size_t blockInfoIndex, VkDeviceSize offset, VkDeviceSize size)
6167  {
6168  if(size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
6169  {
6170  return;
6171  }
6172 
6173  // Find the first invalid (empty) entry, or else the smallest entry to replace.
6174  size_t bestIndex = SIZE_MAX;
6175  for(size_t i = 0; i < MAX_COUNT; ++i)
6176  {
6177  // Empty structure.
6178  if(m_FreeSpaces[i].blockInfoIndex == SIZE_MAX)
6179  {
6180  bestIndex = i;
6181  break;
6182  }
6183  if(m_FreeSpaces[i].size < size &&
6184  (bestIndex == SIZE_MAX || m_FreeSpaces[bestIndex].size > m_FreeSpaces[i].size))
6185  {
6186  bestIndex = i;
6187  }
6188  }
6189 
6190  if(bestIndex != SIZE_MAX)
6191  {
6192  m_FreeSpaces[bestIndex].blockInfoIndex = blockInfoIndex;
6193  m_FreeSpaces[bestIndex].offset = offset;
6194  m_FreeSpaces[bestIndex].size = size;
6195  }
6196  }
6197 
6198  bool Fetch(VkDeviceSize alignment, VkDeviceSize size,
6199  size_t& outBlockInfoIndex, VkDeviceSize& outDstOffset)
6200  {
6201  size_t bestIndex = SIZE_MAX;
6202  VkDeviceSize bestFreeSpaceAfter = 0;
6203  for(size_t i = 0; i < MAX_COUNT; ++i)
6204  {
6205  // Structure is valid.
6206  if(m_FreeSpaces[i].blockInfoIndex != SIZE_MAX)
6207  {
6208  const VkDeviceSize dstOffset = VmaAlignUp(m_FreeSpaces[i].offset, alignment);
6209  // Allocation fits into this structure.
6210  if(dstOffset + size <= m_FreeSpaces[i].offset + m_FreeSpaces[i].size)
6211  {
6212  const VkDeviceSize freeSpaceAfter = (m_FreeSpaces[i].offset + m_FreeSpaces[i].size) -
6213  (dstOffset + size);
6214  if(bestIndex == SIZE_MAX || freeSpaceAfter > bestFreeSpaceAfter)
6215  {
6216  bestIndex = i;
6217  bestFreeSpaceAfter = freeSpaceAfter;
6218  }
6219  }
6220  }
6221  }
6222 
6223  if(bestIndex != SIZE_MAX)
6224  {
6225  outBlockInfoIndex = m_FreeSpaces[bestIndex].blockInfoIndex;
6226  outDstOffset = VmaAlignUp(m_FreeSpaces[bestIndex].offset, alignment);
6227 
6228  if(bestFreeSpaceAfter >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
6229  {
6230  // Leave this structure for remaining empty space.
6231  const VkDeviceSize alignmentPlusSize = (outDstOffset - m_FreeSpaces[bestIndex].offset) + size;
6232  m_FreeSpaces[bestIndex].offset += alignmentPlusSize;
6233  m_FreeSpaces[bestIndex].size -= alignmentPlusSize;
6234  }
6235  else
6236  {
6237  // This structure becomes invalid.
6238  m_FreeSpaces[bestIndex].blockInfoIndex = SIZE_MAX;
6239  }
6240 
6241  return true;
6242  }
6243 
6244  return false;
6245  }
6246 
6247  private:
6248  static const size_t MAX_COUNT = 4;
6249 
6250  struct FreeSpace
6251  {
6252  size_t blockInfoIndex; // SIZE_MAX means this structure is invalid.
6253  VkDeviceSize offset;
6254  VkDeviceSize size;
6255  } m_FreeSpaces[MAX_COUNT];
6256  };
6257 
6258  const bool m_OverlappingMoveSupported;
6259 
6260  uint32_t m_AllocationCount;
6261  bool m_AllAllocations;
6262 
6263  VkDeviceSize m_BytesMoved;
6264  uint32_t m_AllocationsMoved;
6265 
6266  VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> > m_BlockInfos;
6267 
6268  void PreprocessMetadata();
6269  void PostprocessMetadata();
6270  void InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc);
6271 };
6272 
6273 struct VmaBlockDefragmentationContext
6274 {
6275  enum BLOCK_FLAG
6276  {
6277  BLOCK_FLAG_USED = 0x00000001,
6278  };
6279  uint32_t flags;
6280  VkBuffer hBuffer;
6281 
6282  VmaBlockDefragmentationContext() :
6283  flags(0),
6284  hBuffer(VK_NULL_HANDLE)
6285  {
6286  }
6287 };
6288 
6289 class VmaBlockVectorDefragmentationContext
6290 {
6291  VMA_CLASS_NO_COPY(VmaBlockVectorDefragmentationContext)
6292 public:
6293  VkResult res;
6294  bool mutexLocked;
6295  VmaVector< VmaBlockDefragmentationContext, VmaStlAllocator<VmaBlockDefragmentationContext> > blockContexts;
6296 
6297  VmaBlockVectorDefragmentationContext(
6298  VmaAllocator hAllocator,
6299  VmaPool hCustomPool, // Optional.
6300  VmaBlockVector* pBlockVector,
6301  uint32_t currFrameIndex,
6302  uint32_t flags);
6303  ~VmaBlockVectorDefragmentationContext();
6304 
6305  VmaPool GetCustomPool() const { return m_hCustomPool; }
6306  VmaBlockVector* GetBlockVector() const { return m_pBlockVector; }
6307  VmaDefragmentationAlgorithm* GetAlgorithm() const { return m_pAlgorithm; }
6308 
6309  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
6310  void AddAll() { m_AllAllocations = true; }
6311 
6312  void Begin(bool overlappingMoveSupported);
6313 
6314 private:
6315  const VmaAllocator m_hAllocator;
6316  // Null if not from custom pool.
6317  const VmaPool m_hCustomPool;
6318  // Redundant, stored here for convenience so it doesn't have to be fetched from m_hCustomPool->m_BlockVector or m_hAllocator->m_pBlockVectors.
6319  VmaBlockVector* const m_pBlockVector;
6320  const uint32_t m_CurrFrameIndex;
6321  const uint32_t m_AlgorithmFlags;
6322  // Owner of this object.
6323  VmaDefragmentationAlgorithm* m_pAlgorithm;
6324 
6325  struct AllocInfo
6326  {
6327  VmaAllocation hAlloc;
6328  VkBool32* pChanged;
6329  };
6330  // Used between constructor and Begin.
6331  VmaVector< AllocInfo, VmaStlAllocator<AllocInfo> > m_Allocations;
6332  bool m_AllAllocations;
6333 };
6334 
6335 struct VmaDefragmentationContext_T
6336 {
6337 private:
6338  VMA_CLASS_NO_COPY(VmaDefragmentationContext_T)
6339 public:
6340  VmaDefragmentationContext_T(
6341  VmaAllocator hAllocator,
6342  uint32_t currFrameIndex,
6343  uint32_t flags,
6344  VmaDefragmentationStats* pStats);
6345  ~VmaDefragmentationContext_T();
6346 
6347  void AddPools(uint32_t poolCount, VmaPool* pPools);
6348  void AddAllocations(
6349  uint32_t allocationCount,
6350  VmaAllocation* pAllocations,
6351  VkBool32* pAllocationsChanged);
6352 
6353  /*
6354  Returns:
6355  - `VK_SUCCESS` if succeeded and object can be destroyed immediately.
6356  - `VK_NOT_READY` if succeeded but the object must remain alive until vmaDefragmentationEnd().
6357  - Negative value if an error occurred and the object can be destroyed immediately.
6358  */
6359  VkResult Defragment(
6360  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
6361  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
6362  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats);
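 /* A hedged usage sketch at the level of the public API that wraps this
 object (local names are illustrative):

     VmaDefragmentationContext defragCtx = VK_NULL_HANDLE;
     VkResult res = vmaDefragmentationBegin(allocator, &info, &stats, &defragCtx);
     if(res == VK_NOT_READY)
     {
         // GPU moves were recorded into info.commandBuffer - submit it and wait.
     }
     vmaDefragmentationEnd(allocator, defragCtx); // Destroys the context.
 */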
6363 
6364 private:
6365  const VmaAllocator m_hAllocator;
6366  const uint32_t m_CurrFrameIndex;
6367  const uint32_t m_Flags;
6368  VmaDefragmentationStats* const m_pStats;
6369  // Owner of these objects.
6370  VmaBlockVectorDefragmentationContext* m_DefaultPoolContexts[VK_MAX_MEMORY_TYPES];
6371  // Owner of these objects.
6372  VmaVector< VmaBlockVectorDefragmentationContext*, VmaStlAllocator<VmaBlockVectorDefragmentationContext*> > m_CustomPoolContexts;
6373 };
6374 
6375 #if VMA_RECORDING_ENABLED
6376 
6377 class VmaRecorder
6378 {
6379 public:
6380  VmaRecorder();
6381  VkResult Init(const VmaRecordSettings& settings, bool useMutex);
6382  void WriteConfiguration(
6383  const VkPhysicalDeviceProperties& devProps,
6384  const VkPhysicalDeviceMemoryProperties& memProps,
6385  bool dedicatedAllocationExtensionEnabled);
6386  ~VmaRecorder();
6387 
6388  void RecordCreateAllocator(uint32_t frameIndex);
6389  void RecordDestroyAllocator(uint32_t frameIndex);
6390  void RecordCreatePool(uint32_t frameIndex,
6391  const VmaPoolCreateInfo& createInfo,
6392  VmaPool pool);
6393  void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
6394  void RecordAllocateMemory(uint32_t frameIndex,
6395  const VkMemoryRequirements& vkMemReq,
6396  const VmaAllocationCreateInfo& createInfo,
6397  VmaAllocation allocation);
6398  void RecordAllocateMemoryPages(uint32_t frameIndex,
6399  const VkMemoryRequirements& vkMemReq,
6400  const VmaAllocationCreateInfo& createInfo,
6401  uint64_t allocationCount,
6402  const VmaAllocation* pAllocations);
6403  void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
6404  const VkMemoryRequirements& vkMemReq,
6405  bool requiresDedicatedAllocation,
6406  bool prefersDedicatedAllocation,
6407  const VmaAllocationCreateInfo& createInfo,
6408  VmaAllocation allocation);
6409  void RecordAllocateMemoryForImage(uint32_t frameIndex,
6410  const VkMemoryRequirements& vkMemReq,
6411  bool requiresDedicatedAllocation,
6412  bool prefersDedicatedAllocation,
6413  const VmaAllocationCreateInfo& createInfo,
6414  VmaAllocation allocation);
6415  void RecordFreeMemory(uint32_t frameIndex,
6416  VmaAllocation allocation);
6417  void RecordFreeMemoryPages(uint32_t frameIndex,
6418  uint64_t allocationCount,
6419  const VmaAllocation* pAllocations);
6420  void RecordResizeAllocation(
6421  uint32_t frameIndex,
6422  VmaAllocation allocation,
6423  VkDeviceSize newSize);
6424  void RecordSetAllocationUserData(uint32_t frameIndex,
6425  VmaAllocation allocation,
6426  const void* pUserData);
6427  void RecordCreateLostAllocation(uint32_t frameIndex,
6428  VmaAllocation allocation);
6429  void RecordMapMemory(uint32_t frameIndex,
6430  VmaAllocation allocation);
6431  void RecordUnmapMemory(uint32_t frameIndex,
6432  VmaAllocation allocation);
6433  void RecordFlushAllocation(uint32_t frameIndex,
6434  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
6435  void RecordInvalidateAllocation(uint32_t frameIndex,
6436  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
6437  void RecordCreateBuffer(uint32_t frameIndex,
6438  const VkBufferCreateInfo& bufCreateInfo,
6439  const VmaAllocationCreateInfo& allocCreateInfo,
6440  VmaAllocation allocation);
6441  void RecordCreateImage(uint32_t frameIndex,
6442  const VkImageCreateInfo& imageCreateInfo,
6443  const VmaAllocationCreateInfo& allocCreateInfo,
6444  VmaAllocation allocation);
6445  void RecordDestroyBuffer(uint32_t frameIndex,
6446  VmaAllocation allocation);
6447  void RecordDestroyImage(uint32_t frameIndex,
6448  VmaAllocation allocation);
6449  void RecordTouchAllocation(uint32_t frameIndex,
6450  VmaAllocation allocation);
6451  void RecordGetAllocationInfo(uint32_t frameIndex,
6452  VmaAllocation allocation);
6453  void RecordMakePoolAllocationsLost(uint32_t frameIndex,
6454  VmaPool pool);
6455  void RecordDefragmentationBegin(uint32_t frameIndex,
6456  const VmaDefragmentationInfo2& info,
6457  VmaDefragmentationContext ctx);
6458  void RecordDefragmentationEnd(uint32_t frameIndex,
6459  VmaDefragmentationContext ctx);
6460 
6461 private:
6462  struct CallParams
6463  {
6464  uint32_t threadId;
6465  double time;
6466  };
6467 
6468  class UserDataString
6469  {
6470  public:
6471  UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
6472  const char* GetString() const { return m_Str; }
6473 
6474  private:
6475  char m_PtrStr[17];
6476  const char* m_Str;
6477  };
6478 
6479  bool m_UseMutex;
6480  VmaRecordFlags m_Flags;
6481  FILE* m_File;
6482  VMA_MUTEX m_FileMutex;
6483  int64_t m_Freq;
6484  int64_t m_StartCounter;
6485 
6486  void GetBasicParams(CallParams& outParams);
6487 
6488  // T must be a pointer type, e.g. VmaAllocation, VmaPool.
6489  template<typename T>
6490  void PrintPointerList(uint64_t count, const T* pItems)
6491  {
6492  if(count)
6493  {
6494  fprintf(m_File, "%p", pItems[0]);
6495  for(uint64_t i = 1; i < count; ++i)
6496  {
6497  fprintf(m_File, " %p", pItems[i]);
6498  }
6499  }
6500  }
6501 
6502  void PrintPointerList(uint64_t count, const VmaAllocation* pItems);
6503  void Flush();
6504 };
6505 
6506 #endif // #if VMA_RECORDING_ENABLED
6507 
6508 // Main allocator object.
6509 struct VmaAllocator_T
6510 {
6511  VMA_CLASS_NO_COPY(VmaAllocator_T)
6512 public:
6513  bool m_UseMutex;
6514  bool m_UseKhrDedicatedAllocation;
6515  VkDevice m_hDevice;
6516  bool m_AllocationCallbacksSpecified;
6517  VkAllocationCallbacks m_AllocationCallbacks;
6518  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
6519 
6520  // Number of bytes free out of limit, or VK_WHOLE_SIZE if no limit for that heap.
6521  VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
6522  VMA_MUTEX m_HeapSizeLimitMutex;
6523 
6524  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
6525  VkPhysicalDeviceMemoryProperties m_MemProps;
6526 
6527  // Default pools.
6528  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
6529 
6530  // Each vector is sorted by memory (handle value).
6531  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
6532  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
6533  VMA_RW_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
6534 
6535  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
6536  VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
6537  ~VmaAllocator_T();
6538 
6539  const VkAllocationCallbacks* GetAllocationCallbacks() const
6540  {
6541  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
6542  }
6543  const VmaVulkanFunctions& GetVulkanFunctions() const
6544  {
6545  return m_VulkanFunctions;
6546  }
6547 
6548  VkDeviceSize GetBufferImageGranularity() const
6549  {
6550  return VMA_MAX(
6551  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
6552  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
6553  }
6554 
6555  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
6556  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
6557 
6558  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
6559  {
6560  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
6561  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
6562  }
6563  // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
6564  bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
6565  {
6566  return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
6567  VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
6568  }
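 // E.g. a HOST_VISIBLE type without HOST_COHERENT returns true here, so its
 // allocations are aligned to nonCoherentAtomSize (see GetMemoryTypeMinAlignment
 // below) and require explicit flush/invalidate, handled by
 // FlushOrInvalidateAllocation() further down.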
6569  // Minimum alignment for all allocations in specific memory type.
6570  VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
6571  {
6572  return IsMemoryTypeNonCoherent(memTypeIndex) ?
6573  VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
6574  (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
6575  }
6576 
6577  bool IsIntegratedGpu() const
6578  {
6579  return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
6580  }
6581 
6582 #if VMA_RECORDING_ENABLED
6583  VmaRecorder* GetRecorder() const { return m_pRecorder; }
6584 #endif
6585 
6586  void GetBufferMemoryRequirements(
6587  VkBuffer hBuffer,
6588  VkMemoryRequirements& memReq,
6589  bool& requiresDedicatedAllocation,
6590  bool& prefersDedicatedAllocation) const;
6591  void GetImageMemoryRequirements(
6592  VkImage hImage,
6593  VkMemoryRequirements& memReq,
6594  bool& requiresDedicatedAllocation,
6595  bool& prefersDedicatedAllocation) const;
6596 
6597  // Main allocation function.
6598  VkResult AllocateMemory(
6599  const VkMemoryRequirements& vkMemReq,
6600  bool requiresDedicatedAllocation,
6601  bool prefersDedicatedAllocation,
6602  VkBuffer dedicatedBuffer,
6603  VkImage dedicatedImage,
6604  const VmaAllocationCreateInfo& createInfo,
6605  VmaSuballocationType suballocType,
6606  size_t allocationCount,
6607  VmaAllocation* pAllocations);
6608 
6609  // Main deallocation function.
6610  void FreeMemory(
6611  size_t allocationCount,
6612  const VmaAllocation* pAllocations);
6613 
6614  VkResult ResizeAllocation(
6615  const VmaAllocation alloc,
6616  VkDeviceSize newSize);
6617 
6618  void CalculateStats(VmaStats* pStats);
6619 
6620 #if VMA_STATS_STRING_ENABLED
6621  void PrintDetailedMap(class VmaJsonWriter& json);
6622 #endif
6623 
6624  VkResult DefragmentationBegin(
6625  const VmaDefragmentationInfo2& info,
6626  VmaDefragmentationStats* pStats,
6627  VmaDefragmentationContext* pContext);
6628  VkResult DefragmentationEnd(
6629  VmaDefragmentationContext context);
6630 
6631  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
6632  bool TouchAllocation(VmaAllocation hAllocation);
6633 
6634  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
6635  void DestroyPool(VmaPool pool);
6636  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
6637 
6638  void SetCurrentFrameIndex(uint32_t frameIndex);
6639  uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }
6640 
6641  void MakePoolAllocationsLost(
6642  VmaPool hPool,
6643  size_t* pLostAllocationCount);
6644  VkResult CheckPoolCorruption(VmaPool hPool);
6645  VkResult CheckCorruption(uint32_t memoryTypeBits);
6646 
6647  void CreateLostAllocation(VmaAllocation* pAllocation);
6648 
6649  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
6650  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
6651 
6652  VkResult Map(VmaAllocation hAllocation, void** ppData);
6653  void Unmap(VmaAllocation hAllocation);
6654 
6655  VkResult BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer);
6656  VkResult BindImageMemory(VmaAllocation hAllocation, VkImage hImage);
6657 
6658  void FlushOrInvalidateAllocation(
6659  VmaAllocation hAllocation,
6660  VkDeviceSize offset, VkDeviceSize size,
6661  VMA_CACHE_OPERATION op);
6662 
6663  void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);
6664 
6665 private:
6666  VkDeviceSize m_PreferredLargeHeapBlockSize;
6667 
6668  VkPhysicalDevice m_PhysicalDevice;
6669  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
6670 
6671  VMA_RW_MUTEX m_PoolsMutex;
6672  // Protected by m_PoolsMutex. Sorted by pointer value.
6673  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
6674  uint32_t m_NextPoolId;
6675 
6676  VmaVulkanFunctions m_VulkanFunctions;
6677 
6678 #if VMA_RECORDING_ENABLED
6679  VmaRecorder* m_pRecorder;
6680 #endif
6681 
6682  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
6683 
6684  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
6685 
6686  VkResult AllocateMemoryOfType(
6687  VkDeviceSize size,
6688  VkDeviceSize alignment,
6689  bool dedicatedAllocation,
6690  VkBuffer dedicatedBuffer,
6691  VkImage dedicatedImage,
6692  const VmaAllocationCreateInfo& createInfo,
6693  uint32_t memTypeIndex,
6694  VmaSuballocationType suballocType,
6695  size_t allocationCount,
6696  VmaAllocation* pAllocations);
6697 
6698  // Helper function only to be used inside AllocateDedicatedMemory.
6699  VkResult AllocateDedicatedMemoryPage(
6700  VkDeviceSize size,
6701  VmaSuballocationType suballocType,
6702  uint32_t memTypeIndex,
6703  const VkMemoryAllocateInfo& allocInfo,
6704  bool map,
6705  bool isUserDataString,
6706  void* pUserData,
6707  VmaAllocation* pAllocation);
6708 
6709  // Allocates and registers new VkDeviceMemory specifically for dedicated allocations.
6710  VkResult AllocateDedicatedMemory(
6711  VkDeviceSize size,
6712  VmaSuballocationType suballocType,
6713  uint32_t memTypeIndex,
6714  bool map,
6715  bool isUserDataString,
6716  void* pUserData,
6717  VkBuffer dedicatedBuffer,
6718  VkImage dedicatedImage,
6719  size_t allocationCount,
6720  VmaAllocation* pAllocations);
6721 
6722  // Frees allocation made as dedicated memory: unregisters it and destroys the underlying VkDeviceMemory.
6723  void FreeDedicatedMemory(VmaAllocation allocation);
6724 };
6725 
6726 ////////////////////////////////////////////////////////////////////////////////
6727 // Memory allocation #2 after VmaAllocator_T definition
6728 
6729 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
6730 {
6731  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
6732 }
6733 
6734 static void VmaFree(VmaAllocator hAllocator, void* ptr)
6735 {
6736  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
6737 }
6738 
6739 template<typename T>
6740 static T* VmaAllocate(VmaAllocator hAllocator)
6741 {
6742  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
6743 }
6744 
6745 template<typename T>
6746 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
6747 {
6748  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
6749 }
6750 
6751 template<typename T>
6752 static void vma_delete(VmaAllocator hAllocator, T* ptr)
6753 {
6754  if(ptr != VMA_NULL)
6755  {
6756  ptr->~T();
6757  VmaFree(hAllocator, ptr);
6758  }
6759 }
6760 
6761 template<typename T>
6762 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
6763 {
6764  if(ptr != VMA_NULL)
6765  {
6766  for(size_t i = count; i--; )
6767  ptr[i].~T();
6768  VmaFree(hAllocator, ptr);
6769  }
6770 }
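// Note: vma_delete_array destroys elements in reverse order of construction
// (the `i = count; i--;` loop), mirroring the guarantee of delete[].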
6771 
6772 ////////////////////////////////////////////////////////////////////////////////
6773 // VmaStringBuilder
6774 
6775 #if VMA_STATS_STRING_ENABLED
6776 
6777 class VmaStringBuilder
6778 {
6779 public:
6780  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
6781  size_t GetLength() const { return m_Data.size(); }
6782  const char* GetData() const { return m_Data.data(); }
6783 
6784  void Add(char ch) { m_Data.push_back(ch); }
6785  void Add(const char* pStr);
6786  void AddNewLine() { Add('\n'); }
6787  void AddNumber(uint32_t num);
6788  void AddNumber(uint64_t num);
6789  void AddPointer(const void* ptr);
6790 
6791 private:
6792  VmaVector< char, VmaStlAllocator<char> > m_Data;
6793 };
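// The fixed buffers used below are sized for the worst-case decimal rendering
// plus a terminating NUL: up to 10 digits for uint32_t (buf[11]) and up to
// 20 digits for uint64_t (buf[21]); the pointer buffer reuses the 21-byte size.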
6794 
6795 void VmaStringBuilder::Add(const char* pStr)
6796 {
6797  const size_t strLen = strlen(pStr);
6798  if(strLen > 0)
6799  {
6800  const size_t oldCount = m_Data.size();
6801  m_Data.resize(oldCount + strLen);
6802  memcpy(m_Data.data() + oldCount, pStr, strLen);
6803  }
6804 }
6805 
6806 void VmaStringBuilder::AddNumber(uint32_t num)
6807 {
6808  char buf[11];
6809  VmaUint32ToStr(buf, sizeof(buf), num);
6810  Add(buf);
6811 }
6812 
6813 void VmaStringBuilder::AddNumber(uint64_t num)
6814 {
6815  char buf[21];
6816  VmaUint64ToStr(buf, sizeof(buf), num);
6817  Add(buf);
6818 }
6819 
6820 void VmaStringBuilder::AddPointer(const void* ptr)
6821 {
6822  char buf[21];
6823  VmaPtrToStr(buf, sizeof(buf), ptr);
6824  Add(buf);
6825 }
6826 
6827 #endif // #if VMA_STATS_STRING_ENABLED
6828 
6829 ////////////////////////////////////////////////////////////////////////////////
6830 // VmaJsonWriter
6831 
6832 #if VMA_STATS_STRING_ENABLED
6833 
6834 class VmaJsonWriter
6835 {
6836  VMA_CLASS_NO_COPY(VmaJsonWriter)
6837 public:
6838  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
6839  ~VmaJsonWriter();
6840 
6841  void BeginObject(bool singleLine = false);
6842  void EndObject();
6843 
6844  void BeginArray(bool singleLine = false);
6845  void EndArray();
6846 
6847  void WriteString(const char* pStr);
6848  void BeginString(const char* pStr = VMA_NULL);
6849  void ContinueString(const char* pStr);
6850  void ContinueString(uint32_t n);
6851  void ContinueString(uint64_t n);
6852  void ContinueString_Pointer(const void* ptr);
6853  void EndString(const char* pStr = VMA_NULL);
6854 
6855  void WriteNumber(uint32_t n);
6856  void WriteNumber(uint64_t n);
6857  void WriteBool(bool b);
6858  void WriteNull();
6859 
6860 private:
6861  static const char* const INDENT;
6862 
6863  enum COLLECTION_TYPE
6864  {
6865  COLLECTION_TYPE_OBJECT,
6866  COLLECTION_TYPE_ARRAY,
6867  };
6868  struct StackItem
6869  {
6870  COLLECTION_TYPE type;
6871  uint32_t valueCount;
6872  bool singleLineMode;
6873  };
6874 
6875  VmaStringBuilder& m_SB;
6876  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
6877  bool m_InsideString;
6878 
6879  void BeginValue(bool isString);
6880  void WriteIndent(bool oneLess = false);
6881 };
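// Usage sketch (hypothetical internal caller; `hAllocator` stands for any
// valid VmaAllocator and is not part of this class):
//
//   VmaStringBuilder sb(hAllocator);
//   {
//       VmaJsonWriter json(hAllocator->GetAllocationCallbacks(), sb);
//       json.BeginObject();
//       json.WriteString("Answer"); // key - must be a string
//       json.WriteNumber(42u);      // value
//       json.EndObject();
//   } // Destructor asserts that the stack is empty and no string is open.
//   // sb.GetData() now holds roughly: {\n  "Answer": 42\n}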
6882 
6883 const char* const VmaJsonWriter::INDENT = "  ";
6884 
6885 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
6886  m_SB(sb),
6887  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
6888  m_InsideString(false)
6889 {
6890 }
6891 
6892 VmaJsonWriter::~VmaJsonWriter()
6893 {
6894  VMA_ASSERT(!m_InsideString);
6895  VMA_ASSERT(m_Stack.empty());
6896 }
6897 
6898 void VmaJsonWriter::BeginObject(bool singleLine)
6899 {
6900  VMA_ASSERT(!m_InsideString);
6901 
6902  BeginValue(false);
6903  m_SB.Add('{');
6904 
6905  StackItem item;
6906  item.type = COLLECTION_TYPE_OBJECT;
6907  item.valueCount = 0;
6908  item.singleLineMode = singleLine;
6909  m_Stack.push_back(item);
6910 }
6911 
6912 void VmaJsonWriter::EndObject()
6913 {
6914  VMA_ASSERT(!m_InsideString);
6915 
6916  WriteIndent(true);
6917  m_SB.Add('}');
6918 
6919  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
6920  m_Stack.pop_back();
6921 }
6922 
6923 void VmaJsonWriter::BeginArray(bool singleLine)
6924 {
6925  VMA_ASSERT(!m_InsideString);
6926 
6927  BeginValue(false);
6928  m_SB.Add('[');
6929 
6930  StackItem item;
6931  item.type = COLLECTION_TYPE_ARRAY;
6932  item.valueCount = 0;
6933  item.singleLineMode = singleLine;
6934  m_Stack.push_back(item);
6935 }
6936 
6937 void VmaJsonWriter::EndArray()
6938 {
6939  VMA_ASSERT(!m_InsideString);
6940 
6941  WriteIndent(true);
6942  m_SB.Add(']');
6943 
6944  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
6945  m_Stack.pop_back();
6946 }
6947 
6948 void VmaJsonWriter::WriteString(const char* pStr)
6949 {
6950  BeginString(pStr);
6951  EndString();
6952 }
6953 
6954 void VmaJsonWriter::BeginString(const char* pStr)
6955 {
6956  VMA_ASSERT(!m_InsideString);
6957 
6958  BeginValue(true);
6959  m_SB.Add('"');
6960  m_InsideString = true;
6961  if(pStr != VMA_NULL && pStr[0] != '\0')
6962  {
6963  ContinueString(pStr);
6964  }
6965 }
6966 
6967 void VmaJsonWriter::ContinueString(const char* pStr)
6968 {
6969  VMA_ASSERT(m_InsideString);
6970 
6971  const size_t strLen = strlen(pStr);
6972  for(size_t i = 0; i < strLen; ++i)
6973  {
6974  char ch = pStr[i];
6975  if(ch == '\\')
6976  {
6977  m_SB.Add("\\\\");
6978  }
6979  else if(ch == '"')
6980  {
6981  m_SB.Add("\\\"");
6982  }
6983  else if(ch >= 32)
6984  {
6985  m_SB.Add(ch);
6986  }
6987  else switch(ch)
6988  {
6989  case '\b':
6990  m_SB.Add("\\b");
6991  break;
6992  case '\f':
6993  m_SB.Add("\\f");
6994  break;
6995  case '\n':
6996  m_SB.Add("\\n");
6997  break;
6998  case '\r':
6999  m_SB.Add("\\r");
7000  break;
7001  case '\t':
7002  m_SB.Add("\\t");
7003  break;
7004  default:
7005  VMA_ASSERT(0 && "Character not currently supported.");
7006  break;
7007  }
7008  }
7009 }
7010 
7011 void VmaJsonWriter::ContinueString(uint32_t n)
7012 {
7013  VMA_ASSERT(m_InsideString);
7014  m_SB.AddNumber(n);
7015 }
7016 
7017 void VmaJsonWriter::ContinueString(uint64_t n)
7018 {
7019  VMA_ASSERT(m_InsideString);
7020  m_SB.AddNumber(n);
7021 }
7022 
7023 void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
7024 {
7025  VMA_ASSERT(m_InsideString);
7026  m_SB.AddPointer(ptr);
7027 }
7028 
7029 void VmaJsonWriter::EndString(const char* pStr)
7030 {
7031  VMA_ASSERT(m_InsideString);
7032  if(pStr != VMA_NULL && pStr[0] != '\0')
7033  {
7034  ContinueString(pStr);
7035  }
7036  m_SB.Add('"');
7037  m_InsideString = false;
7038 }
7039 
7040 void VmaJsonWriter::WriteNumber(uint32_t n)
7041 {
7042  VMA_ASSERT(!m_InsideString);
7043  BeginValue(false);
7044  m_SB.AddNumber(n);
7045 }
7046 
7047 void VmaJsonWriter::WriteNumber(uint64_t n)
7048 {
7049  VMA_ASSERT(!m_InsideString);
7050  BeginValue(false);
7051  m_SB.AddNumber(n);
7052 }
7053 
7054 void VmaJsonWriter::WriteBool(bool b)
7055 {
7056  VMA_ASSERT(!m_InsideString);
7057  BeginValue(false);
7058  m_SB.Add(b ? "true" : "false");
7059 }
7060 
7061 void VmaJsonWriter::WriteNull()
7062 {
7063  VMA_ASSERT(!m_InsideString);
7064  BeginValue(false);
7065  m_SB.Add("null");
7066 }
7067 
7068 void VmaJsonWriter::BeginValue(bool isString)
7069 {
7070  if(!m_Stack.empty())
7071  {
7072  StackItem& currItem = m_Stack.back();
7073  if(currItem.type == COLLECTION_TYPE_OBJECT &&
7074  currItem.valueCount % 2 == 0)
7075  {
7076  VMA_ASSERT(isString);
7077  }
7078 
7079  if(currItem.type == COLLECTION_TYPE_OBJECT &&
7080  currItem.valueCount % 2 != 0)
7081  {
7082  m_SB.Add(": ");
7083  }
7084  else if(currItem.valueCount > 0)
7085  {
7086  m_SB.Add(", ");
7087  WriteIndent();
7088  }
7089  else
7090  {
7091  WriteIndent();
7092  }
7093  ++currItem.valueCount;
7094  }
7095 }
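// How BeginValue distinguishes keys from values: inside an object, items with
// an even valueCount are keys (hence the VMA_ASSERT(isString)) and are
// preceded by a newline and indentation, while items with an odd valueCount
// are values and are preceded by ": ". This parity trick is what lets
// WriteString/WriteNumber pairs form `"key": value` entries without a
// dedicated key/value API.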
7096 
7097 void VmaJsonWriter::WriteIndent(bool oneLess)
7098 {
7099  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
7100  {
7101  m_SB.AddNewLine();
7102 
7103  size_t count = m_Stack.size();
7104  if(count > 0 && oneLess)
7105  {
7106  --count;
7107  }
7108  for(size_t i = 0; i < count; ++i)
7109  {
7110  m_SB.Add(INDENT);
7111  }
7112  }
7113 }
7114 
7115 #endif // #if VMA_STATS_STRING_ENABLED
7116 
7117 ////////////////////////////////////////////////////////////////////////////////
7118 
7119 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
7120 {
7121  if(IsUserDataString())
7122  {
7123  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
7124 
7125  FreeUserDataString(hAllocator);
7126 
7127  if(pUserData != VMA_NULL)
7128  {
7129  const char* const newStrSrc = (char*)pUserData;
7130  const size_t newStrLen = strlen(newStrSrc);
7131  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
7132  memcpy(newStrDst, newStrSrc, newStrLen + 1);
7133  m_pUserData = newStrDst;
7134  }
7135  }
7136  else
7137  {
7138  m_pUserData = pUserData;
7139  }
7140 }
7141 
7142 void VmaAllocation_T::ChangeBlockAllocation(
7143  VmaAllocator hAllocator,
7144  VmaDeviceMemoryBlock* block,
7145  VkDeviceSize offset)
7146 {
7147  VMA_ASSERT(block != VMA_NULL);
7148  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
7149 
7150  // Move mapping reference counter from old block to new block.
7151  if(block != m_BlockAllocation.m_Block)
7152  {
7153  uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
7154  if(IsPersistentMap())
7155  ++mapRefCount;
7156  m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
7157  block->Map(hAllocator, mapRefCount, VMA_NULL);
7158  }
7159 
7160  m_BlockAllocation.m_Block = block;
7161  m_BlockAllocation.m_Offset = offset;
7162 }
7163 
7164 void VmaAllocation_T::ChangeSize(VkDeviceSize newSize)
7165 {
7166  VMA_ASSERT(newSize > 0);
7167  m_Size = newSize;
7168 }
7169 
7170 void VmaAllocation_T::ChangeOffset(VkDeviceSize newOffset)
7171 {
7172  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
7173  m_BlockAllocation.m_Offset = newOffset;
7174 }
7175 
7176 VkDeviceSize VmaAllocation_T::GetOffset() const
7177 {
7178  switch(m_Type)
7179  {
7180  case ALLOCATION_TYPE_BLOCK:
7181  return m_BlockAllocation.m_Offset;
7182  case ALLOCATION_TYPE_DEDICATED:
7183  return 0;
7184  default:
7185  VMA_ASSERT(0);
7186  return 0;
7187  }
7188 }
7189 
7190 VkDeviceMemory VmaAllocation_T::GetMemory() const
7191 {
7192  switch(m_Type)
7193  {
7194  case ALLOCATION_TYPE_BLOCK:
7195  return m_BlockAllocation.m_Block->GetDeviceMemory();
7196  case ALLOCATION_TYPE_DEDICATED:
7197  return m_DedicatedAllocation.m_hMemory;
7198  default:
7199  VMA_ASSERT(0);
7200  return VK_NULL_HANDLE;
7201  }
7202 }
7203 
7204 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
7205 {
7206  switch(m_Type)
7207  {
7208  case ALLOCATION_TYPE_BLOCK:
7209  return m_BlockAllocation.m_Block->GetMemoryTypeIndex();
7210  case ALLOCATION_TYPE_DEDICATED:
7211  return m_DedicatedAllocation.m_MemoryTypeIndex;
7212  default:
7213  VMA_ASSERT(0);
7214  return UINT32_MAX;
7215  }
7216 }
7217 
7218 void* VmaAllocation_T::GetMappedData() const
7219 {
7220  switch(m_Type)
7221  {
7222  case ALLOCATION_TYPE_BLOCK:
7223  if(m_MapCount != 0)
7224  {
7225  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
7226  VMA_ASSERT(pBlockData != VMA_NULL);
7227  return (char*)pBlockData + m_BlockAllocation.m_Offset;
7228  }
7229  else
7230  {
7231  return VMA_NULL;
7232  }
7233  break;
7234  case ALLOCATION_TYPE_DEDICATED:
7235  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
7236  return m_DedicatedAllocation.m_pMappedData;
7237  default:
7238  VMA_ASSERT(0);
7239  return VMA_NULL;
7240  }
7241 }
7242 
7243 bool VmaAllocation_T::CanBecomeLost() const
7244 {
7245  switch(m_Type)
7246  {
7247  case ALLOCATION_TYPE_BLOCK:
7248  return m_BlockAllocation.m_CanBecomeLost;
7249  case ALLOCATION_TYPE_DEDICATED:
7250  return false;
7251  default:
7252  VMA_ASSERT(0);
7253  return false;
7254  }
7255 }
7256 
7257 VmaPool VmaAllocation_T::GetPool() const
7258 {
7259  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
7260  return m_BlockAllocation.m_hPool;
7261 }
7262 
7263 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
7264 {
7265  VMA_ASSERT(CanBecomeLost());
7266 
7267  /*
7268  Warning: This is a carefully designed algorithm.
7269  Do not modify unless you really know what you're doing :)
7270  */
7271  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
7272  for(;;)
7273  {
7274  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
7275  {
7276  VMA_ASSERT(0);
7277  return false;
7278  }
7279  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
7280  {
7281  return false;
7282  }
7283  else // Last use time earlier than current time.
7284  {
7285  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
7286  {
7287  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
7288  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
7289  return true;
7290  }
7291  }
7292  }
7293 }
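// The loop above is a compare-exchange retry loop: if another thread updates
// the last-use frame index between the initial load and the CAS, the CAS
// fails, the frame-age test is re-run on the fresh value, and the allocation
// is only marked LOST once the test still passes - assuming
// CompareExchangeLastUseFrameIndex refreshes localLastUseFrameIndex on
// failure, as a compare_exchange wrapper typically does.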
7294 
7295 #if VMA_STATS_STRING_ENABLED
7296 
7297 // Entries correspond to values of enum VmaSuballocationType.
7298 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
7299  "FREE",
7300  "UNKNOWN",
7301  "BUFFER",
7302  "IMAGE_UNKNOWN",
7303  "IMAGE_LINEAR",
7304  "IMAGE_OPTIMAL",
7305 };
7306 
7307 void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
7308 {
7309  json.WriteString("Type");
7310  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);
7311 
7312  json.WriteString("Size");
7313  json.WriteNumber(m_Size);
7314 
7315  if(m_pUserData != VMA_NULL)
7316  {
7317  json.WriteString("UserData");
7318  if(IsUserDataString())
7319  {
7320  json.WriteString((const char*)m_pUserData);
7321  }
7322  else
7323  {
7324  json.BeginString();
7325  json.ContinueString_Pointer(m_pUserData);
7326  json.EndString();
7327  }
7328  }
7329 
7330  json.WriteString("CreationFrameIndex");
7331  json.WriteNumber(m_CreationFrameIndex);
7332 
7333  json.WriteString("LastUseFrameIndex");
7334  json.WriteNumber(GetLastUseFrameIndex());
7335 
7336  if(m_BufferImageUsage != 0)
7337  {
7338  json.WriteString("Usage");
7339  json.WriteNumber(m_BufferImageUsage);
7340  }
7341 }
7342 
7343 #endif
7344 
7345 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
7346 {
7347  VMA_ASSERT(IsUserDataString());
7348  if(m_pUserData != VMA_NULL)
7349  {
7350  char* const oldStr = (char*)m_pUserData;
7351  const size_t oldStrLen = strlen(oldStr);
7352  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
7353  m_pUserData = VMA_NULL;
7354  }
7355 }
7356 
7357 void VmaAllocation_T::BlockAllocMap()
7358 {
7359  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
7360 
7361  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
7362  {
7363  ++m_MapCount;
7364  }
7365  else
7366  {
7367  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
7368  }
7369 }
7370 
7371 void VmaAllocation_T::BlockAllocUnmap()
7372 {
7373  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
7374 
7375  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
7376  {
7377  --m_MapCount;
7378  }
7379  else
7380  {
7381  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
7382  }
7383 }
7384 
7385 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
7386 {
7387  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
7388 
7389  if(m_MapCount != 0)
7390  {
7391  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
7392  {
7393  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
7394  *ppData = m_DedicatedAllocation.m_pMappedData;
7395  ++m_MapCount;
7396  return VK_SUCCESS;
7397  }
7398  else
7399  {
7400  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
7401  return VK_ERROR_MEMORY_MAP_FAILED;
7402  }
7403  }
7404  else
7405  {
7406  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
7407  hAllocator->m_hDevice,
7408  m_DedicatedAllocation.m_hMemory,
7409  0, // offset
7410  VK_WHOLE_SIZE,
7411  0, // flags
7412  ppData);
7413  if(result == VK_SUCCESS)
7414  {
7415  m_DedicatedAllocation.m_pMappedData = *ppData;
7416  m_MapCount = 1;
7417  }
7418  return result;
7419  }
7420 }
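// m_MapCount packs a mapping reference count into the low 7 bits and keeps
// MAP_COUNT_FLAG_PERSISTENT_MAP as a separate flag bit, which is why both
// Map paths above cap the masked count at 0x7F before incrementing.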
7421 
7422 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
7423 {
7424  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
7425 
7426  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
7427  {
7428  --m_MapCount;
7429  if(m_MapCount == 0)
7430  {
7431  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
7432  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
7433  hAllocator->m_hDevice,
7434  m_DedicatedAllocation.m_hMemory);
7435  }
7436  }
7437  else
7438  {
7439  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
7440  }
7441 }
7442 
7443 #if VMA_STATS_STRING_ENABLED
7444 
7445 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
7446 {
7447  json.BeginObject();
7448 
7449  json.WriteString("Blocks");
7450  json.WriteNumber(stat.blockCount);
7451 
7452  json.WriteString("Allocations");
7453  json.WriteNumber(stat.allocationCount);
7454 
7455  json.WriteString("UnusedRanges");
7456  json.WriteNumber(stat.unusedRangeCount);
7457 
7458  json.WriteString("UsedBytes");
7459  json.WriteNumber(stat.usedBytes);
7460 
7461  json.WriteString("UnusedBytes");
7462  json.WriteNumber(stat.unusedBytes);
7463 
7464  if(stat.allocationCount > 1)
7465  {
7466  json.WriteString("AllocationSize");
7467  json.BeginObject(true);
7468  json.WriteString("Min");
7469  json.WriteNumber(stat.allocationSizeMin);
7470  json.WriteString("Avg");
7471  json.WriteNumber(stat.allocationSizeAvg);
7472  json.WriteString("Max");
7473  json.WriteNumber(stat.allocationSizeMax);
7474  json.EndObject();
7475  }
7476 
7477  if(stat.unusedRangeCount > 1)
7478  {
7479  json.WriteString("UnusedRangeSize");
7480  json.BeginObject(true);
7481  json.WriteString("Min");
7482  json.WriteNumber(stat.unusedRangeSizeMin);
7483  json.WriteString("Avg");
7484  json.WriteNumber(stat.unusedRangeSizeAvg);
7485  json.WriteString("Max");
7486  json.WriteNumber(stat.unusedRangeSizeMax);
7487  json.EndObject();
7488  }
7489 
7490  json.EndObject();
7491 }
7492 
7493 #endif // #if VMA_STATS_STRING_ENABLED
7494 
7495 struct VmaSuballocationItemSizeLess
7496 {
7497  bool operator()(
7498  const VmaSuballocationList::iterator lhs,
7499  const VmaSuballocationList::iterator rhs) const
7500  {
7501  return lhs->size < rhs->size;
7502  }
7503  bool operator()(
7504  const VmaSuballocationList::iterator lhs,
7505  VkDeviceSize rhsSize) const
7506  {
7507  return lhs->size < rhsSize;
7508  }
7509 };
7510 
7511 
7512 ////////////////////////////////////////////////////////////////////////////////
7513 // class VmaBlockMetadata
7514 
7515 VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
7516  m_Size(0),
7517  m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
7518 {
7519 }
7520 
7521 #if VMA_STATS_STRING_ENABLED
7522 
7523 void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
7524  VkDeviceSize unusedBytes,
7525  size_t allocationCount,
7526  size_t unusedRangeCount) const
7527 {
7528  json.BeginObject();
7529 
7530  json.WriteString("TotalBytes");
7531  json.WriteNumber(GetSize());
7532 
7533  json.WriteString("UnusedBytes");
7534  json.WriteNumber(unusedBytes);
7535 
7536  json.WriteString("Allocations");
7537  json.WriteNumber((uint64_t)allocationCount);
7538 
7539  json.WriteString("UnusedRanges");
7540  json.WriteNumber((uint64_t)unusedRangeCount);
7541 
7542  json.WriteString("Suballocations");
7543  json.BeginArray();
7544 }
7545 
7546 void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
7547  VkDeviceSize offset,
7548  VmaAllocation hAllocation) const
7549 {
7550  json.BeginObject(true);
7551 
7552  json.WriteString("Offset");
7553  json.WriteNumber(offset);
7554 
7555  hAllocation->PrintParameters(json);
7556 
7557  json.EndObject();
7558 }
7559 
7560 void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
7561  VkDeviceSize offset,
7562  VkDeviceSize size) const
7563 {
7564  json.BeginObject(true);
7565 
7566  json.WriteString("Offset");
7567  json.WriteNumber(offset);
7568 
7569  json.WriteString("Type");
7570  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);
7571 
7572  json.WriteString("Size");
7573  json.WriteNumber(size);
7574 
7575  json.EndObject();
7576 }
7577 
7578 void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
7579 {
7580  json.EndArray();
7581  json.EndObject();
7582 }
7583 
7584 #endif // #if VMA_STATS_STRING_ENABLED
7585 
7586 ////////////////////////////////////////////////////////////////////////////////
7587 // class VmaBlockMetadata_Generic
7588 
7589 VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
7590  VmaBlockMetadata(hAllocator),
7591  m_FreeCount(0),
7592  m_SumFreeSize(0),
7593  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
7594  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
7595 {
7596 }
7597 
7598 VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
7599 {
7600 }
7601 
7602 void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
7603 {
7604  VmaBlockMetadata::Init(size);
7605 
7606  m_FreeCount = 1;
7607  m_SumFreeSize = size;
7608 
7609  VmaSuballocation suballoc = {};
7610  suballoc.offset = 0;
7611  suballoc.size = size;
7612  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
7613  suballoc.hAllocation = VK_NULL_HANDLE;
7614 
7615  VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
7616  m_Suballocations.push_back(suballoc);
7617  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
7618  --suballocItem;
7619  m_FreeSuballocationsBySize.push_back(suballocItem);
7620 }
7621 
7622 bool VmaBlockMetadata_Generic::Validate() const
7623 {
7624  VMA_VALIDATE(!m_Suballocations.empty());
7625 
7626  // Expected offset of new suballocation as calculated from previous ones.
7627  VkDeviceSize calculatedOffset = 0;
7628  // Expected number of free suballocations as calculated from traversing their list.
7629  uint32_t calculatedFreeCount = 0;
7630  // Expected sum size of free suballocations as calculated from traversing their list.
7631  VkDeviceSize calculatedSumFreeSize = 0;
7632  // Expected number of free suballocations that should be registered in
7633  // m_FreeSuballocationsBySize calculated from traversing their list.
7634  size_t freeSuballocationsToRegister = 0;
7635  // True if previous visited suballocation was free.
7636  bool prevFree = false;
7637 
7638  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
7639  suballocItem != m_Suballocations.cend();
7640  ++suballocItem)
7641  {
7642  const VmaSuballocation& subAlloc = *suballocItem;
7643 
7644  // Actual offset of this suballocation doesn't match expected one.
7645  VMA_VALIDATE(subAlloc.offset == calculatedOffset);
7646 
7647  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
7648  // Two adjacent free suballocations are invalid. They should be merged.
7649  VMA_VALIDATE(!prevFree || !currFree);
7650 
7651  VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));
7652 
7653  if(currFree)
7654  {
7655  calculatedSumFreeSize += subAlloc.size;
7656  ++calculatedFreeCount;
7657  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
7658  {
7659  ++freeSuballocationsToRegister;
7660  }
7661 
7662  // Margin required between allocations - every free range must be at least that large.
7663  VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
7664  }
7665  else
7666  {
7667  VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
7668  VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);
7669 
7670  // Margin required between allocations - previous allocation must be free.
7671  VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
7672  }
7673 
7674  calculatedOffset += subAlloc.size;
7675  prevFree = currFree;
7676  }
7677 
7678  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
7679  // match expected one.
7680  VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);
7681 
7682  VkDeviceSize lastSize = 0;
7683  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
7684  {
7685  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
7686 
7687  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
7688  VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
7689  // They must be sorted by size ascending.
7690  VMA_VALIDATE(suballocItem->size >= lastSize);
7691 
7692  lastSize = suballocItem->size;
7693  }
7694 
7695  // Check if totals match calculated values.
7696  VMA_VALIDATE(ValidateFreeSuballocationList());
7697  VMA_VALIDATE(calculatedOffset == GetSize());
7698  VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
7699  VMA_VALIDATE(calculatedFreeCount == m_FreeCount);
7700 
7701  return true;
7702 }
7703 
7704 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
7705 {
7706  if(!m_FreeSuballocationsBySize.empty())
7707  {
7708  return m_FreeSuballocationsBySize.back()->size;
7709  }
7710  else
7711  {
7712  return 0;
7713  }
7714 }
7715 
7716 bool VmaBlockMetadata_Generic::IsEmpty() const
7717 {
7718  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
7719 }
7720 
7721 void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
7722 {
7723  outInfo.blockCount = 1;
7724 
7725  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
7726  outInfo.allocationCount = rangeCount - m_FreeCount;
7727  outInfo.unusedRangeCount = m_FreeCount;
7728 
7729  outInfo.unusedBytes = m_SumFreeSize;
7730  outInfo.usedBytes = GetSize() - outInfo.unusedBytes;
7731 
7732  outInfo.allocationSizeMin = UINT64_MAX;
7733  outInfo.allocationSizeMax = 0;
7734  outInfo.unusedRangeSizeMin = UINT64_MAX;
7735  outInfo.unusedRangeSizeMax = 0;
7736 
7737  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
7738  suballocItem != m_Suballocations.cend();
7739  ++suballocItem)
7740  {
7741  const VmaSuballocation& suballoc = *suballocItem;
7742  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
7743  {
7744  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
7745  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
7746  }
7747  else
7748  {
7749  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
7750  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
7751  }
7752  }
7753 }
7754 
7755 void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
7756 {
7757  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
7758 
7759  inoutStats.size += GetSize();
7760  inoutStats.unusedSize += m_SumFreeSize;
7761  inoutStats.allocationCount += rangeCount - m_FreeCount;
7762  inoutStats.unusedRangeCount += m_FreeCount;
7763  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
7764 }
7765 
7766 #if VMA_STATS_STRING_ENABLED
7767 
7768 void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
7769 {
7770  PrintDetailedMap_Begin(json,
7771  m_SumFreeSize, // unusedBytes
7772  m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
7773  m_FreeCount); // unusedRangeCount
7774 
7775  size_t i = 0;
7776  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
7777  suballocItem != m_Suballocations.cend();
7778  ++suballocItem, ++i)
7779  {
7780  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
7781  {
7782  PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
7783  }
7784  else
7785  {
7786  PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
7787  }
7788  }
7789 
7790  PrintDetailedMap_End(json);
7791 }
7792 
7793 #endif // #if VMA_STATS_STRING_ENABLED
7794 
7795 bool VmaBlockMetadata_Generic::CreateAllocationRequest(
7796  uint32_t currentFrameIndex,
7797  uint32_t frameInUseCount,
7798  VkDeviceSize bufferImageGranularity,
7799  VkDeviceSize allocSize,
7800  VkDeviceSize allocAlignment,
7801  bool upperAddress,
7802  VmaSuballocationType allocType,
7803  bool canMakeOtherLost,
7804  uint32_t strategy,
7805  VmaAllocationRequest* pAllocationRequest)
7806 {
7807  VMA_ASSERT(allocSize > 0);
7808  VMA_ASSERT(!upperAddress);
7809  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
7810  VMA_ASSERT(pAllocationRequest != VMA_NULL);
7811  VMA_HEAVY_ASSERT(Validate());
7812 
7813  // There is not enough total free space in this block to fulfill the request: Early return.
7814  if(canMakeOtherLost == false &&
7815  m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
7816  {
7817  return false;
7818  }
7819 
7820  // New algorithm, efficiently searching freeSuballocationsBySize.
7821  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
7822  if(freeSuballocCount > 0)
7823  {
7824  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
7825  {
7826  // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
7827  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
7828  m_FreeSuballocationsBySize.data(),
7829  m_FreeSuballocationsBySize.data() + freeSuballocCount,
7830  allocSize + 2 * VMA_DEBUG_MARGIN,
7831  VmaSuballocationItemSizeLess());
7832  size_t index = it - m_FreeSuballocationsBySize.data();
7833  for(; index < freeSuballocCount; ++index)
7834  {
7835  if(CheckAllocation(
7836  currentFrameIndex,
7837  frameInUseCount,
7838  bufferImageGranularity,
7839  allocSize,
7840  allocAlignment,
7841  allocType,
7842  m_FreeSuballocationsBySize[index],
7843  false, // canMakeOtherLost
7844  &pAllocationRequest->offset,
7845  &pAllocationRequest->itemsToMakeLostCount,
7846  &pAllocationRequest->sumFreeSize,
7847  &pAllocationRequest->sumItemSize))
7848  {
7849  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
7850  return true;
7851  }
7852  }
7853  }
7854  else if(strategy == VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET)
7855  {
7856  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
7857  it != m_Suballocations.end();
7858  ++it)
7859  {
7860  if(it->type == VMA_SUBALLOCATION_TYPE_FREE && CheckAllocation(
7861  currentFrameIndex,
7862  frameInUseCount,
7863  bufferImageGranularity,
7864  allocSize,
7865  allocAlignment,
7866  allocType,
7867  it,
7868  false, // canMakeOtherLost
7869  &pAllocationRequest->offset,
7870  &pAllocationRequest->itemsToMakeLostCount,
7871  &pAllocationRequest->sumFreeSize,
7872  &pAllocationRequest->sumItemSize))
7873  {
7874  pAllocationRequest->item = it;
7875  return true;
7876  }
7877  }
7878  }
7879  else // WORST_FIT, FIRST_FIT
7880  {
7881  // Search starting from the biggest suballocations.
7882  for(size_t index = freeSuballocCount; index--; )
7883  {
7884  if(CheckAllocation(
7885  currentFrameIndex,
7886  frameInUseCount,
7887  bufferImageGranularity,
7888  allocSize,
7889  allocAlignment,
7890  allocType,
7891  m_FreeSuballocationsBySize[index],
7892  false, // canMakeOtherLost
7893  &pAllocationRequest->offset,
7894  &pAllocationRequest->itemsToMakeLostCount,
7895  &pAllocationRequest->sumFreeSize,
7896  &pAllocationRequest->sumItemSize))
7897  {
7898  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
7899  return true;
7900  }
7901  }
7902  }
7903  }
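 // Reading aid for the search above (summary, not library documentation):
 // - BEST_FIT: binary-search m_FreeSuballocationsBySize (sorted ascending) for
 //   the first free range >= allocSize + 2 * VMA_DEBUG_MARGIN, then try
 //   candidates upward until CheckAllocation succeeds.
 // - MIN_OFFSET: walk m_Suballocations in address order and take the first
 //   free range that fits, minimizing the resulting offset.
 // - WORST_FIT / FIRST_FIT: walk the by-size vector from the largest free
 //   range downward.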
7904 
7905  if(canMakeOtherLost)
7906  {
7907  // Brute-force algorithm. TODO: Come up with something better.
7908 
7909  pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
7910  pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;
7911 
7912  VmaAllocationRequest tmpAllocRequest = {};
7913  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
7914  suballocIt != m_Suballocations.end();
7915  ++suballocIt)
7916  {
7917  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
7918  suballocIt->hAllocation->CanBecomeLost())
7919  {
7920  if(CheckAllocation(
7921  currentFrameIndex,
7922  frameInUseCount,
7923  bufferImageGranularity,
7924  allocSize,
7925  allocAlignment,
7926  allocType,
7927  suballocIt,
7928  canMakeOtherLost,
7929  &tmpAllocRequest.offset,
7930  &tmpAllocRequest.itemsToMakeLostCount,
7931  &tmpAllocRequest.sumFreeSize,
7932  &tmpAllocRequest.sumItemSize))
7933  {
7934  tmpAllocRequest.item = suballocIt;
7935 
7936  if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost() ||
7937  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
7938  {
7939  *pAllocationRequest = tmpAllocRequest;
7940  }
7941  }
7942  }
7943  }
7944 
7945  if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
7946  {
7947  return true;
7948  }
7949  }
7950 
7951  return false;
7952 }
7953 
7954 bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
7955  uint32_t currentFrameIndex,
7956  uint32_t frameInUseCount,
7957  VmaAllocationRequest* pAllocationRequest)
7958 {
7959  while(pAllocationRequest->itemsToMakeLostCount > 0)
7960  {
7961  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
7962  {
7963  ++pAllocationRequest->item;
7964  }
7965  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
7966  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
7967  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
7968  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
7969  {
7970  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
7971  --pAllocationRequest->itemsToMakeLostCount;
7972  }
7973  else
7974  {
7975  return false;
7976  }
7977  }
7978 
7979  VMA_HEAVY_ASSERT(Validate());
7980  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
7981  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
7982 
7983  return true;
7984 }
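// Note: FreeSuballocation may merge the freed range with free neighbors and
// returns the surviving iterator, which is why pAllocationRequest->item is
// reassigned on every successful MakeLost above.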
7985 
7986 uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
7987 {
7988  uint32_t lostAllocationCount = 0;
7989  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
7990  it != m_Suballocations.end();
7991  ++it)
7992  {
7993  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
7994  it->hAllocation->CanBecomeLost() &&
7995  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
7996  {
7997  it = FreeSuballocation(it);
7998  ++lostAllocationCount;
7999  }
8000  }
8001  return lostAllocationCount;
8002 }
8003 
8004 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
8005 {
8006  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
8007  it != m_Suballocations.end();
8008  ++it)
8009  {
8010  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
8011  {
8012  if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
8013  {
8014  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
8015  return VK_ERROR_VALIDATION_FAILED_EXT;
8016  }
8017  if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
8018  {
8019  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
8020  return VK_ERROR_VALIDATION_FAILED_EXT;
8021  }
8022  }
8023  }
8024 
8025  return VK_SUCCESS;
8026 }
8027 
8028 void VmaBlockMetadata_Generic::Alloc(
8029  const VmaAllocationRequest& request,
8030  VmaSuballocationType type,
8031  VkDeviceSize allocSize,
8032  bool upperAddress,
8033  VmaAllocation hAllocation)
8034 {
8035  VMA_ASSERT(!upperAddress);
8036  VMA_ASSERT(request.item != m_Suballocations.end());
8037  VmaSuballocation& suballoc = *request.item;
8038  // Given suballocation is a free block.
8039  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8040  // Given offset is inside this suballocation.
8041  VMA_ASSERT(request.offset >= suballoc.offset);
8042  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
8043  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
8044  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
8045 
8046  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
8047  // it to become used.
8048  UnregisterFreeSuballocation(request.item);
8049 
8050  suballoc.offset = request.offset;
8051  suballoc.size = allocSize;
8052  suballoc.type = type;
8053  suballoc.hAllocation = hAllocation;
8054 
8055  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
8056  if(paddingEnd)
8057  {
8058  VmaSuballocation paddingSuballoc = {};
8059  paddingSuballoc.offset = request.offset + allocSize;
8060  paddingSuballoc.size = paddingEnd;
8061  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8062  VmaSuballocationList::iterator next = request.item;
8063  ++next;
8064  const VmaSuballocationList::iterator paddingEndItem =
8065  m_Suballocations.insert(next, paddingSuballoc);
8066  RegisterFreeSuballocation(paddingEndItem);
8067  }
8068 
8069  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
8070  if(paddingBegin)
8071  {
8072  VmaSuballocation paddingSuballoc = {};
8073  paddingSuballoc.offset = request.offset - paddingBegin;
8074  paddingSuballoc.size = paddingBegin;
8075  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8076  const VmaSuballocationList::iterator paddingBeginItem =
8077  m_Suballocations.insert(request.item, paddingSuballoc);
8078  RegisterFreeSuballocation(paddingBeginItem);
8079  }
8080 
8081  // Update totals.
8082  m_FreeCount = m_FreeCount - 1;
8083  if(paddingBegin > 0)
8084  {
8085  ++m_FreeCount;
8086  }
8087  if(paddingEnd > 0)
8088  {
8089  ++m_FreeCount;
8090  }
8091  m_SumFreeSize -= allocSize;
8092 }
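// Alloc splits one free range into up to three pieces: optional free padding
// before the allocation, the allocation itself, and optional free padding
// after it. m_FreeCount is decremented for the consumed range and incremented
// once per padding range created, and only allocSize leaves m_SumFreeSize.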
8093 
8094 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
8095 {
8096  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
8097  suballocItem != m_Suballocations.end();
8098  ++suballocItem)
8099  {
8100  VmaSuballocation& suballoc = *suballocItem;
8101  if(suballoc.hAllocation == allocation)
8102  {
8103  FreeSuballocation(suballocItem);
8104  VMA_HEAVY_ASSERT(Validate());
8105  return;
8106  }
8107  }
8108  VMA_ASSERT(0 && "Not found!");
8109 }
8110 
8111 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
8112 {
8113  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
8114  suballocItem != m_Suballocations.end();
8115  ++suballocItem)
8116  {
8117  VmaSuballocation& suballoc = *suballocItem;
8118  if(suballoc.offset == offset)
8119  {
8120  FreeSuballocation(suballocItem);
8121  return;
8122  }
8123  }
8124  VMA_ASSERT(0 && "Not found!");
8125 }
8126 
8127 bool VmaBlockMetadata_Generic::ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize)
8128 {
8129  typedef VmaSuballocationList::iterator iter_type;
8130  for(iter_type suballocItem = m_Suballocations.begin();
8131  suballocItem != m_Suballocations.end();
8132  ++suballocItem)
8133  {
8134  VmaSuballocation& suballoc = *suballocItem;
8135  if(suballoc.hAllocation == alloc)
8136  {
8137  iter_type nextItem = suballocItem;
8138  ++nextItem;
8139 
8140  // Should have been ensured at a higher level.
8141  VMA_ASSERT(newSize != alloc->GetSize() && newSize > 0);
8142 
8143  // Shrinking.
8144  if(newSize < alloc->GetSize())
8145  {
8146  const VkDeviceSize sizeDiff = suballoc.size - newSize;
8147 
8148  // There is next item.
8149  if(nextItem != m_Suballocations.end())
8150  {
8151  // Next item is free.
8152  if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8153  {
8154  // Grow this next item backward.
8155  UnregisterFreeSuballocation(nextItem);
8156  nextItem->offset -= sizeDiff;
8157  nextItem->size += sizeDiff;
8158  RegisterFreeSuballocation(nextItem);
8159  }
8160  // Next item is not free.
8161  else
8162  {
8163  // Create free item after current one.
8164  VmaSuballocation newFreeSuballoc;
8165  newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
8166  newFreeSuballoc.offset = suballoc.offset + newSize;
8167  newFreeSuballoc.size = sizeDiff;
8168  newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8169  iter_type newFreeSuballocIt = m_Suballocations.insert(nextItem, newFreeSuballoc);
8170  RegisterFreeSuballocation(newFreeSuballocIt);
8171 
8172  ++m_FreeCount;
8173  }
8174  }
8175  // This is the last item.
8176  else
8177  {
8178  // Create free item at the end.
8179  VmaSuballocation newFreeSuballoc;
8180  newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
8181  newFreeSuballoc.offset = suballoc.offset + newSize;
8182  newFreeSuballoc.size = sizeDiff;
8183  newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8184  m_Suballocations.push_back(newFreeSuballoc);
8185 
8186  iter_type newFreeSuballocIt = m_Suballocations.end();
8187  RegisterFreeSuballocation(--newFreeSuballocIt);
8188 
8189  ++m_FreeCount;
8190  }
8191 
8192  suballoc.size = newSize;
8193  m_SumFreeSize += sizeDiff;
8194  }
8195  // Growing.
8196  else
8197  {
8198  const VkDeviceSize sizeDiff = newSize - suballoc.size;
8199 
8200  // There is next item.
8201  if(nextItem != m_Suballocations.end())
8202  {
8203  // Next item is free.
8204  if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8205  {
8206  // There is not enough free space, including margin.
8207  if(nextItem->size < sizeDiff + VMA_DEBUG_MARGIN)
8208  {
8209  return false;
8210  }
8211 
8212  // There is more free space than required.
8213  if(nextItem->size > sizeDiff)
8214  {
8215  // Move and shrink this next item.
8216  UnregisterFreeSuballocation(nextItem);
8217  nextItem->offset += sizeDiff;
8218  nextItem->size -= sizeDiff;
8219  RegisterFreeSuballocation(nextItem);
8220  }
8221  // There is exactly the amount of free space required.
8222  else
8223  {
8224  // Remove this next free item.
8225  UnregisterFreeSuballocation(nextItem);
8226  m_Suballocations.erase(nextItem);
8227  --m_FreeCount;
8228  }
8229  }
8230  // Next item is not free - there is no space to grow.
8231  else
8232  {
8233  return false;
8234  }
8235  }
8236  // This is the last item - there is no space to grow.
8237  else
8238  {
8239  return false;
8240  }
8241 
8242  suballoc.size = newSize;
8243  m_SumFreeSize -= sizeDiff;
8244  }
8245 
8246  // We cannot call Validate() here because the alloc object is updated to the new size outside of this call.
8247  return true;
8248  }
8249  }
8250  VMA_ASSERT(0 && "Not found!");
8251  return false;
8252 }
8253 
8254 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
8255 {
8256  VkDeviceSize lastSize = 0;
8257  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
8258  {
8259  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
8260 
8261  VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
8262  VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
8263  VMA_VALIDATE(it->size >= lastSize);
8264  lastSize = it->size;
8265  }
8266  return true;
8267 }
8268 
8269 bool VmaBlockMetadata_Generic::CheckAllocation(
8270  uint32_t currentFrameIndex,
8271  uint32_t frameInUseCount,
8272  VkDeviceSize bufferImageGranularity,
8273  VkDeviceSize allocSize,
8274  VkDeviceSize allocAlignment,
8275  VmaSuballocationType allocType,
8276  VmaSuballocationList::const_iterator suballocItem,
8277  bool canMakeOtherLost,
8278  VkDeviceSize* pOffset,
8279  size_t* itemsToMakeLostCount,
8280  VkDeviceSize* pSumFreeSize,
8281  VkDeviceSize* pSumItemSize) const
8282 {
8283  VMA_ASSERT(allocSize > 0);
8284  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
8285  VMA_ASSERT(suballocItem != m_Suballocations.cend());
8286  VMA_ASSERT(pOffset != VMA_NULL);
8287 
8288  *itemsToMakeLostCount = 0;
8289  *pSumFreeSize = 0;
8290  *pSumItemSize = 0;
8291 
8292  if(canMakeOtherLost)
8293  {
8294  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8295  {
8296  *pSumFreeSize = suballocItem->size;
8297  }
8298  else
8299  {
8300  if(suballocItem->hAllocation->CanBecomeLost() &&
8301  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
8302  {
8303  ++*itemsToMakeLostCount;
8304  *pSumItemSize = suballocItem->size;
8305  }
8306  else
8307  {
8308  return false;
8309  }
8310  }
8311 
8312  // Remaining size is too small for this request: Early return.
8313  if(GetSize() - suballocItem->offset < allocSize)
8314  {
8315  return false;
8316  }
8317 
8318  // Start from offset equal to beginning of this suballocation.
8319  *pOffset = suballocItem->offset;
8320 
8321  // Apply VMA_DEBUG_MARGIN at the beginning.
8322  if(VMA_DEBUG_MARGIN > 0)
8323  {
8324  *pOffset += VMA_DEBUG_MARGIN;
8325  }
8326 
8327  // Apply alignment.
8328  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
8329 
8330  // Check previous suballocations for BufferImageGranularity conflicts.
8331  // Make bigger alignment if necessary.
8332  if(bufferImageGranularity > 1)
8333  {
8334  bool bufferImageGranularityConflict = false;
8335  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
8336  while(prevSuballocItem != m_Suballocations.cbegin())
8337  {
8338  --prevSuballocItem;
8339  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
8340  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
8341  {
8342  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
8343  {
8344  bufferImageGranularityConflict = true;
8345  break;
8346  }
8347  }
8348  else
8349  // Already on previous page.
8350  break;
8351  }
8352  if(bufferImageGranularityConflict)
8353  {
8354  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
8355  }
8356  }
8357 
8358  // Now that we have final *pOffset, check if we are past suballocItem.
8359  // If yes, return false - this function should be called for another suballocItem as starting point.
8360  if(*pOffset >= suballocItem->offset + suballocItem->size)
8361  {
8362  return false;
8363  }
8364 
8365  // Calculate padding at the beginning based on current offset.
8366  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
8367 
8368  // Calculate required margin at the end.
8369  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
8370 
8371  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
8372  // Another early return check.
8373  if(suballocItem->offset + totalSize > GetSize())
8374  {
8375  return false;
8376  }
8377 
8378  // Advance lastSuballocItem until desired size is reached.
8379  // Update itemsToMakeLostCount.
8380  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
8381  if(totalSize > suballocItem->size)
8382  {
8383  VkDeviceSize remainingSize = totalSize - suballocItem->size;
8384  while(remainingSize > 0)
8385  {
8386  ++lastSuballocItem;
8387  if(lastSuballocItem == m_Suballocations.cend())
8388  {
8389  return false;
8390  }
8391  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8392  {
8393  *pSumFreeSize += lastSuballocItem->size;
8394  }
8395  else
8396  {
8397  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
8398  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
8399  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
8400  {
8401  ++*itemsToMakeLostCount;
8402  *pSumItemSize += lastSuballocItem->size;
8403  }
8404  else
8405  {
8406  return false;
8407  }
8408  }
8409  remainingSize = (lastSuballocItem->size < remainingSize) ?
8410  remainingSize - lastSuballocItem->size : 0;
8411  }
8412  }
8413 
8414  // Check next suballocations for BufferImageGranularity conflicts.
8415  // If conflict exists, we must mark more allocations lost or fail.
8416  if(bufferImageGranularity > 1)
8417  {
8418  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
8419  ++nextSuballocItem;
8420  while(nextSuballocItem != m_Suballocations.cend())
8421  {
8422  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
8423  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
8424  {
8425  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
8426  {
8427  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
8428  if(nextSuballoc.hAllocation->CanBecomeLost() &&
8429  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
8430  {
8431  ++*itemsToMakeLostCount;
8432  }
8433  else
8434  {
8435  return false;
8436  }
8437  }
8438  }
8439  else
8440  {
8441  // Already on next page.
8442  break;
8443  }
8444  ++nextSuballocItem;
8445  }
8446  }
8447  }
8448  else
8449  {
8450  const VmaSuballocation& suballoc = *suballocItem;
8451  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8452 
8453  *pSumFreeSize = suballoc.size;
8454 
8455  // Size of this suballocation is too small for this request: Early return.
8456  if(suballoc.size < allocSize)
8457  {
8458  return false;
8459  }
8460 
8461  // Start from offset equal to beginning of this suballocation.
8462  *pOffset = suballoc.offset;
8463 
8464  // Apply VMA_DEBUG_MARGIN at the beginning.
8465  if(VMA_DEBUG_MARGIN > 0)
8466  {
8467  *pOffset += VMA_DEBUG_MARGIN;
8468  }
8469 
8470  // Apply alignment.
8471  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
8472 
8473  // Check previous suballocations for BufferImageGranularity conflicts.
8474  // Make bigger alignment if necessary.
8475  if(bufferImageGranularity > 1)
8476  {
8477  bool bufferImageGranularityConflict = false;
8478  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
8479  while(prevSuballocItem != m_Suballocations.cbegin())
8480  {
8481  --prevSuballocItem;
8482  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
8483  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
8484  {
8485  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
8486  {
8487  bufferImageGranularityConflict = true;
8488  break;
8489  }
8490  }
8491  else
8492  // Already on previous page.
8493  break;
8494  }
8495  if(bufferImageGranularityConflict)
8496  {
8497  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
8498  }
8499  }
8500 
8501  // Calculate padding at the beginning based on current offset.
8502  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
8503 
8504  // Calculate required margin at the end.
8505  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
8506 
8507  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
8508  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
8509  {
8510  return false;
8511  }
8512 
8513  // Check next suballocations for BufferImageGranularity conflicts.
8514  // If conflict exists, allocation cannot be made here.
8515  if(bufferImageGranularity > 1)
8516  {
8517  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
8518  ++nextSuballocItem;
8519  while(nextSuballocItem != m_Suballocations.cend())
8520  {
8521  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
8522  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
8523  {
8524  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
8525  {
8526  return false;
8527  }
8528  }
8529  else
8530  {
8531  // Already on next page.
8532  break;
8533  }
8534  ++nextSuballocItem;
8535  }
8536  }
8537  }
8538 
8539  // All tests passed: Success. pOffset is already filled.
8540  return true;
8541 }
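// Worked example for the granularity checks above (illustrative numbers
// only): with bufferImageGranularity = 1024, a preceding linear-buffer
// suballocation ending at byte 3100 and a candidate optimal-image offset of
// 3500 both fall on the page [3072, 4096), so the candidate offset is
// aligned up to 4096 to avoid aliasing within one granularity page.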
8542 
8543 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
8544 {
8545  VMA_ASSERT(item != m_Suballocations.end());
8546  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
8547 
8548  VmaSuballocationList::iterator nextItem = item;
8549  ++nextItem;
8550  VMA_ASSERT(nextItem != m_Suballocations.end());
8551  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
8552 
8553  item->size += nextItem->size;
8554  --m_FreeCount;
8555  m_Suballocations.erase(nextItem);
8556 }
8557 
8558 VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
8559 {
8560  // Change this suballocation to be marked as free.
8561  VmaSuballocation& suballoc = *suballocItem;
8562  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8563  suballoc.hAllocation = VK_NULL_HANDLE;
8564 
8565  // Update totals.
8566  ++m_FreeCount;
8567  m_SumFreeSize += suballoc.size;
8568 
8569  // Merge with previous and/or next suballocation if it's also free.
8570  bool mergeWithNext = false;
8571  bool mergeWithPrev = false;
8572 
8573  VmaSuballocationList::iterator nextItem = suballocItem;
8574  ++nextItem;
8575  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
8576  {
8577  mergeWithNext = true;
8578  }
8579 
8580  VmaSuballocationList::iterator prevItem = suballocItem;
8581  if(suballocItem != m_Suballocations.begin())
8582  {
8583  --prevItem;
8584  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8585  {
8586  mergeWithPrev = true;
8587  }
8588  }
8589 
8590  if(mergeWithNext)
8591  {
8592  UnregisterFreeSuballocation(nextItem);
8593  MergeFreeWithNext(suballocItem);
8594  }
8595 
8596  if(mergeWithPrev)
8597  {
8598  UnregisterFreeSuballocation(prevItem);
8599  MergeFreeWithNext(prevItem);
8600  RegisterFreeSuballocation(prevItem);
8601  return prevItem;
8602  }
8603  else
8604  {
8605  RegisterFreeSuballocation(suballocItem);
8606  return suballocItem;
8607  }
8608 }
8609 
8610 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
8611 {
8612  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
8613  VMA_ASSERT(item->size > 0);
8614 
8615  // You may want to enable this validation at the beginning or at the end of
8616  // this function, depending on what you want to check.
8617  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
8618 
8619  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
8620  {
8621  if(m_FreeSuballocationsBySize.empty())
8622  {
8623  m_FreeSuballocationsBySize.push_back(item);
8624  }
8625  else
8626  {
8627  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
8628  }
8629  }
8630 
8631  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
8632 }
8633 
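// VmaVectorInsertSorted above keeps m_FreeSuballocationsBySize ordered by
// suballocation size so that best-fit lookups can binary-search it. The same
// idea with the standard library (a sketch; the names are hypothetical):
#include <algorithm> // For this illustrative sketch only.
#include <vector>

static void ExampleInsertSorted(std::vector<VkDeviceSize>& sizes, VkDeviceSize s)
{
    // Insert before the first element not less than s: the vector stays sorted.
    sizes.insert(std::lower_bound(sizes.begin(), sizes.end(), s), s);
}
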
8634 
8635 void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
8636 {
8637  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
8638  VMA_ASSERT(item->size > 0);
8639 
8640  // You may want to enable this validation at the beginning or at the end of
8641  // this function, depending on what you want to check.
8642  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
8643 
8644  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
8645  {
8646  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
8647  m_FreeSuballocationsBySize.data(),
8648  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
8649  item,
8650  VmaSuballocationItemSizeLess());
8651  for(size_t index = it - m_FreeSuballocationsBySize.data();
8652  index < m_FreeSuballocationsBySize.size();
8653  ++index)
8654  {
8655  if(m_FreeSuballocationsBySize[index] == item)
8656  {
8657  VmaVectorRemove(m_FreeSuballocationsBySize, index);
8658  return;
8659  }
8660  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
8661  }
8662  VMA_ASSERT(0 && "Not found.");
8663  }
8664 
8665  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
8666 }
8667 
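// UnregisterFreeSuballocation() cannot stop at the first size match: several
// free ranges may share the same size, so after the binary search it scans
// forward through the run of equal sizes until it finds the exact iterator.
// A sketch of that pattern with std::lower_bound (hypothetical names):
#include <algorithm> // For this illustrative sketch only.
#include <vector>

struct ExampleSizeLess
{
    bool operator()(const VkDeviceSize* a, const VkDeviceSize* b) const { return *a < *b; }
};

static bool ExampleRemoveExact(std::vector<const VkDeviceSize*>& bySize, const VkDeviceSize* item)
{
    // Binary-search to the first entry whose size is not less than item's...
    std::vector<const VkDeviceSize*>::iterator it = std::lower_bound(
        bySize.begin(), bySize.end(), item, ExampleSizeLess());
    // ...then scan the run of equal sizes for the exact element.
    for(; it != bySize.end() && **it == *item; ++it)
    {
        if(*it == item)
        {
            bySize.erase(it);
            return true;
        }
    }
    return false; // Not registered (e.g. below the registration size threshold).
}
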
8668 bool VmaBlockMetadata_Generic::IsBufferImageGranularityConflictPossible(
8669  VkDeviceSize bufferImageGranularity,
8670  VmaSuballocationType& inOutPrevSuballocType) const
8671 {
8672  if(bufferImageGranularity == 1 || IsEmpty())
8673  {
8674  return false;
8675  }
8676 
8677  VkDeviceSize minAlignment = VK_WHOLE_SIZE;
8678  bool typeConflictFound = false;
8679  for(VmaSuballocationList::const_iterator it = m_Suballocations.cbegin();
8680  it != m_Suballocations.cend();
8681  ++it)
8682  {
8683  const VmaSuballocationType suballocType = it->type;
8684  if(suballocType != VMA_SUBALLOCATION_TYPE_FREE)
8685  {
8686  minAlignment = VMA_MIN(minAlignment, it->hAllocation->GetAlignment());
8687  if(VmaIsBufferImageGranularityConflict(inOutPrevSuballocType, suballocType))
8688  {
8689  typeConflictFound = true;
8690  }
8691  inOutPrevSuballocType = suballocType;
8692  }
8693  }
8694 
8695  return typeConflictFound || minAlignment >= bufferImageGranularity;
8696 }
8697 
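// VmaBlocksOnSamePage(), used throughout the granularity checks above, answers:
// does the last byte of resource A share a bufferImageGranularity-sized page
// with the first byte of resource B? A sketch of that test, assuming pageSize
// is a power of two (ExampleOnSamePage is a hypothetical helper):
static bool ExampleOnSamePage(
    VkDeviceSize aOffset, VkDeviceSize aSize, VkDeviceSize bOffset, VkDeviceSize pageSize)
{
    VMA_ASSERT(aSize > 0 && aOffset + aSize <= bOffset && pageSize > 0);
    const VkDeviceSize aLastByte  = aOffset + aSize - 1;         // Last byte of A.
    const VkDeviceSize aEndPage   = aLastByte & ~(pageSize - 1); // Page containing it.
    const VkDeviceSize bStartPage = bOffset & ~(pageSize - 1);   // Page of B's first byte.
    return aEndPage == bStartPage; // A granularity conflict is possible only then.
}
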
8698 ////////////////////////////////////////////////////////////////////////////////
8699 // class VmaBlockMetadata_Linear
8700 
8701 VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
8702  VmaBlockMetadata(hAllocator),
8703  m_SumFreeSize(0),
8704  m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
8705  m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
8706  m_1stVectorIndex(0),
8707  m_2ndVectorMode(SECOND_VECTOR_EMPTY),
8708  m_1stNullItemsBeginCount(0),
8709  m_1stNullItemsMiddleCount(0),
8710  m_2ndNullItemsCount(0)
8711 {
8712 }
8713 
8714 VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
8715 {
8716 }
8717 
8718 void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
8719 {
8720  VmaBlockMetadata::Init(size);
8721  m_SumFreeSize = size;
8722 }
8723 
8724 bool VmaBlockMetadata_Linear::Validate() const
8725 {
8726  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8727  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8728 
8729  VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
8730  VMA_VALIDATE(!suballocations1st.empty() ||
8731  suballocations2nd.empty() ||
8732  m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);
8733 
8734  if(!suballocations1st.empty())
8735  {
8736  // Null items at the beginning should be accounted for in m_1stNullItemsBeginCount.
8737  VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
8738  // A null item at the end should have been removed with pop_back().
8739  VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
8740  }
8741  if(!suballocations2nd.empty())
8742  {
8743  // A null item at the end should have been removed with pop_back().
8744  VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
8745  }
8746 
8747  VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
8748  VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());
8749 
8750  VkDeviceSize sumUsedSize = 0;
8751  const size_t suballoc1stCount = suballocations1st.size();
8752  VkDeviceSize offset = VMA_DEBUG_MARGIN;
8753 
8754  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8755  {
8756  const size_t suballoc2ndCount = suballocations2nd.size();
8757  size_t nullItem2ndCount = 0;
8758  for(size_t i = 0; i < suballoc2ndCount; ++i)
8759  {
8760  const VmaSuballocation& suballoc = suballocations2nd[i];
8761  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8762 
8763  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
8764  VMA_VALIDATE(suballoc.offset >= offset);
8765 
8766  if(!currFree)
8767  {
8768  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
8769  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
8770  sumUsedSize += suballoc.size;
8771  }
8772  else
8773  {
8774  ++nullItem2ndCount;
8775  }
8776 
8777  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
8778  }
8779 
8780  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
8781  }
8782 
8783  for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
8784  {
8785  const VmaSuballocation& suballoc = suballocations1st[i];
8786  VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
8787  suballoc.hAllocation == VK_NULL_HANDLE);
8788  }
8789 
8790  size_t nullItem1stCount = m_1stNullItemsBeginCount;
8791 
8792  for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
8793  {
8794  const VmaSuballocation& suballoc = suballocations1st[i];
8795  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8796 
8797  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
8798  VMA_VALIDATE(suballoc.offset >= offset);
8799  VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);
8800 
8801  if(!currFree)
8802  {
8803  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
8804  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
8805  sumUsedSize += suballoc.size;
8806  }
8807  else
8808  {
8809  ++nullItem1stCount;
8810  }
8811 
8812  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
8813  }
8814  VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);
8815 
8816  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8817  {
8818  const size_t suballoc2ndCount = suballocations2nd.size();
8819  size_t nullItem2ndCount = 0;
8820  for(size_t i = suballoc2ndCount; i--; )
8821  {
8822  const VmaSuballocation& suballoc = suballocations2nd[i];
8823  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8824 
8825  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
8826  VMA_VALIDATE(suballoc.offset >= offset);
8827 
8828  if(!currFree)
8829  {
8830  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
8831  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
8832  sumUsedSize += suballoc.size;
8833  }
8834  else
8835  {
8836  ++nullItem2ndCount;
8837  }
8838 
8839  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
8840  }
8841 
8842  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
8843  }
8844 
8845  VMA_VALIDATE(offset <= GetSize());
8846  VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);
8847 
8848  return true;
8849 }
8850 
8851 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
8852 {
8853  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
8854  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
8855 }
8856 
8857 VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
8858 {
8859  const VkDeviceSize size = GetSize();
8860 
8861  /*
8862  We don't consider gaps inside allocation vectors with freed allocations because
8863  they are not suitable for reuse in a linear allocator. We consider only space that
8864  is available for new allocations.
8865  */
8866  if(IsEmpty())
8867  {
8868  return size;
8869  }
8870 
8871  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8872 
8873  switch(m_2ndVectorMode)
8874  {
8875  case SECOND_VECTOR_EMPTY:
8876  /*
8877  Available space is after end of 1st, as well as before beginning of 1st (which
8878  would make it a ring buffer).
8879  */
8880  {
8881  const size_t suballocations1stCount = suballocations1st.size();
8882  VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
8883  const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
8884  const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
8885  return VMA_MAX(
8886  firstSuballoc.offset,
8887  size - (lastSuballoc.offset + lastSuballoc.size));
8888  }
8889  break;
8890 
8891  case SECOND_VECTOR_RING_BUFFER:
8892  /*
8893  Available space is only between end of 2nd and beginning of 1st.
8894  */
8895  {
8896  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8897  const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
8898  const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
8899  return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
8900  }
8901  break;
8902 
8903  case SECOND_VECTOR_DOUBLE_STACK:
8904  /*
8905  Available space is only between end of 1st and top of 2nd.
8906  */
8907  {
8908  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8909  const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
8910  const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
8911  return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
8912  }
8913  break;
8914 
8915  default:
8916  VMA_ASSERT(0);
8917  return 0;
8918  }
8919 }
8920 
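// A concrete reading of the three cases above (hypothetical numbers, block
// size 100): in SECOND_VECTOR_EMPTY with 1st occupying [30, 70), the largest
// unused range is max(30, 100 - 70) = 30. In SECOND_VECTOR_RING_BUFFER with
// 2nd ending at 20 and 1st beginning at 55, it is 55 - 20 = 35. In
// SECOND_VECTOR_DOUBLE_STACK with 1st ending at 40 and the top of 2nd at
// offset 80, it is 80 - 40 = 40.
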
8921 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
8922 {
8923  const VkDeviceSize size = GetSize();
8924  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8925  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8926  const size_t suballoc1stCount = suballocations1st.size();
8927  const size_t suballoc2ndCount = suballocations2nd.size();
8928 
8929  outInfo.blockCount = 1;
8930  outInfo.allocationCount = (uint32_t)GetAllocationCount();
8931  outInfo.unusedRangeCount = 0;
8932  outInfo.usedBytes = 0;
8933  outInfo.allocationSizeMin = UINT64_MAX;
8934  outInfo.allocationSizeMax = 0;
8935  outInfo.unusedRangeSizeMin = UINT64_MAX;
8936  outInfo.unusedRangeSizeMax = 0;
8937 
8938  VkDeviceSize lastOffset = 0;
8939 
8940  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8941  {
8942  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
8943  size_t nextAlloc2ndIndex = 0;
8944  while(lastOffset < freeSpace2ndTo1stEnd)
8945  {
8946  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8947  while(nextAlloc2ndIndex < suballoc2ndCount &&
8948  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8949  {
8950  ++nextAlloc2ndIndex;
8951  }
8952 
8953  // Found non-null allocation.
8954  if(nextAlloc2ndIndex < suballoc2ndCount)
8955  {
8956  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8957 
8958  // 1. Process free space before this allocation.
8959  if(lastOffset < suballoc.offset)
8960  {
8961  // There is free space from lastOffset to suballoc.offset.
8962  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8963  ++outInfo.unusedRangeCount;
8964  outInfo.unusedBytes += unusedRangeSize;
8965  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
8966  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
8967  }
8968 
8969  // 2. Process this allocation.
8970  // There is allocation with suballoc.offset, suballoc.size.
8971  outInfo.usedBytes += suballoc.size;
8972  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
8973  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
8974 
8975  // 3. Prepare for next iteration.
8976  lastOffset = suballoc.offset + suballoc.size;
8977  ++nextAlloc2ndIndex;
8978  }
8979  // We are at the end.
8980  else
8981  {
8982  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
8983  if(lastOffset < freeSpace2ndTo1stEnd)
8984  {
8985  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
8986  ++outInfo.unusedRangeCount;
8987  outInfo.unusedBytes += unusedRangeSize;
8988  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
8989  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
8990  }
8991 
8992  // End of loop.
8993  lastOffset = freeSpace2ndTo1stEnd;
8994  }
8995  }
8996  }
8997 
8998  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
8999  const VkDeviceSize freeSpace1stTo2ndEnd =
9000  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
9001  while(lastOffset < freeSpace1stTo2ndEnd)
9002  {
9003  // Find next non-null allocation or move nextAlloc1stIndex to the end.
9004  while(nextAlloc1stIndex < suballoc1stCount &&
9005  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9006  {
9007  ++nextAlloc1stIndex;
9008  }
9009 
9010  // Found non-null allocation.
9011  if(nextAlloc1stIndex < suballoc1stCount)
9012  {
9013  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9014 
9015  // 1. Process free space before this allocation.
9016  if(lastOffset < suballoc.offset)
9017  {
9018  // There is free space from lastOffset to suballoc.offset.
9019  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9020  ++outInfo.unusedRangeCount;
9021  outInfo.unusedBytes += unusedRangeSize;
9022  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9023  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9024  }
9025 
9026  // 2. Process this allocation.
9027  // There is allocation with suballoc.offset, suballoc.size.
9028  outInfo.usedBytes += suballoc.size;
9029  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
9030  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
9031 
9032  // 3. Prepare for next iteration.
9033  lastOffset = suballoc.offset + suballoc.size;
9034  ++nextAlloc1stIndex;
9035  }
9036  // We are at the end.
9037  else
9038  {
9039  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9040  if(lastOffset < freeSpace1stTo2ndEnd)
9041  {
9042  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
9043  ++outInfo.unusedRangeCount;
9044  outInfo.unusedBytes += unusedRangeSize;
9045  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9046  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9047  }
9048 
9049  // End of loop.
9050  lastOffset = freeSpace1stTo2ndEnd;
9051  }
9052  }
9053 
9054  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9055  {
9056  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9057  while(lastOffset < size)
9058  {
9059  // Find next non-null allocation or move nextAlloc2ndIndex past the beginning.
9060  while(nextAlloc2ndIndex != SIZE_MAX &&
9061  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9062  {
9063  --nextAlloc2ndIndex;
9064  }
9065 
9066  // Found non-null allocation.
9067  if(nextAlloc2ndIndex != SIZE_MAX)
9068  {
9069  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9070 
9071  // 1. Process free space before this allocation.
9072  if(lastOffset < suballoc.offset)
9073  {
9074  // There is free space from lastOffset to suballoc.offset.
9075  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9076  ++outInfo.unusedRangeCount;
9077  outInfo.unusedBytes += unusedRangeSize;
9078  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9079  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9080  }
9081 
9082  // 2. Process this allocation.
9083  // There is allocation with suballoc.offset, suballoc.size.
9084  outInfo.usedBytes += suballoc.size;
9085  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
9086  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
9087 
9088  // 3. Prepare for next iteration.
9089  lastOffset = suballoc.offset + suballoc.size;
9090  --nextAlloc2ndIndex;
9091  }
9092  // We are at the end.
9093  else
9094  {
9095  // There is free space from lastOffset to size.
9096  if(lastOffset < size)
9097  {
9098  const VkDeviceSize unusedRangeSize = size - lastOffset;
9099  ++outInfo.unusedRangeCount;
9100  outInfo.unusedBytes += unusedRangeSize;
9101  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9102  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9103  }
9104 
9105  // End of loop.
9106  lastOffset = size;
9107  }
9108  }
9109  }
9110 
9111  outInfo.unusedBytes = size - outInfo.usedBytes;
9112 }
9113 
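// The statistics above follow the usual running min/max idiom: *Min fields
// start at UINT64_MAX, *Max fields at 0, and each sample is folded in with
// VMA_MIN or VMA_MAX respectively. Isolated as a sketch (hypothetical helper):
static void ExampleFoldSample(VkDeviceSize sample, VkDeviceSize& inOutMin, VkDeviceSize& inOutMax)
{
    inOutMin = VMA_MIN(inOutMin, sample); // Never grows; starts at UINT64_MAX.
    inOutMax = VMA_MAX(inOutMax, sample); // Never shrinks; starts at 0.
}
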
9114 void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
9115 {
9116  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9117  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9118  const VkDeviceSize size = GetSize();
9119  const size_t suballoc1stCount = suballocations1st.size();
9120  const size_t suballoc2ndCount = suballocations2nd.size();
9121 
9122  inoutStats.size += size;
9123 
9124  VkDeviceSize lastOffset = 0;
9125 
9126  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9127  {
9128  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9129  size_t nextAlloc2ndIndex = 0; // 2nd vector is always scanned from its beginning.
9130  while(lastOffset < freeSpace2ndTo1stEnd)
9131  {
9132  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9133  while(nextAlloc2ndIndex < suballoc2ndCount &&
9134  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9135  {
9136  ++nextAlloc2ndIndex;
9137  }
9138 
9139  // Found non-null allocation.
9140  if(nextAlloc2ndIndex < suballoc2ndCount)
9141  {
9142  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9143 
9144  // 1. Process free space before this allocation.
9145  if(lastOffset < suballoc.offset)
9146  {
9147  // There is free space from lastOffset to suballoc.offset.
9148  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9149  inoutStats.unusedSize += unusedRangeSize;
9150  ++inoutStats.unusedRangeCount;
9151  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9152  }
9153 
9154  // 2. Process this allocation.
9155  // There is allocation with suballoc.offset, suballoc.size.
9156  ++inoutStats.allocationCount;
9157 
9158  // 3. Prepare for next iteration.
9159  lastOffset = suballoc.offset + suballoc.size;
9160  ++nextAlloc2ndIndex;
9161  }
9162  // We are at the end.
9163  else
9164  {
9165  if(lastOffset < freeSpace2ndTo1stEnd)
9166  {
9167  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9168  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
9169  inoutStats.unusedSize += unusedRangeSize;
9170  ++inoutStats.unusedRangeCount;
9171  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9172  }
9173 
9174  // End of loop.
9175  lastOffset = freeSpace2ndTo1stEnd;
9176  }
9177  }
9178  }
9179 
9180  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
9181  const VkDeviceSize freeSpace1stTo2ndEnd =
9182  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
9183  while(lastOffset < freeSpace1stTo2ndEnd)
9184  {
9185  // Find next non-null allocation or move nextAlloc1stIndex to the end.
9186  while(nextAlloc1stIndex < suballoc1stCount &&
9187  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9188  {
9189  ++nextAlloc1stIndex;
9190  }
9191 
9192  // Found non-null allocation.
9193  if(nextAlloc1stIndex < suballoc1stCount)
9194  {
9195  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9196 
9197  // 1. Process free space before this allocation.
9198  if(lastOffset < suballoc.offset)
9199  {
9200  // There is free space from lastOffset to suballoc.offset.
9201  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9202  inoutStats.unusedSize += unusedRangeSize;
9203  ++inoutStats.unusedRangeCount;
9204  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9205  }
9206 
9207  // 2. Process this allocation.
9208  // There is allocation with suballoc.offset, suballoc.size.
9209  ++inoutStats.allocationCount;
9210 
9211  // 3. Prepare for next iteration.
9212  lastOffset = suballoc.offset + suballoc.size;
9213  ++nextAlloc1stIndex;
9214  }
9215  // We are at the end.
9216  else
9217  {
9218  if(lastOffset < freeSpace1stTo2ndEnd)
9219  {
9220  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9221  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
9222  inoutStats.unusedSize += unusedRangeSize;
9223  ++inoutStats.unusedRangeCount;
9224  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9225  }
9226 
9227  // End of loop.
9228  lastOffset = freeSpace1stTo2ndEnd;
9229  }
9230  }
9231 
9232  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9233  {
9234  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9235  while(lastOffset < size)
9236  {
9237  // Find next non-null allocation or move nextAlloc2ndIndex past the beginning.
9238  while(nextAlloc2ndIndex != SIZE_MAX &&
9239  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9240  {
9241  --nextAlloc2ndIndex;
9242  }
9243 
9244  // Found non-null allocation.
9245  if(nextAlloc2ndIndex != SIZE_MAX)
9246  {
9247  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9248 
9249  // 1. Process free space before this allocation.
9250  if(lastOffset < suballoc.offset)
9251  {
9252  // There is free space from lastOffset to suballoc.offset.
9253  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9254  inoutStats.unusedSize += unusedRangeSize;
9255  ++inoutStats.unusedRangeCount;
9256  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9257  }
9258 
9259  // 2. Process this allocation.
9260  // There is allocation with suballoc.offset, suballoc.size.
9261  ++inoutStats.allocationCount;
9262 
9263  // 3. Prepare for next iteration.
9264  lastOffset = suballoc.offset + suballoc.size;
9265  --nextAlloc2ndIndex;
9266  }
9267  // We are at the end.
9268  else
9269  {
9270  if(lastOffset < size)
9271  {
9272  // There is free space from lastOffset to size.
9273  const VkDeviceSize unusedRangeSize = size - lastOffset;
9274  inoutStats.unusedSize += unusedRangeSize;
9275  ++inoutStats.unusedRangeCount;
9276  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9277  }
9278 
9279  // End of loop.
9280  lastOffset = size;
9281  }
9282  }
9283  }
9284 }
9285 
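// CalcAllocationStatInfo(), AddPoolStats() and PrintDetailedMap() all share one
// iteration idiom: advance an index past null (freed) items, then either
// process the live allocation found or account for the trailing free range and
// stop. Distilled to a sketch over an array where size 0 marks a freed slot
// (a hypothetical representation, for illustration only):
static VkDeviceSize ExampleSumUsed(const VkDeviceSize* sizes, size_t count)
{
    VkDeviceSize sum = 0;
    size_t i = 0;
    while(i < count)
    {
        while(i < count && sizes[i] == 0) // Skip freed (null) items.
        {
            ++i;
        }
        if(i < count) // Found a live allocation.
        {
            sum += sizes[i];
            ++i;
        }
    }
    return sum;
}
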
9286 #if VMA_STATS_STRING_ENABLED
9287 void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
9288 {
9289  const VkDeviceSize size = GetSize();
9290  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9291  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9292  const size_t suballoc1stCount = suballocations1st.size();
9293  const size_t suballoc2ndCount = suballocations2nd.size();
9294 
9295  // FIRST PASS
9296 
9297  size_t unusedRangeCount = 0;
9298  VkDeviceSize usedBytes = 0;
9299 
9300  VkDeviceSize lastOffset = 0;
9301 
9302  size_t alloc2ndCount = 0;
9303  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9304  {
9305  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9306  size_t nextAlloc2ndIndex = 0;
9307  while(lastOffset < freeSpace2ndTo1stEnd)
9308  {
9309  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9310  while(nextAlloc2ndIndex < suballoc2ndCount &&
9311  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9312  {
9313  ++nextAlloc2ndIndex;
9314  }
9315 
9316  // Found non-null allocation.
9317  if(nextAlloc2ndIndex < suballoc2ndCount)
9318  {
9319  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9320 
9321  // 1. Process free space before this allocation.
9322  if(lastOffset < suballoc.offset)
9323  {
9324  // There is free space from lastOffset to suballoc.offset.
9325  ++unusedRangeCount;
9326  }
9327 
9328  // 2. Process this allocation.
9329  // There is allocation with suballoc.offset, suballoc.size.
9330  ++alloc2ndCount;
9331  usedBytes += suballoc.size;
9332 
9333  // 3. Prepare for next iteration.
9334  lastOffset = suballoc.offset + suballoc.size;
9335  ++nextAlloc2ndIndex;
9336  }
9337  // We are at the end.
9338  else
9339  {
9340  if(lastOffset < freeSpace2ndTo1stEnd)
9341  {
9342  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9343  ++unusedRangeCount;
9344  }
9345 
9346  // End of loop.
9347  lastOffset = freeSpace2ndTo1stEnd;
9348  }
9349  }
9350  }
9351 
9352  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
9353  size_t alloc1stCount = 0;
9354  const VkDeviceSize freeSpace1stTo2ndEnd =
9355  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
9356  while(lastOffset < freeSpace1stTo2ndEnd)
9357  {
9358  // Find next non-null allocation or move nextAlloc1stIndex to the end.
9359  while(nextAlloc1stIndex < suballoc1stCount &&
9360  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9361  {
9362  ++nextAlloc1stIndex;
9363  }
9364 
9365  // Found non-null allocation.
9366  if(nextAlloc1stIndex < suballoc1stCount)
9367  {
9368  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9369 
9370  // 1. Process free space before this allocation.
9371  if(lastOffset < suballoc.offset)
9372  {
9373  // There is free space from lastOffset to suballoc.offset.
9374  ++unusedRangeCount;
9375  }
9376 
9377  // 2. Process this allocation.
9378  // There is allocation with suballoc.offset, suballoc.size.
9379  ++alloc1stCount;
9380  usedBytes += suballoc.size;
9381 
9382  // 3. Prepare for next iteration.
9383  lastOffset = suballoc.offset + suballoc.size;
9384  ++nextAlloc1stIndex;
9385  }
9386  // We are at the end.
9387  else
9388  {
9389  if(lastOffset < freeSpace1stTo2ndEnd)
9390  {
9391  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9392  ++unusedRangeCount;
9393  }
9394 
9395  // End of loop.
9396  lastOffset = freeSpace1stTo2ndEnd;
9397  }
9398  }
9399 
9400  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9401  {
9402  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9403  while(lastOffset < size)
9404  {
9405  // Find next non-null allocation or move nextAlloc2ndIndex past the beginning.
9406  while(nextAlloc2ndIndex != SIZE_MAX &&
9407  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9408  {
9409  --nextAlloc2ndIndex;
9410  }
9411 
9412  // Found non-null allocation.
9413  if(nextAlloc2ndIndex != SIZE_MAX)
9414  {
9415  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9416 
9417  // 1. Process free space before this allocation.
9418  if(lastOffset < suballoc.offset)
9419  {
9420  // There is free space from lastOffset to suballoc.offset.
9421  ++unusedRangeCount;
9422  }
9423 
9424  // 2. Process this allocation.
9425  // There is allocation with suballoc.offset, suballoc.size.
9426  ++alloc2ndCount;
9427  usedBytes += suballoc.size;
9428 
9429  // 3. Prepare for next iteration.
9430  lastOffset = suballoc.offset + suballoc.size;
9431  --nextAlloc2ndIndex;
9432  }
9433  // We are at the end.
9434  else
9435  {
9436  if(lastOffset < size)
9437  {
9438  // There is free space from lastOffset to size.
9439  ++unusedRangeCount;
9440  }
9441 
9442  // End of loop.
9443  lastOffset = size;
9444  }
9445  }
9446  }
9447 
9448  const VkDeviceSize unusedBytes = size - usedBytes;
9449  PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);
9450 
9451  // SECOND PASS
9452  lastOffset = 0;
9453 
9454  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9455  {
9456  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9457  size_t nextAlloc2ndIndex = 0;
9458  while(lastOffset < freeSpace2ndTo1stEnd)
9459  {
9460  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9461  while(nextAlloc2ndIndex < suballoc2ndCount &&
9462  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9463  {
9464  ++nextAlloc2ndIndex;
9465  }
9466 
9467  // Found non-null allocation.
9468  if(nextAlloc2ndIndex < suballoc2ndCount)
9469  {
9470  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9471 
9472  // 1. Process free space before this allocation.
9473  if(lastOffset < suballoc.offset)
9474  {
9475  // There is free space from lastOffset to suballoc.offset.
9476  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9477  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9478  }
9479 
9480  // 2. Process this allocation.
9481  // There is allocation with suballoc.offset, suballoc.size.
9482  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
9483 
9484  // 3. Prepare for next iteration.
9485  lastOffset = suballoc.offset + suballoc.size;
9486  ++nextAlloc2ndIndex;
9487  }
9488  // We are at the end.
9489  else
9490  {
9491  if(lastOffset < freeSpace2ndTo1stEnd)
9492  {
9493  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9494  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
9495  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9496  }
9497 
9498  // End of loop.
9499  lastOffset = freeSpace2ndTo1stEnd;
9500  }
9501  }
9502  }
9503 
9504  nextAlloc1stIndex = m_1stNullItemsBeginCount;
9505  while(lastOffset < freeSpace1stTo2ndEnd)
9506  {
9507  // Find next non-null allocation or move nextAlloc1stIndex to the end.
9508  while(nextAlloc1stIndex < suballoc1stCount &&
9509  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9510  {
9511  ++nextAlloc1stIndex;
9512  }
9513 
9514  // Found non-null allocation.
9515  if(nextAlloc1stIndex < suballoc1stCount)
9516  {
9517  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9518 
9519  // 1. Process free space before this allocation.
9520  if(lastOffset < suballoc.offset)
9521  {
9522  // There is free space from lastOffset to suballoc.offset.
9523  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9524  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9525  }
9526 
9527  // 2. Process this allocation.
9528  // There is allocation with suballoc.offset, suballoc.size.
9529  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
9530 
9531  // 3. Prepare for next iteration.
9532  lastOffset = suballoc.offset + suballoc.size;
9533  ++nextAlloc1stIndex;
9534  }
9535  // We are at the end.
9536  else
9537  {
9538  if(lastOffset < freeSpace1stTo2ndEnd)
9539  {
9540  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9541  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
9542  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9543  }
9544 
9545  // End of loop.
9546  lastOffset = freeSpace1stTo2ndEnd;
9547  }
9548  }
9549 
9550  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9551  {
9552  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9553  while(lastOffset < size)
9554  {
9555  // Find next non-null allocation or move nextAlloc2ndIndex past the beginning.
9556  while(nextAlloc2ndIndex != SIZE_MAX &&
9557  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9558  {
9559  --nextAlloc2ndIndex;
9560  }
9561 
9562  // Found non-null allocation.
9563  if(nextAlloc2ndIndex != SIZE_MAX)
9564  {
9565  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9566 
9567  // 1. Process free space before this allocation.
9568  if(lastOffset < suballoc.offset)
9569  {
9570  // There is free space from lastOffset to suballoc.offset.
9571  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9572  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9573  }
9574 
9575  // 2. Process this allocation.
9576  // There is allocation with suballoc.offset, suballoc.size.
9577  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
9578 
9579  // 3. Prepare for next iteration.
9580  lastOffset = suballoc.offset + suballoc.size;
9581  --nextAlloc2ndIndex;
9582  }
9583  // We are at the end.
9584  else
9585  {
9586  if(lastOffset < size)
9587  {
9588  // There is free space from lastOffset to size.
9589  const VkDeviceSize unusedRangeSize = size - lastOffset;
9590  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9591  }
9592 
9593  // End of loop.
9594  lastOffset = size;
9595  }
9596  }
9597  }
9598 
9599  PrintDetailedMap_End(json);
9600 }
9601 #endif // #if VMA_STATS_STRING_ENABLED
9602 
9603 bool VmaBlockMetadata_Linear::CreateAllocationRequest(
9604  uint32_t currentFrameIndex,
9605  uint32_t frameInUseCount,
9606  VkDeviceSize bufferImageGranularity,
9607  VkDeviceSize allocSize,
9608  VkDeviceSize allocAlignment,
9609  bool upperAddress,
9610  VmaSuballocationType allocType,
9611  bool canMakeOtherLost,
9612  uint32_t strategy,
9613  VmaAllocationRequest* pAllocationRequest)
9614 {
9615  VMA_ASSERT(allocSize > 0);
9616  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
9617  VMA_ASSERT(pAllocationRequest != VMA_NULL);
9618  VMA_HEAVY_ASSERT(Validate());
9619 
9620  const VkDeviceSize size = GetSize();
9621  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9622  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9623 
9624  if(upperAddress)
9625  {
9626  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9627  {
9628  VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
9629  return false;
9630  }
9631 
9632  // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
9633  if(allocSize > size)
9634  {
9635  return false;
9636  }
9637  VkDeviceSize resultBaseOffset = size - allocSize;
9638  if(!suballocations2nd.empty())
9639  {
9640  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
9641  resultBaseOffset = lastSuballoc.offset - allocSize;
9642  if(allocSize > lastSuballoc.offset)
9643  {
9644  return false;
9645  }
9646  }
9647 
9648  // Start from offset equal to end of free space.
9649  VkDeviceSize resultOffset = resultBaseOffset;
9650 
9651  // Apply VMA_DEBUG_MARGIN at the end.
9652  if(VMA_DEBUG_MARGIN > 0)
9653  {
9654  if(resultOffset < VMA_DEBUG_MARGIN)
9655  {
9656  return false;
9657  }
9658  resultOffset -= VMA_DEBUG_MARGIN;
9659  }
9660 
9661  // Apply alignment.
9662  resultOffset = VmaAlignDown(resultOffset, allocAlignment);
9663 
9664  // Check next suballocations from 2nd for BufferImageGranularity conflicts.
9665  // Make bigger alignment if necessary.
9666  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
9667  {
9668  bool bufferImageGranularityConflict = false;
9669  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
9670  {
9671  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
9672  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
9673  {
9674  if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
9675  {
9676  bufferImageGranularityConflict = true;
9677  break;
9678  }
9679  }
9680  else
9681  // Already on next page.
9682  break;
9683  }
9684  if(bufferImageGranularityConflict)
9685  {
9686  resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
9687  }
9688  }
9689 
9690  // There is enough free space.
9691  const VkDeviceSize endOf1st = !suballocations1st.empty() ?
9692  suballocations1st.back().offset + suballocations1st.back().size :
9693  0;
9694  if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
9695  {
9696  // Check previous suballocations for BufferImageGranularity conflicts.
9697  // If conflict exists, allocation cannot be made here.
9698  if(bufferImageGranularity > 1)
9699  {
9700  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
9701  {
9702  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
9703  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
9704  {
9705  if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
9706  {
9707  return false;
9708  }
9709  }
9710  else
9711  {
9712  // Already on next page.
9713  break;
9714  }
9715  }
9716  }
9717 
9718  // All tests passed: Success.
9719  pAllocationRequest->offset = resultOffset;
9720  pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
9721  pAllocationRequest->sumItemSize = 0;
9722  // pAllocationRequest->item unused.
9723  pAllocationRequest->itemsToMakeLostCount = 0;
9724  return true;
9725  }
9726  }
9727  else // !upperAddress
9728  {
9729  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9730  {
9731  // Try to allocate at the end of 1st vector.
9732 
9733  VkDeviceSize resultBaseOffset = 0;
9734  if(!suballocations1st.empty())
9735  {
9736  const VmaSuballocation& lastSuballoc = suballocations1st.back();
9737  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
9738  }
9739 
9740  // Start from offset equal to beginning of free space.
9741  VkDeviceSize resultOffset = resultBaseOffset;
9742 
9743  // Apply VMA_DEBUG_MARGIN at the beginning.
9744  if(VMA_DEBUG_MARGIN > 0)
9745  {
9746  resultOffset += VMA_DEBUG_MARGIN;
9747  }
9748 
9749  // Apply alignment.
9750  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
9751 
9752  // Check previous suballocations for BufferImageGranularity conflicts.
9753  // Make bigger alignment if necessary.
9754  if(bufferImageGranularity > 1 && !suballocations1st.empty())
9755  {
9756  bool bufferImageGranularityConflict = false;
9757  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
9758  {
9759  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
9760  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
9761  {
9762  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
9763  {
9764  bufferImageGranularityConflict = true;
9765  break;
9766  }
9767  }
9768  else
9769  // Already on previous page.
9770  break;
9771  }
9772  if(bufferImageGranularityConflict)
9773  {
9774  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
9775  }
9776  }
9777 
9778  const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
9779  suballocations2nd.back().offset : size;
9780 
9781  // There is enough free space at the end after alignment.
9782  if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
9783  {
9784  // Check next suballocations for BufferImageGranularity conflicts.
9785  // If conflict exists, allocation cannot be made here.
9786  if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9787  {
9788  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
9789  {
9790  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
9791  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
9792  {
9793  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
9794  {
9795  return false;
9796  }
9797  }
9798  else
9799  {
9800  // Already on next page.
9801  break;
9802  }
9803  }
9804  }
9805 
9806  // All tests passed: Success.
9807  pAllocationRequest->offset = resultOffset;
9808  pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
9809  pAllocationRequest->sumItemSize = 0;
9810  // pAllocationRequest->item unused.
9811  pAllocationRequest->itemsToMakeLostCount = 0;
9812  return true;
9813  }
9814  }
9815 
9816  // Wrap-around to end of 2nd vector. Try to allocate there, watching for the
9817  // beginning of 1st vector as the end of free space.
9818  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9819  {
9820  VMA_ASSERT(!suballocations1st.empty());
9821 
9822  VkDeviceSize resultBaseOffset = 0;
9823  if(!suballocations2nd.empty())
9824  {
9825  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
9826  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
9827  }
9828 
9829  // Start from offset equal to beginning of free space.
9830  VkDeviceSize resultOffset = resultBaseOffset;
9831 
9832  // Apply VMA_DEBUG_MARGIN at the beginning.
9833  if(VMA_DEBUG_MARGIN > 0)
9834  {
9835  resultOffset += VMA_DEBUG_MARGIN;
9836  }
9837 
9838  // Apply alignment.
9839  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
9840 
9841  // Check previous suballocations for BufferImageGranularity conflicts.
9842  // Make bigger alignment if necessary.
9843  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
9844  {
9845  bool bufferImageGranularityConflict = false;
9846  for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
9847  {
9848  const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
9849  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
9850  {
9851  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
9852  {
9853  bufferImageGranularityConflict = true;
9854  break;
9855  }
9856  }
9857  else
9858  // Already on previous page.
9859  break;
9860  }
9861  if(bufferImageGranularityConflict)
9862  {
9863  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
9864  }
9865  }
9866 
9867  pAllocationRequest->itemsToMakeLostCount = 0;
9868  pAllocationRequest->sumItemSize = 0;
9869  size_t index1st = m_1stNullItemsBeginCount;
9870 
9871  if(canMakeOtherLost)
9872  {
9873  while(index1st < suballocations1st.size() &&
9874  resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
9875  {
9876  // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
9877  const VmaSuballocation& suballoc = suballocations1st[index1st];
9878  if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
9879  {
9880  // No problem.
9881  }
9882  else
9883  {
9884  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
9885  if(suballoc.hAllocation->CanBecomeLost() &&
9886  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
9887  {
9888  ++pAllocationRequest->itemsToMakeLostCount;
9889  pAllocationRequest->sumItemSize += suballoc.size;
9890  }
9891  else
9892  {
9893  return false;
9894  }
9895  }
9896  ++index1st;
9897  }
9898 
9899  // Check next suballocations for BufferImageGranularity conflicts.
9900  // If conflict exists, we must mark more allocations lost or fail.
9901  if(bufferImageGranularity > 1)
9902  {
9903  while(index1st < suballocations1st.size())
9904  {
9905  const VmaSuballocation& suballoc = suballocations1st[index1st];
9906  if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
9907  {
9908  if(suballoc.hAllocation != VK_NULL_HANDLE)
9909  {
9910  // Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
9911  if(suballoc.hAllocation->CanBecomeLost() &&
9912  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
9913  {
9914  ++pAllocationRequest->itemsToMakeLostCount;
9915  pAllocationRequest->sumItemSize += suballoc.size;
9916  }
9917  else
9918  {
9919  return false;
9920  }
9921  }
9922  }
9923  else
9924  {
9925  // Already on next page.
9926  break;
9927  }
9928  ++index1st;
9929  }
9930  }
9931  }
9932 
9933  // There is enough free space at the end after alignment.
9934  if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN < size) ||
9935  (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
9936  {
9937  // Check next suballocations for BufferImageGranularity conflicts.
9938  // If conflict exists, allocation cannot be made here.
9939  if(bufferImageGranularity > 1)
9940  {
9941  for(size_t nextSuballocIndex = index1st;
9942  nextSuballocIndex < suballocations1st.size();
9943  nextSuballocIndex++)
9944  {
9945  const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
9946  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
9947  {
9948  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
9949  {
9950  return false;
9951  }
9952  }
9953  else
9954  {
9955  // Already on next page.
9956  break;
9957  }
9958  }
9959  }
9960 
9961  // All tests passed: Success.
9962  pAllocationRequest->offset = resultOffset;
9963  pAllocationRequest->sumFreeSize =
9964  (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
9965  - resultBaseOffset
9966  - pAllocationRequest->sumItemSize;
9967  // pAllocationRequest->item unused.
9968  return true;
9969  }
9970  }
9971  }
9972 
9973  return false;
9974 }
9975 
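// The upper-address branch above works downward: subtract the size from the
// top of free space, reserve the debug margin, then align *down* with
// VmaAlignDown. A standalone sketch of that placement, assuming a nonzero
// alignment (ExamplePlaceFromTop is a hypothetical helper):
static bool ExamplePlaceFromTop(
    VkDeviceSize freeBegin, VkDeviceSize freeEnd,
    VkDeviceSize allocSize, VkDeviceSize allocAlignment,
    VkDeviceSize* pOutOffset)
{
    if(allocSize > freeEnd)
    {
        return false; // Would underflow below offset 0.
    }
    VkDeviceSize offset = freeEnd - allocSize; // Tentatively flush against the top.
    if(VMA_DEBUG_MARGIN > 0)
    {
        if(offset < VMA_DEBUG_MARGIN)
        {
            return false; // No room for the margin after the allocation.
        }
        offset -= VMA_DEBUG_MARGIN;
    }
    offset = VmaAlignDown(offset, allocAlignment); // Aligning down only moves us lower.
    if(offset < freeBegin + VMA_DEBUG_MARGIN)
    {
        return false; // Margin before the allocation no longer fits.
    }
    *pOutOffset = offset;
    return true;
}
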
9976 bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
9977  uint32_t currentFrameIndex,
9978  uint32_t frameInUseCount,
9979  VmaAllocationRequest* pAllocationRequest)
9980 {
9981  if(pAllocationRequest->itemsToMakeLostCount == 0)
9982  {
9983  return true;
9984  }
9985 
9986  VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);
9987 
9988  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9989  size_t index1st = m_1stNullItemsBeginCount;
9990  size_t madeLostCount = 0;
9991  while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
9992  {
9993  VMA_ASSERT(index1st < suballocations1st.size());
9994  VmaSuballocation& suballoc = suballocations1st[index1st];
9995  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
9996  {
9997  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
9998  VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
9999  if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
10000  {
10001  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10002  suballoc.hAllocation = VK_NULL_HANDLE;
10003  m_SumFreeSize += suballoc.size;
10004  ++m_1stNullItemsMiddleCount;
10005  ++madeLostCount;
10006  }
10007  else
10008  {
10009  return false;
10010  }
10011  }
10012  ++index1st;
10013  }
10014 
10015  CleanupAfterFree();
10016  //VMA_HEAVY_ASSERT(Validate()); // Already called by CleanupAfterFree().
10017 
10018  return true;
10019 }
10020 
10021 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
10022 {
10023  uint32_t lostAllocationCount = 0;
10024 
10025  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10026  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
10027  {
10028  VmaSuballocation& suballoc = suballocations1st[i];
10029  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
10030  suballoc.hAllocation->CanBecomeLost() &&
10031  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
10032  {
10033  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10034  suballoc.hAllocation = VK_NULL_HANDLE;
10035  ++m_1stNullItemsMiddleCount;
10036  m_SumFreeSize += suballoc.size;
10037  ++lostAllocationCount;
10038  }
10039  }
10040 
10041  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10042  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
10043  {
10044  VmaSuballocation& suballoc = suballocations2nd[i];
10045  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
10046  suballoc.hAllocation->CanBecomeLost() &&
10047  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
10048  {
10049  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10050  suballoc.hAllocation = VK_NULL_HANDLE;
10051  ++m_2ndNullItemsCount;
10052  ++lostAllocationCount;
10053  }
10054  }
10055 
10056  if(lostAllocationCount)
10057  {
10058  CleanupAfterFree();
10059  }
10060 
10061  return lostAllocationCount;
10062 }
10063 
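// An allocation above may be sacrificed only if it opted in to becoming lost
// and has not been used for more than frameInUseCount frames. That age test,
// isolated as a sketch (hypothetical helper; it mirrors the condition used in
// CreateAllocationRequest above):
static bool ExampleCanMakeLostNow(
    uint32_t lastUseFrameIndex, uint32_t frameInUseCount, uint32_t currentFrameIndex)
{
    // Eligible once the last use is strictly older than the window of frames
    // that may still be in flight on the GPU.
    return lastUseFrameIndex + frameInUseCount < currentFrameIndex;
}
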
10064 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
10065 {
10066  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10067  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
10068  {
10069  const VmaSuballocation& suballoc = suballocations1st[i];
10070  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
10071  {
10072  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
10073  {
10074  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
10075  return VK_ERROR_VALIDATION_FAILED_EXT;
10076  }
10077  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
10078  {
10079  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
10080  return VK_ERROR_VALIDATION_FAILED_EXT;
10081  }
10082  }
10083  }
10084 
10085  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10086  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
10087  {
10088  const VmaSuballocation& suballoc = suballocations2nd[i];
10089  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
10090  {
10091  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
10092  {
10093  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
10094  return VK_ERROR_VALIDATION_FAILED_EXT;
10095  }
10096  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
10097  {
10098  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
10099  return VK_ERROR_VALIDATION_FAILED_EXT;
10100  }
10101  }
10102  }
10103 
10104  return VK_SUCCESS;
10105 }
10106 
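// CheckCorruption() above assumes the VMA_DEBUG_MARGIN bytes on each side of
// every live allocation were filled with a known 32-bit pattern when the
// allocation was created. A self-contained sketch of such a validation step
// (ExampleValidateMagic and the sentinel value are hypothetical):
static bool ExampleValidateMagic(const void* pBlockData, VkDeviceSize offset)
{
    const uint32_t exampleMagic = 0xDEADBEEF; // Arbitrary sentinel for this sketch.
    const uint32_t* p = reinterpret_cast<const uint32_t*>(
        static_cast<const char*>(pBlockData) + static_cast<size_t>(offset));
    for(size_t i = 0; i < VMA_DEBUG_MARGIN / sizeof(uint32_t); ++i)
    {
        if(p[i] != exampleMagic)
        {
            return false; // Some write strayed outside its allocation.
        }
    }
    return true;
}
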
10107 void VmaBlockMetadata_Linear::Alloc(
10108  const VmaAllocationRequest& request,
10109  VmaSuballocationType type,
10110  VkDeviceSize allocSize,
10111  bool upperAddress,
10112  VmaAllocation hAllocation)
10113 {
10114  const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };
10115 
10116  if(upperAddress)
10117  {
10118  VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
10119  "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
10120  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10121  suballocations2nd.push_back(newSuballoc);
10122  m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
10123  }
10124  else
10125  {
10126  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10127 
10128  // First allocation.
10129  if(suballocations1st.empty())
10130  {
10131  suballocations1st.push_back(newSuballoc);
10132  }
10133  else
10134  {
10135  // New allocation at the end of 1st vector.
10136  if(request.offset >= suballocations1st.back().offset + suballocations1st.back().size)
10137  {
10138  // Check if it fits before the end of the block.
10139  VMA_ASSERT(request.offset + allocSize <= GetSize());
10140  suballocations1st.push_back(newSuballoc);
10141  }
10142  // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
10143  else if(request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset)
10144  {
10145  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10146 
10147  switch(m_2ndVectorMode)
10148  {
10149  case SECOND_VECTOR_EMPTY:
10150  // First allocation from second part ring buffer.
10151  VMA_ASSERT(suballocations2nd.empty());
10152  m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
10153  break;
10154  case SECOND_VECTOR_RING_BUFFER:
10155  // 2-part ring buffer is already started.
10156  VMA_ASSERT(!suballocations2nd.empty());
10157  break;
10158  case SECOND_VECTOR_DOUBLE_STACK:
10159  VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
10160  break;
10161  default:
10162  VMA_ASSERT(0);
10163  }
10164 
10165  suballocations2nd.push_back(newSuballoc);
10166  }
10167  else
10168  {
10169  VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
10170  }
10171  }
10172  }
10173 
10174  m_SumFreeSize -= newSuballoc.size;
10175 }
10176 
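// A concrete reading of the branch above (hypothetical offsets, block size
// 100): with 1st holding [10, 40) and [40, 70), a request placed at offset 70
// appends to 1st; a request placed at offset 0 fits before the first live item
// of 1st, so it is pushed onto 2nd and the block switches to
// SECOND_VECTOR_RING_BUFFER mode.
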
10177 void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
10178 {
10179  FreeAtOffset(allocation->GetOffset());
10180 }
10181 
10182 void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
10183 {
10184  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10185  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10186 
10187  if(!suballocations1st.empty())
10188  {
10189  // If it's the first live allocation in 1st vector: mark it free and grow the null prefix.
10190  VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
10191  if(firstSuballoc.offset == offset)
10192  {
10193  firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10194  firstSuballoc.hAllocation = VK_NULL_HANDLE;
10195  m_SumFreeSize += firstSuballoc.size;
10196  ++m_1stNullItemsBeginCount;
10197  CleanupAfterFree();
10198  return;
10199  }
10200  }
10201 
10202  // Last allocation in 2-part ring buffer or top of upper stack (same logic).
10203  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
10204  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
10205  {
10206  VmaSuballocation& lastSuballoc = suballocations2nd.back();
10207  if(lastSuballoc.offset == offset)
10208  {
10209  m_SumFreeSize += lastSuballoc.size;
10210  suballocations2nd.pop_back();
10211  CleanupAfterFree();
10212  return;
10213  }
10214  }
10215  // Last allocation in 1st vector.
10216  else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
10217  {
10218  VmaSuballocation& lastSuballoc = suballocations1st.back();
10219  if(lastSuballoc.offset == offset)
10220  {
10221  m_SumFreeSize += lastSuballoc.size;
10222  suballocations1st.pop_back();
10223  CleanupAfterFree();
10224  return;
10225  }
10226  }
10227 
10228  // Item from the middle of 1st vector.
10229  {
10230  VmaSuballocation refSuballoc;
10231  refSuballoc.offset = offset;
10232  // The rest of the members intentionally stay uninitialized, for better performance.
10233  SuballocationVectorType::iterator it = VmaVectorFindSorted<VmaSuballocationOffsetLess>(
10234  suballocations1st.begin() + m_1stNullItemsBeginCount,
10235  suballocations1st.end(),
10236  refSuballoc);
10237  if(it != suballocations1st.end())
10238  {
10239  it->type = VMA_SUBALLOCATION_TYPE_FREE;
10240  it->hAllocation = VK_NULL_HANDLE;
10241  ++m_1stNullItemsMiddleCount;
10242  m_SumFreeSize += it->size;
10243  CleanupAfterFree();
10244  return;
10245  }
10246  }
10247 
10248  if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
10249  {
10250  // Item from the middle of 2nd vector.
10251  VmaSuballocation refSuballoc;
10252  refSuballoc.offset = offset;
10253  // The rest of the members intentionally stay uninitialized, for better performance.
10254  SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
10255  VmaVectorFindSorted<VmaSuballocationOffsetLess>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc) :
10256  VmaVectorFindSorted<VmaSuballocationOffsetGreater>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc);
10257  if(it != suballocations2nd.end())
10258  {
10259  it->type = VMA_SUBALLOCATION_TYPE_FREE;
10260  it->hAllocation = VK_NULL_HANDLE;
10261  ++m_2ndNullItemsCount;
10262  m_SumFreeSize += it->size;
10263  CleanupAfterFree();
10264  return;
10265  }
10266  }
10267 
10268  VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
10269 }
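// Free lookup order, for reference: two O(1) fast paths first (the oldest live item
// at the beginning of the 1st vector, then the newest item at the back of the 2nd
// or 1st vector), falling back to O(log n) binary search. Note the 2nd vector is
// sorted by increasing offset in ring-buffer mode but by decreasing offset in
// double-stack mode, hence the OffsetLess vs. OffsetGreater comparators above.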
10270 
10271 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
10272 {
10273  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
10274  const size_t suballocCount = AccessSuballocations1st().size();
10275  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
10276 }
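// Worked example of the heuristic above: with 100 suballocations of which 70 are
// null, 70 * 2 = 140 >= (100 - 70) * 3 = 90, so the 1st vector gets compacted.
// In other words, compaction triggers once null items outnumber live items 1.5:1,
// but never for small vectors (32 items or fewer).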
10277 
10278 void VmaBlockMetadata_Linear::CleanupAfterFree()
10279 {
10280  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10281  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10282 
10283  if(IsEmpty())
10284  {
10285  suballocations1st.clear();
10286  suballocations2nd.clear();
10287  m_1stNullItemsBeginCount = 0;
10288  m_1stNullItemsMiddleCount = 0;
10289  m_2ndNullItemsCount = 0;
10290  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
10291  }
10292  else
10293  {
10294  const size_t suballoc1stCount = suballocations1st.size();
10295  const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
10296  VMA_ASSERT(nullItem1stCount <= suballoc1stCount);
10297 
10298  // Find more null items at the beginning of 1st vector.
10299  while(m_1stNullItemsBeginCount < suballoc1stCount &&
10300  suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
10301  {
10302  ++m_1stNullItemsBeginCount;
10303  --m_1stNullItemsMiddleCount;
10304  }
10305 
10306  // Find more null items at the end of 1st vector.
10307  while(m_1stNullItemsMiddleCount > 0 &&
10308  suballocations1st.back().hAllocation == VK_NULL_HANDLE)
10309  {
10310  --m_1stNullItemsMiddleCount;
10311  suballocations1st.pop_back();
10312  }
10313 
10314  // Find more null items at the end of 2nd vector.
10315  while(m_2ndNullItemsCount > 0 &&
10316  suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
10317  {
10318  --m_2ndNullItemsCount;
10319  suballocations2nd.pop_back();
10320  }
10321 
10322  if(ShouldCompact1st())
10323  {
10324  const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
10325  size_t srcIndex = m_1stNullItemsBeginCount;
10326  for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
10327  {
10328  while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
10329  {
10330  ++srcIndex;
10331  }
10332  if(dstIndex != srcIndex)
10333  {
10334  suballocations1st[dstIndex] = suballocations1st[srcIndex];
10335  }
10336  ++srcIndex;
10337  }
10338  suballocations1st.resize(nonNullItemCount);
10339  m_1stNullItemsBeginCount = 0;
10340  m_1stNullItemsMiddleCount = 0;
10341  }
10342 
10343  // 2nd vector became empty.
10344  if(suballocations2nd.empty())
10345  {
10346  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
10347  }
10348 
10349  // 1st vector became empty.
10350  if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
10351  {
10352  suballocations1st.clear();
10353  m_1stNullItemsBeginCount = 0;
10354 
10355  if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10356  {
10357  // Swap 1st with 2nd. Now 2nd is empty.
10358  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
10359  m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
10360  while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
10361  suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
10362  {
10363  ++m_1stNullItemsBeginCount;
10364  --m_1stNullItemsMiddleCount;
10365  }
10366  m_2ndNullItemsCount = 0;
10367  m_1stVectorIndex ^= 1;
10368  }
10369  }
10370  }
10371 
10372  VMA_HEAVY_ASSERT(Validate());
10373 }
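// Note on the swap above: AccessSuballocations1st()/2nd() select between the two
// underlying vectors based on m_1stVectorIndex, so flipping that bit
// (m_1stVectorIndex ^= 1) promotes the former 2nd vector to the 1st in O(1),
// without moving any items.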
10374 
10375 
10376 ////////////////////////////////////////////////////////////////////////////////
10377 // class VmaBlockMetadata_Buddy
10378 
10379 VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
10380  VmaBlockMetadata(hAllocator),
10381  m_Root(VMA_NULL),
10382  m_AllocationCount(0),
10383  m_FreeCount(1),
10384  m_SumFreeSize(0)
10385 {
10386  memset(m_FreeList, 0, sizeof(m_FreeList));
10387 }
10388 
10389 VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
10390 {
10391  DeleteNode(m_Root);
10392 }
10393 
10394 void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
10395 {
10396  VmaBlockMetadata::Init(size);
10397 
10398  m_UsableSize = VmaPrevPow2(size);
10399  m_SumFreeSize = m_UsableSize;
10400 
10401  // Calculate m_LevelCount.
10402  m_LevelCount = 1;
10403  while(m_LevelCount < MAX_LEVELS &&
10404  LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
10405  {
10406  ++m_LevelCount;
10407  }
10408 
10409  Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
10410  rootNode->offset = 0;
10411  rootNode->type = Node::TYPE_FREE;
10412  rootNode->parent = VMA_NULL;
10413  rootNode->buddy = VMA_NULL;
10414 
10415  m_Root = rootNode;
10416  AddToFreeListFront(0, rootNode);
10417 }
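// Level sizes, for reference: the node size halves at each level, so level 0 spans
// the whole m_UsableSize, level 1 half of it, and so on - e.g. a block with 256 MiB
// of usable size has 256 MiB nodes at level 0 and 128 MiB nodes at level 1 - until
// either MAX_LEVELS or MIN_NODE_SIZE cuts the hierarchy off.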
10418 
10419 bool VmaBlockMetadata_Buddy::Validate() const
10420 {
10421  // Validate tree.
10422  ValidationContext ctx;
10423  if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
10424  {
10425  VMA_VALIDATE(false && "ValidateNode failed.");
10426  }
10427  VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
10428  VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);
10429 
10430  // Validate free node lists.
10431  for(uint32_t level = 0; level < m_LevelCount; ++level)
10432  {
10433  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
10434  m_FreeList[level].front->free.prev == VMA_NULL);
10435 
10436  for(Node* node = m_FreeList[level].front;
10437  node != VMA_NULL;
10438  node = node->free.next)
10439  {
10440  VMA_VALIDATE(node->type == Node::TYPE_FREE);
10441 
10442  if(node->free.next == VMA_NULL)
10443  {
10444  VMA_VALIDATE(m_FreeList[level].back == node);
10445  }
10446  else
10447  {
10448  VMA_VALIDATE(node->free.next->free.prev == node);
10449  }
10450  }
10451  }
10452 
10453  // Validate that free lists at higher (unused) levels are empty.
10454  for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
10455  {
10456  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
10457  }
10458 
10459  return true;
10460 }
10461 
10462 VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
10463 {
10464  for(uint32_t level = 0; level < m_LevelCount; ++level)
10465  {
10466  if(m_FreeList[level].front != VMA_NULL)
10467  {
10468  return LevelToNodeSize(level);
10469  }
10470  }
10471  return 0;
10472 }
10473 
10474 void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
10475 {
10476  const VkDeviceSize unusableSize = GetUnusableSize();
10477 
10478  outInfo.blockCount = 1;
10479 
10480  outInfo.allocationCount = outInfo.unusedRangeCount = 0;
10481  outInfo.usedBytes = outInfo.unusedBytes = 0;
10482 
10483  outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
10484  outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
10485  outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.
10486 
10487  CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));
10488 
10489  if(unusableSize > 0)
10490  {
10491  ++outInfo.unusedRangeCount;
10492  outInfo.unusedBytes += unusableSize;
10493  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
10494  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
10495  }
10496 }
10497 
10498 void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
10499 {
10500  const VkDeviceSize unusableSize = GetUnusableSize();
10501 
10502  inoutStats.size += GetSize();
10503  inoutStats.unusedSize += m_SumFreeSize + unusableSize;
10504  inoutStats.allocationCount += m_AllocationCount;
10505  inoutStats.unusedRangeCount += m_FreeCount;
10506  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
10507 
10508  if(unusableSize > 0)
10509  {
10510  ++inoutStats.unusedRangeCount;
10511  // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations.
10512  }
10513 }
10514 
10515 #if VMA_STATS_STRING_ENABLED
10516 
10517 void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
10518 {
10519  // TODO optimize
10520  VmaStatInfo stat;
10521  CalcAllocationStatInfo(stat);
10522 
10523  PrintDetailedMap_Begin(
10524  json,
10525  stat.unusedBytes,
10526  stat.allocationCount,
10527  stat.unusedRangeCount);
10528 
10529  PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));
10530 
10531  const VkDeviceSize unusableSize = GetUnusableSize();
10532  if(unusableSize > 0)
10533  {
10534  PrintDetailedMap_UnusedRange(json,
10535  m_UsableSize, // offset
10536  unusableSize); // size
10537  }
10538 
10539  PrintDetailedMap_End(json);
10540 }
10541 
10542 #endif // #if VMA_STATS_STRING_ENABLED
10543 
10544 bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
10545  uint32_t currentFrameIndex,
10546  uint32_t frameInUseCount,
10547  VkDeviceSize bufferImageGranularity,
10548  VkDeviceSize allocSize,
10549  VkDeviceSize allocAlignment,
10550  bool upperAddress,
10551  VmaSuballocationType allocType,
10552  bool canMakeOtherLost,
10553  uint32_t strategy,
10554  VmaAllocationRequest* pAllocationRequest)
10555 {
10556  VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
10557 
10558  // Simple way to respect bufferImageGranularity; may be optimized some day.
10559  // Whenever the allocation might contain an OPTIMAL image, inflate both alignment and size to the granularity.
10560  if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
10561  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
10562  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
10563  {
10564  allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
10565  allocSize = VMA_MAX(allocSize, bufferImageGranularity);
10566  }
10567 
10568  if(allocSize > m_UsableSize)
10569  {
10570  return false;
10571  }
10572 
10573  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
10574  for(uint32_t level = targetLevel + 1; level--; )
10575  {
10576  for(Node* freeNode = m_FreeList[level].front;
10577  freeNode != VMA_NULL;
10578  freeNode = freeNode->free.next)
10579  {
10580  if(freeNode->offset % allocAlignment == 0)
10581  {
10582  pAllocationRequest->offset = freeNode->offset;
10583  pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
10584  pAllocationRequest->sumItemSize = 0;
10585  pAllocationRequest->itemsToMakeLostCount = 0;
10586  pAllocationRequest->customData = (void*)(uintptr_t)level;
10587  return true;
10588  }
10589  }
10590  }
10591 
10592  return false;
10593 }
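// Search order above, for reference: the loop starts at targetLevel (the deepest
// level whose nodes still fit allocSize) and walks up toward level 0, so the
// smallest adequate free node is preferred and larger nodes are only split when no
// smaller one satisfies the alignment.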
10594 
10595 bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
10596  uint32_t currentFrameIndex,
10597  uint32_t frameInUseCount,
10598  VmaAllocationRequest* pAllocationRequest)
10599 {
10600  /*
10601  Lost allocations are not supported in buddy allocator at the moment.
10602  Support might be added in the future.
10603  */
10604  return pAllocationRequest->itemsToMakeLostCount == 0;
10605 }
10606 
10607 uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
10608 {
10609  /*
10610  Lost allocations are not supported in buddy allocator at the moment.
10611  Support might be added in the future.
10612  */
10613  return 0;
10614 }
10615 
10616 void VmaBlockMetadata_Buddy::Alloc(
10617  const VmaAllocationRequest& request,
10618  VmaSuballocationType type,
10619  VkDeviceSize allocSize,
10620  bool upperAddress,
10621  VmaAllocation hAllocation)
10622 {
10623  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
10624  uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;
10625 
10626  Node* currNode = m_FreeList[currLevel].front;
10627  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
10628  while(currNode->offset != request.offset)
10629  {
10630  currNode = currNode->free.next;
10631  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
10632  }
10633 
10634  // Go down, splitting free nodes.
10635  while(currLevel < targetLevel)
10636  {
10637  // currNode is already first free node at currLevel.
10638  // Remove it from list of free nodes at this currLevel.
10639  RemoveFromFreeList(currLevel, currNode);
10640 
10641  const uint32_t childrenLevel = currLevel + 1;
10642 
10643  // Create two free sub-nodes.
10644  Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
10645  Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();
10646 
10647  leftChild->offset = currNode->offset;
10648  leftChild->type = Node::TYPE_FREE;
10649  leftChild->parent = currNode;
10650  leftChild->buddy = rightChild;
10651 
10652  rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
10653  rightChild->type = Node::TYPE_FREE;
10654  rightChild->parent = currNode;
10655  rightChild->buddy = leftChild;
10656 
10657  // Convert current currNode to split type.
10658  currNode->type = Node::TYPE_SPLIT;
10659  currNode->split.leftChild = leftChild;
10660 
10661  // Add child nodes to free list. Order is important!
10662  AddToFreeListFront(childrenLevel, rightChild);
10663  AddToFreeListFront(childrenLevel, leftChild);
10664 
10665  ++m_FreeCount;
10666  //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2.
10667  ++currLevel;
10668  currNode = m_FreeList[currLevel].front;
10669 
10670  /*
10671  We can be sure that currNode, as the left child of the node previously split,
10672  also fulfills the alignment requirement.
10673  */
10674  }
10675 
10676  // Remove from free list.
10677  VMA_ASSERT(currLevel == targetLevel &&
10678  currNode != VMA_NULL &&
10679  currNode->type == Node::TYPE_FREE);
10680  RemoveFromFreeList(currLevel, currNode);
10681 
10682  // Convert to allocation node.
10683  currNode->type = Node::TYPE_ALLOCATION;
10684  currNode->allocation.alloc = hAllocation;
10685 
10686  ++m_AllocationCount;
10687  --m_FreeCount;
10688  m_SumFreeSize -= allocSize;
10689 }
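// Bookkeeping note for the splitting loop above: every split replaces one free node
// with two free children, a net gain of one free node (hence the single
// ++m_FreeCount per iteration), and pushing the left child after the right one
// keeps it at the front of the free list, preserving the aligned offset for the
// next iteration.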
10690 
10691 void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
10692 {
10693  if(node->type == Node::TYPE_SPLIT)
10694  {
10695  DeleteNode(node->split.leftChild->buddy);
10696  DeleteNode(node->split.leftChild);
10697  }
10698 
10699  vma_delete(GetAllocationCallbacks(), node);
10700 }
10701 
10702 bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
10703 {
10704  VMA_VALIDATE(level < m_LevelCount);
10705  VMA_VALIDATE(curr->parent == parent);
10706  VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
10707  VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
10708  switch(curr->type)
10709  {
10710  case Node::TYPE_FREE:
10711  // curr->free.prev, next are validated separately.
10712  ctx.calculatedSumFreeSize += levelNodeSize;
10713  ++ctx.calculatedFreeCount;
10714  break;
10715  case Node::TYPE_ALLOCATION:
10716  VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE);
10717  ++ctx.calculatedAllocationCount;
10718  ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
10719  break;
10720  case Node::TYPE_SPLIT:
10721  {
10722  const uint32_t childrenLevel = level + 1;
10723  const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
10724  const Node* const leftChild = curr->split.leftChild;
10725  VMA_VALIDATE(leftChild != VMA_NULL);
10726  VMA_VALIDATE(leftChild->offset == curr->offset);
10727  if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
10728  {
10729  VMA_VALIDATE(false && "ValidateNode for left child failed.");
10730  }
10731  const Node* const rightChild = leftChild->buddy;
10732  VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
10733  if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
10734  {
10735  VMA_VALIDATE(false && "ValidateNode for right child failed.");
10736  }
10737  }
10738  break;
10739  default:
10740  return false;
10741  }
10742 
10743  return true;
10744 }
10745 
10746 uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
10747 {
10748  // I know this could be optimized somehow e.g. by using std::log2p1 from C++20.
10749  uint32_t level = 0;
10750  VkDeviceSize currLevelNodeSize = m_UsableSize;
10751  VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
10752  while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
10753  {
10754  ++level;
10755  currLevelNodeSize = nextLevelNodeSize;
10756  nextLevelNodeSize = currLevelNodeSize >> 1;
10757  }
10758  return level;
10759 }
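// Worked example (assuming enough levels): with m_UsableSize = 256 and
// allocSize = 50 the loop visits node sizes 256 -> 128 -> 64, stops because the
// next size (32) is too small, and returns level 2, i.e. the deepest level whose
// 64-byte nodes still hold 50 bytes.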
10760 
10761 void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
10762 {
10763  // Find node and level.
10764  Node* node = m_Root;
10765  VkDeviceSize nodeOffset = 0;
10766  uint32_t level = 0;
10767  VkDeviceSize levelNodeSize = LevelToNodeSize(0);
10768  while(node->type == Node::TYPE_SPLIT)
10769  {
10770  const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
10771  if(offset < nodeOffset + nextLevelSize)
10772  {
10773  node = node->split.leftChild;
10774  }
10775  else
10776  {
10777  node = node->split.leftChild->buddy;
10778  nodeOffset += nextLevelSize;
10779  }
10780  ++level;
10781  levelNodeSize = nextLevelSize;
10782  }
10783 
10784  VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
10785  VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);
10786 
10787  ++m_FreeCount;
10788  --m_AllocationCount;
10789  m_SumFreeSize += alloc->GetSize();
10790 
10791  node->type = Node::TYPE_FREE;
10792 
10793  // Join free nodes if possible.
10794  while(level > 0 && node->buddy->type == Node::TYPE_FREE)
10795  {
10796  RemoveFromFreeList(level, node->buddy);
10797  Node* const parent = node->parent;
10798 
10799  vma_delete(GetAllocationCallbacks(), node->buddy);
10800  vma_delete(GetAllocationCallbacks(), node);
10801  parent->type = Node::TYPE_FREE;
10802 
10803  node = parent;
10804  --level;
10805  //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
10806  --m_FreeCount;
10807  }
10808 
10809  AddToFreeListFront(level, node);
10810 }
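// The descent above is a binary search by offset: at each split node, an offset in
// the lower half goes to the left child and one in the upper half to the right
// (e.g. freeing offset 96 in a 256-byte block walks left into 0..127, right into
// 64..127, then right into 96..127 until it hits the allocation node). The merge
// loop afterwards coalesces buddy pairs upward for as long as both halves are
// free, deleting the two children at each merged level.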
10811 
10812 void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
10813 {
10814  switch(node->type)
10815  {
10816  case Node::TYPE_FREE:
10817  ++outInfo.unusedRangeCount;
10818  outInfo.unusedBytes += levelNodeSize;
10819  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
10820  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, levelNodeSize);
10821  break;
10822  case Node::TYPE_ALLOCATION:
10823  {
10824  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
10825  ++outInfo.allocationCount;
10826  outInfo.usedBytes += allocSize;
10827  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
10828  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, allocSize);
10829 
10830  const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
10831  if(unusedRangeSize > 0)
10832  {
10833  ++outInfo.unusedRangeCount;
10834  outInfo.unusedBytes += unusedRangeSize;
10835  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
10836  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
10837  }
10838  }
10839  break;
10840  case Node::TYPE_SPLIT:
10841  {
10842  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
10843  const Node* const leftChild = node->split.leftChild;
10844  CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
10845  const Node* const rightChild = leftChild->buddy;
10846  CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
10847  }
10848  break;
10849  default:
10850  VMA_ASSERT(0);
10851  }
10852 }
10853 
10854 void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
10855 {
10856  VMA_ASSERT(node->type == Node::TYPE_FREE);
10857 
10858  // List is empty.
10859  Node* const frontNode = m_FreeList[level].front;
10860  if(frontNode == VMA_NULL)
10861  {
10862  VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
10863  node->free.prev = node->free.next = VMA_NULL;
10864  m_FreeList[level].front = m_FreeList[level].back = node;
10865  }
10866  else
10867  {
10868  VMA_ASSERT(frontNode->free.prev == VMA_NULL);
10869  node->free.prev = VMA_NULL;
10870  node->free.next = frontNode;
10871  frontNode->free.prev = node;
10872  m_FreeList[level].front = node;
10873  }
10874 }
10875 
10876 void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
10877 {
10878  VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
10879 
10880  // It is at the front.
10881  if(node->free.prev == VMA_NULL)
10882  {
10883  VMA_ASSERT(m_FreeList[level].front == node);
10884  m_FreeList[level].front = node->free.next;
10885  }
10886  else
10887  {
10888  Node* const prevFreeNode = node->free.prev;
10889  VMA_ASSERT(prevFreeNode->free.next == node);
10890  prevFreeNode->free.next = node->free.next;
10891  }
10892 
10893  // It is at the back.
10894  if(node->free.next == VMA_NULL)
10895  {
10896  VMA_ASSERT(m_FreeList[level].back == node);
10897  m_FreeList[level].back = node->free.prev;
10898  }
10899  else
10900  {
10901  Node* const nextFreeNode = node->free.next;
10902  VMA_ASSERT(nextFreeNode->free.prev == node);
10903  nextFreeNode->free.prev = node->free.prev;
10904  }
10905 }
10906 
10907 #if VMA_STATS_STRING_ENABLED
10908 void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
10909 {
10910  switch(node->type)
10911  {
10912  case Node::TYPE_FREE:
10913  PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
10914  break;
10915  case Node::TYPE_ALLOCATION:
10916  {
10917  PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
10918  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
10919  if(allocSize < levelNodeSize)
10920  {
10921  PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize);
10922  }
10923  }
10924  break;
10925  case Node::TYPE_SPLIT:
10926  {
10927  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
10928  const Node* const leftChild = node->split.leftChild;
10929  PrintDetailedMapNode(json, leftChild, childrenNodeSize);
10930  const Node* const rightChild = leftChild->buddy;
10931  PrintDetailedMapNode(json, rightChild, childrenNodeSize);
10932  }
10933  break;
10934  default:
10935  VMA_ASSERT(0);
10936  }
10937 }
10938 #endif // #if VMA_STATS_STRING_ENABLED
10939 
10940 
10941 ////////////////////////////////////////////////////////////////////////////////
10942 // class VmaDeviceMemoryBlock
10943 
10944 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
10945  m_pMetadata(VMA_NULL),
10946  m_MemoryTypeIndex(UINT32_MAX),
10947  m_Id(0),
10948  m_hMemory(VK_NULL_HANDLE),
10949  m_MapCount(0),
10950  m_pMappedData(VMA_NULL)
10951 {
10952 }
10953 
10954 void VmaDeviceMemoryBlock::Init(
10955  VmaAllocator hAllocator,
10956  uint32_t newMemoryTypeIndex,
10957  VkDeviceMemory newMemory,
10958  VkDeviceSize newSize,
10959  uint32_t id,
10960  uint32_t algorithm)
10961 {
10962  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
10963 
10964  m_MemoryTypeIndex = newMemoryTypeIndex;
10965  m_Id = id;
10966  m_hMemory = newMemory;
10967 
10968  switch(algorithm)
10969  {
10970  case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
10971  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
10972  break;
10973  case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
10974  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
10975  break;
10976  default:
10977  VMA_ASSERT(0);
10978  // Fall-through.
10979  case 0:
10980  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
10981  }
10982  m_pMetadata->Init(newSize);
10983 }
10984 
10985 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
10986 {
10987  // This is the most important assert in the entire library.
10988  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
10989  VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
10990 
10991  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
10992  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
10993  m_hMemory = VK_NULL_HANDLE;
10994 
10995  vma_delete(allocator, m_pMetadata);
10996  m_pMetadata = VMA_NULL;
10997 }
10998 
10999 bool VmaDeviceMemoryBlock::Validate() const
11000 {
11001  VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
11002  (m_pMetadata->GetSize() != 0));
11003 
11004  return m_pMetadata->Validate();
11005 }
11006 
11007 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
11008 {
11009  void* pData = nullptr;
11010  VkResult res = Map(hAllocator, 1, &pData);
11011  if(res != VK_SUCCESS)
11012  {
11013  return res;
11014  }
11015 
11016  res = m_pMetadata->CheckCorruption(pData);
11017 
11018  Unmap(hAllocator, 1);
11019 
11020  return res;
11021 }
11022 
11023 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
11024 {
11025  if(count == 0)
11026  {
11027  return VK_SUCCESS;
11028  }
11029 
11030  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11031  if(m_MapCount != 0)
11032  {
11033  m_MapCount += count;
11034  VMA_ASSERT(m_pMappedData != VMA_NULL);
11035  if(ppData != VMA_NULL)
11036  {
11037  *ppData = m_pMappedData;
11038  }
11039  return VK_SUCCESS;
11040  }
11041  else
11042  {
11043  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
11044  hAllocator->m_hDevice,
11045  m_hMemory,
11046  0, // offset
11047  VK_WHOLE_SIZE,
11048  0, // flags
11049  &m_pMappedData);
11050  if(result == VK_SUCCESS)
11051  {
11052  if(ppData != VMA_NULL)
11053  {
11054  *ppData = m_pMappedData;
11055  }
11056  m_MapCount = count;
11057  }
11058  return result;
11059  }
11060 }
11061 
11062 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
11063 {
11064  if(count == 0)
11065  {
11066  return;
11067  }
11068 
11069  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11070  if(m_MapCount >= count)
11071  {
11072  m_MapCount -= count;
11073  if(m_MapCount == 0)
11074  {
11075  m_pMappedData = VMA_NULL;
11076  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
11077  }
11078  }
11079  else
11080  {
11081  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
11082  }
11083 }
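// Usage sketch of the reference-counted mapping above (hypothetical call sequence):
//   block.Map(hAllocator, 1, &p);   // first user: calls vkMapMemory, m_MapCount = 1
//   block.Map(hAllocator, 1, &p);   // second user: reuses m_pMappedData, m_MapCount = 2
//   block.Unmap(hAllocator, 1);     // m_MapCount = 1, memory stays mapped
//   block.Unmap(hAllocator, 1);     // last user: calls vkUnmapMemory
// This is what lets persistently mapped allocations coexist with manual
// map/unmap calls on the same VkDeviceMemory.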
11084 
11085 VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
11086 {
11087  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
11088  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
11089 
11090  void* pData;
11091  VkResult res = Map(hAllocator, 1, &pData);
11092  if(res != VK_SUCCESS)
11093  {
11094  return res;
11095  }
11096 
11097  VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
11098  VmaWriteMagicValue(pData, allocOffset + allocSize);
11099 
11100  Unmap(hAllocator, 1);
11101 
11102  return VK_SUCCESS;
11103 }
11104 
11105 VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
11106 {
11107  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
11108  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
11109 
11110  void* pData;
11111  VkResult res = Map(hAllocator, 1, &pData);
11112  if(res != VK_SUCCESS)
11113  {
11114  return res;
11115  }
11116 
11117  if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
11118  {
11119  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
11120  }
11121  else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
11122  {
11123  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
11124  }
11125 
11126  Unmap(hAllocator, 1);
11127 
11128  return VK_SUCCESS;
11129 }
11130 
11131 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
11132  const VmaAllocator hAllocator,
11133  const VmaAllocation hAllocation,
11134  VkBuffer hBuffer)
11135 {
11136  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
11137  hAllocation->GetBlock() == this);
11138  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
11139  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11140  return hAllocator->GetVulkanFunctions().vkBindBufferMemory(
11141  hAllocator->m_hDevice,
11142  hBuffer,
11143  m_hMemory,
11144  hAllocation->GetOffset());
11145 }
11146 
11147 VkResult VmaDeviceMemoryBlock::BindImageMemory(
11148  const VmaAllocator hAllocator,
11149  const VmaAllocation hAllocation,
11150  VkImage hImage)
11151 {
11152  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
11153  hAllocation->GetBlock() == this);
11154  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
11155  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11156  return hAllocator->GetVulkanFunctions().vkBindImageMemory(
11157  hAllocator->m_hDevice,
11158  hImage,
11159  m_hMemory,
11160  hAllocation->GetOffset());
11161 }
11162 
11163 static void InitStatInfo(VmaStatInfo& outInfo)
11164 {
11165  memset(&outInfo, 0, sizeof(outInfo));
11166  outInfo.allocationSizeMin = UINT64_MAX;
11167  outInfo.unusedRangeSizeMin = UINT64_MAX;
11168 }
11169 
11170 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
11171 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
11172 {
11173  inoutInfo.blockCount += srcInfo.blockCount;
11174  inoutInfo.allocationCount += srcInfo.allocationCount;
11175  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
11176  inoutInfo.usedBytes += srcInfo.usedBytes;
11177  inoutInfo.unusedBytes += srcInfo.unusedBytes;
11178  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
11179  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
11180  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
11181  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
11182 }
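// Note: the min fields can only shrink via VMA_MIN here, which works because
// InitStatInfo() seeds them with UINT64_MAX; an aggregate with no allocations
// therefore keeps UINT64_MAX as its (unused) minimum.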
11183 
11184 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
11185 {
11186  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
11187  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
11188  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
11189  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
11190 }
11191 
11192 VmaPool_T::VmaPool_T(
11193  VmaAllocator hAllocator,
11194  const VmaPoolCreateInfo& createInfo,
11195  VkDeviceSize preferredBlockSize) :
11196  m_BlockVector(
11197  hAllocator,
11198  createInfo.memoryTypeIndex,
11199  createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
11200  createInfo.minBlockCount,
11201  createInfo.maxBlockCount,
11202  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
11203  createInfo.frameInUseCount,
11204  true, // isCustomPool
11205  createInfo.blockSize != 0, // explicitBlockSize
11206  createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm
11207  m_Id(0)
11208 {
11209 }
11210 
11211 VmaPool_T::~VmaPool_T()
11212 {
11213 }
11214 
11215 #if VMA_STATS_STRING_ENABLED
11216 
11217 #endif // #if VMA_STATS_STRING_ENABLED
11218 
11219 VmaBlockVector::VmaBlockVector(
11220  VmaAllocator hAllocator,
11221  uint32_t memoryTypeIndex,
11222  VkDeviceSize preferredBlockSize,
11223  size_t minBlockCount,
11224  size_t maxBlockCount,
11225  VkDeviceSize bufferImageGranularity,
11226  uint32_t frameInUseCount,
11227  bool isCustomPool,
11228  bool explicitBlockSize,
11229  uint32_t algorithm) :
11230  m_hAllocator(hAllocator),
11231  m_MemoryTypeIndex(memoryTypeIndex),
11232  m_PreferredBlockSize(preferredBlockSize),
11233  m_MinBlockCount(minBlockCount),
11234  m_MaxBlockCount(maxBlockCount),
11235  m_BufferImageGranularity(bufferImageGranularity),
11236  m_FrameInUseCount(frameInUseCount),
11237  m_IsCustomPool(isCustomPool),
11238  m_ExplicitBlockSize(explicitBlockSize),
11239  m_Algorithm(algorithm),
11240  m_HasEmptyBlock(false),
11241  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
11242  m_NextBlockId(0)
11243 {
11244 }
11245 
11246 VmaBlockVector::~VmaBlockVector()
11247 {
11248  for(size_t i = m_Blocks.size(); i--; )
11249  {
11250  m_Blocks[i]->Destroy(m_hAllocator);
11251  vma_delete(m_hAllocator, m_Blocks[i]);
11252  }
11253 }
11254 
11255 VkResult VmaBlockVector::CreateMinBlocks()
11256 {
11257  for(size_t i = 0; i < m_MinBlockCount; ++i)
11258  {
11259  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
11260  if(res != VK_SUCCESS)
11261  {
11262  return res;
11263  }
11264  }
11265  return VK_SUCCESS;
11266 }
11267 
11268 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
11269 {
11270  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
11271 
11272  const size_t blockCount = m_Blocks.size();
11273 
11274  pStats->size = 0;
11275  pStats->unusedSize = 0;
11276  pStats->allocationCount = 0;
11277  pStats->unusedRangeCount = 0;
11278  pStats->unusedRangeSizeMax = 0;
11279  pStats->blockCount = blockCount;
11280 
11281  for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
11282  {
11283  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
11284  VMA_ASSERT(pBlock);
11285  VMA_HEAVY_ASSERT(pBlock->Validate());
11286  pBlock->m_pMetadata->AddPoolStats(*pStats);
11287  }
11288 }
11289 
11290 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
11291 {
11292  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
11293  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
11294  (VMA_DEBUG_MARGIN > 0) &&
11295  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
11296 }
11297 
11298 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
11299 
11300 VkResult VmaBlockVector::Allocate(
11301  VmaPool hCurrentPool,
11302  uint32_t currentFrameIndex,
11303  VkDeviceSize size,
11304  VkDeviceSize alignment,
11305  const VmaAllocationCreateInfo& createInfo,
11306  VmaSuballocationType suballocType,
11307  size_t allocationCount,
11308  VmaAllocation* pAllocations)
11309 {
11310  size_t allocIndex;
11311  VkResult res = VK_SUCCESS;
11312 
11313  {
11314  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
11315  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
11316  {
11317  res = AllocatePage(
11318  hCurrentPool,
11319  currentFrameIndex,
11320  size,
11321  alignment,
11322  createInfo,
11323  suballocType,
11324  pAllocations + allocIndex);
11325  if(res != VK_SUCCESS)
11326  {
11327  break;
11328  }
11329  }
11330  }
11331 
11332  if(res != VK_SUCCESS)
11333  {
11334  // Free all already created allocations.
11335  while(allocIndex--)
11336  {
11337  Free(pAllocations[allocIndex]);
11338  }
11339  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
11340  }
11341 
11342  return res;
11343 }
11344 
11345 VkResult VmaBlockVector::AllocatePage(
11346  VmaPool hCurrentPool,
11347  uint32_t currentFrameIndex,
11348  VkDeviceSize size,
11349  VkDeviceSize alignment,
11350  const VmaAllocationCreateInfo& createInfo,
11351  VmaSuballocationType suballocType,
11352  VmaAllocation* pAllocation)
11353 {
11354  const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
11355  bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
11356  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
11357  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
11358  const bool canCreateNewBlock =
11359  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
11360  (m_Blocks.size() < m_MaxBlockCount);
11361  uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
11362 
11363  // If the linear algorithm is used, canMakeOtherLost is available only when the pool is used as a ring buffer,
11364  // which in turn is the case only when maxBlockCount = 1.
11365  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
11366  {
11367  canMakeOtherLost = false;
11368  }
11369 
11370  // Upper address can only be used with linear allocator and within single memory block.
11371  if(isUpperAddress &&
11372  (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
11373  {
11374  return VK_ERROR_FEATURE_NOT_PRESENT;
11375  }
11376 
11377  // Validate strategy.
11378  switch(strategy)
11379  {
11380  case 0:
11381  strategy = VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT;
11382  break;
11383  case VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT:
11384  case VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT:
11385  case VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT:
11386  break;
11387  default:
11388  return VK_ERROR_FEATURE_NOT_PRESENT;
11389  }
11390 
11391  // Early reject: requested allocation size is larger than maximum block size for this block vector.
11392  if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
11393  {
11394  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11395  }
11396 
11397  /*
11398  Under certain conditions, this whole section can be skipped for optimization, so
11399  we move on directly to trying to allocate with canMakeOtherLost. That's the case
11400  e.g. for custom pools with linear algorithm.
11401  */
11402  if(!canMakeOtherLost || canCreateNewBlock)
11403  {
11404  // 1. Search existing allocations. Try to allocate without making other allocations lost.
11405  VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
11406  allocFlagsCopy &= ~VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT;
11407 
11408  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
11409  {
11410  // Use only last block.
11411  if(!m_Blocks.empty())
11412  {
11413  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
11414  VMA_ASSERT(pCurrBlock);
11415  VkResult res = AllocateFromBlock(
11416  pCurrBlock,
11417  hCurrentPool,
11418  currentFrameIndex,
11419  size,
11420  alignment,
11421  allocFlagsCopy,
11422  createInfo.pUserData,
11423  suballocType,
11424  strategy,
11425  pAllocation);
11426  if(res == VK_SUCCESS)
11427  {
11428  VMA_DEBUG_LOG(" Returned from last block #%u", (uint32_t)(m_Blocks.size() - 1));
11429  return VK_SUCCESS;
11430  }
11431  }
11432  }
11433  else
11434  {
11435  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
11436  {
11437  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
11438  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
11439  {
11440  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11441  VMA_ASSERT(pCurrBlock);
11442  VkResult res = AllocateFromBlock(
11443  pCurrBlock,
11444  hCurrentPool,
11445  currentFrameIndex,
11446  size,
11447  alignment,
11448  allocFlagsCopy,
11449  createInfo.pUserData,
11450  suballocType,
11451  strategy,
11452  pAllocation);
11453  if(res == VK_SUCCESS)
11454  {
11455  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
11456  return VK_SUCCESS;
11457  }
11458  }
11459  }
11460  else // WORST_FIT, FIRST_FIT
11461  {
11462  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
11463  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
11464  {
11465  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11466  VMA_ASSERT(pCurrBlock);
11467  VkResult res = AllocateFromBlock(
11468  pCurrBlock,
11469  hCurrentPool,
11470  currentFrameIndex,
11471  size,
11472  alignment,
11473  allocFlagsCopy,
11474  createInfo.pUserData,
11475  suballocType,
11476  strategy,
11477  pAllocation);
11478  if(res == VK_SUCCESS)
11479  {
11480  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
11481  return VK_SUCCESS;
11482  }
11483  }
11484  }
11485  }
11486 
11487  // 2. Try to create new block.
11488  if(canCreateNewBlock)
11489  {
11490  // Calculate optimal size for new block.
11491  VkDeviceSize newBlockSize = m_PreferredBlockSize;
11492  uint32_t newBlockSizeShift = 0;
11493  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
11494 
11495  if(!m_ExplicitBlockSize)
11496  {
11497  // Allocate 1/8, 1/4, 1/2 as first blocks.
11498  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
11499  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
11500  {
11501  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
11502  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
11503  {
11504  newBlockSize = smallerNewBlockSize;
11505  ++newBlockSizeShift;
11506  }
11507  else
11508  {
11509  break;
11510  }
11511  }
11512  }
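// Worked example of the heuristic above: with a 256 MiB preferred block size, no
// existing blocks, and a 10 MiB request, the candidates 128/64/32 MiB all satisfy
// "bigger than any existing block and at least 2x the request", so the first
// attempt allocates a 32 MiB block (newBlockSizeShift == 3).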
11513 
11514  size_t newBlockIndex = 0;
11515  VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
11516  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
11517  if(!m_ExplicitBlockSize)
11518  {
11519  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
11520  {
11521  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
11522  if(smallerNewBlockSize >= size)
11523  {
11524  newBlockSize = smallerNewBlockSize;
11525  ++newBlockSizeShift;
11526  res = CreateBlock(newBlockSize, &newBlockIndex);
11527  }
11528  else
11529  {
11530  break;
11531  }
11532  }
11533  }
11534 
11535  if(res == VK_SUCCESS)
11536  {
11537  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
11538  VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
11539 
11540  res = AllocateFromBlock(
11541  pBlock,
11542  hCurrentPool,
11543  currentFrameIndex,
11544  size,
11545  alignment,
11546  allocFlagsCopy,
11547  createInfo.pUserData,
11548  suballocType,
11549  strategy,
11550  pAllocation);
11551  if(res == VK_SUCCESS)
11552  {
11553  VMA_DEBUG_LOG(" Created new block Size=%llu", newBlockSize);
11554  return VK_SUCCESS;
11555  }
11556  else
11557  {
11558  // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
11559  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11560  }
11561  }
11562  }
11563  }
11564 
11565  // 3. Try to allocate from existing blocks with making other allocations lost.
11566  if(canMakeOtherLost)
11567  {
11568  uint32_t tryIndex = 0;
11569  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
11570  {
11571  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
11572  VmaAllocationRequest bestRequest = {};
11573  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
11574 
11575  // 1. Search existing allocations.
11576  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
11577  {
11578  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
11579  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
11580  {
11581  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11582  VMA_ASSERT(pCurrBlock);
11583  VmaAllocationRequest currRequest = {};
11584  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
11585  currentFrameIndex,
11586  m_FrameInUseCount,
11587  m_BufferImageGranularity,
11588  size,
11589  alignment,
11590  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
11591  suballocType,
11592  canMakeOtherLost,
11593  strategy,
11594  &currRequest))
11595  {
11596  const VkDeviceSize currRequestCost = currRequest.CalcCost();
11597  if(pBestRequestBlock == VMA_NULL ||
11598  currRequestCost < bestRequestCost)
11599  {
11600  pBestRequestBlock = pCurrBlock;
11601  bestRequest = currRequest;
11602  bestRequestCost = currRequestCost;
11603 
11604  if(bestRequestCost == 0)
11605  {
11606  break;
11607  }
11608  }
11609  }
11610  }
11611  }
11612  else // WORST_FIT, FIRST_FIT
11613  {
11614  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
11615  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
11616  {
11617  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11618  VMA_ASSERT(pCurrBlock);
11619  VmaAllocationRequest currRequest = {};
11620  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
11621  currentFrameIndex,
11622  m_FrameInUseCount,
11623  m_BufferImageGranularity,
11624  size,
11625  alignment,
11626  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
11627  suballocType,
11628  canMakeOtherLost,
11629  strategy,
11630  &currRequest))
11631  {
11632  const VkDeviceSize currRequestCost = currRequest.CalcCost();
11633  if(pBestRequestBlock == VMA_NULL ||
11634  currRequestCost < bestRequestCost ||
11635  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
11636  {
11637  pBestRequestBlock = pCurrBlock;
11638  bestRequest = currRequest;
11639  bestRequestCost = currRequestCost;
11640 
11641  if(bestRequestCost == 0 ||
11642  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
11643  {
11644  break;
11645  }
11646  }
11647  }
11648  }
11649  }
11650 
11651  if(pBestRequestBlock != VMA_NULL)
11652  {
11653  if(mapped)
11654  {
11655  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
11656  if(res != VK_SUCCESS)
11657  {
11658  return res;
11659  }
11660  }
11661 
11662  if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
11663  currentFrameIndex,
11664  m_FrameInUseCount,
11665  &bestRequest))
11666  {
11667  // We no longer have an empty block.
11668  if(pBestRequestBlock->m_pMetadata->IsEmpty())
11669  {
11670  m_HasEmptyBlock = false;
11671  }
11672  // Allocate from this pBlock.
11673  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
11674  pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, isUpperAddress, *pAllocation);
11675  (*pAllocation)->InitBlockAllocation(
11676  hCurrentPool,
11677  pBestRequestBlock,
11678  bestRequest.offset,
11679  alignment,
11680  size,
11681  suballocType,
11682  mapped,
11683  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
11684  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
11685  VMA_DEBUG_LOG(" Returned from existing block");
11686  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
11687  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
11688  {
11689  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
11690  }
11691  if(IsCorruptionDetectionEnabled())
11692  {
11693  VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
11694  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
11695  }
11696  return VK_SUCCESS;
11697  }
11698  // else: Some allocations must have been touched while we were here. Next try.
11699  }
11700  else
11701  {
11702  // Could not find a place in any of the blocks - break the outer loop.
11703  break;
11704  }
11705  }
11706  /* Maximum number of tries exceeded - a very unlikely event that can occur when many other
11707  threads are simultaneously touching allocations, making it impossible to make them
11708  lost at the same time as we try to allocate. */
11709  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
11710  {
11711  return VK_ERROR_TOO_MANY_OBJECTS;
11712  }
11713  }
11714 
11715  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11716 }
11717 
11718 void VmaBlockVector::Free(
11719  VmaAllocation hAllocation)
11720 {
11721  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
11722 
11723  // Scope for lock.
11724  {
11725  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
11726 
11727  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
11728 
11729  if(IsCorruptionDetectionEnabled())
11730  {
11731  VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
11732  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
11733  }
11734 
11735  if(hAllocation->IsPersistentMap())
11736  {
11737  pBlock->Unmap(m_hAllocator, 1);
11738  }
11739 
11740  pBlock->m_pMetadata->Free(hAllocation);
11741  VMA_HEAVY_ASSERT(pBlock->Validate());
11742 
11743  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
11744 
11745  // pBlock became empty after this deallocation.
11746  if(pBlock->m_pMetadata->IsEmpty())
11747  {
11748  // We already have an empty block - we don't want two, so delete this one.
11749  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
11750  {
11751  pBlockToDelete = pBlock;
11752  Remove(pBlock);
11753  }
11754  // We now have our first empty block.
11755  else
11756  {
11757  m_HasEmptyBlock = true;
11758  }
11759  }
11760  // pBlock didn't become empty, but we have another empty block - find and free that one.
11761  // (This is optional - just a heuristic.)
11762  else if(m_HasEmptyBlock)
11763  {
11764  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
11765  if(pLastBlock->m_pMetadata->IsEmpty() && m_Blocks.size() > m_MinBlockCount)
11766  {
11767  pBlockToDelete = pLastBlock;
11768  m_Blocks.pop_back();
11769  m_HasEmptyBlock = false;
11770  }
11771  }
11772 
11773  IncrementallySortBlocks();
11774  }
11775 
11776  // Destruction of a free block. Deferred until this point, outside of the mutex
11777  // lock, for performance reasons.
11778  if(pBlockToDelete != VMA_NULL)
11779  {
11780  VMA_DEBUG_LOG(" Deleted empty block");
11781  pBlockToDelete->Destroy(m_hAllocator);
11782  vma_delete(m_hAllocator, pBlockToDelete);
11783  }
11784 }
11785 
11786 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
11787 {
11788  VkDeviceSize result = 0;
11789  for(size_t i = m_Blocks.size(); i--; )
11790  {
11791  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
11792  if(result >= m_PreferredBlockSize)
11793  {
11794  break;
11795  }
11796  }
11797  return result;
11798 }
11799 
11800 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
11801 {
11802  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
11803  {
11804  if(m_Blocks[blockIndex] == pBlock)
11805  {
11806  VmaVectorRemove(m_Blocks, blockIndex);
11807  return;
11808  }
11809  }
11810  VMA_ASSERT(0);
11811 }
11812 
11813 void VmaBlockVector::IncrementallySortBlocks()
11814 {
11815  if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
11816  {
11817  // Bubble sort only until first swap.
11818  for(size_t i = 1; i < m_Blocks.size(); ++i)
11819  {
11820  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
11821  {
11822  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
11823  return;
11824  }
11825  }
11826  }
11827 }
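// One swap per call is enough here: Free() invokes this after every deallocation,
// so the list converges toward ascending order by free space over time without
// ever paying for a full sort under the mutex.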
11828 
11829 VkResult VmaBlockVector::AllocateFromBlock(
11830  VmaDeviceMemoryBlock* pBlock,
11831  VmaPool hCurrentPool,
11832  uint32_t currentFrameIndex,
11833  VkDeviceSize size,
11834  VkDeviceSize alignment,
11835  VmaAllocationCreateFlags allocFlags,
11836  void* pUserData,
11837  VmaSuballocationType suballocType,
11838  uint32_t strategy,
11839  VmaAllocation* pAllocation)
11840 {
11841  VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
11842  const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
11843  const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
11844  const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
11845 
11846  VmaAllocationRequest currRequest = {};
11847  if(pBlock->m_pMetadata->CreateAllocationRequest(
11848  currentFrameIndex,
11849  m_FrameInUseCount,
11850  m_BufferImageGranularity,
11851  size,
11852  alignment,
11853  isUpperAddress,
11854  suballocType,
11855  false, // canMakeOtherLost
11856  strategy,
11857  &currRequest))
11858  {
11859  // Allocate from pCurrBlock.
11860  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
11861 
11862  if(mapped)
11863  {
11864  VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
11865  if(res != VK_SUCCESS)
11866  {
11867  return res;
11868  }
11869  }
11870 
11871  // We no longer have an empty block.
11872  if(pBlock->m_pMetadata->IsEmpty())
11873  {
11874  m_HasEmptyBlock = false;
11875  }
11876 
11877  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
11878  pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, isUpperAddress, *pAllocation);
11879  (*pAllocation)->InitBlockAllocation(
11880  hCurrentPool,
11881  pBlock,
11882  currRequest.offset,
11883  alignment,
11884  size,
11885  suballocType,
11886  mapped,
11887  (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
11888  VMA_HEAVY_ASSERT(pBlock->Validate());
11889  (*pAllocation)->SetUserData(m_hAllocator, pUserData);
11890  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
11891  {
11892  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
11893  }
11894  if(IsCorruptionDetectionEnabled())
11895  {
11896  VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
11897  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
11898  }
11899  return VK_SUCCESS;
11900  }
11901  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11902 }
11903 
11904 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
11905 {
11906  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
11907  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
11908  allocInfo.allocationSize = blockSize;
11909  VkDeviceMemory mem = VK_NULL_HANDLE;
11910  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
11911  if(res < 0)
11912  {
11913  return res;
11914  }
11915 
11916  // New VkDeviceMemory successfully created.
11917 
11918  // Create a new block object for it.
11919  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
11920  pBlock->Init(
11921  m_hAllocator,
11922  m_MemoryTypeIndex,
11923  mem,
11924  allocInfo.allocationSize,
11925  m_NextBlockId++,
11926  m_Algorithm);
11927 
11928  m_Blocks.push_back(pBlock);
11929  if(pNewBlockIndex != VMA_NULL)
11930  {
11931  *pNewBlockIndex = m_Blocks.size() - 1;
11932  }
11933 
11934  return VK_SUCCESS;
11935 }

void VmaBlockVector::ApplyDefragmentationMovesCpu(
    class VmaBlockVectorDefragmentationContext* pDefragCtx,
    const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves)
{
    const size_t blockCount = m_Blocks.size();
    const bool isNonCoherent = m_hAllocator->IsMemoryTypeNonCoherent(m_MemoryTypeIndex);

    enum BLOCK_FLAG
    {
        BLOCK_FLAG_USED = 0x00000001,
        BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION = 0x00000002,
    };

    struct BlockInfo
    {
        uint32_t flags;
        void* pMappedData;
    };
    VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> >
        blockInfo(blockCount, VmaStlAllocator<BlockInfo>(m_hAllocator->GetAllocationCallbacks()));
    memset(blockInfo.data(), 0, blockCount * sizeof(BlockInfo));

    // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
    const size_t moveCount = moves.size();
    for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
    {
        const VmaDefragmentationMove& move = moves[moveIndex];
        blockInfo[move.srcBlockIndex].flags |= BLOCK_FLAG_USED;
        blockInfo[move.dstBlockIndex].flags |= BLOCK_FLAG_USED;
    }

    VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);

    // Go over all blocks. Get mapped pointer or map if necessary.
    for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo& currBlockInfo = blockInfo[blockIndex];
        VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
        if((currBlockInfo.flags & BLOCK_FLAG_USED) != 0)
        {
            currBlockInfo.pMappedData = pBlock->GetMappedData();
            // If it is not originally mapped, map it now.
            if(currBlockInfo.pMappedData == VMA_NULL)
            {
                pDefragCtx->res = pBlock->Map(m_hAllocator, 1, &currBlockInfo.pMappedData);
                if(pDefragCtx->res == VK_SUCCESS)
                {
                    currBlockInfo.flags |= BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION;
                }
            }
        }
    }

    // Go over all moves. Do actual data transfer.
    if(pDefragCtx->res == VK_SUCCESS)
    {
        const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
        VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };

        for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
        {
            const VmaDefragmentationMove& move = moves[moveIndex];

            const BlockInfo& srcBlockInfo = blockInfo[move.srcBlockIndex];
            const BlockInfo& dstBlockInfo = blockInfo[move.dstBlockIndex];

            VMA_ASSERT(srcBlockInfo.pMappedData && dstBlockInfo.pMappedData);

            // Invalidate source.
            if(isNonCoherent)
            {
                VmaDeviceMemoryBlock* const pSrcBlock = m_Blocks[move.srcBlockIndex];
                memRange.memory = pSrcBlock->GetDeviceMemory();
                memRange.offset = VmaAlignDown(move.srcOffset, nonCoherentAtomSize);
                memRange.size = VMA_MIN(
                    VmaAlignUp(move.size + (move.srcOffset - memRange.offset), nonCoherentAtomSize),
                    pSrcBlock->m_pMetadata->GetSize() - memRange.offset);
                (*m_hAllocator->GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
            }

            // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
            memmove(
                reinterpret_cast<char*>(dstBlockInfo.pMappedData) + move.dstOffset,
                reinterpret_cast<char*>(srcBlockInfo.pMappedData) + move.srcOffset,
                static_cast<size_t>(move.size));

            if(IsCorruptionDetectionEnabled())
            {
                VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset - VMA_DEBUG_MARGIN);
                VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset + move.size);
            }

            // Flush destination.
            if(isNonCoherent)
            {
                VmaDeviceMemoryBlock* const pDstBlock = m_Blocks[move.dstBlockIndex];
                memRange.memory = pDstBlock->GetDeviceMemory();
                memRange.offset = VmaAlignDown(move.dstOffset, nonCoherentAtomSize);
                memRange.size = VMA_MIN(
                    VmaAlignUp(move.size + (move.dstOffset - memRange.offset), nonCoherentAtomSize),
                    pDstBlock->m_pMetadata->GetSize() - memRange.offset);
                (*m_hAllocator->GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
            }
        }
    }

    // Go over all blocks in reverse order. Unmap those that were mapped just for defragmentation.
    // This is done regardless of whether pCtx->res == VK_SUCCESS.
    for(size_t blockIndex = blockCount; blockIndex--; )
    {
        const BlockInfo& currBlockInfo = blockInfo[blockIndex];
        if((currBlockInfo.flags & BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION) != 0)
        {
            VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
            pBlock->Unmap(m_hAllocator, 1);
        }
    }
}
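
// Editor's note (added example, not in the original source): the rounding above
// widens each flushed/invalidated range to whole non-coherent atoms, as required
// by the Vulkan spec. For instance, assuming nonCoherentAtomSize == 64, a move
// with srcOffset == 200 and size == 100 yields:
//   memRange.offset = VmaAlignDown(200, 64) = 192
//   memRange.size   = VmaAlignUp(100 + (200 - 192), 64) = VmaAlignUp(108, 64) = 128
// so bytes [192, 320) are invalidated; VMA_MIN then clamps the size so the range
// never runs past the end of the block.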

void VmaBlockVector::ApplyDefragmentationMovesGpu(
    class VmaBlockVectorDefragmentationContext* pDefragCtx,
    const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    VkCommandBuffer commandBuffer)
{
    const size_t blockCount = m_Blocks.size();

    pDefragCtx->blockContexts.resize(blockCount);
    memset(pDefragCtx->blockContexts.data(), 0, blockCount * sizeof(VmaBlockDefragmentationContext));

    // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
    const size_t moveCount = moves.size();
    for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
    {
        const VmaDefragmentationMove& move = moves[moveIndex];
        pDefragCtx->blockContexts[move.srcBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
        pDefragCtx->blockContexts[move.dstBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
    }

    VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);

    // Go over all blocks. Create and bind buffer for whole block if necessary.
    {
        VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
        bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT |
            VK_BUFFER_USAGE_TRANSFER_DST_BIT;

        for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
        {
            VmaBlockDefragmentationContext& currBlockCtx = pDefragCtx->blockContexts[blockIndex];
            VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
            if((currBlockCtx.flags & VmaBlockDefragmentationContext::BLOCK_FLAG_USED) != 0)
            {
                bufCreateInfo.size = pBlock->m_pMetadata->GetSize();
                pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkCreateBuffer)(
                    m_hAllocator->m_hDevice, &bufCreateInfo, m_hAllocator->GetAllocationCallbacks(), &currBlockCtx.hBuffer);
                if(pDefragCtx->res == VK_SUCCESS)
                {
                    pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkBindBufferMemory)(
                        m_hAllocator->m_hDevice, currBlockCtx.hBuffer, pBlock->GetDeviceMemory(), 0);
                }
            }
        }
    }

    // Go over all moves. Post data transfer commands to command buffer.
    if(pDefragCtx->res == VK_SUCCESS)
    {
        const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
        VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };

        for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
        {
            const VmaDefragmentationMove& move = moves[moveIndex];

            const VmaBlockDefragmentationContext& srcBlockCtx = pDefragCtx->blockContexts[move.srcBlockIndex];
            const VmaBlockDefragmentationContext& dstBlockCtx = pDefragCtx->blockContexts[move.dstBlockIndex];

            VMA_ASSERT(srcBlockCtx.hBuffer && dstBlockCtx.hBuffer);

            VkBufferCopy region = {
                move.srcOffset,
                move.dstOffset,
                move.size };
            (*m_hAllocator->GetVulkanFunctions().vkCmdCopyBuffer)(
                commandBuffer, srcBlockCtx.hBuffer, dstBlockCtx.hBuffer, 1, &region);
        }
    }

    // Buffers stay in the defrag context for later destruction. VK_NOT_READY
    // signals that the recorded command buffer still has to be executed.
    if(pDefragCtx->res == VK_SUCCESS && moveCount > 0)
    {
        pDefragCtx->res = VK_NOT_READY;
    }
}
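
// Editor's note (added example, not in the original source): on the GPU path the
// copy commands are only recorded, not executed. Through the public API this
// surfaces as the VK_NOT_READY result of vmaDefragmentationBegin(), after which
// the caller must submit the command buffer. A sketch, assuming existing
// 'allocator', 'defragInfo' and 'stats':
#if 0 // illustrative sketch only
    VmaDefragmentationContext defragCtx = VK_NULL_HANDLE;
    VkResult res = vmaDefragmentationBegin(allocator, &defragInfo, &stats, &defragCtx);
    if(res == VK_NOT_READY)
    {
        // End and submit defragInfo.commandBuffer, wait until it finishes...
    }
    vmaDefragmentationEnd(allocator, defragCtx);
#endif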

void VmaBlockVector::FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats)
{
    m_HasEmptyBlock = false;
    for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    {
        VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
        if(pBlock->m_pMetadata->IsEmpty())
        {
            if(m_Blocks.size() > m_MinBlockCount)
            {
                if(pDefragmentationStats != VMA_NULL)
                {
                    ++pDefragmentationStats->deviceMemoryBlocksFreed;
                    pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
                }

                VmaVectorRemove(m_Blocks, blockIndex);
                pBlock->Destroy(m_hAllocator);
                vma_delete(m_hAllocator, pBlock);
            }
            else
            {
                m_HasEmptyBlock = true;
            }
        }
    }
}
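
// Editor's note (added comment): worked example of the policy above. With
// m_MinBlockCount == 2 and blocks { A(empty), B(used), C(empty) }, the backward
// sweep frees C (3 blocks > 2), then keeps A because only 2 blocks remain, and
// m_HasEmptyBlock ends up true. With m_MinBlockCount == 0, both A and C are freed.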

#if VMA_STATS_STRING_ENABLED

void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
{
    VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);

    json.BeginObject();

    if(m_IsCustomPool)
    {
        json.WriteString("MemoryTypeIndex");
        json.WriteNumber(m_MemoryTypeIndex);

        json.WriteString("BlockSize");
        json.WriteNumber(m_PreferredBlockSize);

        json.WriteString("BlockCount");
        json.BeginObject(true);
        if(m_MinBlockCount > 0)
        {
            json.WriteString("Min");
            json.WriteNumber((uint64_t)m_MinBlockCount);
        }
        if(m_MaxBlockCount < SIZE_MAX)
        {
            json.WriteString("Max");
            json.WriteNumber((uint64_t)m_MaxBlockCount);
        }
        json.WriteString("Cur");
        json.WriteNumber((uint64_t)m_Blocks.size());
        json.EndObject();

        if(m_FrameInUseCount > 0)
        {
            json.WriteString("FrameInUseCount");
            json.WriteNumber(m_FrameInUseCount);
        }

        if(m_Algorithm != 0)
        {
            json.WriteString("Algorithm");
            json.WriteString(VmaAlgorithmToStr(m_Algorithm));
        }
    }
    else
    {
        json.WriteString("PreferredBlockSize");
        json.WriteNumber(m_PreferredBlockSize);
    }

    json.WriteString("Blocks");
    json.BeginObject();
    for(size_t i = 0; i < m_Blocks.size(); ++i)
    {
        json.BeginString();
        json.ContinueString(m_Blocks[i]->GetId());
        json.EndString();

        m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
    }
    json.EndObject();

    json.EndObject();
}

#endif // #if VMA_STATS_STRING_ENABLED
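
// Editor's note (added example): for a custom pool the writer above emits JSON
// of roughly this shape (values illustrative, per-block detail elided):
//
//   { "MemoryTypeIndex": 7, "BlockSize": 268435456,
//     "BlockCount": { "Min": 1, "Cur": 2 },
//     "Blocks": { "0": { ... }, "1": { ... } } }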

void VmaBlockVector::Defragment(
    class VmaBlockVectorDefragmentationContext* pCtx,
    VmaDefragmentationStats* pStats,
    VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
    VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
    VkCommandBuffer commandBuffer)
{
    pCtx->res = VK_SUCCESS;

    const VkMemoryPropertyFlags memPropFlags =
        m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags;
    const bool isHostVisible = (memPropFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0;
    const bool isHostCoherent = (memPropFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0;

    const bool canDefragmentOnCpu = maxCpuBytesToMove > 0 && maxCpuAllocationsToMove > 0 &&
        isHostVisible;
    const bool canDefragmentOnGpu = maxGpuBytesToMove > 0 && maxGpuAllocationsToMove > 0 &&
        (VMA_DEBUG_DETECT_CORRUPTION == 0 || !(isHostVisible && isHostCoherent));

    // There are options to defragment this memory type.
    if(canDefragmentOnCpu || canDefragmentOnGpu)
    {
        bool defragmentOnGpu;
        // There is only one option to defragment this memory type.
        if(canDefragmentOnGpu != canDefragmentOnCpu)
        {
            defragmentOnGpu = canDefragmentOnGpu;
        }
        // Both options are available: use heuristics to choose the better one.
        else
        {
            defragmentOnGpu = (memPropFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0 ||
                m_hAllocator->IsIntegratedGpu();
        }

        bool overlappingMoveSupported = !defragmentOnGpu;

        if(m_hAllocator->m_UseMutex)
        {
            m_Mutex.LockWrite();
            pCtx->mutexLocked = true;
        }

        pCtx->Begin(overlappingMoveSupported);

        // Defragment.

        const VkDeviceSize maxBytesToMove = defragmentOnGpu ? maxGpuBytesToMove : maxCpuBytesToMove;
        const uint32_t maxAllocationsToMove = defragmentOnGpu ? maxGpuAllocationsToMove : maxCpuAllocationsToMove;
        VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> > moves =
            VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >(VmaStlAllocator<VmaDefragmentationMove>(m_hAllocator->GetAllocationCallbacks()));
        pCtx->res = pCtx->GetAlgorithm()->Defragment(moves, maxBytesToMove, maxAllocationsToMove);

        // Accumulate statistics.
        if(pStats != VMA_NULL)
        {
            const VkDeviceSize bytesMoved = pCtx->GetAlgorithm()->GetBytesMoved();
            const uint32_t allocationsMoved = pCtx->GetAlgorithm()->GetAllocationsMoved();
            pStats->bytesMoved += bytesMoved;
            pStats->allocationsMoved += allocationsMoved;
            VMA_ASSERT(bytesMoved <= maxBytesToMove);
            VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
            if(defragmentOnGpu)
            {
                maxGpuBytesToMove -= bytesMoved;
                maxGpuAllocationsToMove -= allocationsMoved;
            }
            else
            {
                maxCpuBytesToMove -= bytesMoved;
                maxCpuAllocationsToMove -= allocationsMoved;
            }
        }

        if(pCtx->res >= VK_SUCCESS)
        {
            if(defragmentOnGpu)
            {
                ApplyDefragmentationMovesGpu(pCtx, moves, commandBuffer);
            }
            else
            {
                ApplyDefragmentationMovesCpu(pCtx, moves);
            }
        }
    }
}
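
// Editor's note (added example, not in the original source): a minimal CPU-only
// use of this machinery through the public API. Passing VK_NULL_HANDLE as the
// command buffer zeroes the GPU limits, so only host-visible memory is moved.
// 'allocator', 'allocs', 'allocCount' and 'changed' are assumed to exist.
#if 0 // illustrative sketch only
    VmaDefragmentationInfo2 defragInfo = {};
    defragInfo.allocationCount = allocCount;        // Allocations to consider.
    defragInfo.pAllocations = allocs;
    defragInfo.pAllocationsChanged = changed;       // Optional VkBool32 array.
    defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;   // No byte limit.
    defragInfo.maxCpuAllocationsToMove = UINT32_MAX;
    defragInfo.commandBuffer = VK_NULL_HANDLE;      // No GPU moves.

    VmaDefragmentationContext defragCtx = VK_NULL_HANDLE;
    VmaDefragmentationStats stats = {};
    vmaDefragmentationBegin(allocator, &defragInfo, &stats, &defragCtx);
    vmaDefragmentationEnd(allocator, defragCtx);
    // Buffers/images bound to moved allocations must be recreated and rebound.
#endif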

void VmaBlockVector::DefragmentationEnd(
    class VmaBlockVectorDefragmentationContext* pCtx,
    VmaDefragmentationStats* pStats)
{
    // Destroy buffers.
    for(size_t blockIndex = pCtx->blockContexts.size(); blockIndex--; )
    {
        VmaBlockDefragmentationContext& blockCtx = pCtx->blockContexts[blockIndex];
        if(blockCtx.hBuffer)
        {
            (*m_hAllocator->GetVulkanFunctions().vkDestroyBuffer)(
                m_hAllocator->m_hDevice, blockCtx.hBuffer, m_hAllocator->GetAllocationCallbacks());
        }
    }

    if(pCtx->res >= VK_SUCCESS)
    {
        FreeEmptyBlocks(pStats);
    }

    if(pCtx->mutexLocked)
    {
        VMA_ASSERT(m_hAllocator->m_UseMutex);
        m_Mutex.UnlockWrite();
    }
}

size_t VmaBlockVector::CalcAllocationCount() const
{
    size_t result = 0;
    for(size_t i = 0; i < m_Blocks.size(); ++i)
    {
        result += m_Blocks[i]->m_pMetadata->GetAllocationCount();
    }
    return result;
}

bool VmaBlockVector::IsBufferImageGranularityConflictPossible() const
{
    if(m_BufferImageGranularity == 1)
    {
        return false;
    }
    VmaSuballocationType lastSuballocType = VMA_SUBALLOCATION_TYPE_FREE;
    for(size_t i = 0, count = m_Blocks.size(); i < count; ++i)
    {
        VmaDeviceMemoryBlock* const pBlock = m_Blocks[i];
        VMA_ASSERT(m_Algorithm == 0);
        VmaBlockMetadata_Generic* const pMetadata = (VmaBlockMetadata_Generic*)pBlock->m_pMetadata;
        if(pMetadata->IsBufferImageGranularityConflictPossible(m_BufferImageGranularity, lastSuballocType))
        {
            return true;
        }
    }
    return false;
}
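
// Editor's note (added comment): bufferImageGranularity here is the device limit
// VkPhysicalDeviceLimits::bufferImageGranularity. When it is 1, linear (buffer)
// and optimal (image) resources can never alias a granularity page, so the early
// return above is safe and the fast defragmentation path stays available.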

void VmaBlockVector::MakePoolAllocationsLost(
    uint32_t currentFrameIndex,
    size_t* pLostAllocationCount)
{
    VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
    size_t lostAllocationCount = 0;
    for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    {
        VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
        VMA_ASSERT(pBlock);
        lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
    }
    if(pLostAllocationCount != VMA_NULL)
    {
        *pLostAllocationCount = lostAllocationCount;
    }
}

VkResult VmaBlockVector::CheckCorruption()
{
    if(!IsCorruptionDetectionEnabled())
    {
        return VK_ERROR_FEATURE_NOT_PRESENT;
    }

    VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
    for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    {
        VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
        VMA_ASSERT(pBlock);
        VkResult res = pBlock->CheckCorruption(m_hAllocator);
        if(res != VK_SUCCESS)
        {
            return res;
        }
    }
    return VK_SUCCESS;
}

void VmaBlockVector::AddStats(VmaStats* pStats)
{
    const uint32_t memTypeIndex = m_MemoryTypeIndex;
    const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);

    VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);

    for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    {
        const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
        VMA_ASSERT(pBlock);
        VMA_HEAVY_ASSERT(pBlock->Validate());
        VmaStatInfo allocationStatInfo;
        pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
        VmaAddStatInfo(pStats->total, allocationStatInfo);
        VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
        VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    }
}

////////////////////////////////////////////////////////////////////////////////
// VmaDefragmentationAlgorithm_Generic members definition

VmaDefragmentationAlgorithm_Generic::VmaDefragmentationAlgorithm_Generic(
    VmaAllocator hAllocator,
    VmaBlockVector* pBlockVector,
    uint32_t currentFrameIndex,
    bool overlappingMoveSupported) :
    VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
    m_AllAllocations(false),
    m_AllocationCount(0),
    m_BytesMoved(0),
    m_AllocationsMoved(0),
    m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
{
    // Create block info for each block.
    const size_t blockCount = m_pBlockVector->m_Blocks.size();
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
        pBlockInfo->m_OriginalBlockIndex = blockIndex;
        pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
        m_Blocks.push_back(pBlockInfo);
    }

    // Sort them by m_pBlock pointer value.
    VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
}

VmaDefragmentationAlgorithm_Generic::~VmaDefragmentationAlgorithm_Generic()
{
    for(size_t i = m_Blocks.size(); i--; )
    {
        vma_delete(m_hAllocator, m_Blocks[i]);
    }
}

void VmaDefragmentationAlgorithm_Generic::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
{
    // Now that we are inside VmaBlockVector::m_Mutex, we can make the final check whether this allocation was lost.
    if(hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
    {
        VmaDeviceMemoryBlock* pBlock = hAlloc->GetBlock();
        BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
        if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
        {
            AllocationInfo allocInfo = AllocationInfo(hAlloc, pChanged);
            (*it)->m_Allocations.push_back(allocInfo);
        }
        else
        {
            VMA_ASSERT(0);
        }

        ++m_AllocationCount;
    }
}

VkResult VmaDefragmentationAlgorithm_Generic::DefragmentRound(
    VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    VkDeviceSize maxBytesToMove,
    uint32_t maxAllocationsToMove)
{
    if(m_Blocks.empty())
    {
        return VK_SUCCESS;
    }

    // This is a choice based on research.
    // Option 1:
    uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT;
    // Option 2:
    //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT;
    // Option 3:
    //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT;

    size_t srcBlockMinIndex = 0;
    // With FAST_ALGORITHM, only move allocations out of the last of the blocks that contain non-movable allocations.
    /*
    if(m_AlgorithmFlags & VMA_DEFRAGMENTATION_FAST_ALGORITHM_BIT)
    {
        const size_t blocksWithNonMovableCount = CalcBlocksWithNonMovableCount();
        if(blocksWithNonMovableCount > 0)
        {
            srcBlockMinIndex = blocksWithNonMovableCount - 1;
        }
    }
    */

    size_t srcBlockIndex = m_Blocks.size() - 1;
    size_t srcAllocIndex = SIZE_MAX;
    for(;;)
    {
        // 1. Find next allocation to move.
        // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
        // 1.2. Then start from last to first m_Allocations.
        while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
        {
            if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
            {
                // Finished: no more allocations to process.
                if(srcBlockIndex == srcBlockMinIndex)
                {
                    return VK_SUCCESS;
                }
                else
                {
                    --srcBlockIndex;
                    srcAllocIndex = SIZE_MAX;
                }
            }
            else
            {
                srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
            }
        }

        BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
        AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];

        const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
        const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
        const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
        const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();

        // 2. Try to find new place for this allocation in preceding or current block.
        for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
        {
            BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
            VmaAllocationRequest dstAllocRequest;
            if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
                m_CurrentFrameIndex,
                m_pBlockVector->GetFrameInUseCount(),
                m_pBlockVector->GetBufferImageGranularity(),
                size,
                alignment,
                false, // upperAddress
                suballocType,
                false, // canMakeOtherLost
                strategy,
                &dstAllocRequest) &&
            MoveMakesSense(
                dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
            {
                VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);

                // Reached limit on number of allocations or bytes to move.
                if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
                    (m_BytesMoved + size > maxBytesToMove))
                {
                    return VK_SUCCESS;
                }

                VmaDefragmentationMove move;
                move.srcBlockIndex = pSrcBlockInfo->m_OriginalBlockIndex;
                move.dstBlockIndex = pDstBlockInfo->m_OriginalBlockIndex;
                move.srcOffset = srcOffset;
                move.dstOffset = dstAllocRequest.offset;
                move.size = size;
                moves.push_back(move);

                pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
                    dstAllocRequest,
                    suballocType,
                    size,
                    false, // upperAddress
                    allocInfo.m_hAllocation);
                pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);

                allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);

                if(allocInfo.m_pChanged != VMA_NULL)
                {
                    *allocInfo.m_pChanged = VK_TRUE;
                }

                ++m_AllocationsMoved;
                m_BytesMoved += size;

                VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);

                break;
            }
        }

        // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round.

        if(srcAllocIndex > 0)
        {
            --srcAllocIndex;
        }
        else
        {
            if(srcBlockIndex > 0)
            {
                --srcBlockIndex;
                srcAllocIndex = SIZE_MAX;
            }
            else
            {
                return VK_SUCCESS;
            }
        }
    }
}
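
// Editor's note (added comment): a single round walks sources from the back (most
// "source"-like block) and tries destinations from the front. Defragment() below
// runs two such rounds, so space freed in a source block during round 1 can be
// reused as a destination in round 2.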

size_t VmaDefragmentationAlgorithm_Generic::CalcBlocksWithNonMovableCount() const
{
    size_t result = 0;
    for(size_t i = 0; i < m_Blocks.size(); ++i)
    {
        if(m_Blocks[i]->m_HasNonMovableAllocations)
        {
            ++result;
        }
    }
    return result;
}

VkResult VmaDefragmentationAlgorithm_Generic::Defragment(
    VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    VkDeviceSize maxBytesToMove,
    uint32_t maxAllocationsToMove)
{
    if(!m_AllAllocations && m_AllocationCount == 0)
    {
        return VK_SUCCESS;
    }

    const size_t blockCount = m_Blocks.size();
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo* pBlockInfo = m_Blocks[blockIndex];

        if(m_AllAllocations)
        {
            VmaBlockMetadata_Generic* pMetadata = (VmaBlockMetadata_Generic*)pBlockInfo->m_pBlock->m_pMetadata;
            for(VmaSuballocationList::const_iterator it = pMetadata->m_Suballocations.begin();
                it != pMetadata->m_Suballocations.end();
                ++it)
            {
                if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
                {
                    AllocationInfo allocInfo = AllocationInfo(it->hAllocation, VMA_NULL);
                    pBlockInfo->m_Allocations.push_back(allocInfo);
                }
            }
        }

        pBlockInfo->CalcHasNonMovableAllocations();

        // This is a choice based on research.
        // Option 1:
        pBlockInfo->SortAllocationsByOffsetDescending();
        // Option 2:
        //pBlockInfo->SortAllocationsBySizeDescending();
    }

    // Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
    VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());

    // This is a choice based on research.
    const uint32_t roundCount = 2;

    // Execute defragmentation rounds (the main part).
    VkResult result = VK_SUCCESS;
    for(uint32_t round = 0; (round < roundCount) && (result == VK_SUCCESS); ++round)
    {
        result = DefragmentRound(moves, maxBytesToMove, maxAllocationsToMove);
    }

    return result;
}

bool VmaDefragmentationAlgorithm_Generic::MoveMakesSense(
    size_t dstBlockIndex, VkDeviceSize dstOffset,
    size_t srcBlockIndex, VkDeviceSize srcOffset)
{
    if(dstBlockIndex < srcBlockIndex)
    {
        return true;
    }
    if(dstBlockIndex > srcBlockIndex)
    {
        return false;
    }
    if(dstOffset < srcOffset)
    {
        return true;
    }
    return false;
}
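
// Editor's note (added comment): the predicate above compares (blockIndex, offset)
// pairs lexicographically. E.g. moving from (block 2, offset 0) to
// (block 1, offset 4096) makes sense, while moving within block 1 from offset
// 4096 to 8192 does not - it would push the allocation towards the "source" end.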

////////////////////////////////////////////////////////////////////////////////
// VmaDefragmentationAlgorithm_Fast

VmaDefragmentationAlgorithm_Fast::VmaDefragmentationAlgorithm_Fast(
    VmaAllocator hAllocator,
    VmaBlockVector* pBlockVector,
    uint32_t currentFrameIndex,
    bool overlappingMoveSupported) :
    VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
    m_OverlappingMoveSupported(overlappingMoveSupported),
    m_AllocationCount(0),
    m_AllAllocations(false),
    m_BytesMoved(0),
    m_AllocationsMoved(0),
    m_BlockInfos(VmaStlAllocator<BlockInfo>(hAllocator->GetAllocationCallbacks()))
{
    VMA_ASSERT(VMA_DEBUG_MARGIN == 0);
}

VmaDefragmentationAlgorithm_Fast::~VmaDefragmentationAlgorithm_Fast()
{
}

VkResult VmaDefragmentationAlgorithm_Fast::Defragment(
    VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    VkDeviceSize maxBytesToMove,
    uint32_t maxAllocationsToMove)
{
    VMA_ASSERT(m_AllAllocations || m_pBlockVector->CalcAllocationCount() == m_AllocationCount);

    const size_t blockCount = m_pBlockVector->GetBlockCount();
    if(blockCount == 0 || maxBytesToMove == 0 || maxAllocationsToMove == 0)
    {
        return VK_SUCCESS;
    }

    PreprocessMetadata();

    // Sort blocks in order from most "destination" (least free space) to most "source" (most free space).

    m_BlockInfos.resize(blockCount);
    for(size_t i = 0; i < blockCount; ++i)
    {
        m_BlockInfos[i].origBlockIndex = i;
    }

    VMA_SORT(m_BlockInfos.begin(), m_BlockInfos.end(), [this](const BlockInfo& lhs, const BlockInfo& rhs) -> bool {
        return m_pBlockVector->GetBlock(lhs.origBlockIndex)->m_pMetadata->GetSumFreeSize() <
            m_pBlockVector->GetBlock(rhs.origBlockIndex)->m_pMetadata->GetSumFreeSize();
    });

    // THE MAIN ALGORITHM

    FreeSpaceDatabase freeSpaceDb;

    size_t dstBlockInfoIndex = 0;
    size_t dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
    VmaDeviceMemoryBlock* pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
    VmaBlockMetadata_Generic* pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
    VkDeviceSize dstBlockSize = pDstMetadata->GetSize();
    VkDeviceSize dstOffset = 0;

    bool end = false;
    for(size_t srcBlockInfoIndex = 0; !end && srcBlockInfoIndex < blockCount; ++srcBlockInfoIndex)
    {
        const size_t srcOrigBlockIndex = m_BlockInfos[srcBlockInfoIndex].origBlockIndex;
        VmaDeviceMemoryBlock* const pSrcBlock = m_pBlockVector->GetBlock(srcOrigBlockIndex);
        VmaBlockMetadata_Generic* const pSrcMetadata = (VmaBlockMetadata_Generic*)pSrcBlock->m_pMetadata;
        for(VmaSuballocationList::iterator srcSuballocIt = pSrcMetadata->m_Suballocations.begin();
            !end && srcSuballocIt != pSrcMetadata->m_Suballocations.end(); )
        {
            VmaAllocation_T* const pAlloc = srcSuballocIt->hAllocation;
            const VkDeviceSize srcAllocAlignment = pAlloc->GetAlignment();
            const VkDeviceSize srcAllocSize = srcSuballocIt->size;
            if(m_AllocationsMoved == maxAllocationsToMove ||
                m_BytesMoved + srcAllocSize > maxBytesToMove)
            {
                end = true;
                break;
            }
            const VkDeviceSize srcAllocOffset = srcSuballocIt->offset;

            // Try to place it in one of free spaces from the database.
            size_t freeSpaceInfoIndex;
            VkDeviceSize dstAllocOffset;
            if(freeSpaceDb.Fetch(srcAllocAlignment, srcAllocSize,
                freeSpaceInfoIndex, dstAllocOffset))
            {
                size_t freeSpaceOrigBlockIndex = m_BlockInfos[freeSpaceInfoIndex].origBlockIndex;
                VmaDeviceMemoryBlock* pFreeSpaceBlock = m_pBlockVector->GetBlock(freeSpaceOrigBlockIndex);
                VmaBlockMetadata_Generic* pFreeSpaceMetadata = (VmaBlockMetadata_Generic*)pFreeSpaceBlock->m_pMetadata;
                VkDeviceSize freeSpaceBlockSize = pFreeSpaceMetadata->GetSize();

                // Same block
                if(freeSpaceInfoIndex == srcBlockInfoIndex)
                {
                    VMA_ASSERT(dstAllocOffset <= srcAllocOffset);

                    // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.

                    VmaSuballocation suballoc = *srcSuballocIt;
                    suballoc.offset = dstAllocOffset;
                    suballoc.hAllocation->ChangeOffset(dstAllocOffset);
                    m_BytesMoved += srcAllocSize;
                    ++m_AllocationsMoved;

                    VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
                    ++nextSuballocIt;
                    pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
                    srcSuballocIt = nextSuballocIt;

                    InsertSuballoc(pFreeSpaceMetadata, suballoc);

                    VmaDefragmentationMove move = {
                        srcOrigBlockIndex, freeSpaceOrigBlockIndex,
                        srcAllocOffset, dstAllocOffset,
                        srcAllocSize };
                    moves.push_back(move);
                }
                // Different block
                else
                {
                    // MOVE OPTION 2: Move the allocation to a different block.

                    VMA_ASSERT(freeSpaceInfoIndex < srcBlockInfoIndex);

                    VmaSuballocation suballoc = *srcSuballocIt;
                    suballoc.offset = dstAllocOffset;
                    suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pFreeSpaceBlock, dstAllocOffset);
                    m_BytesMoved += srcAllocSize;
                    ++m_AllocationsMoved;

                    VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
                    ++nextSuballocIt;
                    pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
                    srcSuballocIt = nextSuballocIt;

                    InsertSuballoc(pFreeSpaceMetadata, suballoc);

                    VmaDefragmentationMove move = {
                        srcOrigBlockIndex, freeSpaceOrigBlockIndex,
                        srcAllocOffset, dstAllocOffset,
                        srcAllocSize };
                    moves.push_back(move);
                }
            }
            else
            {
                dstAllocOffset = VmaAlignUp(dstOffset, srcAllocAlignment);

                // If the allocation doesn't fit before the end of dstBlock, forward to next block.
                while(dstBlockInfoIndex < srcBlockInfoIndex &&
                    dstAllocOffset + srcAllocSize > dstBlockSize)
                {
                    // But before that, register remaining free space at the end of dst block.
                    freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, dstBlockSize - dstOffset);

                    ++dstBlockInfoIndex;
                    dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
                    pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
                    pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
                    dstBlockSize = pDstMetadata->GetSize();
                    dstOffset = 0;
                    dstAllocOffset = 0;
                }

                // Same block
                if(dstBlockInfoIndex == srcBlockInfoIndex)
                {
                    VMA_ASSERT(dstAllocOffset <= srcAllocOffset);

                    const bool overlap = dstAllocOffset + srcAllocSize > srcAllocOffset;

                    bool skipOver = overlap;
                    if(overlap && m_OverlappingMoveSupported && dstAllocOffset < srcAllocOffset)
                    {
                        // If destination and source place overlap, skip if it would move it
                        // by only < 1/64 of its size.
                        skipOver = (srcAllocOffset - dstAllocOffset) * 64 < srcAllocSize;
                    }

                    if(skipOver)
                    {
                        freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, srcAllocOffset - dstOffset);

                        dstOffset = srcAllocOffset + srcAllocSize;
                        ++srcSuballocIt;
                    }
                    // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
                    else
                    {
                        srcSuballocIt->offset = dstAllocOffset;
                        srcSuballocIt->hAllocation->ChangeOffset(dstAllocOffset);
                        dstOffset = dstAllocOffset + srcAllocSize;
                        m_BytesMoved += srcAllocSize;
                        ++m_AllocationsMoved;
                        ++srcSuballocIt;
                        VmaDefragmentationMove move = {
                            srcOrigBlockIndex, dstOrigBlockIndex,
                            srcAllocOffset, dstAllocOffset,
                            srcAllocSize };
                        moves.push_back(move);
                    }
                }
                // Different block
                else
                {
                    // MOVE OPTION 2: Move the allocation to a different block.

                    VMA_ASSERT(dstBlockInfoIndex < srcBlockInfoIndex);
                    VMA_ASSERT(dstAllocOffset + srcAllocSize <= dstBlockSize);

                    VmaSuballocation suballoc = *srcSuballocIt;
                    suballoc.offset = dstAllocOffset;
                    suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlock, dstAllocOffset);
                    dstOffset = dstAllocOffset + srcAllocSize;
                    m_BytesMoved += srcAllocSize;
                    ++m_AllocationsMoved;

                    VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
                    ++nextSuballocIt;
                    pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
                    srcSuballocIt = nextSuballocIt;

                    pDstMetadata->m_Suballocations.push_back(suballoc);

                    VmaDefragmentationMove move = {
                        srcOrigBlockIndex, dstOrigBlockIndex,
                        srcAllocOffset, dstAllocOffset,
                        srcAllocSize };
                    moves.push_back(move);
                }
            }
        }
    }

    m_BlockInfos.clear();

    PostprocessMetadata();

    return VK_SUCCESS;
}

void VmaDefragmentationAlgorithm_Fast::PreprocessMetadata()
{
    const size_t blockCount = m_pBlockVector->GetBlockCount();
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        VmaBlockMetadata_Generic* const pMetadata =
            (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
        pMetadata->m_FreeCount = 0;
        pMetadata->m_SumFreeSize = pMetadata->GetSize();
        pMetadata->m_FreeSuballocationsBySize.clear();
        for(VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
            it != pMetadata->m_Suballocations.end(); )
        {
            if(it->type == VMA_SUBALLOCATION_TYPE_FREE)
            {
                VmaSuballocationList::iterator nextIt = it;
                ++nextIt;
                pMetadata->m_Suballocations.erase(it);
                it = nextIt;
            }
            else
            {
                ++it;
            }
        }
    }
}
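
// Editor's note (added comment): PreprocessMetadata() strips all FREE
// suballocations so that each block's list temporarily holds only real
// allocations; the compaction loop above then treats free space implicitly via
// dstOffset. PostprocessMetadata() below re-derives the free entries, the
// counters, and the by-size index from the surviving allocations.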

void VmaDefragmentationAlgorithm_Fast::PostprocessMetadata()
{
    const size_t blockCount = m_pBlockVector->GetBlockCount();
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        VmaBlockMetadata_Generic* const pMetadata =
            (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
        const VkDeviceSize blockSize = pMetadata->GetSize();

        // No allocations in this block - entire area is free.
        if(pMetadata->m_Suballocations.empty())
        {
            pMetadata->m_FreeCount = 1;
            //pMetadata->m_SumFreeSize is already set to blockSize.
            VmaSuballocation suballoc = {
                0, // offset
                blockSize, // size
                VMA_NULL, // hAllocation
                VMA_SUBALLOCATION_TYPE_FREE };
            pMetadata->m_Suballocations.push_back(suballoc);
            pMetadata->RegisterFreeSuballocation(pMetadata->m_Suballocations.begin());
        }
        // There are some allocations in this block.
        else
        {
            VkDeviceSize offset = 0;
            VmaSuballocationList::iterator it;
            for(it = pMetadata->m_Suballocations.begin();
                it != pMetadata->m_Suballocations.end();
                ++it)
            {
                VMA_ASSERT(it->type != VMA_SUBALLOCATION_TYPE_FREE);
                VMA_ASSERT(it->offset >= offset);

                // Need to insert preceding free space.
                if(it->offset > offset)
                {
                    ++pMetadata->m_FreeCount;
                    const VkDeviceSize freeSize = it->offset - offset;
                    VmaSuballocation suballoc = {
                        offset, // offset
                        freeSize, // size
                        VMA_NULL, // hAllocation
                        VMA_SUBALLOCATION_TYPE_FREE };
                    VmaSuballocationList::iterator precedingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
                    if(freeSize >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
                    {
                        pMetadata->m_FreeSuballocationsBySize.push_back(precedingFreeIt);
                    }
                }

                pMetadata->m_SumFreeSize -= it->size;
                offset = it->offset + it->size;
            }

            // Need to insert trailing free space.
            if(offset < blockSize)
            {
                ++pMetadata->m_FreeCount;
                const VkDeviceSize freeSize = blockSize - offset;
                VmaSuballocation suballoc = {
                    offset, // offset
                    freeSize, // size
                    VMA_NULL, // hAllocation
                    VMA_SUBALLOCATION_TYPE_FREE };
                VMA_ASSERT(it == pMetadata->m_Suballocations.end());
                VmaSuballocationList::iterator trailingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
                // Same >= threshold as the preceding-free-space case and RegisterFreeSuballocation().
                if(freeSize >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
                {
                    pMetadata->m_FreeSuballocationsBySize.push_back(trailingFreeIt);
                }
            }

            VMA_SORT(
                pMetadata->m_FreeSuballocationsBySize.begin(),
                pMetadata->m_FreeSuballocationsBySize.end(),
                VmaSuballocationItemSizeLess());
        }

        VMA_HEAVY_ASSERT(pMetadata->Validate());
    }
}

void VmaDefragmentationAlgorithm_Fast::InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc)
{
    // TODO: Optimize somehow. Remember iterator instead of searching for it linearly.
    VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
    // Advance to the first suballocation at an equal or greater offset.
    while(it != pMetadata->m_Suballocations.end() &&
        it->offset < suballoc.offset)
    {
        ++it;
    }
    pMetadata->m_Suballocations.insert(it, suballoc);
}

////////////////////////////////////////////////////////////////////////////////
// VmaBlockVectorDefragmentationContext

VmaBlockVectorDefragmentationContext::VmaBlockVectorDefragmentationContext(
    VmaAllocator hAllocator,
    VmaPool hCustomPool,
    VmaBlockVector* pBlockVector,
    uint32_t currFrameIndex,
    uint32_t algorithmFlags) :
    res(VK_SUCCESS),
    mutexLocked(false),
    blockContexts(VmaStlAllocator<VmaBlockDefragmentationContext>(hAllocator->GetAllocationCallbacks())),
    m_hAllocator(hAllocator),
    m_hCustomPool(hCustomPool),
    m_pBlockVector(pBlockVector),
    m_CurrFrameIndex(currFrameIndex),
    m_AlgorithmFlags(algorithmFlags),
    m_pAlgorithm(VMA_NULL),
    m_Allocations(VmaStlAllocator<AllocInfo>(hAllocator->GetAllocationCallbacks())),
    m_AllAllocations(false)
{
}

VmaBlockVectorDefragmentationContext::~VmaBlockVectorDefragmentationContext()
{
    vma_delete(m_hAllocator, m_pAlgorithm);
}

void VmaBlockVectorDefragmentationContext::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
{
    AllocInfo info = { hAlloc, pChanged };
    m_Allocations.push_back(info);
}

void VmaBlockVectorDefragmentationContext::Begin(bool overlappingMoveSupported)
{
    const bool allAllocations = m_AllAllocations ||
        m_Allocations.size() == m_pBlockVector->CalcAllocationCount();

    /********************************
    HERE IS THE CHOICE OF DEFRAGMENTATION ALGORITHM.
    ********************************/

    /*
    Fast algorithm is supported only when certain criteria are met:
    - VMA_DEBUG_MARGIN is 0.
    - All allocations in this block vector are movable.
    - There is no possibility of image/buffer granularity conflict.
    */
    if(VMA_DEBUG_MARGIN == 0 &&
        allAllocations &&
        !m_pBlockVector->IsBufferImageGranularityConflictPossible())
    {
        m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Fast)(
            m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
    }
    else
    {
        m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Generic)(
            m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
    }

    if(allAllocations)
    {
        m_pAlgorithm->AddAll();
    }
    else
    {
        for(size_t i = 0, count = m_Allocations.size(); i < count; ++i)
        {
            m_pAlgorithm->AddAllocation(m_Allocations[i].hAlloc, m_Allocations[i].pChanged);
        }
    }
}

////////////////////////////////////////////////////////////////////////////////
// VmaDefragmentationContext

VmaDefragmentationContext_T::VmaDefragmentationContext_T(
    VmaAllocator hAllocator,
    uint32_t currFrameIndex,
    uint32_t flags,
    VmaDefragmentationStats* pStats) :
    m_hAllocator(hAllocator),
    m_CurrFrameIndex(currFrameIndex),
    m_Flags(flags),
    m_pStats(pStats),
    m_CustomPoolContexts(VmaStlAllocator<VmaBlockVectorDefragmentationContext*>(hAllocator->GetAllocationCallbacks()))
{
    memset(m_DefaultPoolContexts, 0, sizeof(m_DefaultPoolContexts));
}

VmaDefragmentationContext_T::~VmaDefragmentationContext_T()
{
    for(size_t i = m_CustomPoolContexts.size(); i--; )
    {
        VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[i];
        pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
        vma_delete(m_hAllocator, pBlockVectorCtx);
    }
    for(size_t i = m_hAllocator->m_MemProps.memoryTypeCount; i--; )
    {
        VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[i];
        if(pBlockVectorCtx)
        {
            pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
            vma_delete(m_hAllocator, pBlockVectorCtx);
        }
    }
}

void VmaDefragmentationContext_T::AddPools(uint32_t poolCount, VmaPool* pPools)
{
    for(uint32_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
    {
        VmaPool pool = pPools[poolIndex];
        VMA_ASSERT(pool);
        // Pools with algorithm other than default are not defragmented.
        if(pool->m_BlockVector.GetAlgorithm() == 0)
        {
            VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;

            for(size_t i = m_CustomPoolContexts.size(); i--; )
            {
                if(m_CustomPoolContexts[i]->GetCustomPool() == pool)
                {
                    pBlockVectorDefragCtx = m_CustomPoolContexts[i];
                    break;
                }
            }

            if(!pBlockVectorDefragCtx)
            {
                pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
                    m_hAllocator,
                    pool,
                    &pool->m_BlockVector,
                    m_CurrFrameIndex,
                    m_Flags);
                m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
            }

            pBlockVectorDefragCtx->AddAll();
        }
    }
}

void VmaDefragmentationContext_T::AddAllocations(
    uint32_t allocationCount,
    VmaAllocation* pAllocations,
    VkBool32* pAllocationsChanged)
{
    // Dispatch pAllocations among defragmentation contexts. Create them when necessary.
    for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
    {
        const VmaAllocation hAlloc = pAllocations[allocIndex];
        VMA_ASSERT(hAlloc);
        // DedicatedAlloc cannot be defragmented.
        if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
            // Lost allocation cannot be defragmented.
            (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
        {
            VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;

            const VmaPool hAllocPool = hAlloc->GetPool();
            // This allocation belongs to a custom pool.
            if(hAllocPool != VK_NULL_HANDLE)
            {
                // Pools with algorithm other than default are not defragmented.
                if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
                {
                    for(size_t i = m_CustomPoolContexts.size(); i--; )
                    {
                        if(m_CustomPoolContexts[i]->GetCustomPool() == hAllocPool)
                        {
                            pBlockVectorDefragCtx = m_CustomPoolContexts[i];
                            break;
                        }
                    }
                    if(!pBlockVectorDefragCtx)
                    {
                        pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
                            m_hAllocator,
                            hAllocPool,
                            &hAllocPool->m_BlockVector,
                            m_CurrFrameIndex,
                            m_Flags);
                        m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
                    }
                }
            }
            // This allocation belongs to a default pool.
            else
            {
                const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
                pBlockVectorDefragCtx = m_DefaultPoolContexts[memTypeIndex];
                if(!pBlockVectorDefragCtx)
                {
                    pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
                        m_hAllocator,
                        VMA_NULL, // hCustomPool
                        m_hAllocator->m_pBlockVectors[memTypeIndex],
                        m_CurrFrameIndex,
                        m_Flags);
                    m_DefaultPoolContexts[memTypeIndex] = pBlockVectorDefragCtx;
                }
            }

            if(pBlockVectorDefragCtx)
            {
                VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
                    &pAllocationsChanged[allocIndex] : VMA_NULL;
                pBlockVectorDefragCtx->AddAllocation(hAlloc, pChanged);
            }
        }
    }
}

VkResult VmaDefragmentationContext_T::Defragment(
    VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
    VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
    VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats)
{
    if(pStats)
    {
        memset(pStats, 0, sizeof(VmaDefragmentationStats));
    }

    if(commandBuffer == VK_NULL_HANDLE)
    {
        maxGpuBytesToMove = 0;
        maxGpuAllocationsToMove = 0;
    }

    VkResult res = VK_SUCCESS;

    // Process default pools.
    for(uint32_t memTypeIndex = 0;
        memTypeIndex < m_hAllocator->GetMemoryTypeCount() && res >= VK_SUCCESS;
        ++memTypeIndex)
    {
        VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
        if(pBlockVectorCtx)
        {
            VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
            pBlockVectorCtx->GetBlockVector()->Defragment(
                pBlockVectorCtx,
                pStats,
                maxCpuBytesToMove, maxCpuAllocationsToMove,
                maxGpuBytesToMove, maxGpuAllocationsToMove,
                commandBuffer);
            if(pBlockVectorCtx->res != VK_SUCCESS)
            {
                res = pBlockVectorCtx->res;
            }
        }
    }

    // Process custom pools.
    for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
        customCtxIndex < customCtxCount && res >= VK_SUCCESS;
        ++customCtxIndex)
    {
        VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
        VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
        pBlockVectorCtx->GetBlockVector()->Defragment(
            pBlockVectorCtx,
            pStats,
            maxCpuBytesToMove, maxCpuAllocationsToMove,
            maxGpuBytesToMove, maxGpuAllocationsToMove,
            commandBuffer);
        if(pBlockVectorCtx->res != VK_SUCCESS)
        {
            res = pBlockVectorCtx->res;
        }
    }

    return res;
}

////////////////////////////////////////////////////////////////////////////////
// VmaRecorder

#if VMA_RECORDING_ENABLED

VmaRecorder::VmaRecorder() :
    m_UseMutex(true),
    m_Flags(0),
    m_File(VMA_NULL),
    m_Freq(INT64_MAX),
    m_StartCounter(INT64_MAX)
{
}

VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
{
    m_UseMutex = useMutex;
    m_Flags = settings.flags;

    QueryPerformanceFrequency((LARGE_INTEGER*)&m_Freq);
    QueryPerformanceCounter((LARGE_INTEGER*)&m_StartCounter);

    // Open file for writing.
    errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
    if(err != 0)
    {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    // Write header.
    fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
    fprintf(m_File, "%s\n", "1,5");

    return VK_SUCCESS;
}
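
// Editor's note (added example): the two fprintf calls above write this CSV
// header, where "1,5" is the version of the recording file format:
//
//   Vulkan Memory Allocator,Calls recording
//   1,5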

VmaRecorder::~VmaRecorder()
{
    if(m_File != VMA_NULL)
    {
        fclose(m_File);
    }
}

void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
    Flush();
}

void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
    Flush();
}

void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
        createInfo.memoryTypeIndex,
        createInfo.flags,
        createInfo.blockSize,
        (uint64_t)createInfo.minBlockCount,
        (uint64_t)createInfo.maxBlockCount,
        createInfo.frameInUseCount,
        pool);
    Flush();
}

void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
        pool);
    Flush();
}

void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
    const VkMemoryRequirements& vkMemReq,
    const VmaAllocationCreateInfo& createInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        vkMemReq.size,
        vkMemReq.alignment,
        vkMemReq.memoryTypeBits,
        createInfo.flags,
        createInfo.usage,
        createInfo.requiredFlags,
        createInfo.preferredFlags,
        createInfo.memoryTypeBits,
        createInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}

void VmaRecorder::RecordAllocateMemoryPages(uint32_t frameIndex,
    const VkMemoryRequirements& vkMemReq,
    const VmaAllocationCreateInfo& createInfo,
    uint64_t allocationCount,
    const VmaAllocation* pAllocations)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryPages,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,", callParams.threadId, callParams.time, frameIndex,
        vkMemReq.size,
        vkMemReq.alignment,
        vkMemReq.memoryTypeBits,
        createInfo.flags,
        createInfo.usage,
        createInfo.requiredFlags,
        createInfo.preferredFlags,
        createInfo.memoryTypeBits,
        createInfo.pool);
    PrintPointerList(allocationCount, pAllocations);
    fprintf(m_File, ",%s\n", userDataStr.GetString());
    Flush();
}

void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
    const VkMemoryRequirements& vkMemReq,
    bool requiresDedicatedAllocation,
    bool prefersDedicatedAllocation,
    const VmaAllocationCreateInfo& createInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        vkMemReq.size,
        vkMemReq.alignment,
        vkMemReq.memoryTypeBits,
        requiresDedicatedAllocation ? 1 : 0,
        prefersDedicatedAllocation ? 1 : 0,
        createInfo.flags,
        createInfo.usage,
        createInfo.requiredFlags,
        createInfo.preferredFlags,
        createInfo.memoryTypeBits,
        createInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}

void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
    const VkMemoryRequirements& vkMemReq,
    bool requiresDedicatedAllocation,
    bool prefersDedicatedAllocation,
    const VmaAllocationCreateInfo& createInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        vkMemReq.size,
        vkMemReq.alignment,
        vkMemReq.memoryTypeBits,
        requiresDedicatedAllocation ? 1 : 0,
        prefersDedicatedAllocation ? 1 : 0,
        createInfo.flags,
        createInfo.usage,
        createInfo.requiredFlags,
        createInfo.preferredFlags,
        createInfo.memoryTypeBits,
        createInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}

void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
        allocation);
    Flush();
}

void VmaRecorder::RecordFreeMemoryPages(uint32_t frameIndex,
    uint64_t allocationCount,
    const VmaAllocation* pAllocations)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaFreeMemoryPages,", callParams.threadId, callParams.time, frameIndex);
    PrintPointerList(allocationCount, pAllocations);
    fprintf(m_File, "\n");
    Flush();
}

void VmaRecorder::RecordResizeAllocation(
    uint32_t frameIndex,
    VmaAllocation allocation,
    VkDeviceSize newSize)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaResizeAllocation,%p,%llu\n", callParams.threadId, callParams.time, frameIndex,
        allocation, newSize);
    Flush();
}

void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
    VmaAllocation allocation,
    const void* pUserData)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(
        allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
        pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        allocation,
        userDataStr.GetString());
    Flush();
}

void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
        allocation);
    Flush();
}

void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
        allocation);
    Flush();
}

void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
        allocation);
    Flush();
}

void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
    VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
        allocation,
13676  offset,
13677  size);
13678  Flush();
13679 }
13680 
13681 void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
13682  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
13683 {
13684  CallParams callParams;
13685  GetBasicParams(callParams);
13686 
13687  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13688  fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
13689  allocation,
13690  offset,
13691  size);
13692  Flush();
13693 }
13694 
13695 void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
13696  const VkBufferCreateInfo& bufCreateInfo,
13697  const VmaAllocationCreateInfo& allocCreateInfo,
13698  VmaAllocation allocation)
13699 {
13700  CallParams callParams;
13701  GetBasicParams(callParams);
13702 
13703  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13704  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
13705  fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13706  bufCreateInfo.flags,
13707  bufCreateInfo.size,
13708  bufCreateInfo.usage,
13709  bufCreateInfo.sharingMode,
13710  allocCreateInfo.flags,
13711  allocCreateInfo.usage,
13712  allocCreateInfo.requiredFlags,
13713  allocCreateInfo.preferredFlags,
13714  allocCreateInfo.memoryTypeBits,
13715  allocCreateInfo.pool,
13716  allocation,
13717  userDataStr.GetString());
13718  Flush();
13719 }
13720 
13721 void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
13722  const VkImageCreateInfo& imageCreateInfo,
13723  const VmaAllocationCreateInfo& allocCreateInfo,
13724  VmaAllocation allocation)
13725 {
13726  CallParams callParams;
13727  GetBasicParams(callParams);
13728 
13729  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13730  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
13731  fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13732  imageCreateInfo.flags,
13733  imageCreateInfo.imageType,
13734  imageCreateInfo.format,
13735  imageCreateInfo.extent.width,
13736  imageCreateInfo.extent.height,
13737  imageCreateInfo.extent.depth,
13738  imageCreateInfo.mipLevels,
13739  imageCreateInfo.arrayLayers,
13740  imageCreateInfo.samples,
13741  imageCreateInfo.tiling,
13742  imageCreateInfo.usage,
13743  imageCreateInfo.sharingMode,
13744  imageCreateInfo.initialLayout,
13745  allocCreateInfo.flags,
13746  allocCreateInfo.usage,
13747  allocCreateInfo.requiredFlags,
13748  allocCreateInfo.preferredFlags,
13749  allocCreateInfo.memoryTypeBits,
13750  allocCreateInfo.pool,
13751  allocation,
13752  userDataStr.GetString());
13753  Flush();
13754 }
13755 
13756 void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
13757  VmaAllocation allocation)
13758 {
13759  CallParams callParams;
13760  GetBasicParams(callParams);
13761 
13762  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13763  fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
13764  allocation);
13765  Flush();
13766 }
13767 
13768 void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
13769  VmaAllocation allocation)
13770 {
13771  CallParams callParams;
13772  GetBasicParams(callParams);
13773 
13774  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13775  fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
13776  allocation);
13777  Flush();
13778 }
13779 
13780 void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
13781  VmaAllocation allocation)
13782 {
13783  CallParams callParams;
13784  GetBasicParams(callParams);
13785 
13786  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13787  fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
13788  allocation);
13789  Flush();
13790 }
13791 
13792 void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
13793  VmaAllocation allocation)
13794 {
13795  CallParams callParams;
13796  GetBasicParams(callParams);
13797 
13798  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13799  fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
13800  allocation);
13801  Flush();
13802 }
13803 
13804 void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
13805  VmaPool pool)
13806 {
13807  CallParams callParams;
13808  GetBasicParams(callParams);
13809 
13810  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13811  fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
13812  pool);
13813  Flush();
13814 }
13815 
13816 void VmaRecorder::RecordDefragmentationBegin(uint32_t frameIndex,
13817  const VmaDefragmentationInfo2& info,
13818  VmaDefragmentationContext ctx)
13819 {
13820  CallParams callParams;
13821  GetBasicParams(callParams);
13822 
13823  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13824  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationBegin,%u,", callParams.threadId, callParams.time, frameIndex,
13825  info.flags);
13826  PrintPointerList(info.allocationCount, info.pAllocations);
13827  fprintf(m_File, ",");
13828  PrintPointerList(info.poolCount, info.pPools);
13829  fprintf(m_File, ",%llu,%u,%llu,%u,%p,%p\n",
13830  info.maxCpuBytesToMove,
13831  info.maxCpuAllocationsToMove,
13832  info.maxGpuBytesToMove,
13833  info.maxGpuAllocationsToMove,
13834  info.commandBuffer,
13835  ctx);
13836  Flush();
13837 }
13838 
13839 void VmaRecorder::RecordDefragmentationEnd(uint32_t frameIndex,
13840  VmaDefragmentationContext ctx)
13841 {
13842  CallParams callParams;
13843  GetBasicParams(callParams);
13844 
13845  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13846  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationEnd,%p\n", callParams.threadId, callParams.time, frameIndex,
13847  ctx);
13848  Flush();
13849 }
13850 
13851 VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
13852 {
13853  if(pUserData != VMA_NULL)
13854  {
13855  if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
13856  {
13857  m_Str = (const char*)pUserData;
13858  }
13859  else
13860  {
13861  sprintf_s(m_PtrStr, "%p", pUserData);
13862  m_Str = m_PtrStr;
13863  }
13864  }
13865  else
13866  {
13867  m_Str = "";
13868  }
13869 }
13870 
13871 void VmaRecorder::WriteConfiguration(
13872  const VkPhysicalDeviceProperties& devProps,
13873  const VkPhysicalDeviceMemoryProperties& memProps,
13874  bool dedicatedAllocationExtensionEnabled)
13875 {
13876  fprintf(m_File, "Config,Begin\n");
13877 
13878  fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
13879  fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
13880  fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
13881  fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
13882  fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
13883  fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);
13884 
13885  fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
13886  fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
13887  fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);
13888 
13889  fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
13890  for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
13891  {
13892  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
13893  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
13894  }
13895  fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
13896  for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
13897  {
13898  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
13899  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
13900  }
13901 
13902  fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);
13903 
13904  fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
13905  fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
13906  fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
13907  fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
13908  fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
13909  fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
13910  fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
13911  fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
13912  fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
13913 
13914  fprintf(m_File, "Config,End\n");
13915 }
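// Illustrative, abridged sample of the configuration block written above (every
// value is device-, driver-, and build-specific; only a few lines are shown):
//
//   Config,Begin
//   PhysicalDevice,apiVersion,4198494
//   PhysicalDevice,vendorID,4098
//   PhysicalDeviceMemory,HeapCount,3
//   Macro,VMA_DEBUG_MARGIN,0
//   Config,End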
13916 
13917 void VmaRecorder::GetBasicParams(CallParams& outParams)
13918 {
13919  outParams.threadId = GetCurrentThreadId();
13920 
13921  LARGE_INTEGER counter;
13922  QueryPerformanceCounter(&counter);
13923  outParams.time = (double)(counter.QuadPart - m_StartCounter) / (double)m_Freq;
13924 }
13925 
13926 void VmaRecorder::PrintPointerList(uint64_t count, const VmaAllocation* pItems)
13927 {
13928  if(count)
13929  {
13930  fprintf(m_File, "%p", pItems[0]);
13931  for(uint64_t i = 1; i < count; ++i)
13932  {
13933  fprintf(m_File, " %p", pItems[i]);
13934  }
13935  }
13936 }
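// Illustrative: for count == 3 the loop above emits a single CSV field of
// space-separated pointers with no trailing separator, e.g.
// "000001A0 000001B0 000001C0", presumably so a replay tool can split the one
// comma-delimited field on spaces.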
13937 
13938 void VmaRecorder::Flush()
13939 {
13940  if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
13941  {
13942  fflush(m_File);
13943  }
13944 }
13945 
13946 #endif // #if VMA_RECORDING_ENABLED
13947 
13948 ////////////////////////////////////////////////////////////////////////////////
13949 // VmaAllocator_T
13950 
13951 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
13952  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
13953  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
13954  m_hDevice(pCreateInfo->device),
13955  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
13956  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
13957  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
13958  m_PreferredLargeHeapBlockSize(0),
13959  m_PhysicalDevice(pCreateInfo->physicalDevice),
13960  m_CurrentFrameIndex(0),
13961  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
13962  m_NextPoolId(0)
13963 #if VMA_RECORDING_ENABLED
13964  ,m_pRecorder(VMA_NULL)
13965 #endif
13966 {
13967  if(VMA_DEBUG_DETECT_CORRUPTION)
13968  {
13969  // Needs to be a multiple of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
13970  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
13971  }
13972 
13973  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
13974 
13975 #if !(VMA_DEDICATED_ALLOCATION)
13976  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0)
13977  {
13978  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
13979  }
13980 #endif
13981 
13982  memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks));
13983  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
13984  memset(&m_MemProps, 0, sizeof(m_MemProps));
13985 
13986  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
13987  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
13988 
13989  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
13990  {
13991  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
13992  }
13993 
13994  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
13995  {
13996  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
13997  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
13998  }
13999 
14000  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
14001 
14002  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
14003  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
14004 
14005  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
14006  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
14007  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
14008  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
14009 
14010  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
14011  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
14012 
14013  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
14014  {
14015  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
14016  {
14017  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
14018  if(limit != VK_WHOLE_SIZE)
14019  {
14020  m_HeapSizeLimit[heapIndex] = limit;
14021  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
14022  {
14023  m_MemProps.memoryHeaps[heapIndex].size = limit;
14024  }
14025  }
14026  }
14027  }
14028 
14029  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
14030  {
14031  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
14032 
14033  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
14034  this,
14035  memTypeIndex,
14036  preferredBlockSize,
14037  0,
14038  SIZE_MAX,
14039  GetBufferImageGranularity(),
14040  pCreateInfo->frameInUseCount,
14041  false, // isCustomPool
14042  false, // explicitBlockSize
14043  false); // linearAlgorithm
14044  // No need to call m_pBlockVectors[memTypeIndex]->CreateMinBlocks here,
14045  // because minBlockCount is 0.
14046  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
14047 
14048  }
14049 }
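// Hypothetical usage sketch (not part of the library): capping heap 0 at
// 256 MiB via VmaAllocatorCreateInfo::pHeapSizeLimit, which feeds the loop in
// the constructor above. VK_WHOLE_SIZE leaves a heap unlimited; a limit smaller
// than the real heap size also shrinks the reported heap size, as seen above.
// allocatorCreateInfo is an assumed VmaAllocatorCreateInfo variable.
/*
VkDeviceSize heapSizeLimit[VK_MAX_MEMORY_HEAPS];
for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
{
    heapSizeLimit[i] = VK_WHOLE_SIZE;
}
heapSizeLimit[0] = 256ull * 1024 * 1024;
allocatorCreateInfo.pHeapSizeLimit = heapSizeLimit;
*/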
14050 
14051 VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
14052 {
14053  VkResult res = VK_SUCCESS;
14054 
14055  if(pCreateInfo->pRecordSettings != VMA_NULL &&
14056  !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
14057  {
14058 #if VMA_RECORDING_ENABLED
14059  m_pRecorder = vma_new(this, VmaRecorder)();
14060  res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
14061  if(res != VK_SUCCESS)
14062  {
14063  return res;
14064  }
14065  m_pRecorder->WriteConfiguration(
14066  m_PhysicalDeviceProperties,
14067  m_MemProps,
14068  m_UseKhrDedicatedAllocation);
14069  m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
14070 #else
14071  VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
14072  return VK_ERROR_FEATURE_NOT_PRESENT;
14073 #endif
14074  }
14075 
14076  return res;
14077 }
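// Hypothetical usage sketch: enabling the recorder that Init sets up above.
// Requires compiling with VMA_RECORDING_ENABLED defined to 1; otherwise, as
// seen above, Init returns VK_ERROR_FEATURE_NOT_PRESENT. allocatorCreateInfo
// is an assumed VmaAllocatorCreateInfo variable.
/*
VmaRecordSettings recordSettings = {};
recordSettings.flags = VMA_RECORD_FLUSH_AFTER_CALL_BIT; // flush after every call
recordSettings.pFilePath = "vma_record.csv";
allocatorCreateInfo.pRecordSettings = &recordSettings;
*/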
14078 
14079 VmaAllocator_T::~VmaAllocator_T()
14080 {
14081 #if VMA_RECORDING_ENABLED
14082  if(m_pRecorder != VMA_NULL)
14083  {
14084  m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
14085  vma_delete(this, m_pRecorder);
14086  }
14087 #endif
14088 
14089  VMA_ASSERT(m_Pools.empty());
14090 
14091  for(size_t i = GetMemoryTypeCount(); i--; )
14092  {
14093  vma_delete(this, m_pDedicatedAllocations[i]);
14094  vma_delete(this, m_pBlockVectors[i]);
14095  }
14096 }
14097 
14098 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
14099 {
14100 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
14101  m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
14102  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
14103  m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
14104  m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
14105  m_VulkanFunctions.vkMapMemory = &vkMapMemory;
14106  m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
14107  m_VulkanFunctions.vkFlushMappedMemoryRanges = &vkFlushMappedMemoryRanges;
14108  m_VulkanFunctions.vkInvalidateMappedMemoryRanges = &vkInvalidateMappedMemoryRanges;
14109  m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
14110  m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
14111  m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
14112  m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
14113  m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
14114  m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
14115  m_VulkanFunctions.vkCreateImage = &vkCreateImage;
14116  m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
14117  m_VulkanFunctions.vkCmdCopyBuffer = &vkCmdCopyBuffer;
14118 #if VMA_DEDICATED_ALLOCATION
14119  if(m_UseKhrDedicatedAllocation)
14120  {
14121  m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
14122  (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
14123  m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
14124  (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
14125  }
14126 #endif // #if VMA_DEDICATED_ALLOCATION
14127 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
14128 
14129 #define VMA_COPY_IF_NOT_NULL(funcName) \
14130  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
14131 
14132  if(pVulkanFunctions != VMA_NULL)
14133  {
14134  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
14135  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
14136  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
14137  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
14138  VMA_COPY_IF_NOT_NULL(vkMapMemory);
14139  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
14140  VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
14141  VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
14142  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
14143  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
14144  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
14145  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
14146  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
14147  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
14148  VMA_COPY_IF_NOT_NULL(vkCreateImage);
14149  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
14150  VMA_COPY_IF_NOT_NULL(vkCmdCopyBuffer);
14151 #if VMA_DEDICATED_ALLOCATION
14152  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
14153  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
14154 #endif
14155  }
14156 
14157 #undef VMA_COPY_IF_NOT_NULL
14158 
14159  // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
14160  // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
14161  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
14162  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
14163  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
14164  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
14165  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
14166  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
14167  VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
14168  VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
14169  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
14170  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
14171  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
14172  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
14173  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
14174  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
14175  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
14176  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
14177  VMA_ASSERT(m_VulkanFunctions.vkCmdCopyBuffer != VMA_NULL);
14178 #if VMA_DEDICATED_ALLOCATION
14179  if(m_UseKhrDedicatedAllocation)
14180  {
14181  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
14182  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
14183  }
14184 #endif
14185 }
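// Hypothetical usage sketch, assuming the library is compiled with
// VMA_STATIC_VULKAN_FUNCTIONS defined to 0: the application supplies the entry
// points itself so that the asserts above pass. allocatorCreateInfo is an
// assumed VmaAllocatorCreateInfo variable.
/*
VmaVulkanFunctions vulkanFunctions = {};
vulkanFunctions.vkGetPhysicalDeviceProperties = vkGetPhysicalDeviceProperties;
vulkanFunctions.vkGetPhysicalDeviceMemoryProperties = vkGetPhysicalDeviceMemoryProperties;
vulkanFunctions.vkAllocateMemory = vkAllocateMemory;
vulkanFunctions.vkFreeMemory = vkFreeMemory;
// ...and so on for the remaining members, including the *2KHR functions when
// VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT is used.
allocatorCreateInfo.pVulkanFunctions = &vulkanFunctions;
*/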
14186 
14187 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
14188 {
14189  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
14190  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
14191  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
14192  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
14193 }
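// Worked example (illustrative, using the defaults VMA_SMALL_HEAP_MAX_SIZE =
// 1 GiB and VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE = 256 MiB): a 512 MiB heap counts
// as small and gets 512 / 8 = 64 MiB blocks, while an 8 GiB heap gets the
// preferred 256 MiB blocks.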
14194 
14195 VkResult VmaAllocator_T::AllocateMemoryOfType(
14196  VkDeviceSize size,
14197  VkDeviceSize alignment,
14198  bool dedicatedAllocation,
14199  VkBuffer dedicatedBuffer,
14200  VkImage dedicatedImage,
14201  const VmaAllocationCreateInfo& createInfo,
14202  uint32_t memTypeIndex,
14203  VmaSuballocationType suballocType,
14204  size_t allocationCount,
14205  VmaAllocation* pAllocations)
14206 {
14207  VMA_ASSERT(pAllocations != VMA_NULL);
14208  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, AllocationCount=%zu, Size=%llu", memTypeIndex, allocationCount, size);
14209 
14210  VmaAllocationCreateInfo finalCreateInfo = createInfo;
14211 
14212  // If memory type is not HOST_VISIBLE, disable MAPPED.
14213  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
14214  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
14215  {
14216  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
14217  }
14218 
14219  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
14220  VMA_ASSERT(blockVector);
14221 
14222  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
14223  bool preferDedicatedMemory =
14224  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
14225  dedicatedAllocation ||
14226  // Heuristic: allocate dedicated memory if the requested size is greater than half of the preferred block size.
14227  size > preferredBlockSize / 2;
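 // Illustrative: with the default 256 MiB preferred block size, any request
 // larger than 128 MiB is routed to a dedicated allocation by this heuristic.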
14228 
14229  if(preferDedicatedMemory &&
14230  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
14231  finalCreateInfo.pool == VK_NULL_HANDLE)
14232  {
14233  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
14234  }
14235 
14236  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
14237  {
14238  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14239  {
14240  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14241  }
14242  else
14243  {
14244  return AllocateDedicatedMemory(
14245  size,
14246  suballocType,
14247  memTypeIndex,
14248  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
14249  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
14250  finalCreateInfo.pUserData,
14251  dedicatedBuffer,
14252  dedicatedImage,
14253  allocationCount,
14254  pAllocations);
14255  }
14256  }
14257  else
14258  {
14259  VkResult res = blockVector->Allocate(
14260  VK_NULL_HANDLE, // hCurrentPool
14261  m_CurrentFrameIndex.load(),
14262  size,
14263  alignment,
14264  finalCreateInfo,
14265  suballocType,
14266  allocationCount,
14267  pAllocations);
14268  if(res == VK_SUCCESS)
14269  {
14270  return res;
14271  }
14272 
14273  // Try dedicated memory.
14274  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14275  {
14276  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14277  }
14278  else
14279  {
14280  res = AllocateDedicatedMemory(
14281  size,
14282  suballocType,
14283  memTypeIndex,
14284  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
14285  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
14286  finalCreateInfo.pUserData,
14287  dedicatedBuffer,
14288  dedicatedImage,
14289  allocationCount,
14290  pAllocations);
14291  if(res == VK_SUCCESS)
14292  {
14293  // Succeeded: AllocateDedicatedMemory function already filled pAllocations, nothing more to do here.
14294  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
14295  return VK_SUCCESS;
14296  }
14297  else
14298  {
14299  // Everything failed: Return error code.
14300  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
14301  return res;
14302  }
14303  }
14304  }
14305 }
14306 
14307 VkResult VmaAllocator_T::AllocateDedicatedMemory(
14308  VkDeviceSize size,
14309  VmaSuballocationType suballocType,
14310  uint32_t memTypeIndex,
14311  bool map,
14312  bool isUserDataString,
14313  void* pUserData,
14314  VkBuffer dedicatedBuffer,
14315  VkImage dedicatedImage,
14316  size_t allocationCount,
14317  VmaAllocation* pAllocations)
14318 {
14319  VMA_ASSERT(allocationCount > 0 && pAllocations);
14320 
14321  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
14322  allocInfo.memoryTypeIndex = memTypeIndex;
14323  allocInfo.allocationSize = size;
14324 
14325 #if VMA_DEDICATED_ALLOCATION
14326  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
14327  if(m_UseKhrDedicatedAllocation)
14328  {
14329  if(dedicatedBuffer != VK_NULL_HANDLE)
14330  {
14331  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
14332  dedicatedAllocInfo.buffer = dedicatedBuffer;
14333  allocInfo.pNext = &dedicatedAllocInfo;
14334  }
14335  else if(dedicatedImage != VK_NULL_HANDLE)
14336  {
14337  dedicatedAllocInfo.image = dedicatedImage;
14338  allocInfo.pNext = &dedicatedAllocInfo;
14339  }
14340  }
14341 #endif // #if VMA_DEDICATED_ALLOCATION
14342 
14343  size_t allocIndex;
14344  VkResult res;
14345  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
14346  {
14347  res = AllocateDedicatedMemoryPage(
14348  size,
14349  suballocType,
14350  memTypeIndex,
14351  allocInfo,
14352  map,
14353  isUserDataString,
14354  pUserData,
14355  pAllocations + allocIndex);
14356  if(res != VK_SUCCESS)
14357  {
14358  break;
14359  }
14360  }
14361 
14362  if(res == VK_SUCCESS)
14363  {
14364  // Register them in m_pDedicatedAllocations.
14365  {
14366  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
14367  AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
14368  VMA_ASSERT(pDedicatedAllocations);
14369  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
14370  {
14371  VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, pAllocations[allocIndex]);
14372  }
14373  }
14374 
14375  VMA_DEBUG_LOG(" Allocated DedicatedMemory Count=%zu, MemoryTypeIndex=#%u", allocationCount, memTypeIndex);
14376  }
14377  else
14378  {
14379  // Free all already created allocations.
14380  while(allocIndex--)
14381  {
14382  VmaAllocation currAlloc = pAllocations[allocIndex];
14383  VkDeviceMemory hMemory = currAlloc->GetMemory();
14384 
14385  /*
14386  There is no need to call this, because the Vulkan spec allows skipping vkUnmapMemory
14387  before vkFreeMemory.
14388 
14389  if(currAlloc->GetMappedData() != VMA_NULL)
14390  {
14391  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
14392  }
14393  */
14394 
14395  FreeVulkanMemory(memTypeIndex, currAlloc->GetSize(), hMemory);
14396 
14397  currAlloc->SetUserData(this, VMA_NULL);
14398  vma_delete(this, currAlloc);
14399  }
14400 
14401  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
14402  }
14403 
14404  return res;
14405 }
14406 
14407 VkResult VmaAllocator_T::AllocateDedicatedMemoryPage(
14408  VkDeviceSize size,
14409  VmaSuballocationType suballocType,
14410  uint32_t memTypeIndex,
14411  const VkMemoryAllocateInfo& allocInfo,
14412  bool map,
14413  bool isUserDataString,
14414  void* pUserData,
14415  VmaAllocation* pAllocation)
14416 {
14417  VkDeviceMemory hMemory = VK_NULL_HANDLE;
14418  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
14419  if(res < 0)
14420  {
14421  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
14422  return res;
14423  }
14424 
14425  void* pMappedData = VMA_NULL;
14426  if(map)
14427  {
14428  res = (*m_VulkanFunctions.vkMapMemory)(
14429  m_hDevice,
14430  hMemory,
14431  0,
14432  VK_WHOLE_SIZE,
14433  0,
14434  &pMappedData);
14435  if(res < 0)
14436  {
14437  VMA_DEBUG_LOG(" vkMapMemory FAILED");
14438  FreeVulkanMemory(memTypeIndex, size, hMemory);
14439  return res;
14440  }
14441  }
14442 
14443  *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load(), isUserDataString);
14444  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
14445  (*pAllocation)->SetUserData(this, pUserData);
14446  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
14447  {
14448  FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
14449  }
14450 
14451  return VK_SUCCESS;
14452 }
14453 
14454 void VmaAllocator_T::GetBufferMemoryRequirements(
14455  VkBuffer hBuffer,
14456  VkMemoryRequirements& memReq,
14457  bool& requiresDedicatedAllocation,
14458  bool& prefersDedicatedAllocation) const
14459 {
14460 #if VMA_DEDICATED_ALLOCATION
14461  if(m_UseKhrDedicatedAllocation)
14462  {
14463  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
14464  memReqInfo.buffer = hBuffer;
14465 
14466  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
14467 
14468  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
14469  memReq2.pNext = &memDedicatedReq;
14470 
14471  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
14472 
14473  memReq = memReq2.memoryRequirements;
14474  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
14475  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
14476  }
14477  else
14478 #endif // #if VMA_DEDICATED_ALLOCATION
14479  {
14480  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
14481  requiresDedicatedAllocation = false;
14482  prefersDedicatedAllocation = false;
14483  }
14484 }
14485 
14486 void VmaAllocator_T::GetImageMemoryRequirements(
14487  VkImage hImage,
14488  VkMemoryRequirements& memReq,
14489  bool& requiresDedicatedAllocation,
14490  bool& prefersDedicatedAllocation) const
14491 {
14492 #if VMA_DEDICATED_ALLOCATION
14493  if(m_UseKhrDedicatedAllocation)
14494  {
14495  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
14496  memReqInfo.image = hImage;
14497 
14498  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
14499 
14500  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
14501  memReq2.pNext = &memDedicatedReq;
14502 
14503  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
14504 
14505  memReq = memReq2.memoryRequirements;
14506  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
14507  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
14508  }
14509  else
14510 #endif // #if VMA_DEDICATED_ALLOCATION
14511  {
14512  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
14513  requiresDedicatedAllocation = false;
14514  prefersDedicatedAllocation = false;
14515  }
14516 }
14517 
14518 VkResult VmaAllocator_T::AllocateMemory(
14519  const VkMemoryRequirements& vkMemReq,
14520  bool requiresDedicatedAllocation,
14521  bool prefersDedicatedAllocation,
14522  VkBuffer dedicatedBuffer,
14523  VkImage dedicatedImage,
14524  const VmaAllocationCreateInfo& createInfo,
14525  VmaSuballocationType suballocType,
14526  size_t allocationCount,
14527  VmaAllocation* pAllocations)
14528 {
14529  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
14530 
14531  VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
14532 
14533  if(vkMemReq.size == 0)
14534  {
14535  return VK_ERROR_VALIDATION_FAILED_EXT;
14536  }
14537  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
14538  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14539  {
14540  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
14541  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14542  }
14543  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
14544  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0)
14545  {
14546  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
14547  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14548  }
14549  if(requiresDedicatedAllocation)
14550  {
14551  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14552  {
14553  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
14554  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14555  }
14556  if(createInfo.pool != VK_NULL_HANDLE)
14557  {
14558  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
14559  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14560  }
14561  }
14562  if((createInfo.pool != VK_NULL_HANDLE) &&
14563  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
14564  {
14565  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
14566  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14567  }
14568 
14569  if(createInfo.pool != VK_NULL_HANDLE)
14570  {
14571  const VkDeviceSize alignmentForPool = VMA_MAX(
14572  vkMemReq.alignment,
14573  GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
14574  return createInfo.pool->m_BlockVector.Allocate(
14575  createInfo.pool,
14576  m_CurrentFrameIndex.load(),
14577  vkMemReq.size,
14578  alignmentForPool,
14579  createInfo,
14580  suballocType,
14581  allocationCount,
14582  pAllocations);
14583  }
14584  else
14585  {
14586  // Bit mask of Vulkan memory types acceptable for this allocation.
14587  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
14588  uint32_t memTypeIndex = UINT32_MAX;
14589  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
14590  if(res == VK_SUCCESS)
14591  {
14592  VkDeviceSize alignmentForMemType = VMA_MAX(
14593  vkMemReq.alignment,
14594  GetMemoryTypeMinAlignment(memTypeIndex));
14595 
14596  res = AllocateMemoryOfType(
14597  vkMemReq.size,
14598  alignmentForMemType,
14599  requiresDedicatedAllocation || prefersDedicatedAllocation,
14600  dedicatedBuffer,
14601  dedicatedImage,
14602  createInfo,
14603  memTypeIndex,
14604  suballocType,
14605  allocationCount,
14606  pAllocations);
14607  // Succeeded on first try.
14608  if(res == VK_SUCCESS)
14609  {
14610  return res;
14611  }
14612  // Allocation from this memory type failed. Try other compatible memory types.
14613  else
14614  {
14615  for(;;)
14616  {
14617  // Remove old memTypeIndex from list of possibilities.
14618  memoryTypeBits &= ~(1u << memTypeIndex);
14619  // Find alternative memTypeIndex.
14620  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
14621  if(res == VK_SUCCESS)
14622  {
14623  alignmentForMemType = VMA_MAX(
14624  vkMemReq.alignment,
14625  GetMemoryTypeMinAlignment(memTypeIndex));
14626 
14627  res = AllocateMemoryOfType(
14628  vkMemReq.size,
14629  alignmentForMemType,
14630  requiresDedicatedAllocation || prefersDedicatedAllocation,
14631  dedicatedBuffer,
14632  dedicatedImage,
14633  createInfo,
14634  memTypeIndex,
14635  suballocType,
14636  allocationCount,
14637  pAllocations);
14638  // Allocation from this alternative memory type succeeded.
14639  if(res == VK_SUCCESS)
14640  {
14641  return res;
14642  }
14643  // else: Allocation from this memory type failed. Try next one - next loop iteration.
14644  }
14645  // No other matching memory type index could be found.
14646  else
14647  {
14648  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
14649  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14650  }
14651  }
14652  }
14653  }
14654  // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
14655  else
14656  return res;
14657  }
14658 }
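// Worked example (illustrative) of the fallback loop above: if
// vkMemReq.memoryTypeBits == 0b1011 and type 1 is chosen first but fails to
// allocate, the mask becomes 0b1001 and vmaFindMemoryTypeIndex is queried
// again; this repeats until an allocation succeeds or no compatible type
// remains, in which case VK_ERROR_OUT_OF_DEVICE_MEMORY is returned.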
14659 
14660 void VmaAllocator_T::FreeMemory(
14661  size_t allocationCount,
14662  const VmaAllocation* pAllocations)
14663 {
14664  VMA_ASSERT(pAllocations);
14665 
14666  for(size_t allocIndex = allocationCount; allocIndex--; )
14667  {
14668  VmaAllocation allocation = pAllocations[allocIndex];
14669 
14670  if(allocation != VK_NULL_HANDLE)
14671  {
14672  if(TouchAllocation(allocation))
14673  {
14674  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
14675  {
14676  FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
14677  }
14678 
14679  switch(allocation->GetType())
14680  {
14681  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
14682  {
14683  VmaBlockVector* pBlockVector = VMA_NULL;
14684  VmaPool hPool = allocation->GetPool();
14685  if(hPool != VK_NULL_HANDLE)
14686  {
14687  pBlockVector = &hPool->m_BlockVector;
14688  }
14689  else
14690  {
14691  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
14692  pBlockVector = m_pBlockVectors[memTypeIndex];
14693  }
14694  pBlockVector->Free(allocation);
14695  }
14696  break;
14697  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
14698  FreeDedicatedMemory(allocation);
14699  break;
14700  default:
14701  VMA_ASSERT(0);
14702  }
14703  }
14704 
14705  allocation->SetUserData(this, VMA_NULL);
14706  vma_delete(this, allocation);
14707  }
14708  }
14709 }
14710 
14711 VkResult VmaAllocator_T::ResizeAllocation(
14712  const VmaAllocation alloc,
14713  VkDeviceSize newSize)
14714 {
14715  if(newSize == 0 || alloc->GetLastUseFrameIndex() == VMA_FRAME_INDEX_LOST)
14716  {
14717  return VK_ERROR_VALIDATION_FAILED_EXT;
14718  }
14719  if(newSize == alloc->GetSize())
14720  {
14721  return VK_SUCCESS;
14722  }
14723 
14724  switch(alloc->GetType())
14725  {
14726  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
14727  return VK_ERROR_FEATURE_NOT_PRESENT;
14728  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
14729  if(alloc->GetBlock()->m_pMetadata->ResizeAllocation(alloc, newSize))
14730  {
14731  alloc->ChangeSize(newSize);
14732  VMA_HEAVY_ASSERT(alloc->GetBlock()->m_pMetadata->Validate());
14733  return VK_SUCCESS;
14734  }
14735  else
14736  {
14737  return VK_ERROR_OUT_OF_POOL_MEMORY;
14738  }
14739  default:
14740  VMA_ASSERT(0);
14741  return VK_ERROR_VALIDATION_FAILED_EXT;
14742  }
14743 }
14744 
14745 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
14746 {
14747  // Initialize.
14748  InitStatInfo(pStats->total);
14749  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
14750  InitStatInfo(pStats->memoryType[i]);
14751  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
14752  InitStatInfo(pStats->memoryHeap[i]);
14753 
14754  // Process default pools.
14755  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
14756  {
14757  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
14758  VMA_ASSERT(pBlockVector);
14759  pBlockVector->AddStats(pStats);
14760  }
14761 
14762  // Process custom pools.
14763  {
14764  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
14765  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
14766  {
14767  m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
14768  }
14769  }
14770 
14771  // Process dedicated allocations.
14772  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
14773  {
14774  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
14775  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
14776  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
14777  VMA_ASSERT(pDedicatedAllocVector);
14778  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
14779  {
14780  VmaStatInfo allocationStatInfo;
14781  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
14782  VmaAddStatInfo(pStats->total, allocationStatInfo);
14783  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
14784  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
14785  }
14786  }
14787 
14788  // Postprocess.
14789  VmaPostprocessCalcStatInfo(pStats->total);
14790  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
14791  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
14792  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
14793  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
14794 }
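// Hypothetical usage sketch of the public wrapper around the function above
// (allocator is an assumed VmaAllocator handle):
/*
VmaStats stats;
vmaCalculateStats(allocator, &stats);
printf("Total used bytes: %llu\n", stats.total.usedBytes);
*/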
14795 
14796 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
14797 
14798 VkResult VmaAllocator_T::DefragmentationBegin(
14799  const VmaDefragmentationInfo2& info,
14800  VmaDefragmentationStats* pStats,
14801  VmaDefragmentationContext* pContext)
14802 {
14803  if(info.pAllocationsChanged != VMA_NULL)
14804  {
14805  memset(info.pAllocationsChanged, 0, info.allocationCount * sizeof(VkBool32));
14806  }
14807 
14808  *pContext = vma_new(this, VmaDefragmentationContext_T)(
14809  this, m_CurrentFrameIndex.load(), info.flags, pStats);
14810 
14811  (*pContext)->AddPools(info.poolCount, info.pPools);
14812  (*pContext)->AddAllocations(
14813  info.allocationCount, info.pAllocations, info.pAllocationsChanged);
14814 
14815  VkResult res = (*pContext)->Defragment(
14816  info.maxCpuBytesToMove, info.maxCpuAllocationsToMove,
14817  info.maxGpuBytesToMove, info.maxGpuAllocationsToMove,
14818  info.commandBuffer, pStats);
14819 
14820  if(res != VK_NOT_READY)
14821  {
14822  vma_delete(this, *pContext);
14823  *pContext = VMA_NULL;
14824  }
14825 
14826  return res;
14827 }
14828 
14829 VkResult VmaAllocator_T::DefragmentationEnd(
14830  VmaDefragmentationContext context)
14831 {
14832  vma_delete(this, context);
14833  return VK_SUCCESS;
14834 }
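// Hypothetical usage sketch of the corresponding public API. allocator,
// allocations, and allocationCount are assumptions; passing VMA_NULL for
// pStats skips statistics collection.
/*
VmaDefragmentationInfo2 defragInfo = {};
defragInfo.allocationCount = allocationCount;
defragInfo.pAllocations = allocations;
defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
defragInfo.maxCpuAllocationsToMove = UINT32_MAX;
VmaDefragmentationContext defragCtx = VMA_NULL;
vmaDefragmentationBegin(allocator, &defragInfo, VMA_NULL, &defragCtx);
// ... wait for completion or submit the command buffer as needed ...
vmaDefragmentationEnd(allocator, defragCtx);
*/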
14835 
14836 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
14837 {
14838  if(hAllocation->CanBecomeLost())
14839  {
14840  /*
14841  Warning: This is a carefully designed algorithm.
14842  Do not modify unless you really know what you're doing :)
14843  */
14844  const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
14845  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
14846  for(;;)
14847  {
14848  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
14849  {
14850  pAllocationInfo->memoryType = UINT32_MAX;
14851  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
14852  pAllocationInfo->offset = 0;
14853  pAllocationInfo->size = hAllocation->GetSize();
14854  pAllocationInfo->pMappedData = VMA_NULL;
14855  pAllocationInfo->pUserData = hAllocation->GetUserData();
14856  return;
14857  }
14858  else if(localLastUseFrameIndex == localCurrFrameIndex)
14859  {
14860  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
14861  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
14862  pAllocationInfo->offset = hAllocation->GetOffset();
14863  pAllocationInfo->size = hAllocation->GetSize();
14864  pAllocationInfo->pMappedData = VMA_NULL;
14865  pAllocationInfo->pUserData = hAllocation->GetUserData();
14866  return;
14867  }
14868  else // Last use time earlier than current time.
14869  {
14870  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
14871  {
14872  localLastUseFrameIndex = localCurrFrameIndex;
14873  }
14874  }
14875  }
14876  }
14877  else
14878  {
14879 #if VMA_STATS_STRING_ENABLED
14880  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
14881  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
14882  for(;;)
14883  {
14884  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
14885  if(localLastUseFrameIndex == localCurrFrameIndex)
14886  {
14887  break;
14888  }
14889  else // Last use time earlier than current time.
14890  {
14891  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
14892  {
14893  localLastUseFrameIndex = localCurrFrameIndex;
14894  }
14895  }
14896  }
14897 #endif
14898 
14899  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
14900  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
14901  pAllocationInfo->offset = hAllocation->GetOffset();
14902  pAllocationInfo->size = hAllocation->GetSize();
14903  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
14904  pAllocationInfo->pUserData = hAllocation->GetUserData();
14905  }
14906 }
14907 
14908 bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
14909 {
14910  // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
14911  if(hAllocation->CanBecomeLost())
14912  {
14913  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
14914  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
14915  for(;;)
14916  {
14917  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
14918  {
14919  return false;
14920  }
14921  else if(localLastUseFrameIndex == localCurrFrameIndex)
14922  {
14923  return true;
14924  }
14925  else // Last use time earlier than current time.
14926  {
14927  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
14928  {
14929  localLastUseFrameIndex = localCurrFrameIndex;
14930  }
14931  }
14932  }
14933  }
14934  else
14935  {
14936 #if VMA_STATS_STRING_ENABLED
14937  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
14938  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
14939  for(;;)
14940  {
14941  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
14942  if(localLastUseFrameIndex == localCurrFrameIndex)
14943  {
14944  break;
14945  }
14946  else // Last use time earlier than current time.
14947  {
14948  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
14949  {
14950  localLastUseFrameIndex = localCurrFrameIndex;
14951  }
14952  }
14953  }
14954 #endif
14955 
14956  return true;
14957  }
14958 }
14959 
14960 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
14961 {
14962  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
14963 
14964  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
14965 
14966  if(newCreateInfo.maxBlockCount == 0)
14967  {
14968  newCreateInfo.maxBlockCount = SIZE_MAX;
14969  }
14970  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
14971  {
14972  return VK_ERROR_INITIALIZATION_FAILED;
14973  }
14974 
14975  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
14976 
14977  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
14978 
14979  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
14980  if(res != VK_SUCCESS)
14981  {
14982  vma_delete(this, *pPool);
14983  *pPool = VMA_NULL;
14984  return res;
14985  }
14986 
14987  // Add to m_Pools.
14988  {
14989  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
14990  (*pPool)->SetId(m_NextPoolId++);
14991  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
14992  }
14993 
14994  return VK_SUCCESS;
14995 }
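// Hypothetical usage sketch of the public wrapper around CreatePool above
// (allocator is an assumed VmaAllocator handle; memTypeIndex would typically
// come from vmaFindMemoryTypeIndex):
/*
VmaPoolCreateInfo poolCreateInfo = {};
poolCreateInfo.memoryTypeIndex = memTypeIndex;
poolCreateInfo.blockSize = 64ull * 1024 * 1024; // 64 MiB per block
poolCreateInfo.minBlockCount = 1;               // preallocate one block
VmaPool pool = VK_NULL_HANDLE;
VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
*/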
14996 
14997 void VmaAllocator_T::DestroyPool(VmaPool pool)
14998 {
14999  // Remove from m_Pools.
15000  {
15001  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
15002  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
15003  VMA_ASSERT(success && "Pool not found in Allocator.");
15004  }
15005 
15006  vma_delete(this, pool);
15007 }
15008 
15009 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
15010 {
15011  pool->m_BlockVector.GetPoolStats(pPoolStats);
15012 }
15013 
15014 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
15015 {
15016  m_CurrentFrameIndex.store(frameIndex);
15017 }
15018 
15019 void VmaAllocator_T::MakePoolAllocationsLost(
15020  VmaPool hPool,
15021  size_t* pLostAllocationCount)
15022 {
15023  hPool->m_BlockVector.MakePoolAllocationsLost(
15024  m_CurrentFrameIndex.load(),
15025  pLostAllocationCount);
15026 }
15027 
15028 VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
15029 {
15030  return hPool->m_BlockVector.CheckCorruption();
15031 }
15032 
15033 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
15034 {
15035  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
15036 
15037  // Process default pools.
15038  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15039  {
15040  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
15041  {
15042  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
15043  VMA_ASSERT(pBlockVector);
15044  VkResult localRes = pBlockVector->CheckCorruption();
15045  switch(localRes)
15046  {
15047  case VK_ERROR_FEATURE_NOT_PRESENT:
15048  break;
15049  case VK_SUCCESS:
15050  finalRes = VK_SUCCESS;
15051  break;
15052  default:
15053  return localRes;
15054  }
15055  }
15056  }
15057 
15058  // Process custom pools.
15059  {
15060  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
15061  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
15062  {
15063  if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
15064  {
15065  VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
15066  switch(localRes)
15067  {
15068  case VK_ERROR_FEATURE_NOT_PRESENT:
15069  break;
15070  case VK_SUCCESS:
15071  finalRes = VK_SUCCESS;
15072  break;
15073  default:
15074  return localRes;
15075  }
15076  }
15077  }
15078  }
15079 
15080  return finalRes;
15081 }
15082 
15083 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
15084 {
15085  *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST, false);
15086  (*pAllocation)->InitLost();
15087 }
15088 
15089 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
15090 {
15091  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
15092 
15093  VkResult res;
15094  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
15095  {
15096  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
15097  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
15098  {
15099  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
15100  if(res == VK_SUCCESS)
15101  {
15102  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
15103  }
15104  }
15105  else
15106  {
15107  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
15108  }
15109  }
15110  else
15111  {
15112  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
15113  }
15114 
15115  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
15116  {
15117  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
15118  }
15119 
15120  return res;
15121 }
15122 
15123 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
15124 {
15125  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
15126  {
15127  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
15128  }
15129 
15130  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
15131 
15132  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
15133  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
15134  {
15135  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
15136  m_HeapSizeLimit[heapIndex] += size;
15137  }
15138 }
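// Worked example (illustrative) of the budget bookkeeping above: with a heap
// limit of 256 MiB, allocating 100 MiB leaves m_HeapSizeLimit[heapIndex] at
// 156 MiB in AllocateVulkanMemory, and freeing the block here adds the
// 100 MiB back.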
15139 
15140 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
15141 {
15142  if(hAllocation->CanBecomeLost())
15143  {
15144  return VK_ERROR_MEMORY_MAP_FAILED;
15145  }
15146 
15147  switch(hAllocation->GetType())
15148  {
15149  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15150  {
15151  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
15152  char *pBytes = VMA_NULL;
15153  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
15154  if(res == VK_SUCCESS)
15155  {
15156  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
15157  hAllocation->BlockAllocMap();
15158  }
15159  return res;
15160  }
15161  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15162  return hAllocation->DedicatedAllocMap(this, ppData);
15163  default:
15164  VMA_ASSERT(0);
15165  return VK_ERROR_MEMORY_MAP_FAILED;
15166  }
15167 }
15168 
15169 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
15170 {
15171  switch(hAllocation->GetType())
15172  {
15173  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15174  {
15175  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
15176  hAllocation->BlockAllocUnmap();
15177  pBlock->Unmap(this, 1);
15178  }
15179  break;
15180  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15181  hAllocation->DedicatedAllocUnmap(this);
15182  break;
15183  default:
15184  VMA_ASSERT(0);
15185  }
15186 }
15187 
15188 VkResult VmaAllocator_T::BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer)
15189 {
15190  VkResult res = VK_SUCCESS;
15191  switch(hAllocation->GetType())
15192  {
15193  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15194  res = GetVulkanFunctions().vkBindBufferMemory(
15195  m_hDevice,
15196  hBuffer,
15197  hAllocation->GetMemory(),
15198  0); //memoryOffset
15199  break;
15200  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15201  {
15202  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
15203  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
15204  res = pBlock->BindBufferMemory(this, hAllocation, hBuffer);
15205  break;
15206  }
15207  default:
15208  VMA_ASSERT(0);
15209  }
15210  return res;
15211 }
15212 
15213 VkResult VmaAllocator_T::BindImageMemory(VmaAllocation hAllocation, VkImage hImage)
15214 {
15215  VkResult res = VK_SUCCESS;
15216  switch(hAllocation->GetType())
15217  {
15218  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15219  res = GetVulkanFunctions().vkBindImageMemory(
15220  m_hDevice,
15221  hImage,
15222  hAllocation->GetMemory(),
15223  0); //memoryOffset
15224  break;
15225  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15226  {
15227  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
15228  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
15229  res = pBlock->BindImageMemory(this, hAllocation, hImage);
15230  break;
15231  }
15232  default:
15233  VMA_ASSERT(0);
15234  }
15235  return res;
15236 }
15237 
15238 void VmaAllocator_T::FlushOrInvalidateAllocation(
15239  VmaAllocation hAllocation,
15240  VkDeviceSize offset, VkDeviceSize size,
15241  VMA_CACHE_OPERATION op)
15242 {
15243  const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex();
15244  if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
15245  {
15246  const VkDeviceSize allocationSize = hAllocation->GetSize();
15247  VMA_ASSERT(offset <= allocationSize);
15248 
15249  const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
15250 
15251  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
15252  memRange.memory = hAllocation->GetMemory();
15253 
15254  switch(hAllocation->GetType())
15255  {
15256  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15257  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
15258  if(size == VK_WHOLE_SIZE)
15259  {
15260  memRange.size = allocationSize - memRange.offset;
15261  }
15262  else
15263  {
15264  VMA_ASSERT(offset + size <= allocationSize);
15265  memRange.size = VMA_MIN(
15266  VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize),
15267  allocationSize - memRange.offset);
15268  }
15269  break;
15270 
15271  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15272  {
15273  // 1. Compute the range relative to this allocation.
15274  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
15275  if(size == VK_WHOLE_SIZE)
15276  {
15277  size = allocationSize - offset;
15278  }
15279  else
15280  {
15281  VMA_ASSERT(offset + size <= allocationSize);
15282  }
15283  memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize);
15284 
15285  // 2. Adjust to whole block.
15286  const VkDeviceSize allocationOffset = hAllocation->GetOffset();
15287  VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
15288  const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();
15289  memRange.offset += allocationOffset;
15290  memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);
15291 
15292  break;
15293  }
15294 
15295  default:
15296  VMA_ASSERT(0);
15297  }
15298 
15299  switch(op)
15300  {
15301  case VMA_CACHE_FLUSH:
15302  (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
15303  break;
15304  case VMA_CACHE_INVALIDATE:
15305  (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
15306  break;
15307  default:
15308  VMA_ASSERT(0);
15309  }
15310  }
15311  // else: Just ignore this call.
15312 }
15313 
15314 void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
15315 {
15316  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
15317 
15318  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
15319  {
15320  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
15321  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
15322  VMA_ASSERT(pDedicatedAllocations);
15323  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
15324  VMA_ASSERT(success);
15325  }
15326 
15327  VkDeviceMemory hMemory = allocation->GetMemory();
15328 
15329  /*
15330  There is no need to call this, because the Vulkan spec allows skipping vkUnmapMemory
15331  before vkFreeMemory.
15332 
15333  if(allocation->GetMappedData() != VMA_NULL)
15334  {
15335  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
15336  }
15337  */
15338 
15339  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
15340 
15341  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
15342 }
15343 
15344 void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
15345 {
15346  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
15347  !hAllocation->CanBecomeLost() &&
15348  (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
15349  {
15350  void* pData = VMA_NULL;
15351  VkResult res = Map(hAllocation, &pData);
15352  if(res == VK_SUCCESS)
15353  {
15354  memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
15355  FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
15356  Unmap(hAllocation);
15357  }
15358  else
15359  {
15360  VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
15361  }
15362  }
15363 }
15364 
15365 #if VMA_STATS_STRING_ENABLED
15366 
15367 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
15368 {
15369  bool dedicatedAllocationsStarted = false;
15370  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15371  {
15372  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
15373  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
15374  VMA_ASSERT(pDedicatedAllocVector);
15375  if(pDedicatedAllocVector->empty() == false)
15376  {
15377  if(dedicatedAllocationsStarted == false)
15378  {
15379  dedicatedAllocationsStarted = true;
15380  json.WriteString("DedicatedAllocations");
15381  json.BeginObject();
15382  }
15383 
15384  json.BeginString("Type ");
15385  json.ContinueString(memTypeIndex);
15386  json.EndString();
15387 
15388  json.BeginArray();
15389 
15390  for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
15391  {
15392  json.BeginObject(true);
15393  const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
15394  hAlloc->PrintParameters(json);
15395  json.EndObject();
15396  }
15397 
15398  json.EndArray();
15399  }
15400  }
15401  if(dedicatedAllocationsStarted)
15402  {
15403  json.EndObject();
15404  }
15405 
15406  {
15407  bool allocationsStarted = false;
15408  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15409  {
15410  if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
15411  {
15412  if(allocationsStarted == false)
15413  {
15414  allocationsStarted = true;
15415  json.WriteString("DefaultPools");
15416  json.BeginObject();
15417  }
15418 
15419  json.BeginString("Type ");
15420  json.ContinueString(memTypeIndex);
15421  json.EndString();
15422 
15423  m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
15424  }
15425  }
15426  if(allocationsStarted)
15427  {
15428  json.EndObject();
15429  }
15430  }
15431 
15432  // Custom pools
15433  {
15434  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
15435  const size_t poolCount = m_Pools.size();
15436  if(poolCount > 0)
15437  {
15438  json.WriteString("Pools");
15439  json.BeginObject();
15440  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
15441  {
15442  json.BeginString();
15443  json.ContinueString(m_Pools[poolIndex]->GetId());
15444  json.EndString();
15445 
15446  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
15447  }
15448  json.EndObject();
15449  }
15450  }
15451 }
15452 
15453 #endif // #if VMA_STATS_STRING_ENABLED
15454 
15455 ////////////////////////////////////////////////////////////////////////////////
15456 // Public interface
15457 
15458 VkResult vmaCreateAllocator(
15459  const VmaAllocatorCreateInfo* pCreateInfo,
15460  VmaAllocator* pAllocator)
15461 {
15462  VMA_ASSERT(pCreateInfo && pAllocator);
15463  VMA_DEBUG_LOG("vmaCreateAllocator");
15464  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
15465  return (*pAllocator)->Init(pCreateInfo);
15466 }
15467 
15468 void vmaDestroyAllocator(
15469  VmaAllocator allocator)
15470 {
15471  if(allocator != VK_NULL_HANDLE)
15472  {
15473  VMA_DEBUG_LOG("vmaDestroyAllocator");
15474  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
15475  vma_delete(&allocationCallbacks, allocator);
15476  }
15477 }
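
// Usage sketch (illustration, not part of the original source): create and
// destroy an allocator. Assumes `physicalDevice` and `device` were obtained
// from the application's normal Vulkan initialization.
VmaAllocatorCreateInfo allocatorInfo = {};
allocatorInfo.physicalDevice = physicalDevice;
allocatorInfo.device = device;

VmaAllocator allocator = VK_NULL_HANDLE;
if(vmaCreateAllocator(&allocatorInfo, &allocator) == VK_SUCCESS)
{
    // ... create buffers, images and allocations here ...
    vmaDestroyAllocator(allocator);
}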
15478 
15479 void vmaGetPhysicalDeviceProperties(
15480  VmaAllocator allocator,
15481  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
15482 {
15483  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
15484  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
15485 }
15486 
15487 void vmaGetMemoryProperties(
15488  VmaAllocator allocator,
15489  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
15490 {
15491  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
15492  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
15493 }
15494 
15495 void vmaGetMemoryTypeProperties(
15496  VmaAllocator allocator,
15497  uint32_t memoryTypeIndex,
15498  VkMemoryPropertyFlags* pFlags)
15499 {
15500  VMA_ASSERT(allocator && pFlags);
15501  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
15502  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
15503 }
15504 
15505 void vmaSetCurrentFrameIndex(
15506  VmaAllocator allocator,
15507  uint32_t frameIndex)
15508 {
15509  VMA_ASSERT(allocator);
15510  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
15511 
15512  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15513 
15514  allocator->SetCurrentFrameIndex(frameIndex);
15515 }
15516 
15517 void vmaCalculateStats(
15518  VmaAllocator allocator,
15519  VmaStats* pStats)
15520 {
15521  VMA_ASSERT(allocator && pStats);
15522  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15523  allocator->CalculateStats(pStats);
15524 }
15525 
15526 #if VMA_STATS_STRING_ENABLED
15527 
15528 void vmaBuildStatsString(
15529  VmaAllocator allocator,
15530  char** ppStatsString,
15531  VkBool32 detailedMap)
15532 {
15533  VMA_ASSERT(allocator && ppStatsString);
15534  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15535 
15536  VmaStringBuilder sb(allocator);
15537  {
15538  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
15539  json.BeginObject();
15540 
15541  VmaStats stats;
15542  allocator->CalculateStats(&stats);
15543 
15544  json.WriteString("Total");
15545  VmaPrintStatInfo(json, stats.total);
15546 
15547  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
15548  {
15549  json.BeginString("Heap ");
15550  json.ContinueString(heapIndex);
15551  json.EndString();
15552  json.BeginObject();
15553 
15554  json.WriteString("Size");
15555  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
15556 
15557  json.WriteString("Flags");
15558  json.BeginArray(true);
15559  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
15560  {
15561  json.WriteString("DEVICE_LOCAL");
15562  }
15563  json.EndArray();
15564 
15565  if(stats.memoryHeap[heapIndex].blockCount > 0)
15566  {
15567  json.WriteString("Stats");
15568  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
15569  }
15570 
15571  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
15572  {
15573  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
15574  {
15575  json.BeginString("Type ");
15576  json.ContinueString(typeIndex);
15577  json.EndString();
15578 
15579  json.BeginObject();
15580 
15581  json.WriteString("Flags");
15582  json.BeginArray(true);
15583  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
15584  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
15585  {
15586  json.WriteString("DEVICE_LOCAL");
15587  }
15588  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
15589  {
15590  json.WriteString("HOST_VISIBLE");
15591  }
15592  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
15593  {
15594  json.WriteString("HOST_COHERENT");
15595  }
15596  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
15597  {
15598  json.WriteString("HOST_CACHED");
15599  }
15600  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
15601  {
15602  json.WriteString("LAZILY_ALLOCATED");
15603  }
15604  json.EndArray();
15605 
15606  if(stats.memoryType[typeIndex].blockCount > 0)
15607  {
15608  json.WriteString("Stats");
15609  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
15610  }
15611 
15612  json.EndObject();
15613  }
15614  }
15615 
15616  json.EndObject();
15617  }
15618  if(detailedMap == VK_TRUE)
15619  {
15620  allocator->PrintDetailedMap(json);
15621  }
15622 
15623  json.EndObject();
15624  }
15625 
15626  const size_t len = sb.GetLength();
15627  char* const pChars = vma_new_array(allocator, char, len + 1);
15628  if(len > 0)
15629  {
15630  memcpy(pChars, sb.GetData(), len);
15631  }
15632  pChars[len] = '\0';
15633  *ppStatsString = pChars;
15634 }
15635 
15636 void vmaFreeStatsString(
15637  VmaAllocator allocator,
15638  char* pStatsString)
15639 {
15640  if(pStatsString != VMA_NULL)
15641  {
15642  VMA_ASSERT(allocator);
15643  size_t len = strlen(pStatsString);
15644  vma_delete_array(allocator, pStatsString, len + 1);
15645  }
15646 }
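
// Usage sketch (illustration, not part of the original source): build, print
// and free a statistics string. Assumes a valid `allocator`; only available
// when VMA_STATS_STRING_ENABLED is nonzero.
char* statsString = VMA_NULL;
vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE = include detailed map.
printf("%s\n", statsString);
vmaFreeStatsString(allocator, statsString);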
15647 
15648 #endif // #if VMA_STATS_STRING_ENABLED
15649 
15650 /*
15651 This function is not protected by any mutex because it just reads immutable data.
15652 */
15653 VkResult vmaFindMemoryTypeIndex(
15654  VmaAllocator allocator,
15655  uint32_t memoryTypeBits,
15656  const VmaAllocationCreateInfo* pAllocationCreateInfo,
15657  uint32_t* pMemoryTypeIndex)
15658 {
15659  VMA_ASSERT(allocator != VK_NULL_HANDLE);
15660  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
15661  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
15662 
15663  if(pAllocationCreateInfo->memoryTypeBits != 0)
15664  {
15665  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
15666  }
15667 
15668  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
15669  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
15670 
15671  const bool mapped = (pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
15672  if(mapped)
15673  {
15674  preferredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
15675  }
15676 
15677  // Convert usage to requiredFlags and preferredFlags.
15678  switch(pAllocationCreateInfo->usage)
15679  {
15680  case VMA_MEMORY_USAGE_UNKNOWN:
15681  break;
15682  case VMA_MEMORY_USAGE_GPU_ONLY:
15683  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
15684  {
15685  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
15686  }
15687  break;
15688  case VMA_MEMORY_USAGE_CPU_ONLY:
15689  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
15690  break;
15691  case VMA_MEMORY_USAGE_CPU_TO_GPU:
15692  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
15693  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
15694  {
15695  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
15696  }
15697  break;
15698  case VMA_MEMORY_USAGE_GPU_TO_CPU:
15699  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
15700  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
15701  break;
15702  default:
15703  break;
15704  }
15705 
15706  *pMemoryTypeIndex = UINT32_MAX;
15707  uint32_t minCost = UINT32_MAX;
15708  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
15709  memTypeIndex < allocator->GetMemoryTypeCount();
15710  ++memTypeIndex, memTypeBit <<= 1)
15711  {
15712  // This memory type is acceptable according to memoryTypeBits bitmask.
15713  if((memTypeBit & memoryTypeBits) != 0)
15714  {
15715  const VkMemoryPropertyFlags currFlags =
15716  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
15717  // This memory type contains requiredFlags.
15718  if((requiredFlags & ~currFlags) == 0)
15719  {
15720  // Calculate cost as number of bits from preferredFlags not present in this memory type.
15721  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
15722  // Remember memory type with lowest cost.
15723  if(currCost < minCost)
15724  {
15725  *pMemoryTypeIndex = memTypeIndex;
15726  if(currCost == 0)
15727  {
15728  return VK_SUCCESS;
15729  }
15730  minCost = currCost;
15731  }
15732  }
15733  }
15734  }
15735  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
15736 }
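
// Usage sketch (illustration, not part of the original source): choose a
// memory type for a CPU-side staging buffer. Passing UINT32_MAX as
// memoryTypeBits accepts every type; normally the mask comes from
// VkMemoryRequirements::memoryTypeBits.
VmaAllocationCreateInfo stagingCreateInfo = {};
stagingCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;

uint32_t memTypeIndex = UINT32_MAX;
VkResult res = vmaFindMemoryTypeIndex(allocator, UINT32_MAX, &stagingCreateInfo, &memTypeIndex);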
15737 
15738 VkResult vmaFindMemoryTypeIndexForBufferInfo(
15739  VmaAllocator allocator,
15740  const VkBufferCreateInfo* pBufferCreateInfo,
15741  const VmaAllocationCreateInfo* pAllocationCreateInfo,
15742  uint32_t* pMemoryTypeIndex)
15743 {
15744  VMA_ASSERT(allocator != VK_NULL_HANDLE);
15745  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
15746  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
15747  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
15748 
15749  const VkDevice hDev = allocator->m_hDevice;
15750  VkBuffer hBuffer = VK_NULL_HANDLE;
15751  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
15752  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
15753  if(res == VK_SUCCESS)
15754  {
15755  VkMemoryRequirements memReq = {};
15756  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
15757  hDev, hBuffer, &memReq);
15758 
15759  res = vmaFindMemoryTypeIndex(
15760  allocator,
15761  memReq.memoryTypeBits,
15762  pAllocationCreateInfo,
15763  pMemoryTypeIndex);
15764 
15765  allocator->GetVulkanFunctions().vkDestroyBuffer(
15766  hDev, hBuffer, allocator->GetAllocationCallbacks());
15767  }
15768  return res;
15769 }
15770 
15771 VkResult vmaFindMemoryTypeIndexForImageInfo(
15772  VmaAllocator allocator,
15773  const VkImageCreateInfo* pImageCreateInfo,
15774  const VmaAllocationCreateInfo* pAllocationCreateInfo,
15775  uint32_t* pMemoryTypeIndex)
15776 {
15777  VMA_ASSERT(allocator != VK_NULL_HANDLE);
15778  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
15779  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
15780  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
15781 
15782  const VkDevice hDev = allocator->m_hDevice;
15783  VkImage hImage = VK_NULL_HANDLE;
15784  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
15785  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
15786  if(res == VK_SUCCESS)
15787  {
15788  VkMemoryRequirements memReq = {};
15789  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
15790  hDev, hImage, &memReq);
15791 
15792  res = vmaFindMemoryTypeIndex(
15793  allocator,
15794  memReq.memoryTypeBits,
15795  pAllocationCreateInfo,
15796  pMemoryTypeIndex);
15797 
15798  allocator->GetVulkanFunctions().vkDestroyImage(
15799  hDev, hImage, allocator->GetAllocationCallbacks());
15800  }
15801  return res;
15802 }
15803 
15804 VkResult vmaCreatePool(
15805  VmaAllocator allocator,
15806  const VmaPoolCreateInfo* pCreateInfo,
15807  VmaPool* pPool)
15808 {
15809  VMA_ASSERT(allocator && pCreateInfo && pPool);
15810 
15811  VMA_DEBUG_LOG("vmaCreatePool");
15812 
15813  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15814 
15815  VkResult res = allocator->CreatePool(pCreateInfo, pPool);
15816 
15817 #if VMA_RECORDING_ENABLED
15818  if(allocator->GetRecorder() != VMA_NULL)
15819  {
15820  allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
15821  }
15822 #endif
15823 
15824  return res;
15825 }
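
// Usage sketch (illustration, not part of the original source): create a
// custom pool over a memory type chosen with vmaFindMemoryTypeIndex().
// The field values are arbitrary example numbers.
VmaPoolCreateInfo poolCreateInfo = {};
poolCreateInfo.memoryTypeIndex = memTypeIndex;
poolCreateInfo.blockSize = 64ull * 1024 * 1024; // 64 MiB per block.
poolCreateInfo.minBlockCount = 1;

VmaPool pool = VK_NULL_HANDLE;
VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
// ... later: vmaDestroyPool(allocator, pool);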
15826 
15827 void vmaDestroyPool(
15828  VmaAllocator allocator,
15829  VmaPool pool)
15830 {
15831  VMA_ASSERT(allocator);
15832 
15833  if(pool == VK_NULL_HANDLE)
15834  {
15835  return;
15836  }
15837 
15838  VMA_DEBUG_LOG("vmaDestroyPool");
15839 
15840  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15841 
15842 #if VMA_RECORDING_ENABLED
15843  if(allocator->GetRecorder() != VMA_NULL)
15844  {
15845  allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
15846  }
15847 #endif
15848 
15849  allocator->DestroyPool(pool);
15850 }
15851 
15852 void vmaGetPoolStats(
15853  VmaAllocator allocator,
15854  VmaPool pool,
15855  VmaPoolStats* pPoolStats)
15856 {
15857  VMA_ASSERT(allocator && pool && pPoolStats);
15858 
15859  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15860 
15861  allocator->GetPoolStats(pool, pPoolStats);
15862 }
15863 
15864 void vmaMakePoolAllocationsLost(
15866  VmaAllocator allocator,
15866  VmaPool pool,
15867  size_t* pLostAllocationCount)
15868 {
15869  VMA_ASSERT(allocator && pool);
15870 
15871  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15872 
15873 #if VMA_RECORDING_ENABLED
15874  if(allocator->GetRecorder() != VMA_NULL)
15875  {
15876  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
15877  }
15878 #endif
15879 
15880  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
15881 }
15882 
15883 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
15884 {
15885  VMA_ASSERT(allocator && pool);
15886 
15887  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15888 
15889  VMA_DEBUG_LOG("vmaCheckPoolCorruption");
15890 
15891  return allocator->CheckPoolCorruption(pool);
15892 }
15893 
15894 VkResult vmaAllocateMemory(
15895  VmaAllocator allocator,
15896  const VkMemoryRequirements* pVkMemoryRequirements,
15897  const VmaAllocationCreateInfo* pCreateInfo,
15898  VmaAllocation* pAllocation,
15899  VmaAllocationInfo* pAllocationInfo)
15900 {
15901  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
15902 
15903  VMA_DEBUG_LOG("vmaAllocateMemory");
15904 
15905  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15906 
15907  VkResult result = allocator->AllocateMemory(
15908  *pVkMemoryRequirements,
15909  false, // requiresDedicatedAllocation
15910  false, // prefersDedicatedAllocation
15911  VK_NULL_HANDLE, // dedicatedBuffer
15912  VK_NULL_HANDLE, // dedicatedImage
15913  *pCreateInfo,
15914  VMA_SUBALLOCATION_TYPE_UNKNOWN,
15915  1, // allocationCount
15916  pAllocation);
15917 
15918 #if VMA_RECORDING_ENABLED
15919  if(allocator->GetRecorder() != VMA_NULL)
15920  {
15921  allocator->GetRecorder()->RecordAllocateMemory(
15922  allocator->GetCurrentFrameIndex(),
15923  *pVkMemoryRequirements,
15924  *pCreateInfo,
15925  *pAllocation);
15926  }
15927 #endif
15928 
15929  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
15930  {
15931  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
15932  }
15933 
15934  return result;
15935 }
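
// Usage sketch (illustration, not part of the original source): allocate
// memory for requirements queried from an existing buffer created elsewhere.
VkMemoryRequirements memReq = {};
vkGetBufferMemoryRequirements(device, buffer, &memReq);

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VmaAllocation allocation = VK_NULL_HANDLE;
VmaAllocationInfo allocInfo = {};
VkResult res = vmaAllocateMemory(allocator, &memReq, &allocCreateInfo, &allocation, &allocInfo);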
15936 
15937 VkResult vmaAllocateMemoryPages(
15938  VmaAllocator allocator,
15939  const VkMemoryRequirements* pVkMemoryRequirements,
15940  const VmaAllocationCreateInfo* pCreateInfo,
15941  size_t allocationCount,
15942  VmaAllocation* pAllocations,
15943  VmaAllocationInfo* pAllocationInfo)
15944 {
15945  if(allocationCount == 0)
15946  {
15947  return VK_SUCCESS;
15948  }
15949 
15950  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocations);
15951 
15952  VMA_DEBUG_LOG("vmaAllocateMemoryPages");
15953 
15954  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15955 
15956  VkResult result = allocator->AllocateMemory(
15957  *pVkMemoryRequirements,
15958  false, // requiresDedicatedAllocation
15959  false, // prefersDedicatedAllocation
15960  VK_NULL_HANDLE, // dedicatedBuffer
15961  VK_NULL_HANDLE, // dedicatedImage
15962  *pCreateInfo,
15963  VMA_SUBALLOCATION_TYPE_UNKNOWN,
15964  allocationCount,
15965  pAllocations);
15966 
15967 #if VMA_RECORDING_ENABLED
15968  if(allocator->GetRecorder() != VMA_NULL)
15969  {
15970  allocator->GetRecorder()->RecordAllocateMemoryPages(
15971  allocator->GetCurrentFrameIndex(),
15972  *pVkMemoryRequirements,
15973  *pCreateInfo,
15974  (uint64_t)allocationCount,
15975  pAllocations);
15976  }
15977 #endif
15978 
15979  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
15980  {
15981  for(size_t i = 0; i < allocationCount; ++i)
15982  {
15983  allocator->GetAllocationInfo(pAllocations[i], pAllocationInfo + i);
15984  }
15985  }
15986 
15987  return result;
15988 }
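
// Usage sketch (illustration, not part of the original source): allocate
// several identically-sized "pages" in one call, reusing `memReq` and
// `allocCreateInfo` from the previous sketch.
VmaAllocation pageAllocs[8] = {};
VmaAllocationInfo pageInfos[8] = {};
VkResult res = vmaAllocateMemoryPages(allocator, &memReq, &allocCreateInfo, 8, pageAllocs, pageInfos);
// ... later: vmaFreeMemoryPages(allocator, 8, pageAllocs);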
15989 
15990 VkResult vmaAllocateMemoryForBuffer(
15991  VmaAllocator allocator,
15992  VkBuffer buffer,
15993  const VmaAllocationCreateInfo* pCreateInfo,
15994  VmaAllocation* pAllocation,
15995  VmaAllocationInfo* pAllocationInfo)
15996 {
15997  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
15998 
15999  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
16000 
16001  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16002 
16003  VkMemoryRequirements vkMemReq = {};
16004  bool requiresDedicatedAllocation = false;
16005  bool prefersDedicatedAllocation = false;
16006  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
16007  requiresDedicatedAllocation,
16008  prefersDedicatedAllocation);
16009 
16010  VkResult result = allocator->AllocateMemory(
16011  vkMemReq,
16012  requiresDedicatedAllocation,
16013  prefersDedicatedAllocation,
16014  buffer, // dedicatedBuffer
16015  VK_NULL_HANDLE, // dedicatedImage
16016  *pCreateInfo,
16017  VMA_SUBALLOCATION_TYPE_BUFFER,
16018  1, // allocationCount
16019  pAllocation);
16020 
16021 #if VMA_RECORDING_ENABLED
16022  if(allocator->GetRecorder() != VMA_NULL)
16023  {
16024  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
16025  allocator->GetCurrentFrameIndex(),
16026  vkMemReq,
16027  requiresDedicatedAllocation,
16028  prefersDedicatedAllocation,
16029  *pCreateInfo,
16030  *pAllocation);
16031  }
16032 #endif
16033 
16034  if(pAllocationInfo && result == VK_SUCCESS)
16035  {
16036  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
16037  }
16038 
16039  return result;
16040 }
16041 
16042 VkResult vmaAllocateMemoryForImage(
16043  VmaAllocator allocator,
16044  VkImage image,
16045  const VmaAllocationCreateInfo* pCreateInfo,
16046  VmaAllocation* pAllocation,
16047  VmaAllocationInfo* pAllocationInfo)
16048 {
16049  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
16050 
16051  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
16052 
16053  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16054 
16055  VkMemoryRequirements vkMemReq = {};
16056  bool requiresDedicatedAllocation = false;
16057  bool prefersDedicatedAllocation = false;
16058  allocator->GetImageMemoryRequirements(image, vkMemReq,
16059  requiresDedicatedAllocation, prefersDedicatedAllocation);
16060 
16061  VkResult result = allocator->AllocateMemory(
16062  vkMemReq,
16063  requiresDedicatedAllocation,
16064  prefersDedicatedAllocation,
16065  VK_NULL_HANDLE, // dedicatedBuffer
16066  image, // dedicatedImage
16067  *pCreateInfo,
16068  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
16069  1, // allocationCount
16070  pAllocation);
16071 
16072 #if VMA_RECORDING_ENABLED
16073  if(allocator->GetRecorder() != VMA_NULL)
16074  {
16075  allocator->GetRecorder()->RecordAllocateMemoryForImage(
16076  allocator->GetCurrentFrameIndex(),
16077  vkMemReq,
16078  requiresDedicatedAllocation,
16079  prefersDedicatedAllocation,
16080  *pCreateInfo,
16081  *pAllocation);
16082  }
16083 #endif
16084 
16085  if(pAllocationInfo && result == VK_SUCCESS)
16086  {
16087  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
16088  }
16089 
16090  return result;
16091 }
16092 
16093 void vmaFreeMemory(
16094  VmaAllocator allocator,
16095  VmaAllocation allocation)
16096 {
16097  VMA_ASSERT(allocator);
16098 
16099  if(allocation == VK_NULL_HANDLE)
16100  {
16101  return;
16102  }
16103 
16104  VMA_DEBUG_LOG("vmaFreeMemory");
16105 
16106  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16107 
16108 #if VMA_RECORDING_ENABLED
16109  if(allocator->GetRecorder() != VMA_NULL)
16110  {
16111  allocator->GetRecorder()->RecordFreeMemory(
16112  allocator->GetCurrentFrameIndex(),
16113  allocation);
16114  }
16115 #endif
16116 
16117  allocator->FreeMemory(
16118  1, // allocationCount
16119  &allocation);
16120 }
16121 
16122 void vmaFreeMemoryPages(
16123  VmaAllocator allocator,
16124  size_t allocationCount,
16125  VmaAllocation* pAllocations)
16126 {
16127  if(allocationCount == 0)
16128  {
16129  return;
16130  }
16131 
16132  VMA_ASSERT(allocator);
16133 
16134  VMA_DEBUG_LOG("vmaFreeMemoryPages");
16135 
16136  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16137 
16138 #if VMA_RECORDING_ENABLED
16139  if(allocator->GetRecorder() != VMA_NULL)
16140  {
16141  allocator->GetRecorder()->RecordFreeMemoryPages(
16142  allocator->GetCurrentFrameIndex(),
16143  (uint64_t)allocationCount,
16144  pAllocations);
16145  }
16146 #endif
16147 
16148  allocator->FreeMemory(allocationCount, pAllocations);
16149 }
16150 
16151 VkResult vmaResizeAllocation(
16152  VmaAllocator allocator,
16153  VmaAllocation allocation,
16154  VkDeviceSize newSize)
16155 {
16156  VMA_ASSERT(allocator && allocation);
16157 
16158  VMA_DEBUG_LOG("vmaResizeAllocation");
16159 
16160  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16161 
16162 #if VMA_RECORDING_ENABLED
16163  if(allocator->GetRecorder() != VMA_NULL)
16164  {
16165  allocator->GetRecorder()->RecordResizeAllocation(
16166  allocator->GetCurrentFrameIndex(),
16167  allocation,
16168  newSize);
16169  }
16170 #endif
16171 
16172  return allocator->ResizeAllocation(allocation, newSize);
16173 }
16174 
16175 void vmaGetAllocationInfo(
16176  VmaAllocator allocator,
16177  VmaAllocation allocation,
16178  VmaAllocationInfo* pAllocationInfo)
16179 {
16180  VMA_ASSERT(allocator && allocation && pAllocationInfo);
16181 
16182  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16183 
16184 #if VMA_RECORDING_ENABLED
16185  if(allocator->GetRecorder() != VMA_NULL)
16186  {
16187  allocator->GetRecorder()->RecordGetAllocationInfo(
16188  allocator->GetCurrentFrameIndex(),
16189  allocation);
16190  }
16191 #endif
16192 
16193  allocator->GetAllocationInfo(allocation, pAllocationInfo);
16194 }
16195 
16196 VkBool32 vmaTouchAllocation(
16197  VmaAllocator allocator,
16198  VmaAllocation allocation)
16199 {
16200  VMA_ASSERT(allocator && allocation);
16201 
16202  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16203 
16204 #if VMA_RECORDING_ENABLED
16205  if(allocator->GetRecorder() != VMA_NULL)
16206  {
16207  allocator->GetRecorder()->RecordTouchAllocation(
16208  allocator->GetCurrentFrameIndex(),
16209  allocation);
16210  }
16211 #endif
16212 
16213  return allocator->TouchAllocation(allocation);
16214 }
16215 
16216 void vmaSetAllocationUserData(
16217  VmaAllocator allocator,
16218  VmaAllocation allocation,
16219  void* pUserData)
16220 {
16221  VMA_ASSERT(allocator && allocation);
16222 
16223  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16224 
16225  allocation->SetUserData(allocator, pUserData);
16226 
16227 #if VMA_RECORDING_ENABLED
16228  if(allocator->GetRecorder() != VMA_NULL)
16229  {
16230  allocator->GetRecorder()->RecordSetAllocationUserData(
16231  allocator->GetCurrentFrameIndex(),
16232  allocation,
16233  pUserData);
16234  }
16235 #endif
16236 }
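
// Usage sketch (illustration, not part of the original source): attach a
// name to an allocation and read it back through VmaAllocationInfo.
static const char* const kAllocName = "TerrainVertexBuffer";
vmaSetAllocationUserData(allocator, allocation, (void*)kAllocName);

VmaAllocationInfo allocInfo = {};
vmaGetAllocationInfo(allocator, allocation, &allocInfo);
const char* name = (const char*)allocInfo.pUserData; // == kAllocName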
16237 
16238 void vmaCreateLostAllocation(
16239  VmaAllocator allocator,
16240  VmaAllocation* pAllocation)
16241 {
16242  VMA_ASSERT(allocator && pAllocation);
16243 
16244  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
16245 
16246  allocator->CreateLostAllocation(pAllocation);
16247 
16248 #if VMA_RECORDING_ENABLED
16249  if(allocator->GetRecorder() != VMA_NULL)
16250  {
16251  allocator->GetRecorder()->RecordCreateLostAllocation(
16252  allocator->GetCurrentFrameIndex(),
16253  *pAllocation);
16254  }
16255 #endif
16256 }
16257 
16258 VkResult vmaMapMemory(
16259  VmaAllocator allocator,
16260  VmaAllocation allocation,
16261  void** ppData)
16262 {
16263  VMA_ASSERT(allocator && allocation && ppData);
16264 
16265  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16266 
16267  VkResult res = allocator->Map(allocation, ppData);
16268 
16269 #if VMA_RECORDING_ENABLED
16270  if(allocator->GetRecorder() != VMA_NULL)
16271  {
16272  allocator->GetRecorder()->RecordMapMemory(
16273  allocator->GetCurrentFrameIndex(),
16274  allocation);
16275  }
16276 #endif
16277 
16278  return res;
16279 }
16280 
16281 void vmaUnmapMemory(
16282  VmaAllocator allocator,
16283  VmaAllocation allocation)
16284 {
16285  VMA_ASSERT(allocator && allocation);
16286 
16287  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16288 
16289 #if VMA_RECORDING_ENABLED
16290  if(allocator->GetRecorder() != VMA_NULL)
16291  {
16292  allocator->GetRecorder()->RecordUnmapMemory(
16293  allocator->GetCurrentFrameIndex(),
16294  allocation);
16295  }
16296 #endif
16297 
16298  allocator->Unmap(allocation);
16299 }
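
// Usage sketch (illustration, not part of the original source): copy data
// into a host-visible allocation. The flush is skipped internally on
// HOST_COHERENT memory but is required for non-coherent memory types.
void* mappedData = VMA_NULL;
if(vmaMapMemory(allocator, allocation, &mappedData) == VK_SUCCESS)
{
    memcpy(mappedData, srcData, (size_t)srcSize); // srcData/srcSize: application data.
    vmaFlushAllocation(allocator, allocation, 0, VK_WHOLE_SIZE);
    vmaUnmapMemory(allocator, allocation);
}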
16300 
16301 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
16302 {
16303  VMA_ASSERT(allocator && allocation);
16304 
16305  VMA_DEBUG_LOG("vmaFlushAllocation");
16306 
16307  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16308 
16309  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
16310 
16311 #if VMA_RECORDING_ENABLED
16312  if(allocator->GetRecorder() != VMA_NULL)
16313  {
16314  allocator->GetRecorder()->RecordFlushAllocation(
16315  allocator->GetCurrentFrameIndex(),
16316  allocation, offset, size);
16317  }
16318 #endif
16319 }
16320 
16321 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
16322 {
16323  VMA_ASSERT(allocator && allocation);
16324 
16325  VMA_DEBUG_LOG("vmaInvalidateAllocation");
16326 
16327  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16328 
16329  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
16330 
16331 #if VMA_RECORDING_ENABLED
16332  if(allocator->GetRecorder() != VMA_NULL)
16333  {
16334  allocator->GetRecorder()->RecordInvalidateAllocation(
16335  allocator->GetCurrentFrameIndex(),
16336  allocation, offset, size);
16337  }
16338 #endif
16339 }
16340 
16341 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
16342 {
16343  VMA_ASSERT(allocator);
16344 
16345  VMA_DEBUG_LOG("vmaCheckCorruption");
16346 
16347  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16348 
16349  return allocator->CheckCorruption(memoryTypeBits);
16350 }
16351 
16352 VkResult vmaDefragment(
16353  VmaAllocator allocator,
16354  VmaAllocation* pAllocations,
16355  size_t allocationCount,
16356  VkBool32* pAllocationsChanged,
16357  const VmaDefragmentationInfo *pDefragmentationInfo,
16358  VmaDefragmentationStats* pDefragmentationStats)
16359 {
16360  // Deprecated interface, reimplemented using the new one.
16361 
16362  VmaDefragmentationInfo2 info2 = {};
16363  info2.allocationCount = (uint32_t)allocationCount;
16364  info2.pAllocations = pAllocations;
16365  info2.pAllocationsChanged = pAllocationsChanged;
16366  if(pDefragmentationInfo != VMA_NULL)
16367  {
16368  info2.maxCpuAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
16369  info2.maxCpuBytesToMove = pDefragmentationInfo->maxBytesToMove;
16370  }
16371  else
16372  {
16373  info2.maxCpuAllocationsToMove = UINT32_MAX;
16374  info2.maxCpuBytesToMove = VK_WHOLE_SIZE;
16375  }
16376  // info2.flags, maxGpuAllocationsToMove, maxGpuBytesToMove, commandBuffer deliberately left zero.
16377 
16378  VmaDefragmentationContext ctx = VK_NULL_HANDLE;
16379  VkResult res = vmaDefragmentationBegin(allocator, &info2, pDefragmentationStats, &ctx);
16380  if(res == VK_NOT_READY)
16381  {
16382  res = vmaDefragmentationEnd(allocator, ctx);
16383  }
16384  return res;
16385 }
16386 
16387 VkResult vmaDefragmentationBegin(
16388  VmaAllocator allocator,
16389  const VmaDefragmentationInfo2* pInfo,
16390  VmaDefragmentationStats* pStats,
16391  VmaDefragmentationContext *pContext)
16392 {
16393  VMA_ASSERT(allocator && pInfo && pContext);
16394 
16395  // Degenerate case: Nothing to defragment.
16396  if(pInfo->allocationCount == 0 && pInfo->poolCount == 0)
16397  {
16398  return VK_SUCCESS;
16399  }
16400 
16401  VMA_ASSERT(pInfo->allocationCount == 0 || pInfo->pAllocations != VMA_NULL);
16402  VMA_ASSERT(pInfo->poolCount == 0 || pInfo->pPools != VMA_NULL);
16403  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->allocationCount, pInfo->pAllocations));
16404  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->poolCount, pInfo->pPools));
16405 
16406  VMA_DEBUG_LOG("vmaDefragmentationBegin");
16407 
16408  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16409 
16410  VkResult res = allocator->DefragmentationBegin(*pInfo, pStats, pContext);
16411 
16412 #if VMA_RECORDING_ENABLED
16413  if(allocator->GetRecorder() != VMA_NULL)
16414  {
16415  allocator->GetRecorder()->RecordDefragmentationBegin(
16416  allocator->GetCurrentFrameIndex(), *pInfo, *pContext);
16417  }
16418 #endif
16419 
16420  return res;
16421 }
16422 
16423 VkResult vmaDefragmentationEnd(
16424  VmaAllocator allocator,
16425  VmaDefragmentationContext context)
16426 {
16427  VMA_ASSERT(allocator);
16428 
16429  VMA_DEBUG_LOG("vmaDefragmentationEnd");
16430 
16431  if(context != VK_NULL_HANDLE)
16432  {
16433  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16434 
16435 #if VMA_RECORDING_ENABLED
16436  if(allocator->GetRecorder() != VMA_NULL)
16437  {
16438  allocator->GetRecorder()->RecordDefragmentationEnd(
16439  allocator->GetCurrentFrameIndex(), context);
16440  }
16441 #endif
16442 
16443  return allocator->DefragmentationEnd(context);
16444  }
16445  else
16446  {
16447  return VK_SUCCESS;
16448  }
16449 }
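
// Usage sketch (illustration, not part of the original source): CPU-only
// defragmentation of an application-owned array of allocations.
VmaDefragmentationInfo2 defragInfo = {};
defragInfo.allocationCount = (uint32_t)allocCount; // allocCount, allocations: application data.
defragInfo.pAllocations = allocations;
defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
defragInfo.maxCpuAllocationsToMove = UINT32_MAX;

VmaDefragmentationContext defragCtx = VK_NULL_HANDLE;
VkResult res = vmaDefragmentationBegin(allocator, &defragInfo, VMA_NULL, &defragCtx);
if(res == VK_NOT_READY)
{
    res = vmaDefragmentationEnd(allocator, defragCtx);
}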
16450 
16451 VkResult vmaBindBufferMemory(
16452  VmaAllocator allocator,
16453  VmaAllocation allocation,
16454  VkBuffer buffer)
16455 {
16456  VMA_ASSERT(allocator && allocation && buffer);
16457 
16458  VMA_DEBUG_LOG("vmaBindBufferMemory");
16459 
16460  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16461 
16462  return allocator->BindBufferMemory(allocation, buffer);
16463 }
16464 
16465 VkResult vmaBindImageMemory(
16466  VmaAllocator allocator,
16467  VmaAllocation allocation,
16468  VkImage image)
16469 {
16470  VMA_ASSERT(allocator && allocation && image);
16471 
16472  VMA_DEBUG_LOG("vmaBindImageMemory");
16473 
16474  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16475 
16476  return allocator->BindImageMemory(allocation, image);
16477 }
16478 
16479 VkResult vmaCreateBuffer(
16480  VmaAllocator allocator,
16481  const VkBufferCreateInfo* pBufferCreateInfo,
16482  const VmaAllocationCreateInfo* pAllocationCreateInfo,
16483  VkBuffer* pBuffer,
16484  VmaAllocation* pAllocation,
16485  VmaAllocationInfo* pAllocationInfo)
16486 {
16487  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
16488 
16489  if(pBufferCreateInfo->size == 0)
16490  {
16491  return VK_ERROR_VALIDATION_FAILED_EXT;
16492  }
16493 
16494  VMA_DEBUG_LOG("vmaCreateBuffer");
16495 
16496  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16497 
16498  *pBuffer = VK_NULL_HANDLE;
16499  *pAllocation = VK_NULL_HANDLE;
16500 
16501  // 1. Create VkBuffer.
16502  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
16503  allocator->m_hDevice,
16504  pBufferCreateInfo,
16505  allocator->GetAllocationCallbacks(),
16506  pBuffer);
16507  if(res >= 0)
16508  {
16509  // 2. vkGetBufferMemoryRequirements.
16510  VkMemoryRequirements vkMemReq = {};
16511  bool requiresDedicatedAllocation = false;
16512  bool prefersDedicatedAllocation = false;
16513  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
16514  requiresDedicatedAllocation, prefersDedicatedAllocation);
16515 
16516  // Make sure alignment requirements for specific buffer usages reported
16517  // in Physical Device Properties are included in alignment reported by memory requirements.
16518  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
16519  {
16520  VMA_ASSERT(vkMemReq.alignment %
16521  allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
16522  }
16523  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
16524  {
16525  VMA_ASSERT(vkMemReq.alignment %
16526  allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
16527  }
16528  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
16529  {
16530  VMA_ASSERT(vkMemReq.alignment %
16531  allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
16532  }
16533 
16534  // 3. Allocate memory using allocator.
16535  res = allocator->AllocateMemory(
16536  vkMemReq,
16537  requiresDedicatedAllocation,
16538  prefersDedicatedAllocation,
16539  *pBuffer, // dedicatedBuffer
16540  VK_NULL_HANDLE, // dedicatedImage
16541  *pAllocationCreateInfo,
16542  VMA_SUBALLOCATION_TYPE_BUFFER,
16543  1, // allocationCount
16544  pAllocation);
16545 
16546 #if VMA_RECORDING_ENABLED
16547  if(allocator->GetRecorder() != VMA_NULL)
16548  {
16549  allocator->GetRecorder()->RecordCreateBuffer(
16550  allocator->GetCurrentFrameIndex(),
16551  *pBufferCreateInfo,
16552  *pAllocationCreateInfo,
16553  *pAllocation);
16554  }
16555 #endif
16556 
16557  if(res >= 0)
16558  {
16559  // 4. Bind buffer with memory.
16560  res = allocator->BindBufferMemory(*pAllocation, *pBuffer);
16561  if(res >= 0)
16562  {
16563  // All steps succeeded.
16564  #if VMA_STATS_STRING_ENABLED
16565  (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
16566  #endif
16567  if(pAllocationInfo != VMA_NULL)
16568  {
16569  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
16570  }
16571 
16572  return VK_SUCCESS;
16573  }
16574  allocator->FreeMemory(
16575  1, // allocationCount
16576  pAllocation);
16577  *pAllocation = VK_NULL_HANDLE;
16578  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
16579  *pBuffer = VK_NULL_HANDLE;
16580  return res;
16581  }
16582  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
16583  *pBuffer = VK_NULL_HANDLE;
16584  return res;
16585  }
16586  return res;
16587 }
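
// Usage sketch (illustration, not part of the original source): create a
// device-local vertex buffer and its memory in a single call.
VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufCreateInfo.size = 65536;
bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VkBuffer buffer = VK_NULL_HANDLE;
VmaAllocation allocation = VK_NULL_HANDLE;
VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buffer, &allocation, VMA_NULL);
// ... later: vmaDestroyBuffer(allocator, buffer, allocation);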
16588 
16589 void vmaDestroyBuffer(
16590  VmaAllocator allocator,
16591  VkBuffer buffer,
16592  VmaAllocation allocation)
16593 {
16594  VMA_ASSERT(allocator);
16595 
16596  if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
16597  {
16598  return;
16599  }
16600 
16601  VMA_DEBUG_LOG("vmaDestroyBuffer");
16602 
16603  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16604 
16605 #if VMA_RECORDING_ENABLED
16606  if(allocator->GetRecorder() != VMA_NULL)
16607  {
16608  allocator->GetRecorder()->RecordDestroyBuffer(
16609  allocator->GetCurrentFrameIndex(),
16610  allocation);
16611  }
16612 #endif
16613 
16614  if(buffer != VK_NULL_HANDLE)
16615  {
16616  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
16617  }
16618 
16619  if(allocation != VK_NULL_HANDLE)
16620  {
16621  allocator->FreeMemory(
16622  1, // allocationCount
16623  &allocation);
16624  }
16625 }
16626 
16627 VkResult vmaCreateImage(
16628  VmaAllocator allocator,
16629  const VkImageCreateInfo* pImageCreateInfo,
16630  const VmaAllocationCreateInfo* pAllocationCreateInfo,
16631  VkImage* pImage,
16632  VmaAllocation* pAllocation,
16633  VmaAllocationInfo* pAllocationInfo)
16634 {
16635  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
16636 
16637  if(pImageCreateInfo->extent.width == 0 ||
16638  pImageCreateInfo->extent.height == 0 ||
16639  pImageCreateInfo->extent.depth == 0 ||
16640  pImageCreateInfo->mipLevels == 0 ||
16641  pImageCreateInfo->arrayLayers == 0)
16642  {
16643  return VK_ERROR_VALIDATION_FAILED_EXT;
16644  }
16645 
16646  VMA_DEBUG_LOG("vmaCreateImage");
16647 
16648  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16649 
16650  *pImage = VK_NULL_HANDLE;
16651  *pAllocation = VK_NULL_HANDLE;
16652 
16653  // 1. Create VkImage.
16654  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
16655  allocator->m_hDevice,
16656  pImageCreateInfo,
16657  allocator->GetAllocationCallbacks(),
16658  pImage);
16659  if(res >= 0)
16660  {
16661  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
16662  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
16663  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
16664 
16665  // 2. Allocate memory using allocator.
16666  VkMemoryRequirements vkMemReq = {};
16667  bool requiresDedicatedAllocation = false;
16668  bool prefersDedicatedAllocation = false;
16669  allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
16670  requiresDedicatedAllocation, prefersDedicatedAllocation);
16671 
16672  res = allocator->AllocateMemory(
16673  vkMemReq,
16674  requiresDedicatedAllocation,
16675  prefersDedicatedAllocation,
16676  VK_NULL_HANDLE, // dedicatedBuffer
16677  *pImage, // dedicatedImage
16678  *pAllocationCreateInfo,
16679  suballocType,
16680  1, // allocationCount
16681  pAllocation);
16682 
16683 #if VMA_RECORDING_ENABLED
16684  if(allocator->GetRecorder() != VMA_NULL)
16685  {
16686  allocator->GetRecorder()->RecordCreateImage(
16687  allocator->GetCurrentFrameIndex(),
16688  *pImageCreateInfo,
16689  *pAllocationCreateInfo,
16690  *pAllocation);
16691  }
16692 #endif
16693 
16694  if(res >= 0)
16695  {
16696  // 3. Bind image with memory.
16697  res = allocator->BindImageMemory(*pAllocation, *pImage);
16698  if(res >= 0)
16699  {
16700  // All steps succeeded.
16701  #if VMA_STATS_STRING_ENABLED
16702  (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
16703  #endif
16704  if(pAllocationInfo != VMA_NULL)
16705  {
16706  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
16707  }
16708 
16709  return VK_SUCCESS;
16710  }
16711  allocator->FreeMemory(
16712  1, // allocationCount
16713  pAllocation);
16714  *pAllocation = VK_NULL_HANDLE;
16715  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
16716  *pImage = VK_NULL_HANDLE;
16717  return res;
16718  }
16719  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
16720  *pImage = VK_NULL_HANDLE;
16721  return res;
16722  }
16723  return res;
16724 }
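
// Usage sketch (illustration, not part of the original source): create a
// sampled 2D image with optimal tiling in device-local memory.
VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
imgCreateInfo.imageType = VK_IMAGE_TYPE_2D;
imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
imgCreateInfo.extent = { 1024, 1024, 1 };
imgCreateInfo.mipLevels = 1;
imgCreateInfo.arrayLayers = 1;
imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
imgCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VkImage image = VK_NULL_HANDLE;
VmaAllocation allocation = VK_NULL_HANDLE;
VkResult res = vmaCreateImage(allocator, &imgCreateInfo, &allocCreateInfo, &image, &allocation, VMA_NULL);
// ... later: vmaDestroyImage(allocator, image, allocation);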
16725 
16726 void vmaDestroyImage(
16727  VmaAllocator allocator,
16728  VkImage image,
16729  VmaAllocation allocation)
16730 {
16731  VMA_ASSERT(allocator);
16732 
16733  if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
16734  {
16735  return;
16736  }
16737 
16738  VMA_DEBUG_LOG("vmaDestroyImage");
16739 
16740  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16741 
16742 #if VMA_RECORDING_ENABLED
16743  if(allocator->GetRecorder() != VMA_NULL)
16744  {
16745  allocator->GetRecorder()->RecordDestroyImage(
16746  allocator->GetCurrentFrameIndex(),
16747  allocation);
16748  }
16749 #endif
16750 
16751  if(image != VK_NULL_HANDLE)
16752  {
16753  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
16754  }
16755  if(allocation != VK_NULL_HANDLE)
16756  {
16757  allocator->FreeMemory(
16758  1, // allocationCount
16759  &allocation);
16760  }
16761 }
16762 
16763 #endif // #ifdef VMA_IMPLEMENTATION
diff --git a/src/Tests.cpp b/src/Tests.cpp
index 8675c3f..058fedb 100644
--- a/src/Tests.cpp
+++ b/src/Tests.cpp
@@ -13,6 +13,10 @@ extern VkCommandBuffer g_hTemporaryCommandBuffer;
 void BeginSingleTimeCommands();
 void EndSingleTimeCommands();
 
+#ifndef VMA_DEBUG_MARGIN
+    #define VMA_DEBUG_MARGIN 0
+#endif
+
 enum CONFIG_TYPE {
     CONFIG_TYPE_MINIMUM,
     CONFIG_TYPE_SMALL,
@@ -1379,6 +1383,7 @@ void TestDefragmentationSimple()
     Allocation that must be move to an overlapping place using memmove().
     Create 2 buffers, second slightly bigger than the first. Delete first. Then defragment.
     */
 
+    if(VMA_DEBUG_MARGIN == 0) // FAST algorithm works only when DEBUG_MARGIN disabled.
     {
         AllocInfo allocInfo[2];
diff --git a/src/vk_mem_alloc.h b/src/vk_mem_alloc.h
index 8c663fd..c007128 100644
--- a/src/vk_mem_alloc.h
+++ b/src/vk_mem_alloc.h
@@ -2499,7 +2499,7 @@ VkResult vmaAllocateMemory(
 @param pCreateInfo Creation parameters for each alloction.
 @param allocationCount Number of allocations to make.
 @param[out] pAllocations Pointer to array that will be filled with handles to created allocations.
-@param[out] pAlocationInfo Optional. Pointer to array that will be filled with parameters of created allocations.
+@param[out] pAllocationInfo Optional. Pointer to array that will be filled with parameters of created allocations.
 
 You should free the memory using vmaFreeMemory() or vmaFreeMemoryPages().
 
@@ -2875,8 +2875,8 @@ typedef struct VmaDefragmentationStats {
 
 @param allocator Allocator object.
 @param pInfo Structure filled with parameters of defragmentation.
-@param pStats[out] Optional. Statistics of defragmentation. You can pass null if you are not interested in this information.
-@param pContext[out] Context object that must be passed to vmaDefragmentationEnd() to finish defragmentation.
+@param[out] pStats Optional. Statistics of defragmentation. You can pass null if you are not interested in this information.
+@param[out] pContext Context object that must be passed to vmaDefragmentationEnd() to finish defragmentation.
 @return `VK_SUCCESS` and `*pContext == null` if defragmentation finished within this function call. `VK_NOT_READY` and `*pContext != null` if defragmentation has been started and you need to call vmaDefragmentationEnd() to finish it. Negative value in case of error.
 
 Use this function instead of old, deprecated vmaDefragment().
@@ -6272,9 +6272,6 @@ private:
 
 struct VmaBlockDefragmentationContext
 {
-private:
-    VMA_CLASS_NO_COPY(VmaBlockDefragmentationContext)
-public:
     enum BLOCK_FLAG
     {
         BLOCK_FLAG_USED = 0x00000001,