From 1a8424f41aa79f189f00bdd2deec3df01731d4e7 Mon Sep 17 00:00:00 2001 From: Adam Sawicki Date: Thu, 13 Dec 2018 11:01:16 +0100 Subject: [PATCH] Preparations for v2.0.0 release. Updated version numbers. Rebuilt binaries and documentation. --- CHANGELOG.md | 2 +- bin/VmaReplay_Release_vs2017.exe | Bin 208384 -> 214016 bytes bin/VulkanSample_Release_vs2017.exe | Bin 273408 -> 291328 bytes docs/html/general_considerations.html | 3 +- docs/html/globals.html | 6 +- docs/html/index.html | 2 +- docs/html/vk__mem__alloc_8h.html | 57 ++++- docs/html/vk__mem__alloc_8h_source.html | 294 ++++++++++++------------ src/Tests.cpp | 9 +- src/VmaReplay/VmaReplay.cpp | 4 +- src/VulkanSample.cpp | 4 +- src/vk_mem_alloc.h | 2 +- 12 files changed, 212 insertions(+), 171 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e2ae636..823e3ee 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,4 @@ -# 2.2.0 (TODO) +# 2.2.0 (2018-12-13) Major release after many months of development in "master" branch and feature branches. Notable new features: defragmentation of GPU memory, buddy algorithm, convenience functions for sparse binding. diff --git a/bin/VmaReplay_Release_vs2017.exe b/bin/VmaReplay_Release_vs2017.exe index d68dd24b7749953ce956c48156edd3d2deaf9673..e05eca01114977298049020b4833c1a25d635d30 100644 GIT binary patch delta 75748 zcmbS!34Bb~7yrDMgjiyNOct_?WDsIWgh)h4Na76>VhOQ?P+GM{B$iqxOhTs5D7|I0 z)i!j~ohs3yjh(8UR#A=W@*=3E)u?6u-*ewg7JmKzKcBzPXY%en_nvdlIrrRq&pr3P zr{GfKf?}`D-a=?e$7W+R&VRQwp_)cw6Rl>UsL_0@0(Z<|U4*tm0UIUs>iAcXo8}h5 zv)wf10-lL(8beF9BLAD?rpW<3=d!5DS>39oZ#fbgs_6Q+O4La1t6Q~T-lAtry%$jG zjUsB#-Bm?>>d{E2+==7!#%nZl-UJMoni4z<*g!EPD$-5G(BoO_U!q1+)+tBD;*5bo zO&}^5*qgx0Mh;|%?b`nm0qK-GY1+(Zz&ddYF?hW`TW^Xwp^tH%sHr$SLDYPQf0E7X z1Iq8H$v2pf{HB`x^ORp#o%d)JzBR=?O@tCzy<)-40_*Pf*1o7u>>@)9T@$Vrlu4yZ=%QiNq=`iuq7wq$b3)XeU4 zS39y>8B&r>w=Ity@ri3bf#3-Q&r#vC@|aE+-P}O%M1r?b;n8_)v@XW`=gok3de*32 zd=f#CSO%wNzWxIVlh*wRl(l<#=8p}HQWr-V1%;py? 
z%wQk;of2mcU}pc8t)@f6Qqk!=Pf7k(F%A$lCR>UiS=}W02T4Axl#O9K{j5*mSLMIY8<+`Q&{g7 zBZO&N?JYb7aj?WTv1ej~dk)Z2q= z*Sb2GtqXfmY?#OlhNu=!+vnp&)lB{*(oFAcHq#JLpQl5z*-M7+gz;=l>tOs{&^p{) z;)%gt&$@)3@=|xLMial}9Vd*Q@={L@+vJ4NQ<9f@11QBW*-X&-l9eYU`9*(#jq%nU z1a4xq@`MvC$uDyxw^PRW)ny#dXk9Iov9LC6h5c+mn`Z)NuNq`?f8DsJ+}&s!+Q8(t z&uH^u_?=3UHSA=Y5nd}Ld5pnxAIbRLcdS#}0O8?QsqFy4YYD{5ZzLyq#(OND#7e@G z#L6D*YWPa;tm9-mBPOWM61>J6Z_Xbh$$j?sW;(~#AcCN)O2htIIo z_T98co&l}|7@_WehP~K+K!BNG860gPCHrf)CpGTjk@pO{*?x#;B67zv|c4$s6)D1o>X?=hGI`T-y(dB=P7 zpTKr?=oZ+LNDh_czEs7RGO5UTBhz#O^Xh19`2BcwTF^->yW^mSYn<6#{n(cshc-(` zR$H_j6CBv~?BOAeXThDi)eitzY1xmB>NL37PuWx@1G6!zxcBflmCbf{>eg^0z><7i znV-rYb{fzy-?G8+;#sA1(;l@tc%M^Ps6r!nk)RIAX}0M!DR;vXnu-zdn)8j8Ov682@-~|&$@JO_Bf01n{i30@Lm?X)uj~* zn?+}d^Rw8j=vKmD#-ay`+AMZ8Iux0DU86eef!*W}06i$@OG@`P+5&s_w>9o8S;rV+ zQ*8Y_B-=1UtV7p)FngwJM=@^!-+h2KRd!QCnz%Jy;TY z-C%Crd#Hp$8QNBk-*|$MS2dxnquGM)ErsjM4wN)%JJf+frN@&fua0!J9Wt6}V+kc9 z)``+NS#Wj%(Av7bK8-Yb5(QE?Bt6 z9>?_*25@oLqC`1$?FrsXM!1^Xp2_C*2o@@sy+_a9{mFiBNpV?``8|yB2lKkQnraT> z4}qg}uc4I2J1quK6YsR-Ox7&E4JgIM_v~%_zfl@Rl-5*K^Q2PRKioxW;|TUee3+oH z^YNoPe+97ew^CghL*y8YWhlTfx_w~mJk>aZ8kU6eKiK%58-+iZdqQHsWWXiMetpuo zq^xnHvntO20ma(9HVkK32@%5Y%$m?mte3&c5(31n#q452TXEDW=H4qx98t`=_e%EZ zM@#s}tw|D-A(LFK-mTe7y$t3fSaIRgICQb8NEDUjF9JhnUBECWa58{DyMRrcz@Y%P zAifCweu|3T3&1oN@K+U>rvotC1w7%b=_AR*4D~W0#meCcD|+luA7%XU>0$M8k{-5|=kbU>owQ?9eUNFXk|}`G18T+UV-*{pR4Dcf zWq%~~5#2&rM4wg-%UjWc-z-_wu(e5p#O1Bn`+fR|eOs|g6zX~5kiO2sLwfdH-#(&6 z&pztg$}?L}m@oHXXZso&d=UiF`h}N;m{3AVty&XjsNHjtwfYdvd6K@ zEGeZ}9PY!8rlbqKw+5z;7sR*DGjqRC&#T<6^GIMH^y@5k{f=Ge*UsmXs1X;{lPvcW zXKRgc9Bp2Iq%l7!Tm0=Hn=iE$da?~tZ0n1n#=6juXmPme2B>whFl&^UOAd#ekbIFn zmfDRC0wlksk$k*(4}Z!ufv3>V8_RMtgt*u3Ma|3J7B1B6*BLTeS_SrsGwKQOk$0wYSrxY3(8p<;~k{tt|#p1`TpxP5gvRpqP9^<7ndXvX8Er*PHZPhnE8d2!Lt+_iu$4w%b4mOEE%T{V zkYA#b*XFlT)%pr3kXomzNZz1*OQrn+N-E>PidVi#Wn~_4aiS@fZ8Y@|{bSi>w(Ae z7&a4+{xR$wJajSa$AM==@3ZXFK|#?pyp0hLCEEx=`Fjg#WF>)MxJLP`;?1Y?Wdu3s zUZ3U&d07E_JjlQG2WM#1Hl$IjwTyE(9BJ|)j9foV+$MRUAr^yE$z}2BExWDlicX8m 
z+LT{UvZlmVcEGrD4k}AR8mCfc?EgFv<2jVc>0Q$`PnR|vhSGCqu)L*u3Iz!hFe_;C z!X$ZMqGVg6htsD_UN6v8y8|ZU0(qRpO6FL0m}v(`x9D+90EfwjbjfBbhKW~hfP&FRTiU;=++eEX&gS(42x-el#cX3x2)?-MFSlET-4%s4}iejyY zMvLpC*eE=ni()U~(ItxQ$K&rvb{~(jNY-iCWpVTIr!>Ci<0mz~8PhH)xvs{?X85-@ z9{-;jFU6Rs)_61adPd8b7@CUXvNT-d*>yF3E>fY*>w6$pT&HDK)-{ZF4*LQSS4aNIp#MGcPYnZo#6$?~ z7#3jIxpU;N`VWi^BY-hW#aL4ZBmF-xl12d|O2x>kgW>%j7~`{mahohw*0rvKaWe3K z%dt9}nv)c>f2+?a3qu`T%mI)W&FY zvgQAlqvkeV?4@XU<{fk-;5C^u+oh)iw|BZ);3?r0NvBj5$a7 z?OS$aWN_mYat3LgR(5{N?vCu})}evO!bWuwyB}dAMs*BWw~ee5lb6X5DamIP>1|pA zJ?6)=H%9r1B@t}iUs_3&bmtZYh~!ARI)i+Hs3?h)2tbUX8DOzaeDOeCGb z|83}rjNqvhA<6qQhL1=ZmXwh+JZS{NUfI&2#LbT}l}gr3 zPi?!L9VR)~JFV!Rwy-(xgejhqb+Wf4&q|EGD8;=gVT#Ze?t$)-<@e?x^q@D%UlY)w z>w#q>Sys464&DB?D({i(`k2U;VSGu+^OO%Gh=jBxe{4_(O+ z5~i2v&@JVQEt2+|O~fGk(gQ*>^Dlt8WO&TWoo!f?4dK>l2E8`?GR1R$OrcGs?&xSz zz(7xB?_Q1OUMlq+?*Uydb&ynak@^l>siP`cF4g-;uOfL=-cKm+t-y3HuQ!xvG`<>b zcpjEY%l>R^3&$p9C5;(3HftX%9h(yP_Wqh-=SwSU4u(T?cMOL)2bll3w}O;?H5L5Q z~VJcjuDUioT2J3HwzCu2J%_%s*NI$}IJ>Ef z`igF#{@gx{5RB(}utKHbzm&-{Oa_aICdv_I4#(FIF*{XwV)rUhleS1wMwepjD^2ol%5W!em?2z}(YBSS=oTCz z8watZ_>;Lrtq=OB6Uv=Og2u8y@8Dd%frV2k|JudnEI4qM1qQY311j5JU6w)4oo!7zpM8k% zEbOB1YP~(KjAJ(@+1>gy7TNM=*NT6Hu#o3;Eu>`_@bdJ6H2E0yR3v(ev$G?F4ScS3 z>sHj+u*E4}e2uH0Ts+aBuMB{Cdt=m5NxGNF);%{reJj@g?~L*>CHH}#Ni1~aHApGE zm*@VuP$_mfEqhJgDQYYgLS945eIeh!cxYy2bNneh&24jLBt+=3%i$+E@!>Z1`Q-O|L*;u^eVlVIKuC_Way-*IIyaTSfsL;>1kHbYSh9=*p+UiNKUZq#tx*(NhL{m2-C> zuK^$jI9+*a=o}QrY??yEvs+qmMusXC6>i6*EjtX*8Phv{7tP#qy^1 zj&7>vk1Hqt5inuVfLK<*gEi{Nn{R=U9F$}mAH7&(rGMD(Q^(eu+!5O8%lc0X5u0yk zQ>XRvzj}{mv7=J_IM4iRU%{>9J(2bz+cV9#z86*BT*@v_ixt{1zv+GaqyK01H>_;d zbl-Y51P2rivrTU!zWOQKHa#Y6!{7gldLx)E;wb~6T+aa3WJZuUK7e(c;V%vfVEtya z7WFgPj2UhH2KmyUoRLU1Dd^HT`m{rL+Mj(qBVKIn&njm0b{pm)vWS^&d$;%kRrz#P zf-SB|u0SuOOR_OhIk^aKi_N1QIVOOSRoQ~c&sd|7thTfQC1?>_J=4@AWEIJdxJud% zrEd|doEh8U6gV)+lXjpVoAb$oHuuWVFEF~3aq{TUm1AX=QiRNrj&U0e!PPQYb zjnCrG1PtaXhc3ZY-k~|WlM^at`!e5I!vhO+w5+DZI&|M7<QI zLp!4EuB2^$5Qm!{{%V1@@`JO$sBM3U3LHZ?#x3P*6u<__&r$Ynp>#&1WLYG6OWM9j 
z*}PJyWRqIrjgoL);*FA_RI(B!W+2@{ktZ*@g`&Y!l*f_0rc-({rR9wCC|gANOf_GQ zJh>B!guI*qf6VJm%Es~Rb!0!IY|oX+uXp>Y9M}=SN^KI4rd`1-qy%nuI55zSa)t!`!$2=K zW0t($0U}j5%HJt_ZUVy#Je6ayQ?`D_F6Q;{`^=kKn1vz?IkG@k-v2RzaMk&8s1ao?ojmL?{R} zK?SW-TU@0+iq%Jf`Y6F8Z$6r;M+i|sz+nn!VJ?*IKeieSQ7CegJAFJ?9n9L=1eNN}rA9$e;;>H;C z`3qndUVE`AzNO5a90 zS$-)S9&@t1d<-74J(D&MHm^=pehTF#`K26yO!D%{R3h8wsTDG)!e&&EZ^p5!^L!_q zho3UHp%nMU&s9jx$&k;1%yvv%%6#6;il55?PnHXRzJ#az%Fl#=DqE?_aNg32pKk$X zLKTI|P=`pN&JR?l3mZK@*nA$nq?y}Dvh4b~ijrvPd(=p#8vlGq@}MCU@P_cDhVVd@ zD5`Q6RZOUI3ozagl~F@#oeZk88Fd^7<4bozZMn;6ADT`BF|Go01*@9hMI7=k>+phJ z)PKMRzR+xD(fIl#i(hdPkOf#^JFLZWuYbdg_FLZP} zgUzw~f=+I);n{6Ll-sj-K96S?Jm0{x9-gNbLpo%QdLi6-50eM`@YA$kW>^w`KM7Kb#rgiVS=_t;%zP0e9`lh=~MQxwTM&%ZJ!rRZLMyd`1yJ8Xbi-*T27 zxPkAIapOYEK&-lmE>=cb5?a2)mYKt%rc(Lkci@yI%={P~mM3A-1~t^2M{W}`v@jEe z%8r(VLvIsR-%*5R0~RLLbmycRe3f|@B#0qzvq1$NTZX+&J?MX6Lc6XY*2bSTF0Y#g&c^kp~2XN_CYpQQam|v^%I#faB zp~T%l)_ZB3_*V&gVQGl3h)Njhsjv*AWuCXk>{v^}of7uh(!i!ka1Se*(*BE^V3$C4 zX=z)xBZA24Sq_WG-(n{%fx{^Ts_a}un+iC5xIps~hkSoafKHcFI;ps8J)>Mo$;zQ7 z>qLW;v;zkt5Pg^d5u#%b-Fgj)aLW{jvOIL#xVPAzBK@FW;AJYU$v2Syk>sg*qdYE= zPG#jYz*2g?4-#=ZX^XBc90}zo%UjiL`|IkqJ=(#&Alj;AI&`}qvn_?)l7j(s8FUY% zwcArdEak$Uat{e{kq~U9Lixe3?mlf7fI3-WpHeqr^IBP7vUN*?%h$fBO>ZA;Lg9>i zD*Il?9=@--haceS(9MQ0wIUPWJ?0JaL9hwwRh8o`3C-SwfIh*gm2O5yeeAh$^9Y^O z3?Vh;+?%QqmcKzd@M}b{_W+aR6$WhK_|hKp6RRrf*;t|lpP%J*dV?h{>)aypjav4| zL*q6tt3L?1GwZTE@rOsuu`Ehl|A=*2-c6kTh&{hNM(p;8ZNTHvL-ynH9^#&dOjnHi zRS(%fJO({v%kl7i$jb0I_ki6mjuGE@z@n_Dg5G$w#uA%R-`l*jjcH=|L7Dj)3wg1P zaX4zAYwTb;>E3>QULQ#|drCHbaAAyiEYVp;-?@;e)>+2)%GhfVswT3|Pcl}%h|}MH z*=H{Xn$E7PY1VlR`7Es{AG`_`9ZELfa0YXCO2G$Zu!y|+J2rub%79JbNN%QNlTAK5 zb|8`Ov7?On$x$N?BO}>TN@zBqnNp9HIi*BNTSr6o>k^d8Re3?Y)Rs}E92if=kWmIX zXxu83s9jsix@7qTVTY&52e9W0aj>^!eT#DYQ_PNn!>w#v$9|MMifw*io-5jjz3eP< zMZ1txUfDUB6snWSTkt297VF^e&t|U|M2PLG*t;t}Gb_hvDm(Hfn5#XXoEk^G)tXOo z>~-do^H7bla2B)xPq1d1c(kU5%kJu&qQ!DniSwG8O6O};YS&D?8}TA)VaO{Vt|jwa zZz!Wl5@uT9oGiPXV-IZ+=1H_RW>X!T*DOj8Lt0*|p<3{*3DSo{=>8iZy9pA>A$0o< 
zkTV1cCJOReevQrMr4Xo*3Zi>&sP#92s@CvYdKDzXG$t#*sjzSr=11moS_MU_pbiB3 zMg_&HpdoS6Gvmx|?aMr1SxV?7(XSa+0M?FWCYM zq&-*NCAZ6Rb0v(5hq^n<6(63($ng4#kUfx<&65mOlz+guOeBLR$xx~0 zNd`UTFEU4OQUb{kOMs(l1(G41@;lW$$uODnAE|kgp@8zQGj?}XQe){VMua*;zRVI= zV}^X0Y~4A?a#SbL3h$g|$kjbWw=3+x>Im`IJM7NtNb&PK% z&nrE|AOB>JUg<8D{K;b0bQhodlg-4V)1Pb;9ye~XD{ErJZ8urK+9N@`SJaG7=RDgP z>lV$kFWH!5ZF^%bYW%O`aMC#15FZ=}6{EvAy{szsn=38=2v(9;TDcm{bzzX!7 zYo4_%`a;NWfCEIDCM&ziDLXdlG0_%KE@0D1B#SB8WRv?2+G1hO#q5n%4@?YGJYr>j4Ps=*3Yn^mtCC$O#G8D__tLNZy<B*z*;k}{bcU)Rwb^(}}k*`EoIBxxK)&FOY)V(lZJCD+Sn zw3E<}`LWM>gEwm93QaeV=6h?LGN6h{{zbAPaw5sMCC3uD0jfup*9Tit&P3}cef_VT z`%{|-81j@c&tp&EbhqXc?5`IA!_=!uiN28{KQiGgAv{UGK~IVP-~fMKw$Uv%O_nm# z+&oj{zX=*?ia(*X6kxhH%0D7K+p8d6kK5mleNz0-xp(QRWTMjtGs6i>G|ERDx^HRW zh4#{Er<$lOqGJ(b{LMVvB(7()9K>-?o*}Zb6@^w2#)316(_tmQkOngNkT^#O@kb(7 zDWJ{VB0BC4PmB1+xaYB(QGUda$Z%$%k#@8W3oZP0{hg!6ueO>2&=3$r^l5Tqhl$RZ z(~6F03q$!D2z@5ejb?ai4W;NkM(t-fp%@|2bSx_8LzlXJkUb=`vBu=1;PL))|%bI-6H*~0Z==(@J$12 zCChJBMx3xNc|`B1w+ebGWwM0BXlndU5-%u zbr=rcjTW+`_ge;XJ7`FDU@M|Aw?cKdp!A^qY|HC?1H;duJ-*g8DRk&toM~el2AE*AEkZWpAzTg1@KNcf()LH@f3*;v4b8RkrYrSp40_ z|K8+(+rCMV>^Hj$zp!<09u35d@7k3**Qh~v)ivs~CCvPmL82w$|FIQVHWs|rS)w+a zqQ0xXWK+Erm~e%bs6l?UT6Hc_AK<`7xn2PMxR$7bgggu|P4((gqAW5}zc@!hMu`%% zgt`USj^>th9Gdw86`E)%In3|BaDU#b9c+NEEV%B!to{mp6J9fBEeR74=ya*T$;@A* z-0lVJpOTK|O<#lVlKuX5RvG6|sI%DEJ~kxYayWLBp{BC;1hCMhhf?&emB+s+X~|>V zVpkW_&`XocCHY@kd(usO?ZKXpkDMm6vG%;R7^{r9$hG)jI#&Zt@6{ti^tyQ;Y(8(M zb%p?ZtABxD83bDb7+s%Dfw`t+Qdl3`qZQVa^>|`IOo_&Xz0iJqaiwyHAc5U@VLT2p%|lEV*{EibK;dbbc zRGHSRmkx-<75ox~8F+`Cc!wRjRL*7r&<<;hKCXpUR>S2EN?Nl{I%2pE0e+s)^ED}->AYjf1)cQUs)x=xVo@->#z zvYcs*_(y`Tfk2M7@I@kMoesAx1znF*0IZDK!i^+n^%4q6HL7o6{Bo;vF@5$~ZAc+% z)$%tGWOMl&MtLRIaMWTL$*D>Hk~M7=ovEiAEej3aO|;7zfiol4 zRPvI@9Mf=fm`rdULrX_EwL=L#C9o{xOb*r}(Yjofp!yfi8$)@q zINE4HmY|XiC$cbH%uTRfV7!EAq!};3w1()oU@@5*$>H`&5yvS;89wFJw1_`26@Oh$ zgARRh?9RK@k}zZ* z8Nd_Wlj~9044tobj~6W!{`1wLnMuB6Z}PWtj1ea!%N5n3yzg=-_xvx1vN^TfO_ER1 
z1%nf%q$Zooq0Hvl^#;D|!J+i7>rk@z4N=-i7y}Po=fO>!GnXyc5U4wFN6=JYNp`Mh z+bNsFR=?VueY3$=?0261u;Hw@A%lIgG0c0|_tc@~wCqPZbeGSu@{KXgdjaV35=SA8 z{f+%fV^NoN96?U_cb-b?*%)&C-W_Efx^q9&@e@0#1h}8r5b0Govd|X3L*_&wYzoGo z!Q}~MAZE1+8^Act$Y)pH^Y=XkaOE?S1JP!$1C$&MC)pnSP6&t$M$+Hqoh^`eZ7WS?u`xfx6R} zwJI)DYiA#2k8LvP|W>YrBh?h>WjhjwJUITkl+*|6=!yu^1bzzWS zB6R5PoFFybB8y6Q=oX$}9-kQ4!4JHfTts}Wsp`o|Myqh8_{$Jw_Z#fS@! zGx6ioVsSeA{^Jnum%gj5hk);(hnObw0d(nL&3B{&w;VT-&HTi-UH#*AM9x-ga*=x? z?GpLzsa)jYf7XcHpVAOHe`;Nk^A(63J53e2?^G^w9O{zD4W>b4J6Njrc~?SF`0|{;ChXye^MHvzBn182Kw$*BLpvkJtggV zRnW}IT+r`PlLkQW^B4dFwVAbrJo_A5v?Wj%h5%{Bxaz+3p=>(u+dW%+#di<0AGiGK zeF(dHTqkl)kb@7>1leQ@fG#nQ9V9Wu1`gJ7t6!XSxQ>(?)QVinMM&ckE3Q%UfqqQc zg~ik<>e)$LR6D{@E<=_n4N>2lR9n>deu1dZKdXwGH;Ie-0tK%iYL91``xhB)_f5p0 zqDwD@h7t-*$amC;LUwHxoSMiAz6jLmu}W5KtX4r&$}*@ROx$#UegDPJekb;k_&lJH z=+KQsjuyB?3Md4#YulPN-2sqG6=$iKA`0DOACj3NqVQJs6A^`h2kI#3eHEQ6$g+=( z-X12l-^dEK$HzR{KmpKZQ^B0JumEcW#S_wbbiua++Nn9~NmsT#1BKvdF?#}Z6{ao2 zM5}}m3Vu~^{mlZVK+){_WU zPO#qqi$0xf>w~yp+GeHRCoFg8Q1P3u*`b{i#lc^*j=Q>v|Bhf2cg+}Egh>M9Nx z!J6+LB(~ViChqPoR_tPL?2ZvX*u~BvH*Xhf@^y?DyNjiM-CcC-+-m-Mgdi>(w)Ols zD+Tf15H@+wY%y;H`)Q9!T<{f(*c;dKFv)*r1ms_lESITg(8<`ADc_7>=Dk4yIaubf zidhy_Y4XMzZDYvv8sQf1S8UtfzFzNqN%qZyZ1=xp9{ZBS--j`2U(e{H8Ke`u*@^8k zF18r0?gk#y><^8Y&}3ydqXQt^_oGa>CE>LUR=Q8`H3?a~DGxrBQN!7<`+7GTT*`gY z6N>j5)@gs=7H*U$M|bZlz>JsV-NhIbmV|@Dn7lu%{@2)DDAo+NZGXQ;^S1MQFgD{rpfHcg2LfAd8cKXS&xV4?`LTHkSZQ%* zJ$&5QiaEpB;R9_&+ZXKifxY7V!ED>XX!CPJ2uHn-@x6L>x&5CbX6TjV1 zGN+mo)ef~#O9;W{^(jZKLAf`?;ZTD_PsWJ6e?^SwR$w~{5^Y3#6ePm7!WAWo4FdPF zhkdYf8Z0Vjs}8m4dKqux)7Am~=yAcB&O~}UrQFuw1uDJ9L;32Z3YxOe<}X^**%Qbq$=!mX9t?(B3|3_ zuaH+ii5+Vf!df2j5?3{5|nEw`1?P=g;c5$sV%yN(A1tkyc<4;$WjK1BW1a zc)=dNDdINyuU=H0GTfOCYr}aDHoAyYdRd5! 
ztXqLcN+ALv{9P_v&_axMdJWeV^FYTpRcng$h}g@Pe5W7tsR>4&ua|7P`CA3x-;=Z{ zXH4>Oe(T?>Bh47|(r|B8yIlq8h5+PJ=FO#K_FjJs13kwc$?EkJQWf(kKp^X`3}cVJ zYtvvg70L-qhp`UF0?ZB3oUoAJG2oN&pEOPVVsbc=wg|jQovs%Bkwo7qbOP=>R9Dj1 zs??P;p-bRQ?3Jv#XkuapX991*R@`|+o#Oj3b&5xoL+9}^J94Z;)~ErrysUf$CQA2t z*WD24-eyWMu5HfaTXWpQL_l<;M=DCVO)2j2#E#bo#Iq-ORxQo4Xcb*c=0@lBDwSOy zz=j-;9exc>;&Tiy;suF>FZ-)%=2O8)s`SbL(h|Q6=9NtV3jv(Mc3)4I7IEN)*UX%t zUtgx12($XL^5X#^l{63Zuo@De_;D4c63b80_p5y6aN*{Ta{dS_Cr^N9Z zT3OeVDaj8$BqFhV3e2R>uPk8)ETwuB#F;t zOZMMlyH8FMTNE?HslLJImJ`46l68S6e%>^0a9w=Ae%wBX?)C?)=v0?yC-s9xskfeU zBwNs0WU=AHU(MnWx<7_R2N~)U-3wIJ<~4xU<$)4izrIIDoqE)nDwTRzOAH{`L!xi! zhe(xt*(g8882LDreR#U7I46~rpVo=*E9{@sts+ed$dlWrG>k!zg&!OtGC0}Wr0vUR za$O5h1-%HOcQ2JmXW~5XzE2w8R>Z7lqEmKIhK?fV7FBb49k9x1VD0+>SYuN$ZocM= zhsyOaD)y02qQYLPkdVrRvp(XXLgsfiUEG$;o};IaHGy3_+coKNPltmB z$(-N$Ai-%oMoBv2tB9t9J>5pcKuB@v$~&=`Yq=m_p+7h)}!jXM|D@6x-(_?s5> z0{y+;o}h)}ETXmS6BjKU@wsThDxI_jskFv$TER|QbDpF{ky_CD6yXmiU)`%|A>Qqz zb)t>R*E^iArB1%!9`f@Kmy9r4qV;tRUp2H;`4JIQ2C1~ha9Y7mT63PH6{gZ^^LaI2 z=y#{Su;RJ+YOV71Hs@=Jldr2!@a2@T7d8W`jD{LoSYMsA2C1~ha9Y7mT63PHg;&T) z#^YyPwBG4f&6nB^$#}w`@&%^^{aE1S>z60^YOd0Hwwjiqh8DbH7hfu^F`QPglh&Lk zY5AzM9-pq}t7|o1s&m2l3RC&QjtYF4oqSz+g0B`Tt+my(3^lYG)$qk>g^uC0fgHVD_!TC!LzYd?Z`K*(R*V1-;m`Hyd*yxc)D_aCAF8nk~8DD|-LKUcJyg@Qb&A zO0NZIi)O)kEmdw>%VZ?19?$k-*DrK!For5vJv#Pc?JnkcR>FI+dbCbu8!q;2IWqwZ zSQ^GpPk2HT@I2@-8X!$YoX9p`YR6h#@@dhW>L6Ce>)^_Zrc0T2dzsbaK~Fa5QlA!E zagV#=RcA5$5m)i+J=sT>dg^W@qqwiLI1|r<9zQ0thnKqb9Pmap-`;>@f6z$#3YK<+ z(u=V%#=0H_Tq#c11&=(P)*A)drlMr=@K0vbe@ad~x}M~sSi35{OsbW2*ZXp;8+hb- zNmfEQkF;myV45u=X);1qnI6M#{}g4O03ue8%^Zhbtq4+kVV_>@g`$5SU_r4YgvLW~ z{^sG2doh#!iuHLXmKsL|78q)@B?y>s1|xelpj5;j~AiGa{yvk?mSKw>6rp`04LxG-L2vhu<~)QtxUs z8}a)Kzr4RRn&0rFG`WtRBP2GWysx-%9$Q~N%wR$TSaEZtLw)BVtfEOQUXYDD(23+) zOv80ON%^)r^SP2Co|((0UP%^hbJ>fqRNx%;`!5D@S`lk}HCXHu!#Z4@*eY*VrFo=^_XVm<#bg!*D`qWo3~0}A7mTYR0Jqz&tyj{ zPOvWC1yYsyKSF=ASLULE1|W-TV84fL?4oX_*odZ(OJ=cB^TEZ~klrmu=tj+bPB zh8-Sc*9}zB2GQeC3O>ZsF;TdGk-KbOqV+xt=d%7l`hmW6*a#T3%@%>{K|v9a<`S} 
zSIkQ9{wc1V$Tt0TQ+)3kw(jpe;`DIQG{4P;>8b z)F!vZB&~OS$@2RxdSlq_uC{4ClkD+MTf)|@!NVA@6 zSloM6-fqc`K58kp7|oPNy~GQnSoq^!;%lR}PJY}=5G$r^-Q+kcip?jNE*6D!QQR}h ze$ritZ(cZwEXXGUU+9fav-J~{*^}(S9>V-KRTD|~^mP$>x9S>lQpDZxTu@AeS-i`^QOHmE0Da}$k&?0+{9wu-kl+b=d245h~#3d04_Kh55zF`(ak zS_x$VV~WIEV(;{_FpUmZoB zzBZ8#KH`mT=LXmscU88Jw4d`9B4T=iMr9X=ZrT*GVH^<6djfkh@&jx>ogtOv_C3eV z7-^4bDuj!jJWI2i3J2Xq-@f*6U*X$;QNKLKPV$I)8GyV3yt9P!&yObB8~F(_VxwQ| zef@+`appw(bACc(bk+n!rm$}D;C3u!&@ZvLi%d60a2dA9GfB|U)nr%xR-*k|KcTbV zY&fBG<Vh_x2Y)8~xcuJJUe!`)?c;b@O(#~Rvt*S$DiCxo ziBHq4XLyR(F21g|sp$6LsQF&gibjN1D z9xMd9C2ItGqY$Bw+2KI2sMKQ}7QD(CPvBft%krpf$P;CU)Uq*D7V|{eM{1cwWi6g4 zdr2(|L>aw^X8$fkSmNb)OpDh5PcV|c!ahnb1bLmS0p+f+7wU!CV#Z_peZA1#E3l^M zRoR}+rQ@1U%zYE=_^RU)!%cY~8qIE0KLcc+@5pseNW))8{{zxI#9gut|ORoI?wg1t~2E=RfdG>6en!kB4Z z0o|{3s7yN|J?YRSN0S;&a+ ze-PJ7`qMnvoOL1F(kEnF3IXa+b{CX!_wC;p1f98x%iRb%9Q(K0VZT4EiLe^lkx$U} zf08yn2wA0a?P6G7gf|b4Dl4m7`s*+3i5Den@}@{;6wG+la8!SYFG`sUz`Hv_fh#PG*UhJn^3mwdhx#-80gIt(kaO<311;vfi z#wXTox(=L^iA>wNCAG|B&dYExW2A#vH87;B=eFXOy#d3Fo*nrglqa zn#cv^RTna%T1CX8i-w@Ae7dQruG-br(9~FWFib5_ThBY&y3I9KhtEzfWSeewkq}y^ zt+1!t()?o&ZY%hj*KtZj`|%xBj%82N-gnQb1DnqO2{CqXJ=fu6jjMKz7)E)lx6)B< z3iG|v#VJP%^V5xuaG~3Dy9(>-%=C0)?Oe6%H0H1D;UYJo#tuTTB+FuVLA&fcpGl4@ zZ@Uy?t}%=9zpI7Jc%t2N+}P>}`MI<9=R!@VYLtgg9VWZz*N|UQL%v=Oc{4Y=IZx9c z>Z)ExF1=F8Rlw!Cg)j{n-ub7sqH|%}bQI~XVNp;T)@jSk&GZihWS+2b^jNrJu4$~$ z+1NB!V{|HVlw*J3!p0Q*L}PFAey*dq1+Lm$Ff)xi%$s8)mF6{#`J*weSmje!b2W;L zcM-4+^u)kjB*}yERpD=ytC+FT&7QToxvY+?oY_YdccS5J)4jY@(3oqh<__<@I^3Rc zR1sl67Nrz8}=aRa_ z+dfU)-Br7e2_-7)Ibq(`hCkJ|%vM)pHQjm;kvpfRhCb2QPA-2PMNzEpX%mu^$ePBw zp)s{rkGfiO^=h1;WV#4E(XARRR4UfmC$R)2?2i3Ggb*Qaud;`C5W1K{(FEP`odRvt z@nr?&eFP?*;+P8PuE7bK^^*?v!?gc8RQ^QudCuy!>H&SMr=sPg>4Z)3xv94H5bEIQ zwPusJn&#*%pcWZH>G*VW4gW`T%DbE{n!*e4H`JzHb~WYFoS>YzRV|p={mG{GafO7` zUj9m)^h_w>xlI&s@sQmf-%T9?3PYKjCmiYeOzh8Y1gqa?&?dHxx5A!~b2o^c5I&DfftHt+1kfm1Bg3^~wscM}1K>I&D7BUl}iqZ&M6fYE=`-CZmjVQRnf2AwI@d*dJ? 
z_V4-#>&5hOrBnI}xQ{n^oc-TqVVO8)qkUe!#KyL%nK z)%YF3?+ktfo0=tPdgIp$zfSnY;YYBdv4SQ8zXABo!fye71f$q#7=BIhYl&YE{CbZo zUD;1)E{O6)``ePxPwaEieq9n)iK{Nz7aN78ts7s0%Hki)4wfQ*rgN`7zu?0a!#(L6 z5ES~d_4ihKhS*#87ala}9ZU-yu1pS?r<7l?cN!pc5;sq^j~#$sz1-1WJV0o#|GXpb zRga}~rcm9N1HV|W3ZI`5eB z%>Tgpv&`N(OK2l5KU6v*OBf=G(Z%+U#tBxjprAA+Tksadbp`et&j|tc@#6(aj9zB{ zXuNPs++0xl#ss0CARaC#{rwpsMG(siN;^&xz81wQUVpZk;Q(whep&b}!mk)Vf~}e_ zXv*=NiRXO$mg7gTJ_`iRalBHxAHPmW$Kpq@sD)~MFFgJ6Yl|Pjg3N+u!XiO43_k&B zEq(;+hBXoNwHhBJ=p?)Ye%%U6Z%!6|62+ua`{rpvkp0?JX#cP6_7>9wT*%*UA2khQ zWbF1*6b1`ouWj~|(}hIw;TQDOmA0KBd@D3=xs@)Vl1I~YtNqbTVXZj-bNiYc5QzKS zelAD&Mx3|BzIK+-LF~W9zJHcr6L+TChvo_<{egOFx2~aWGKdXuo<>Tki?ql!u5Wxo z#8w!%eIX|JNjc^5aT*PtOZJyGnJeJGY{A=f_M!8IH1VvXbmM$sqaf-Zm!>Wd;zjZ5 z1G{CB5GH>8pmftBAx{j>w4r zZ*qk1Ue&rq-R)l%2*F~v82gU}g1Jdxt+Mkm_Ggy}t;C=hyLE}M++MO&7}LZbyCqi) zxa&Q8LyPcilV7n{aKXy{wlB5_3!Aj41&jIH?pY{wsRO(Bmt86p)`_NE`!9t;Px1S| z?D`@hQ1txE-m6GxEw28{KB-9P?2!#$eU&|7p%7rE*j3FxlYA-7IzXRd?dxx}4)q>d z`)e4M>zT$weK%3q5APaB<8uQ7&attGBqho}r4*i~n4Qr&8aK1(9xGm(*VBxB?-WI% zSMZNl;|qJvk8;1_{KEJnvq|1%Ku4j@JBIUkrWO5~H_IshlZ=-gPjdJ!!(lwDdlHeM z*knW;Bk5((a2|G}1(czVtRSRkkR14G!9h=B{KNlhH}@-|RzlRx&gO|<#1f7wnc%nR zMqVfSj=CRRYIU42%07IJUV_Um0OwyHXmkG!$QVB4qy1WrmJcArky-1D2cy+6I)&}` zCtDNoa%Z}CZDrafualNsj}*)xS^biCran!+k|I}`WQ@!ExIjzGUWy!TD^i_ z1ia^+ga2GCX-|cFQo$7=*_xJvi*u+sh-5MAg%oYSqeQk49|NV5FhK1KmI-al zskkD*C6HwK{ZGP~F3BS@@x4rK;Y_}-@0X5~ zn3H&&w}?V}{2DTKAo3;1gYiV8oT%rwNRrXJk#uty1tye)(^up4De{RlGu_lUp={=H zTWwKuP>wFE_#F45lJMCIaHTC;0wkk+(G)i>Q@ac!1(#b3@E4{-9Qd5C%>V+}5@G!p z52#B#5K}4-W@19+p(E!ZZwddZ%D7BtKbho=dAVyU7fbRu2!AneUd;=|k^>9d(>(lN zmGC43L%0@H20noy(vjCAj72n6i-G(#DbW{shqULaALpGU-z_kA)8x;fPBCJI#b;i- zrC~9b``N!Q7NX4iYOVmEkxpHc;!n--8(_mVeS8ia4&4RE!6Zjn60RuaUN%ugo^Ir1 z^TUA?$`M?%;?&y~mCbK$dF%$y;Z&tD{!*^5<6!NIC8$_bHt#2l&*$-~2D*b2@!5m0 zV?!|l2hi9aZH6t{&T)%nWg-3MGmfF$r-R?ZDBT&xS5$Gm!UVa zYq|wT712lVfj#^)O>tpa>z#(n1Zlz(>OW zuLf{=0&S=os3HwCp(_;)c(=K0^1G|5P+Y30a;m~p+muvc_D=J6=>n?AYw5(xYX*U9 
zbW!93U4Xc@5HGlD3UCb<2OyiF4U^p6b!ru?o?2z$o8hki$KhaQ%_9B_(#|uhP)C{T z$f_k49f$7cTJ?5Py-K`4pq^Qo>C9>%&aBqbnH63x#Tcr8D0MQr1*?&(p%-frc$Nsz zcT#)NcS>X!8!Vg*Ct2QI04#iU+L4}FSeD<0Mj=?iUrUuWpPaT@1VSWYTh z3ch6LM8=UG343!t*smu5TKim^(9!$`nxg+|;1H%#OvpI(meA#rcQU@!`s|^^K#fo= z1*4T|#eG7OtuG{6`zM>^ZL|y+t+=8>Wa;B2;W(+!LH}d@`%ZqN1jWg;-N5m^BRvOq z!)Y2wmK$K*bE9>SW)GY-`W`g*3B+lSbF~mn)(M{UKN(C;Dl5lF{`&+MkHYKkDf3L$ zK6=TT8!lPJP{9twhaGtyJNVHiTm;+>BG)kz zk6LRxU)mFKjU~grZ6ywNa_9hbo=Lt2w+Np_QT-yEN+l&mJ5mbEU=>vod=;E!;y)Dl zKkU7GSe3>4KRnNHa0%#IC@3iEQc>|tN`}z^6m+4WXlj;Pc#O=9jc9mW7+ByEH>EXJ zR_tAtl@*nxl?Tido~dm`WkzMS@KD$`MN`-BbI;6jJ)rx$zwh;4*Za@=d#-ENXXc)J z?m5phhx?gDH`r6*s&pjm&pCU_N%!R7bySk$ zNlQ39Cjv?lm8m^tJFf<_E#oT3IJhk)k0H^CHq*e~|$1c5({jkQCZej`z=krAHo@xO|>rAFtrKmAHYMpxtDu$j)yVpYR~ za?z$Or3N>eVeaN#7Wn-l4m@eJY4(ecbnIvG$CJi#Ggr#bqWCGJo$<4H?J20z@D*a> zDy(h}W{S^O8L7qz5w+UrWc(!VTn(k3LD-LSxRzk_L6Or@S&~bXtTyW4zvOA7AOEmQ zy!W)>_OCH+HVVOrhq>e!wolCxpvzNCdjqs)EWTxD**ES?5s@meEx zxs`v!U56M_3!D*b-fW8dCK-2wfU~^;V@#!ZvYCo#%JmW5=@gz9jMmOCWtL?&zFjCI zv3usoNVZ&G7#EjZpP9uvjx&p|tEv5s*s1L7l>W)rB6YfFAXcJ{xIQ{pqwrK!3w3nP z>!X|4h@GMvPon=VvoG^lzd)U-9C5Fd@i`k5Vq230{MYVCG%Iyh_Guvow*qG@n};>j z5%x<@Sh${Z0vVAm&>0Qk(R=s3|PIf?^YD#asttosfUasPA>zI>{;IIPK^k^IL{0rG=?lpGg>AQ`xd>lc4DXP}9B$-Fx3pqx!#)9mqxbTW`+n}_a^j4*PX@|RH0nN{$Dx8q!8i85#5!W~iy~puw zFonD$t$2ow5{;L46Zt0b;AJsVOnA&_-{S1&6jxWg2a(JKPxwuri+k1?okwi}{gnTn z!M7Qdq-B3zRrep+>3yAn`J6vuq|GVl5yj92k?~sL5oJieYPAaAlpu{ zZyh#ocV~->>x@x6uzXY6tN8Z3MGCe+X*aWsGKt;HQE!Xh>y3N)8LKE>Zxr$VM?}ll zjg5TqVX^OZqX!?bP5k*fR2T_x*1Q=bOg6d_$h-x4{_QVK0_Gj^b=L z?PnC{)ua_?R;LwbSEd!`VbPmaC0^cO#2UAWqZ^E9?CbqTnEandmESD2wQ!%->$p2d*k=V^a1M*G< zX$9TU;aVMCCSHHbXzOf8n}-R1S)b@ZO%1D(Ecns%zc$l@u`?ysQ;|>@4Rv8=eWD|N zG1-nu^*iE7SRwyZ()t9-5k%aJNOh;0@#<(ySur4DHA8=#SR@O#2^9v`FxY3oRVDeL zr4?_9cG5z}JuZiuFcR%3LBc?xeN#$AI%yH2@1hvxjshwJ+f&Gzopu8d^R!TF*VHeurophRb)+qet1?Lq4K3;_8Bt5$<2-B75>X9?#+0H?Ckb!-N=j4j-s3 zg(FShTGzjV{@<}q%znqXLzKT`?BJi|iaF&*DvzBnww2>J=2c7!-!-C)BGKht98Q%s 
z5wqVlx;NdUn&`MdQTDD8#`D&R58pM~JMS8oTJk2X)My`-8m23t%XGA2(nelZgS=z< zZNspI-AuGl+KCvu#^K5yPy0Rbj^eqPnqxJ8yM<=^*h`CpC50{@?g>k3@xqAo;;A+1 z#Sc}d7tg6mFJ4%Qm0gmx`<%^5F2aRL>t*=fsFu`iZ@wX)9@nlA#IS^kE5^4Bhsz(T zuoy!~w`F)9{;18K4C(IYa>Uc`VFJBCd;m2HL zEYi4v1MOR9QK=uH!frubW3a_RX+{<0IuXS`iJlks@y*TDHe(!~RoR@nV#kBg1L!F1MJlFCy30*3hS^VUBP` zpC!_I*ujAjjFGbfnC|Ko4X#R9(~9R-6S-QF7hBSbr&l?O7w0Dzkb{|+bJu3S)o1$#;jHF4xU8&#G|s>Zp|4(vKxmrX=?YYctV zK-D-{N*iLwCod5QRY!YknGRRHE2VPa;CO3cr1T#io9c(?py7nNGYvK(o6`rg%JsXdJU;uSRL(U23%rv8B1@VmECbqKlS^r$QUy^8X># z74Jwu4)K3GB+HdvgS!LkW;m=%>SkD-NG$f8>?j~RT#J#&Y*Ydz)EFG=8H_gtx+CIs zINe`7Ou_HZlsU9^m)ZO_H*`j6+Q2!sIT%oJ%(Cp;xo6S;V%+g_P!zTlJK|e|_{yxI zi73wL>#d>LpwWhAN}1d;fu&qUD7Pu+Zgw|&h~hz+B1hHu!#U&zO%HpbDC^pF)bd<+ z;gxw9=|=0An@CwX>yDpE9+YtU=QUH}kztijnGKmqckm6fV!csDvN7->BZp zChWC7{t3wGJujsecx*X&{r6!6DoMhJ^r>$A7gRt#JUzMnF!%@1_?(*X)tpwi!^e{w zG|YeC>Sm>)db82dnJr0C!%O^;|Fyd*P+IZo-9-|&?52?PSxKhUc}?}~?Eh^_|Ie`p zBomk97~zVmR^yN(qkjc;AnGaU(5a?$KQ@4}Sv)pBt#}f|POqZAlUfoNPoB%}Mx#Sd zz-vP()F%946&3T^=Fhj|(^^NuC9IOM|I%|Wwmt(>6Dq7vRZvsKxQrrLoR2f4w4Pt3 z7F@FBEHr2R+HU`iis18WML;D0N#fhh7;5vbF9b*N7F`IdWg$=_%Fla;MZ`P%ft?i;S;#6yJxgI3BN{q*zZ9dIQ2z>vcJ4Ki=E< zWn;!2z8Ehm-1aQ*Ab=jLJ}mJ?Ui;x+6ce^;F(^_w;;#H%R@+b%+h~i`_NX0=IujL@ zsTy@0-DQEQjF*j?x^D4YWB~6^r^CpO^&}!iLoL@e@2d~$B&3$4$I71SeqN5dm2@hQ zWJw*El07%nf%~C~(=$`|rr--SOdwmPx>7KhL6L#LVM+C*WXsH=EWntBUVxIU3PNn$0!4&u)^K`0o{dV*Ht}t8Ffz}w@`HPfVV>U*!5qP`OF|AGF#^?x%8=~zJUx`*H z|68In;YOx$l?NoyoORk-Ahu6Izi)#2e+3*hi`kzTf9Jn15}r?t>4sZO-(rmCeHV%C zTQF_4Jt&%QHSXumh2oK|P`innC7#;~&8*Q`;^0>7Ki!e{;Wi`D;2UDZpzTI~-s@}e z=yv0+?wM%!$@t$&{KW*ed{I?nW87n)-jn94I5l1VyJ*QU+(|a@BavQVv@tG*Pf7%BRK@zU0GAmYs&(;1fR(J$6CM zrS&>7W|tAiPj3{I+!NjgD>8kLTDB+rmG{sOr&bw##LC@9;648E#GK7Sf9q|B9kVsN z%ZfQnZr}4e?72Z{`Z~{3eX*|o_GhG`MCnAhbEc4^FghRcx~!*g3rj#Z6dO%I>Gdui zwemquth(&J|E%5x86Di z&>!9X&0K+z6{Z2x6!#}2m;HJULlnYJmIU# z#f06)Kt~-ydrO&^(DzZwG+B;8DbMYJc7+5V{)y7PBDw}~xTj~5qfk0@@wV9{j_o$~ z4Ha@%7a9L8tzo`il!?D~igV!|U%vhYyUZ6Itx#|Jbfq}G$4GHLFY6=Kf-`%xojUgf 
z$V_%YCnfwzZ%sKQX|O+NFL-HjibfjN?|Nx=MZM5q^@}jxz|^W?lk0#r3$q z;vVQF7lGXWTi2O*BcZw@ykGn5?J-d?<-X@_r8&hQarQ;QumeAp%q|)f@jFTaDx=#$F z434By9Hn`1Eag4~vS(*fLi-$i!cQL-g-?YqYGbeaJ$PxFlW6@b+%Ne^Ye7odjF@p( zosaseW}N3GIZMLd*R-i#T1j}7CS~|YKWozMKGG$SbQIlu)Ton0MZAgbP#?9AmuhqW zm3l4Rj+5f;eMT!@ohtV1Gp6z7V??k0#!Wr%-yoM^t}F{xU_kaJgrGTcT1m4pu*)G9 zHyTE`@A$hY+HcJ5k@_4d$l#WBjQ)-i7ahX8Zw3watU%?(;N|Z4+$P+P`kBEOy&*0g zG@j?*zb?cf<6k`JbusI((S{FLFNzKuH{IRyH4K*}BeFc<-C;{B33!7BFC58`f~P0^ z*z44HtjnTM!0`O@ONuId<}n&$(eu_Nqgj>&+y^(vcX`PxlZEYwag((rM5>!U^YBL} zuZoN#M%;J?qIVF8f*q5PYhxo?9^`EMmXd%wq0bCP$2Q#w{lfYN1J?OheI5JrSH-a- z#+=Xq&`R}X!5HROt`nJ6#x2e>>p=DnZWiKcI3(l$_QLmlL(w8v)q`;3jd`Ga0?CvE zC0*Tszlk;r`-B`0m%P20mgs}9V8k+FUoQUJ#VNy;47d5?9{|>g=RbqCSeI8t(r0)# zA5iY_S5)}+6c*Eo1Marv9*=n8GpNkXS|=`jW(@3blVmCRJGBzDjOX4lw~SXF7o$JN zc~8PCV&UgdJEna@H%)9Y{PpS&$UdU-?;-i)saaHs#U@mAjU%fM=DT zUHKhQt{CO|Lb+n$qB2XR;bMLbz2sey;!+D?$_S-4Qa{m}-G2fbz%=J}b( z3;u!Z>OhRsr|$En)3smjC?hWv-jRY&^AY;SDHqRtY1sSqMwC#t;Iwn4%8JrNWk<81 za>Av%N&)xZr_}wD#g1iIE?q>Wts&pNDX-D&FNt*d&hAaw6aXUWNThFvIa0hBe;he> z@7|QJB0pZ6Aa#ho$DIFHnBSA)hZwm%paJv*$w9sXiF(Qg%HK65MDmuTV5=f!J8rIX zZG8zoX|A`N0x19V0{7U6lr5@Qp5v7KL?txSU#SkpdyzxPDk~sa95$)`Z)L=56 z|TT3duhd!u^%+qR-6RwkR)h_>`7`*-!VU0G^#^!QetxPUZAT1vT6?k`Gemx@2$xKQ2Z^ZXz4i$=9Co^MYpPQs>cDmIW)piqE2 zCqgc`A9kkT{p8t`n((K!fLe3yG1LU)js(};>OT|SKmo|jBkZ)}#j+$K+4b-Pz8yTz z;Z#@D1?z{yqiCPnR&Y^nR_{C2Jk2#c<`kBugJZD6jIE%-&yVCIj6aj{M6#;M*W`!a^peOv?3)Dge+YmXD(U1#QMi7@$7dSwS*yLI^ z6Kx6gOVvA~#QzKhR2rx*7j@TNXpPkD5yW78> zKC$;IE4@RbIYs2oTZq>~x@+UrtR}kstb(*_^Dewb=Rpf%goQL*zN9V4eo&3@&CBpw z^}p5enH%N5BQ-&ssm3uwVzOxdHG0IqR?u5`95$Y1o|C(s>e^rJ3IJ-GsygztBE6x8_L=4UOu7yjf3xh=$6 zNH?sZGlZSrfpIYrj?{uB5l}L{nXI(Sh)y}@{0`)yv+(~0ADX9PcL4v1POTl?Bm!zB z>Bz0?GdW~Atb?mDDpn#!TA|0&kSLGMr2ew|{?~9ex=jwck6=qu{p*nHi@j=PgwpWH z|5!fae*7ji$$1LA$N^RNC)BZ$UE-~8a9CFcJ2H%$UgzRuW#1`3>;iZPK%DKR;zlP# zEeCQcMskohrOP`QZYDdDNXb(JEOC@;ZoB(_sQ1u@c@xBIeg97+%=Lvg$v7=I;@%#s z(nv#t*1)BtifP{(vHxEr@aYTkO_0<$ag??}7yHV@6(*rfrJ5Ht1ZohZ5so<<9m-{* 
z2Th_sgoI{TjZZh8?symzZ4*hyjLyBz(pE2N3B7}2pnDNowm^G9u_RKld^nN@4Ry9& za8VQo{x4{2R2{M3B&D27#(~{Sg#dPgZwg#4uEU# z)U!hjUkBWq=83`I8M8Ttl=r{G$=z9T;ya_4u}g#=H}2*mmWi3ijgkCHq4?xDrg+tb zqUJchttl@QmhXW@g(8+PvrvpDj4u@VgtkJlp3q$&st79zL_Oh}0)Y$nyYlP;;UIJr zh-^Z8fmlOW|Cp#Cta?ni3D-X+Y(D_=9~1F};~x{5gt3o_B0|e!qMWd1si-F0y!4qL z0i{bt3}Mz%kwKWaROAsxEEQ#hb@`%_urgoN5tikPh@XIY`67`pBVS|@#^j4qLY6N! z6IMG#4Pm)cSWWEV5iFbck z?EA&|oKK&%>4jg77qEOAlK2QhR2FID64(8CAZCO74g|zx{O_lZ5?}(JJcIDDpYt{;k7x z96hCskq>AY9UvnKf8YwWx#+gnF`sQuG5J&hWwa}<7rIYuyacVq`^5K`jFfQ8^$aQP z#g3~+m`MN2XlDEjPA`^TmuW*T#}+4t>p-FSh=L72DdY{wV(TNC`C*!KP^NJCjzE4(h137^W<&Y}YU>)mUZK#^1G8c=u zF1d$9`4yurU;3Olbj4^F_Q&<2o#N^hqqWgZw7QB>=@oJNRU_h|udZ(}VYF5-H5f`m z7WSkm=N6=)QG@=uO+$9cmw&z3t)c?)@^Kx-#J`M=t&X@mUe9x%=-@Frg-vL6T`VzT ztjFjuym}2ZIXrvQ6E5dGq3U>?lyaw8BkZS0N|DkCGc}?Wb{Z_aji;a3?lBUKDdI1W z5oyFf9nPonjgevpF8?!re|japGF$%i`@DNd;xVKJ%R;+*i1M{Bjqr;1>jZri2bIrv zm@_J_Ijxn95u&fb+wr;%Vg#=C<9i|`xwmnHz|*e<{x>o%dosm$zNHMpceEB~19&*U zSBWs57H)&b7`;R&=iT@@6`#Ft>+4h1l6GBW+B>#Vsk$tdB2Z*uYbs!@yeOV0T6%N% zu$Hx6eg{DX=*wXkw&<0t=fYLG=C)GlYOZsfD&qW*t{O4Pj}PF_su=A@!>>=5gY>;* zy7q)AS^o$ZBQ3l&cdJl*PPkZX;SuK#;UvJtKtt# zv#BG})dSv!Hseiif=`6rAjo#HEs(c$-gP5!D{mmK%{6iPHxf6YQ4n^1r+@DKs=$74 zafA4K$NT`JuAD?Hg^Bt%gy=zwRxGSFNgrEwzc2{yd_2H;m{Q&}66hL`i5n#3uootvJ&s z)mCuCxKE2wO)y?BDeupNHRCh?k?}TR1o0mH*_onu5RZ>|WF|?h zr_mDMvf%^Dv1Qoca^Ef%2l3vaZQzS(SBag(;k0h_rcZ+S9;0Rd2WWL(L#KpTh+%v8 z;~8Rg2*1_V3Lco%$yseb*jJT)6miRtwQ~2s;=baCu4n!;@(o#fq*k-u02Nx!dViQ9 zdNy4%G~?G zUP_;e;LqqGk;=YKgxPp!k!a&7{DH5;6E@zXh2K=fR!51TO%rUtPZ2w9{MKf_PVr^H zPXvYXDCaZJQ!4PcWQROgvGFp)lHyw8FAuiqPfl;aVRkfn*V?Bw*K6Q%T}Vyv%!z{6 zIe1-sNqen;m)wQCkaGg`>0ZIPQt%Rcl4~9|lFsMGVJ8j#sZwMI#5{%qV!1^U@v049 z@0FO~xhi&r^7sLD&*?ZWT)pZs|Iy}a$f)V4oT>cDqB=HN^lQPLHowVKoAuOhaN1Rp zDR#8rw^&bS`bxqwnXO8=FEZ(btPmvVU-;J897_SjnqoPo!pc}S!;YSSN!bzi3Ng7Q zpWs*OVPbnrya<04?v{92Y!|jL{+zkBy&1+k@gY0Kkuct``5X6P3o4}$+RAG@;WrCw zIKMUJDbTzP?QPgeK^EnQe;Na|(|Mf9N2@dWho3~jl%(6;7Haq_pxo6a_@# 
zlT7!S@95CH1|NGja}AA$rg3pnGWOB>$PLT`?h&}PrN1Y&_>RPQy3(u_5{4JkaOfEB z^{m2~SugC!qDbzb>mX{71Uh-4T%kW+pswv12>a4qRD?yZZFoGA1C7$$6`5kz?VT6c}?EEgi{w^Yp6q7pg zLjKce@l!|Q94$FTekVSSSB(|%?ak?3OTLQjqen&#HffXX) zF77g-#gljOs20{w(QNj~eP(O&&Y}R%4zc|%K9KL)Auip;qr1Mk15M#c^fnxpVWQUT zZE`P_?%X;3Do*9$I;uC-e%#cbRmhS- zb&rn}eeUI*ox_m;_Zt)e#!Ki>2lS&-!ue`E6s)0}8Sp6>Fc`HZa&f?lPZVb&?4|J8 z8!!VD61+Dc6E@1r-hi3fw%J00AF7~aPoa=|{S6M1k3PxOZ02To6sP&yy;RKOo27e+ z2pQQ9Nu(nmq@6B4KPb<=M0oGA~4l;t9Nc*SDf5LO;?Lmd)g#qhPWPNep;JdL-e%J)k&B9Gbw}@Y|zAErpr; zpTf-Yg&C#8eAiZ_PUJo8@3y6Q3`fBZOKor<^)h^(g8xmSzcwfdSk+dnn#hOn2``Ip zC-U|YJ6{I3oXVEq7#L}qZ=p0ri_l5Dd-I?V87aeH7~dOk=>y@I#AotnKM)%y@mu>Z zLDeJ`RN@prU=FOOnk2gdX2P=U%-qPOzcOo`b*YDiX0n`MaN$Dhfb^}R<9)nuL=>2; zAO8|IXZGHJ6WB1J2=)dX-zc)~582NUFL5Q_4n}+QM1FSdPvC(xp}(UpIiq}O`hDhR!Mlm!CNXK}W|qk4Qhl_(uRj9O)8!2;ehwZi$L;s1lrKO-<2fDUe)CC;Jp~qj z_b;-2T5xKyD`w}x{uKiQ@*vH+x&qe}1dRQd_)7v(BwlL;QTYG40}oPeZ{Gzt)dHWs z$+LV<__T1*W(vCJOUipqBkvem(--Wt;2(!_YAFZ$U+!qU_H_9aY4vp3D1W!f-wOGw z=)0ty)`OldmGZX@ipSF>TK*oFzm@X$y%tXLcttvBCG6>vCx2(k-#Ga@LjFd`-yHcn zQT}!{(Vvrce^jie%RFh%l)pjpH%$Jj1eMDK?U$6bp=e#6E_c{AB~Im2jW%Cn<>cw| zt^D0Be>chB2SdfyX}qU%^*`u6usj1zt#aXAod!IrFPPihC#OKASf#eQ%zxpH43x5Q8ddNL=%Bg zv$jd2AQu)kjOK0xq159+h8*atwaN3rQnH--R&D$|r*veJqZf%USx$P@wRUB95Is!G zfL}hj{gvNlvgc8@5Qfr{Rwhf*M)pKyuOpr~*mVM8$sO%;$1vo-cH>-?NC=`Lu6+e= z*h%xUc8&Ja0XzpMTw8DSF19p{zjTAtmT2-9AdZ7Umg3F(V7b+Pn8tbGQBW7B1`9X?W>)jjOAoRr{&fd;Ar(4I=|d@YA$QE??ANbN>ckanx$ z^0{Sd3w`b);y1Zv?&5CuG04Up?Q=UQK$y=h>n9FwL|#UIF|UasGcnPvIwc;PiMQ^X z7sSq)yoWJW{5BI?Ll20Mhj`o8&DK*v;FF+RFb8wL1_!`&4mC-+425bh`o`8S6CKf?}b?Z9_1HGnEn9}cfhpmda)U*9m8LkV$;X^~m-hfBI zLl1?1PZ<^vTcDbv!t9Z#y)+NJ51wM9qy#Y zFExIw@l}maY0T9)UgIw+J}2v;39U3<)e)Z1sPc|+KqqX9#zc*4H5O`oPh*wFa~dr% zDg$15Y0{0s`&R7{t#O>jDH<1Od`ja6jh||qqDy4F#$=5nG?4bgF+Ed{_HG0FXXyo3n`Mv(@wY!bx3(+_RDC7TFGu)>=)@t`tUO}2( zsNKgkW@${*I7IXH(C&EcwrRIbXQ-2=BY`UayR^r8jb$2FYs}X;L*ruW^vZJ{lu5Uhbmgozxhu@f(dB zHU3lMQ%-GJqVYbBBQy@s7~5G1z8tO4t?{VF4>gu*%+)wiqeEkq#vqN((^~MC8Y?uu 
zt#OUU$2I0^oTxEbqqhXvT6~2DN7+&?(Mz|_)ovS;belr)Xb$ffGCo&%+Q1}!_?Y){ z)_kuhr*jlm_4AX#vimC>U!&+_<}0k$ICl0!vmbeMwtafu)alu|4?Z$m$6xgy_@_*s zW}iH3)+19VE9Q(Jq`Y9}6D;nX!#{M6FH;d|dCV@9J62<&#!QXb8jCcp(YRjYW{p)E z-5S{%B{xE2yh0}%uL)Ti^E8%fT(7Z0W3|S5jkaf%;24dkcPe+iMz%}2EgB;<#z^#M zta7KoM$zlxLvp51PoMtqBRTUkCOW;!>(ymWkn-KM=VgAYAsp*?)N5huc%YxXOZL-W`?VJn-{ip}@lAeI4l|iQ zsvKrcRZY}3O`%<5`2)%w+gD+ob|-3g>4S=1@vy?GY=!Jmg|?*%>ok^*QSQQW* zGDS$#Sk*_l^G~RRWv41P8>X;IqfIBkt}!u9`Dbe^)tH#B=%pI-M<}=LE~m0o+@%?G z%F4VrPSM%j3Ntm<=@P5a39ZnW=2Qv(-{b$|05^`nTVjbnDgQcMBL6KRPA@}^4w$iA z=h&stwnFDz3(VGND^c`(Z_#Uag~r&G%0Kb%8cP*kt9upB$uhJ@nHE%`(VL@6Z&B}5 z0_t`tw8tteuT&VT-H93_ycycB{4+E?Q)88+_hKwt6Y@273Ro41RU;cyOTX$tFK1b` z;?{R?T7278&S)S%2=oKii_bpbEyIb$>vfw)d1Z;EjlCx7P(?D7lcI>fs02EIQL3uoZp??>M;38`%kB>=c5GoXz(|W0 zdb|QjK-77kKnj=$H*E{hg_bKoUJ3eDNd= z&y5ADJljZNt3FWZ*-@YQGom4m6 zc4ENe3xgjP-YFzPfS2bd6oIq-n6~ABKOQ^&gpnD&ZmkoEOCV3;iCeNlhn*dAE zdL3fy`#jot#wVIYdOKhY=-$TyOTLNEEc)DOAD^^=FIERM^EIx-Dtzl~2VXkzdLt$Q zG0mvasE2H2nvp18fj8pIB0kDeXP(ho%-qPkI_p}gI4Gz&ilQ)I6jZ!x;BK5|iejy$ zAzuXe?4Z+QPD*3G>XwpE4LD}TQh0bDnK#8KNnSpp$C>yj;Tdo@P9X6&F4X!K<^&>s z@A$qR9^Qv?ytx6R;v<36Onej~`#-MFYp zz`NI9y4F#Zs*20VV7JD z#u;aW0fZ7#ZesKbq>^!%1{&hag8RSqcS>Mllj|cUxhjD)kHSNY>&4mkdCS{#3Ad&qs_rJTWcZ-Qcr$Q!6 z^3R+le?cz1xwKyh>^KN!W$-I&1UwC+EM%a`KX1h0XAJ;GoM2J)WS$^&ly!I!ug z*c(eEsT7LxO^-Q|^;&8L{PPeN8#a{6e28sj5?c5tNf-Lrz_vYLut07R>z;LnWmobaQDu@tW#)a%aZ@>( zAIkcT;<20%x9;HWjrQW69sE&e#@{&WlRG^xH~L|@5m~xa&A7d^=-60R4q6#rO^TMs zGKx6+5VDnf{xlhFDdy~WTl9Hw+T`5H(Kizf0|T=G`^3gho$&x>FL2Xrz0cs$W1`0o zPJ-3RG8++zngyEO$2b;NR@9L1UZRyYpruk!wx$t{h-HmvgF#y_X_y75)TBy*AHc=A+YEbVZ(avBjS zLbRL*o&)h1(NsP5gOyda_hS)0T7ROsK{M4i#4<)F07LLz*I@?`jnNJbLM4DfVB$}PDqnBC6Ut1ug}F-7tC+~T+0d}x=Gid+-T*aKa# z)R%5#`hI{LWC#THckh?%lAYIVs-Q!}}U1#W#C+2d}TKJ7eQuHk=eedwI{^ zr$|1^>>N}L$6&t*7vN^}Wb9d(5b1}P@in)Yf*_%qT!(G8PB>qmc1pasm-qFOGx{*L z1g7kiIKCJ0YVNAOjC})Bb4oO?6He0LmCe6=&48sWPgT zic6Ke>wSsXqH}+#DyRk(MNgfsF!q?*uxkmV;jy!J_XpC{YzPqL#l~s(3?Pj}^MSPA 
zR{WS)vX77GN^w*m4tg$(RPjgl4rEcrB>yIL0abpPJOjD^V;}Dvime1$_oEp58m8uq z=(?YGpdE|I=q9Yiw7#rGTyNIG=)r=sF~vfz5)H;eJB6@LSplqlTo`NdUh}$;nxHC6 zMPPP7rhf*-zDDP82<)ybm4XvMdi)+#?sGtTtTUC{`jB$>0FI*~Gpzp1I;|PA<^*SV z3_uaY1+h$cE`_PY+EdcT1WWhhpdBR|WRSnv?F%E?9N?YqqpHb90{da0r+CUxeDrJp zB5!OnkSM!=R1HQDliOC0f+x8SqJ*K~-;YHEdA?A1i9C)63Ts(J>ckM_#Rm6j&(6jGx z#k;LoxxW@a9ONyX)C}^idL)UfH)>WeHmn50DfZgr=rZOHgWQk9TqYVqH+DA&L+M2r z)>szhyAdt&mS$qkA>Qs*O6z*0b-79!D=k%NZT^%ZbO%yCBYOq1wnh^kSm&RuTmJq- zyp6L0{&%iIbz^6e5@>cuwhLgv#vg_Xq1wGU!xd{#yw4)gA3ChbZ3O$X;Au@+aK9!s zs@Vpz^5<|(%JbL>A^sqL7L;ol$^ynTk;DpkIHVxtHrLXKh83+Tu*yuZIWun@=4~cc zyujE^FXCrQ3u)>=$D#Uyg;(Mq~Dy&#EiZ5SP1t``l zny5L92CrM zitEff8WH?P35=x#qD4{y6LsTH1K#tlqD|f?Q^3lg!%Q7u?Iz`ZXtVMU?Hs~H?hzi* zp2j8%q|tNfBjvl{W5vDqWAVlj-o6d-)Ewn(Hk6ks#%(})4r~=a9^sv&jLq<;XL5z& z3Ev@|EOv*Fr&ROo+^KjL>{2{W?~?HhqIj~v(|)h=pADoZv`Y}{f*X+Rajlv44@+HO zjjAl-PXzy!eTqMQzxcF@ckM(~7yQu3ZYB!WKEe0BfwRp8tz#kU_w z&!s9c`ZL~X9K}-!KFWKhF6~)BD%m_B<-AbSNBv8c_{(3YVKMP5HROEtjdG90B#eg8 z3CG0#&(P0 zulaQ}Ui8zg%GsdZmm!lsMyWz5MTRKDYc;Od8Qln^3{+@0z0kbRJ;zm8_Jg7f1X3YQ z0utBrKnlx$R^xHX&v*k$VIa`^y!sas4+|9SEp3`x1Uz=j|Sp#vaPUC6rU>syZwwRfYCsDwgTx*^|^N6{+lY8 z(Lj2(0%^kYxpv?FJN<1eW21reYz10?pKJH+b?V@8G!WA>Fu)Q-43fau@$;%AY!`?D zgH|(x`A=bt+I-Bv#4|^EC+9>Qmo|kTZMVb#^O4JVU?Pn7`9Rai{awAHt_F>&KQf53 zpnidJ9u$MpH)y<{)`Vz!8WYGeE;4o!Wu{8Yd<~rnHI?G zmv9yfQ>_E}++yYz=v?eC#{LNta!Cno;HJ(78QGWNf5j)Gfm_b8@Unt`;#FVb%x)tP zQ_z0=T1N80_c~0`CH{cSiHV?kcw8W>0Yg*let}7i{U*1O7s&E*Ls)iPFbmA#ncg>M zKFacS7;hM#Tg?2Dw`o&l7;Gjtu9cwAEnfVR59yPE3wGDSWc{T}xPcpu@&sz(8cnqL zir?C&4F8?-CyX~tL$?a!qlpDy@%H^P@E2!mV4^PTcpA9TxKE(LqTqdeVn4zSslh+7 z^@0BTEFI45HjplVMAs2N{<+v72sPN}Ft2D{pIh{)=B@g~wlLV8Fjd;Wp&Nn_zmFzz zs-fIn0siK=b}{UVO1IA~-md1uoSAJ6=7h;7f7vjaOZOqrRuGLAk-Kb++!46(_7yMR z(WcUUGiaN&Z1Oj|8-zjYr+BW(g&T1<(UFTKU-MRh)IHh1#B*QseuJ{1@Q?^5?^9#y ziobR=kQK6z)an@rjdL*WKj2~Cn8$O&j6DA z5jT)u{<%+n^$p}_cp9cZ(lKho7kYD||`0X2L-2Ez=MB9mJ@e~d@Ats7vlpj!-fn`SV%#St(#%iwt!CYu-nXk=_6-IqaI zqyv$^*=-IZN9bvdM(8ntEEE5?v>m2W%P_k`I|Z>$)2yuBnC4Er;bo!K`PcAl`R+lX 
zQ-znI!U<`bL5OmZ&np8}gZLgOpIZ(2uIUA&XQXyVwhd$R& zCz`W-@YN-R8_-}-Wv2+zEyQ=cOOWhpNXn4(%}9H|Nnl!Wyl#`!}i0uqDIO$lodQ1X}rvB)rg7KS~jz|pEss*kCTuy(2{ z$owBO*e(i4MR3kvy1Ri^<)xhnVB{VNQ>P?Rr4 z!k~mqa>dXec)PYxpk$GS_b6QEGZ-iOJT49F)MeFK_Y`=TNn(<#;1 zHyr-B!KTA()Df86;&-Irj;bdNmQsW)Xa=)8GBS`wqTtPyRE|nozFbw%;pJl3kGRAE zTU)b#{Q~OzvU#j(KuuZsYJ=^B*-ZSc{aI^NaKMQm zNsDQ6;TB(w5V!&#Bh+6_B2w z=f#4bFsY;PnL2!h4)0xrtbSgF`3OkQQ6SkO6+n$C0~AiC8b85hO3v@ED7n^kDkBN&oQg0_6N-T{ zLR=(aA7f>0<4GI}m+q*BX$P04#6NVX4w#I;A$udLVd zy9cxGaqU<;@%0JpcahjNTCDBy8xPU0H&oEWKze=ylC9~Rik<+Z=Wjr=txjCO^o}CY!H`AQt}2BjrLpTc^s7RMFE*qxS{kZ7MU~ zR;h@k+hXzD|E@~K6F>_2Par+p-xVi+=IwjYi=zx~wVT(P)xkgNJ+yl$o^%A=>=rer z_120*rzTOSV%i5vYWq*b%wI5Kze=xk_~^xBdc7#M{>|x zy9TkY80^{~4P>o$HDS##X9$=U%woYe0ER);M)v42LDKyeXck3Flp`k*cEa=`A|hz) zCLz}X!QUiUbo-UJdytmZ_1|E_1)kIxGj(o%k~j1)wqS=!vE0asv$3$zwz}puAyq*Y z>ZMGjP=)XmHd}{X1ib50mE)~DRrpA8(;42n2eGf$?B41mOy8y0bAj}{vP+!&mAAhi z3eBv;Zi8h&iHY)Ob{i4?EW+r_LeZ@|whduzk9J_K&$VMMPv92xU3M0n+mZQC>%__t zz8a=b%P_gc18&r0C00r>mE<2lZ=@#D-F~0Jj_EMu?{lM(Vf=@3^|f$~-?#b5;b&Npy@@KH@QVZE$`ko|6c}s7iO(yFu8@ZmUn5J{UzprFg2RN?LVf(6$#t!Yx>JrBaf8O>(AZZ>axr>eO2xXSNeKxz)poa9k* zqdpdl9DdurR~{EA_jsEAAZ|U$JGG7d!C*sRYIGt@Zt>_z-fjT38jEf|uhoFE$gL7^qE_*GYDLf~>@AV}&EWa;r1H0(QvPFs zl#{o$`%CTadRqC92a><>Rxo?(Tm)NpqBVPAOe^fRw_tLg6R(@lzCo;;(ULWI6UEx( zj!G%wL~y64PG{oZXO+M%=TrpyfHHz`3k#16XW{RK)>*5XWrt)2XUNeE<7*9gZvGQ% zlTcNK4c*jK8}fwGn1hq3j$=Yt6ke#!M^6f>v(yC2xqFsMk%!Uk?s$HzR|&CQR0#>~ z7{oea7_g0QxVKFun5e_r5cZIvhJ^N%@mp23`t0#+iRVO5M1fb`6>axwJ`?DyMKk4W@8;T6Ts>wxxxV*S7_y{XU!? 
zwGjKy@b)p3+6)A!&O)<=5{s!$ zHpV`RQaro6sxWmxO6kyU%Kb<;(dKu4tL%JbdJVp&yW*(@((`N&k@Gt`D9MdDjzba9 z=JZrN@ApzXhkJ?le&=0fy(WSuxwrEFrjPRfvkw;sPV$a2qC6eZr+pPui&(|fAy)LO zqs2cWssqnxROW8@QG&1ZQ#=vOCI-1drVYI7~qg^bWE~AQ&IN9OOwPh9H?G}fb zEOZ9d&QX+fPX=uf(ZEaoW;epz;tNFk#1O=GMKus+-npp8aXUR-28f`uG(-*HYzj=h zi~xREVK$_(25z&ga>!bCi^^cfp8l*S7BsX=P!-SFA24Q_4QY7iC?2O`G6jxJKuN-w z12?2$wqfQmg=M#+8NrytHl!ivW}cn{eW{MU0};cR12?20Vjqui{=~y&zc127`sSUg zXukkbzqTf-q6zJV3)6CgS+{A}+<*>8JFK6rql0Ux!(<|C(&t15z8h(NZoR z{A&g&|ILGRzpoi2F8#r~_9=I8R+tLa34DP-jisR*k?g{*!!;f;;@`Z}kc{D+MW>-x zLqVCsG<2gq9mUbWuCPXQl4Yie5C4rrK{krB1eh`{%jXuq{hPPykbC90|D{Fw=CH#%^;MS_QR&m;T0Cj~twBVF)EOFw^0f<%LEBu!t#GK4ynzV%gd9T7cnmIGYbs z`h;kIp0{b`6|^3-BQRSa=*S!~;yiCP!s{2Ai?I*p3jFA4N~bE=J+P`KnoWG+ zGv{&Ey-4AUu$NzRi}%j+1;L5)m5i8ne&Y8n)Hk4CmIY&eFvd$|TB1=KCyan`McEHnd|e!Zxy_iv$`uZNLQiz zK;O;#^y>zDDGIfNkJc*2kF5u-#zZ>^T0Ll49esTJdqQ>XDZGm9KH5ppvYzCu+Qb*q z%a7GPg}SXYG4=tm28`JzS_WuEplvqMvO%i?ZM}(B3R=aJqz3zcFVy(I7izHod!dH6 zH*ZD{r|17S7itXpe}AFIra@Quwh(;XVSgsbZ@#fwEC}-J0C+CQuN&agAiutVKa{IY zuyhSmuprp)777sT*GJ%^tx#p}Ejonw1;O4UgqSzo8{+ql;dJu`D!io1>(>$oji3|S zVCrEfOoFjM9XA8mHVp3%*zLfaaGVUlo)7#11_vLk4j5|35KcV6S7At%m+*&9P#1=s z(8)R@GB^l_!%*N1;5*u04!j6cM?BF`Xn?s4dkxU5_)FNVE4-oUOV}AE9(KYWFb>!W zV>O*{5X{7rIHAdaWf(?=!E-Ws$lnN7GvfG+NfCQK!Q#<8it+5yxR%OJuo&nGJ!cT6OrnC z;8WUO2JCW^sv1%bUj`Ec{!+sJsC(FBf!*U!0X z`dm%yiCVqhnHPtz-a-#&l_1$qte?uXEB zVD~BU@&*dNr0DzAEEGF<2><>t+63%`A+uqA$2`vl%j<#9Jp>FmLg;a{=O8w z1NJ)LTaQCQu?}?L7c0O6yID`wtiwvWtq-k437zE<=fEBfJ7Eb-9PET2JMohY$H%}E zFcV>~18#->X*TQ?K(8Ju;qz6&`g zN}Xh>T8oImMEKSVs9V?xKY}TO-K;-KdZeZ6(Aq&K+yGMrdpXc}9i;`k4fy4ojM@K< z5(0WvI|(D-MVAJh@aA_>|7CC_!r|Hsp|F<%pZWwvcMgpUxNsX5XRzl3Yjz+(uoI^2 zMk9qi1K6_?BjcZF?Z6fX(4}Fw10OkzDuX>6xaTv-hrJ3o`54+9(T}13dsPn2s)eL- zSn(ZldLFq4?uK~fiIR<+oaLcc#8rUmVHt1% z2?ZuX&8*G|2U&L1E2b*o?=UeJ(GGxV&;yf0GSI6NMi>lLuZ`d#>;O{55l)$>R>1Q0ES&bNdUciX@r}@pfCYC z;Wn5=;sIWOkrS41gWUo{_C(;L+MW-bfO~H)()lRY_ICF6xhtJ8wK#rP6o4sDG~Tc zH0mGrY~V{>LHD5UfnEhK!t@xl3(yJgf~kPLrn|v5^?()#>}CZoQv9m!iG+ggBuk54 
z20I6b9heA16A;4K-YRi~V_}G%3H-aZmjd6{_6lG?AJt%Oz*#W<%#URQUxW#R9T!-! z;J&KIB?8CxMg5adTp-2T#wri93K^-E{Q;AVz=Wgw8Msi!uYvj)sgZpLvj}v;KVeE? zH>-+~%GhFu9xY=N*K{siTNzJlvp+IIc{21XW zm|n1(l*puV*)z}&O9q|r1(;0O%{pS&^v9g=AVsn}pcu9qc0vctde{laz-)%ytSm-K zWVb=rs~U8|44Bif6Fv$ZuFJ5SRJWkh1ufLu43@|786xof?a>SxC@GyhzfvJKWH>9zFX(}vkKV!u(G^*kTG&X;P zDqY-y#)9rb_XfWWxDBSB`#G6OXH4pn%^QP04b?Bg5}0_{aYGtgJq{&9c3?fsG}zhQ zXb&)C&jfD02MrH)`@QHf_am3Eo7K%o746GRgKYtwaMEPd|4}$HCqtJ4hIGydx5L!I zPB?uEhEJ$&5k3SH4ZB%2%&baADrNOB4)730PereRop3D7EZ7Mrz~sZ8>4e2^8d@wI zxB-uqKY;p%-J~BTb;r`P&|g6(+zN9JcEZjNDm&p)ZO09HY}3O?G&H?T>S9v8Z0#c` zQP2rnXQTcz;2_Hkm`vCScfk-HSL-p)JTx5GalIbH)q1QHcEaCb*28X66KkM$W>zmF zwX>gL{Gqo+=z)oV9q43>3ZOUw2Vn_JJnSabGO2R*M5IzX4xLl4}wm(;|Y{#Ag*XJYmt$5+5f5PTw|lC!Z3W;DWodcm5ZP@ zW`B^ffh8`AP@rHz8mMTq5)4hiST9i&FXbXg3~rGc1gcc1v|!Oa8x#mdFu_={mBm6N zh{+lnt&xUSAU29&2^A$0@p)$!8GpPy@0>j|JC}3L%=G*AaAGZEHQ8KZ$vXBF&qQ_R z4;03m^_En0 z3ray#D%$#uGzz?U3GKw2P28A}?qHQLS?ArT{Ap!vlwC_X6VIa#))jBq%({3nii{>U zf%)}ZPJBFs`5bAaOTkg3m=*Kj5QTS>GB%t_#V~`a@WIADX4*D}Pkan+d7q6kO-llv z`-r2%XJE@N4gjBmuQzeVm_x>wq~vBVHSOX0*YE+x975aiY1sV}cam;JctwX>v(#@u$=%)us_uP%bv1rTG6+X5uU;m*#_`bS=EN z8#SvEyp~7f&zDi&%mvg-9dY0>zLAlxalwRPOw9ii}B_BQ^SHU z)x}MLdGvcuBjw^VD2?}JySP~}KXh=-DHo^zNP{BZ#Gmk@L>Q-&hDW)WgJSp~y!{kA zk;nKS!i2l&uYBN3RB;nnQa^K)l#4y5Im#fX7Y^v*DDgo!;#bB4d=zdw#|Vl~z|eWG z*NBk|tV=pa6rSp(QRBU|jyTd4xh#9>-MN!b<@YjERpAB=Uu?gtG_n zWUF#GZ7=~^{PpAm$4yGuGlW1Ei3#HGs2rbxTl0y@;1h7m2t6U=;h8WmBh00fftN<| z{1;-uj0%`55q3m=bimwk7Y~^D5G;xEAc>E`1NQ_>h7Lmihh@%eahE{ftS;CrYTpMYNx zWY>d?KlrY|{I?T0L(~zwQRPs^f0+!b<^yrmByVMZuw8-qf}((_qmDQcHQ~ill)}g0 z-$-M-KiICoe4i=2V^T*Pf_m`c@HqcmVZj$*cZ2Z-_wl|)I9r^EhU3LjREUqs7ZYuj zhr5yc7gX3amA7Ha#mUoXbb9-N8PrUlytFlH7~r5L;z^Y-p~ zfZTBd>?1H$BN4GxbIOYZ%jn7k8l+yg&NxkFqQJUjG!AhdSa< zs5j1njH2y1M9uwCcSYZwr#an}iw95QYpNv$_+~sC7(xY1gY({!f5j(QJH;&o!DjQ*=$KkCj zX+-!CT#U3s)hf$+alvcsBz0=w4x~DVq2XZMzk0x;tXHnp7^FI5;RbY)b(Oo7w~Fh3 zMqh?mgbX|fR-bCbd3 Yuw*D1P8RNs?JeKS#-sIRqn(fc1@h}isQ>@~ delta 70925 
zcmbrn33yD``v*SvBq5e$f(!{+CK1FQLL?$&NVvm<*cGuAwF?qUi3v%_^cupUS6fP3 zRlZ$pC90O#cS2iLOQ|ky8oO4tTJ!(B_s(Q(fB*mU{5{XO=e*~<>v_*P?^*6mX0BJ} zjhfqigwO#g-otgye~)ybx*E(|ubaelx)T~u8OPfSO@vfFROsB&x4N6|5y6w)bhiaO zBiwYy>T4C%|3|s$rU0ILj_FcYhBT;q8wnj%bp6X@I?;V)NPX_ZJfp5mK&cOkh@ZQw ziuN?>bfa3$96LH~v`+Wt+kgR6w+_!~yf+JOV|3FnAW+$V@j6{Wt5gk36$}FE0#L)7 zzY8pgtpgcdvr1U;t!9p!GW}JZD!vYCsP$UxdcX`qSO3?TuIPDRraOm!qTOpA<(1O> zca*&dr!_7tY_ z7oO&NQ^7~6(-Vf0mW5?PS~2pR@`0HTt2UZ#H1mVif`uvk`)cilYTUQ_OE&3$+_%O$ z_DK)Ev&Jv%r!D;Lnrm6J?!28>2y3#15BADpixc?`FFSjhuxqk+3xWOo;jXvzz5=_r ziEs1CWsSP>m|7t$bQ2$5YXCd<0pD3Gn>9A^=-T7ht@rtc+S$UZJgm+D_EQ)BdY$R4 zP8VKWrx$ZD9_?!s#_?BtgM7x3v}ctOXhi+s_Eq2GOMP3{d2=Jv&FcD8|4RGqNQrjC z?y>x|Z%mzj1dk*5C=EViEH@Y;>-Z8pp5S#gc;r|<%n;>saRcD3#+&sE3qj=fSNVH} zaQ5*Ae%8w5}p zQ3w8BgHV>zfgf-1lx>RQ`vSTNuke=v$5~2yempRg#kAj56sQ;2*mnG{pw7Zz9us_; z1x0dqV<`KpEe|)g@E?`{DWx8sqP@3})ck~KuX)xiT~QJe_*7#DwznX2I@}G#>YB{KJqicDpM-8`7>$3d)N89EOwuWC9R)t1GV^dYN^K;J<{1 zGQS93ylPRkW!IQF_Az&um*MZ08LdHxZ8TUX5Q3otI~lj`p#;zhgdp+r6jVcYP>zMYlqW+fkFG zGuwSH@iUDFSIrfr;bERfd8;P=!o6K$lU{27>Hxl|=?9^O{h3am15eVa7^l^260-VyQ%zLc-=BAH)?RM|JY58g zP_z5_SEk&)$Hieu0Ma!tRMTSAMe^ciGA3QuWa5&KNd|=f#bz-U-V?7&C0>{ET9GkY(Jf6=mkZ69RyIZI(x zi0#?Y!?Q0BYSq5#cgakr^ors`TlKBI9%+5fK#azy;@;6?Wimg|s(tk_0E^NEWovu> zcdK62+dH!Z+Vj4xd+DF|(In|kVmzYyCstUgY3QD8gBnc-mguc>K{9^=$Ie3v9R?P{%4zz(4m0pb8bUYr^))N zfQtaAChOUocWT?iyBeVBiTXX=p|3}8zOrqg(3Nj%+q(8aw75Q}wyG%=?n&a0+J>O8 zc4P{RN#ZjiLj)7&k-gbP3;!`P6qzdR+O$|?0li+J2jk46bQiNdpi@tK&DNrAcvy6z zy@!Wr9}pJpFra_F+O8#QXW^^bg|UtnzPDY75YMl*>*M`ZFCg~Ngu2*^cW7@!!I1VX z{g+ZfAE~=1JdTnHs0066(2Kv@K2Yew_qT7(ZuaCw?L*j5k=KkGE!^bOqGE;4ymN;J zo$mrg8S}v5II%Py?kprq7chLAFtH^_7{I)$LSCgS%0;PaqLdcq{!GX8*;(+1thmlM zxm$;h8llksMCii(3WUy^U4&-!;`2K+5N>c8C~?F)^boix9V?=|Qi5`;C)Y<4O7mzZ zO6TT;viV*GE;mINmtIu91m#X~eV*xjT;eXi&JTf5C*mC=gNstTBB9YGgz~BUNwG@k zna-%srl=+>?&&q&JH}GCT63nu;GK|ph>WI7uLn!L6#4d;#?`L$fFs(yMw}UA ze;LzBh*i}c1meo2Un|Hy3iUNp&ldUIjzPkAT<+MZ3+6jfdL+iAM5K2#$DT-Q?_yO4 z#PhqKfH7*2N-jMq|)uTUc^?)JBQbf@=)Z@GJ1yni_0V%9OE 
zX{0G-Yt9M~oyVvOMJSDEVaE06A+Orc0ydch>eK3b#k88pvo#5`B+pr#Uc!$mj zz8z=^zu`-oSi*)-s5RY}ujm|RwIveltiuo;g+xw4dIkhq?E*G*0w(~t!3Feo0{a7a z%?13ky@uWyKra%OX#S)D(+mI_UBHXZn!chmAgsy|XwkbnkM6RLMIYol;)AN6`b4M8 z6YU4D>Uds!Fl((sne`LiCB6yUe1Ph(s84tS`Me5)~qa3p0_2zc8*%Y#=% z7KpYbYmvYMkrq9+Xpimcl*hvob5kGghcvwV0l3X^@PuwSIJ@DDwIty@>N zOV2;<7UG$wr@~cj`PbdTs`-XP>8!Z;LM|j20v00XP-^|)aO8=SL^4D=#GKPb>-4u#xRcO};NR`M%8L=?R_K$KHHb!f?+vsNpd5X~XL!2Gy(=468K1ElM8e zFK|<0F1u2PpG`~_TJH+zK3ZVg&U0&zP|rv;tn+BexAthw5)SigJ(~JrWy3P6h*^Ke z&(fO_INH5FisSXfQSA3G`8=_S(1O1wMmIXkbhZUy@mUU6U4N}^aVIDeb;IG1;u6mC zmtxZqHvvg+V5S%^*5fK=yb&q%@WHfv8M(9obJpK=(%T{~gU_4-gkrNE4QRATN7+B_ z`s}4xONo*vfw`uOi@61Oi`C)Xrv*G*Dx~w1EW?nFm`Ad_gkwhG>oC~|AFydEHGwJ z2$%J@PkIZpR26nP2WII;r;$!lrjF=XLQ7GL-D?~2MIXS|A(aFke7U)!-<)#tSH0B^?<^h!%-R<$X=Yl&g!!g**?N47Sck4TDPDdGIB zq|ez$O?aQ)?bwVaJg0Xg>(qqr!=q{w{t%Bdjd@s~NVcXif2Ge?Ebka^l^hsJUC-S7 zZ_z$jP;x#Z4VM!LPGy$9RtnUib_qdFxL2kAD=kUo3zPjCH9bZ>syg*3ebz{a!;vJN zM6Vi*LDeEH2#ZF?QJ(NGlN+?J+Xx1YN!gNKMYJVG7q@`VI~_hN?)=e8{W7=<`r7-v zM&EYHnUzbgbwlaEuQBy>_Y^YY?x4r&(i)0V?|9L^#)yDSiM9(g4DO}g;soWOKCHKw zujm_D|F8W5BuofP7VY+2xL?sQvaMvO-<03z8{pHt4mFNGXTD}}KknJDGuzydi~TyV zsSWueJUTVx`}##O_lEpQza6Y^DF3j3Bnt@T*YLO-!u10p+0GE2fJb@=pNB_$2>)!r z_blR2C5tzDzoNxQVK^ltm$&%$1N|DEKKQQ|{{#M7V)0eH!Jr0FduW)9NzthmA64Gs zQ&lSEM#4jg43w@H`IJGC$;QgXuXjfA?-yJqUwoph$x~~?ZTn)=N?jl+m0~q1|gU5R2|X z6g1uI4V_&w6U4P%nd$ZKeN5;H`a3kVI1SA?7RLVv#w80dW@s257ikyl*5Ss4~)Y@NDmF8Q8^6Ve_+fQ zMtW!%_Yp{7OmOK@IMn(uf{YpUn&RW&F6$fF* zWbSw+sD<1DW4l<^J@WRI4kE8;6ZYjW`0Ti|+&na>W)1~vG>0m&XZft5E!}QB7x)K5 z+p_#G__sq_`VaY#yc2_$B`iXezEZBPrTNL@i{?CVSUqM8;8DXOjk_>)Q_N{}81^Dr zT#FWJ5%9yI&B3YB8EzZa)Y=b|BW(l5lg$vD?)w9j?d!slkvB`j{LIpn=gG~cW15JZWgR$+BES^jVi+G8VRJ5%N%Y#YuP1C~SMQM_cm~&sB?Fk|lX(o2T zX2eHc7h~QPF+}LI520d$w0SPJ1sYqVg9LOK_F>vckQC9rEDXv3U;=<7qa5!$JVL)- zomkR5rBe>W7k-TIPx0@N(TDnCA5UfVKAq0=tcSahR)g{mLnIW-6LTDCXq1Z%gElLr zRU_mHe!S+0z=?j~o0RhmV#4pS*M+`ZL^CSH_xt}Seu?xS)+>>Iqbhw107!busnX{H zfMu5|{ib@F^to89tI}J)s33h1X6Q1~&(V-o>HF7%^o@r+1!1SqKO!lrW}X)>l0x4S 
zAnOnr7Un*P{}ZHqGPfvg)2zLwF3J0xtWBev!w>)f$-6~B1dX1mKmyW;u3_(s z%9aca!;;f?!pV2CjwPE$)-;VUrI?0~9FYRQNsl zyPU|+<7d@!QNM>EK&g5R&W)Zpm!xg>7iH4ICP2#gkw>%Q{@z)Nx50(?6X9LtFGqLp zF#+Xb;#x@^29W~IP;aFzW1?2)vSjyP~hvVSi7+8utF}sSy(jGy^xRZg^nngN588v(x z5EicTZc0=P4fc{>gP19{a3)cEpQvRN@V?_h+SJws@li~V8V4eF3{Q8E4_@aPHilKW$& zdd@7JSGpDpy7+8ITD8RNXKC&`g<{tBvgGCY3Dad232D`{{uI*va{CV{u7m%wFRQ+J z(Fhf;!ErL@57DKiz#-NcxiHbU+P8aDXWCwu2Tg3t*6rc_CVtfMEI47bAg9&=v1lS1 z$71glkr-JZ^*2KNaEDHrq4!ooCIFK-?>X-?BOFnfyjfthqPvS%a)1Y5UwSN_DS zq^3%GSTO%;QeQ7EOiHT;W{rY*o5@kW9%@jNM!QSZ5k^XdO8JU-0)h!UM=QW)NCpjTXcD46ncJ#Z; zmrNN^eP?3*>F)et>T9g_Cw$0^#=avq2sBAK3^(1J<>%b^su}2cRr#J7g941u%q%5EI}Edt zau`HcwGM8KPr4FRAET%d72=goHwq}%que4rau{ARK5FJr_QZqlpBcacJovSlK`g8r z|7&Jb>*21diV;fQdk%+waKTei-|PgA3_Z4J1wP6gXMtJY>?9Se&W1 z8MBkp10%$&4ADo_cSFkV^&yprT8R%z!qpNVlO9MsN9}YU-jWXcK{Pd3X`cSE2LJ_(g zc_0O(mv8cO>D{dB--U?enzRa*{>+Ly1OS4Yk#Hc8>5j2ad21UPLf%UQEe>C4IO{9IO(Kw?3^ThI1#Sw{K)D*nOzkmOd!%ZQJUr6ofz^0Ixbzu`@%?%dV=Tl-Wi&%M z8SF+OU@ATqqg4NW4Wv@Gd#xtW3Js+4w0k`!D=*MM#L8tJ8&w^v!3)a3R}$Q!!M9*A z7x}U|ts9wNqGe)+hZ)o7!!NwB$$)9hY6*)!foTfzVRfP06~)tI`1;KoBuVEBk@!C=TZIO)6}(2p$BYGCrB%j5hf={OD%XvGKI$|UryySP;QY{i~`6aEgeE7k~~4Hu$w9* zqk{AxhG)&IJLVMPlsVPKn9px1NX;H7eFHLesg}RWX4$Pmz!RkGTj%hUR^K87RH;K% zo_$Q5mfgAym<3g?QW@$HDb(pebuRN4^Mb6W&`P>FHN>p_w-ic((G}F#grQB5K_vlH z3Q?dk!jl-`fhsGhN}7uiU@9XjBSu=C-Bc$TbsQ&R^Relqo4JHmkjc~$t9^&Dg0FqO zEerjXpL^ZN3f|%mUT@U;+*@dz45KJvMppNPwf(hpHR|!Yj*zfuETtU+ggEERyUuUz z_Q78QPn+M;Z5E!J=C^X|i03!++qhN1v)Y0-ZkPWQcpRRa@tmzh~fcdA;VR-)!fqP{{>fHf#EoY2a zO83b~jy7OXt}ecgy;WiH^(wfPS-b4Vk{HP9+GQ;Wew)#V{rDz-nbELHFGO<6X5M7s z8rJYlesE#KDqqzCIGvo9a8+2(R z25nG7%?Tr5AY^D_CJMzZv*KP;sn*>`SS?km_GIUv`rtC(o*BmuasDu~WrL%fTF|*K z+Fk1`HxRBV@+RJ4QERu&zX^QeqJ5pWz9Hx`<2t@Wtb;|VPrTOUR}ClLE7aCd$ABL` zAaBN*8pY=J6|{g?yk_#zi-TC;CBA5}U-#~-h@-ZncOXeR49}PgAE(8dACQZ;BBa;6 zscKf7f0@c_Y2AqFlB9dUZ!L~tt5@>+S;2MRUkR!7+1=q8=B&9s95Ud!$i|woM=Gx0b$tNIqxuZ!gd%AJ}ogQOGC3cR`AhFjD5Ny z3RRA8q-Om?lqMU^(#Uu^>y*9%mU0h=vtrDbH16oLUxD!xq+Qxl{T+fi3|~D|qaa$U 
z48gschrDl2`-Fo4x*Yl%()vA#!CAM3!?!)K0gx7qg+Zu#oN6ed9)O+9jQ8`h5?< z)DyA`c7A(F>-vTEGXAJ;YTLbvJV9U+Z@M&%&3V8#EN#O&J>Zv@wr5@sc(vRp_RW26 z!ejk?J}nciCPtmR(IhdQj6zLS==@e*G<`m+J z(&`%dLdl-EdIr%-3TB6X<$a_8OB#r}I)L+V?*t9<=C@%YY^n_bH=UH2xwQZukydpt z1tP`%Tf&i~_@mf+i|^hoNThr0-Olr+HiP#fBia);(r7>L}Y;7=cAWBsBtwp>)i^nc%&Q2>l(Z0>5epp7ZhF`X6Tq@}dd+%lD# z*4Azu{EAxW@=8yc+;d%60S%IH(@f`J$&T}N%bQz|)7+?|I(Dy1l>Y5a%>Hlcs1{;t zYP~xugl?4q5=f9j6+(B)0Er~XK|n<5P4#k^-Ag3U#~O%klcCl?0^bVhhNBSN99v*Rb_;+}C+~KuWN3ruia5Elje&CDnNcn+( zfk&es_+vco-{wu%M6uns`RFwr*}U6){hAIe{x-jYN7dV0|7Hhv;T9M1u;1c~-i%@c zZ}BhQJRO*lQQAA5>cQnt=<_71{1Wt<%?>l$aXGCmj5tKqaY4-yo&mlF4GF=C)irJVnkLL_Oig)!HC zFDy* zAR;l1qGgN<)~o8PVN1BfdosYcTu zKbTU_nuNn)v((#D`3d`)rf1!?6Pn(LFtVau^bYCKF|JrTSGP{x=Tc{&N#S`eOKSI&YhcZ!cs z&gDNG_T`z!t6*NFz-1Ly35wkYVi^4kXlO)`l9bPPZ0Hs+GXSm^(y=e#)>vX<*P zmyS^bR*q1{d_z5_PrWi+ojqqFI;?UpyG+l?j7viY)0H^&M&*1QHIDNt!@!M7U=Fnn z?lH{S)UyBDsZ?ko4?m?IsHxo`r`v=AnfXw-nkevtJDNeVHS{aJ7n`e zHnpsGbqDA!I_6j6{MyZME`8EQZ-jg$zJ6jRKl@35;T!Da7hy}s>FXybo61+Nufz49 z)@AZnyve6uGgDvw&!-K2HeDjGM<>NmtHaRh5|8*Ss?Kr%U9SE=q($2l`Mp49NU4jh zPB^Kjl7pp$b{t7rhoKRM@G@)4g&G0X*@q&Xhn1i{dlMp?L|Pfp8fVrMDERIo1Wk5; zv*I?V@^|<8)tv!w@vEW(LA=*EN)OJ8o0|$xZm5rj9!e}Q$h*4}O!aQsX0xCst*&Y= z;xo6s5+7dHDEZULxJHX4nC%^}x&YlumSw``}+qoX?4)&==^>nhWz_;#eq`y$ItbtwIT;MnN zMSB6}GV3Q7xNxw(H5#Fbdce3ewfq5N6R<90&`b?gHRy1p^YnXq2hj0GERHwyIX{wA zd%6d0v1+HI%?{^w57OtH16VxSJpvDK83dn0Tgg{(QmP*C>-RJ!lr)c*r%>HQkus{$ z+B{sjdIOcTmKilwZJBq_JzOnAa~rix+o@=U;5szAJSjr!{GOq;j*j3Zty2ZHDFQu_ z!AI>hy1{$-{GILe5zbbsO2hQ5Dg5Bh0K;PR{Gw|m%`uCzXVvClyXvxp^Su79&dmKh zAG#}wfy}C1g%S08gL=#cZH3tfI!N2J1*UpH>@eIpLoM-$JUrQ9m~w{yo*%~deD33Y z0R^t+`1K5r+1s$I9aD$u-}|xRDeK?E&y{PA-5RRe94nE|bDHY;MAcOD@0FTrI;CN% zGZSHw`hFJaI2~)-z23kXiDG|Uv)5WGEV0)hprDd6;&ooIyN^D)texsigMF+g%bKIwNth$bdm?}? 
zQ{&9p^FL=pb0bguB1#|c)ZT-<M+fgr09|^nJ4QXwZQ~1pclokzyLu6xn4RQ0qV zokybiz)3S2w|NWQn%ND4VQmyJLS{w&o~5z5gel{KI9nH65m5-1|KM zUD_TvOxm)d=RELWy_o2u<&<5iRaBLoigccOhOTOSUimw^MB`(lRE@v=sZ`^$l!nG{ zjw-A1APA|vGDFjNEESb#yblPZakClxw}S(l?1E;_7;YnlB+?TthD$DMiNhm#>Y)Hb zA)>CL_e(5sn6g+vtZ&HH7Vr~?Zq++wy5zh^30zRethsnx^(_j~DFD0kIE)Dgm7h$V91jDozL@?D>2-;|v*!lxXf10WV zZK{P3v~7Ba_dJr!#ti4{j`-_`I89ZBddSq_2zL#Bd&x?6uLa-2L?4w{Z0fiC@sTDh z;Q+6H6kAINc;BO6)rez<_ z`KDi!r6(dF`ID{^shg<=f)x_%XTTyaC)m3tW9xmp()c4j=0tzC?F*iNVk|R#!2?gW zXTOf&15VCiEBEsAC)=^uG2HD`A69)YA8@Jz`!1i`Perk}^7(P(Cgk&%$c5+gsMFZM z+p}x>>A?b+ltI4 z*tRiz`uRZrjY)zIvs_ljb6wg9vwb*uUk!(0^ltv)`EFjTKPMMgS2jhT^WV>#SlvE6 z`a-A3FVWFh2CmK5#DyG_V79r3saNuQ!XnFH@Z$Dn2S7N9qfB^K+#AXKqYFl_50S;Y zF_1&)Ihuccp-TBHw=Y*_W> zH;{eaDE{Ha9yKOtRWB-om-9a^M%8FSdE7Tp>MrNeg+ZRrM&N}7<hR zgskmNayvKm1GRm)9jD<*S;xXhjwl*GlJ6~S!ZNn;9}17Kt-bh%mm;l$lL$xKmcOEH zp*^Op2O8O@&}G-**y+~r{lT&9##%rb)Qwute4wJ1qWqHLaA<*fxod>>Pr{5pIUIX$ z2Qk?3_?!+=!}U3hs`C0@*XNVJ_Vz3O7-|C=q0f0#MW;J_*QdsF zTbzG?zVhp^2J`V^Sb9B-J4D8-2y0_yz%cAv#a-*i3%{Okh+T_uCHs0>dtx#d-EX6E z5To2TQTol8Q}b|j3L}zE`QlCf;WsUt-RX;WJzC;IlBx;rwqOKM+SdVpCkPjwaaf`( z9?JjzCWJXA@!%`|ZJuN1T^MAx4-_o1X?Tsv?)CarL6=UekoS;Kj=h3IL&Ko0!eh?u&}ekMJ4a8i&8z1Kyr)6zzsrJ`;fdOw=cSWsxqZx4^un($Fzs2<|cH_h=x) zDu7(Z96_5klGnFH&v96^d7Ve92+{#+xYUQ=|F&_pMN}xo&FsSiuKHX5>dth+0(H+B zo@V!IO$;s%b~sEs1eK*>U>W+Smw}Q~X*=$flvEm3rqU*=BpF(&LL3oohK3{rW=cTu zK10!yf2dKad_;}n@!VnfeKY^!YKxSfkVL0nz8aRH`yF!;V$o?qE{^f%;_Z6%%1Z&= zY-vov7x|R(TOx_$RYWt)^Ffp*Ps_^4qiZUv(|LVIWj6-#uHQuux(1f#lylj*Fc-HK zo4I6Wb+&$sDrFM^Nj)6&+DL#|Dn-ppmu{v*>@?hayYAAz-Fn)Y9-YVwzw-}%XAcls zsTAy^sOREJ6Rv8kNu_l2OMLXTh5=1g_6@*Zw5?Yw+15{>a@+b;-s4)FXW?d&??@J3 zcCBIKlv$*Y-IJXD5<)VhPf?%9nW?68DgDI^{>`->p3y2^vn(EVJu#sem$1(%Hx~;! 
zlXRlEKFm`MBOaZudELGYA^8A|YSaQ45DFWBI|7qFs7+@K>X5eR*lqsl^>M7@V(xpR zThQ-|NZwe{Hs2FJAL=)_K6K1D@~Fe`!zP}7qwV;CUExvMEg0;61uj7rD?|LRTNq5| ziI{zmp*As0qN;W;2_h+-`_pxtXBN7@i#p?-bppF1fm>Zxb)f!=0vsc*R&U~7H|rUb zH<6?&WT2Y7iVHn-7f}VZ-NX}awr)}lC77^G)WUia0C$c^^_Be`4zu*VS$c^+vd+Zc zzS)kAH}S%o2DbDLzk4$zqQxDMhZ7|$f5qc;2iu;>V%CDN2|gBmH+4uiR0mbin@se6 z>C2wTc4yH` zl69uNxY7Xe40o^%L553KR(Hc#t17Y&?V$owl|yZLpW87#&TS;Y4`yf=s;=M;RXJMN zMy?bakJV4Owl&forT-cU$(mK>elWT)2 z*8-bw%5|dxtvVX5&F4$xDy4<+*d>=n%c9ci?4&iQA}wEyR_790 ztGkrQr3DnKT&*;@)~RyMbINtSf?V_g9q4%FT!~zzv}%;frP2zusI)qR7TGpcIo2AZ z?{mC3UrXK9jyvYhoBeELLPu`-IjHL87#vfjbmlXDuFH(~xb5c-0h`_iDqR!P=gffj zW<7V)XH7uD<}oOS-}t#*H49a+c?8CA|6fu)ufK&GJ01oTU-?U?1|zYTQ-n}b*9no( z7(7pS^rG#Zq8P?E-fhZj-Sw^SMs;vnq1M3-UR~P){hk7wM{x(<d^2ma38PKKLZP~6Q~JOs}Z9;dtTdw1J+qAzDo$h4B-2JXoL>n0v)UZRZjfs1thm}yP+YxCW#B)pb1--y?eQjX?r{6(jGieJQCd|C;=E%^O~-ylV&+lilh zkxrM5-|zTQnnFjITX$(I!m2%7>1m~m6ZlilWGi$xcoJAV}J#Kco*F$KrB zqOz|mcfU7~J)Oaa-AiDf&fxFeYrxj7<_GVkvY;6};(k5mwVIpnk9HfMD)3A{f1l+X zJ(BL?c+wQrDV1NjAI6qh`QP`0SX&bhcreaw#dNJ|{wlSqgQ{Ls#!cs+K4{9eFXUGr zgtC6|{O<>on{J&339-$96A`hAnDxVL5i647dJe4Buf}RTUn~VrygioYNh|oRWBJ5CH?nT+d5ynP-PXLS)j6=FtXz{{Sa8^-WiFGAV;FU|Omwecve8L#$_F)C6+yNyeeK&z*rb=J_j z(BotxK6l}8sHnF!)Sza3zLQ+{r$a{L94v|8ZyNJBZ_lyCpX zz~sgJcKL0Uu5KuET)P*k80u6OsQ||Y268oPI&wd%gs%G(zUOr@d zM)0+c2kb=3nq1*9J2_nLB?zJHgW>XYL8$Nh&tz)Cx^O=Wz8gbr=RQqdF9;pj+~M*S zL1@F0ta3Fb1O$$zn(7GC-Lv>W+LuTditx_j9ZJoXa#tqEtiv$*Is@@K!(;~&>iMQl zBI5PXq%OypGevIZCPcD_L*2y-_!^+Kx^2csM zM92&b)aP&>80LppCFvzeBi3G18p3k4a~n6Qq5QX-5YEJga$|SlHJ0^?yuw`wwT^s+ zIIEs%()&~uvwnF*?~}Ma)z_+Gkvvw15&=UF9_Cmj-2-|TV>jYB>Awp?qYAwjA=-QU zSehRfvFWs4RmH16K{1YDd&w|ZC2|<6Br)sGBXQAR^l7}A0eHdVxg6mk)bXsf0Lm$D zugJYTgusTsg%Ho-nE7mRk0@>PdQ1;2N$85fQRQe;S@ID6a62>bBT~ zfJWQp(f+6L);MsGX^_(tn~db#Fh%?Kjq;5uFh!rx50i?~{}$mJlr0^AK8i{xX;3zdBH!?J0ydyb(nB z_%?=UYk!r}Hm~pKLGq8l4GD!emcR5A8nBK7<*S}TXZF_s*{>R&`v&9>!Bb$>#^tZA 
zE_}^c;>hg`QFpc}U213FJ&o{1V@`sVx>{z8mU{YF=ir&HG5{6T1awzsQ1w1JS&uFa9vmt}L?)$R;|kN7x&*gKeK51}LR>?3)dRZKKb>N>|&vMid=Gf_xAGO629D zJnzjQ{=H4kqX4>+`0Jvs;k;Pt`oEj7b9`$+%exLOh{qAE1siNTZ>1C z(qpWnQzUs&W1&mhRr!$5UZIW7{d1-QYErD%kD`?Z6tG!`29KK!GE zPP9hn@EdYS6VUnl@5*!Bwup5-`(I1gx5t6$2%$K3*!Db0N3TH(L z8xi)wOtEQ|+UCkPnhF7KWB=00FPaKnt-%_cKBrJTtpbjlR`xlS^{Y^J_fM7SIx35* zP!2nVYTI4N}do~vWy?%dQ0@}GmPHQgAV(HK2 zqUJ&euTG^!-{r_HTL>BKOn>>47Q&c1v(=u0@e_WF4dF-1>s{rLmO|@V&QXlU^LUKs z>mSQ2TM53_IoRG2C5)|zZ|A^Jg9PR4XT-P%8sCsKf`mMK!o5o9iEiP7vXw&j^0ha+ zYHQWtioXpObf}Hn3IlP%*-EK&yc)AR;xXR2rx@wnH14f3Y zvJ)@b;w%Jf0VV%Atu8Wra7gE?#tUba)oN)u9UuR$=@gBq}FCH5Xk(rK!&LAgPti%2vAhTS)^xsi51=KPxn#yP$0QO}-T& z7_8wcr5do|@@G^FXET*)&nl(8u>x(eB5grQbk=qWEM@o+-Gi!yiBlLjWo0Dj(k6*k z6l|_+dbd)B2^AP#rKT=#rf=|30o8X)+EXWGBq+U07=Hdc;oy{v2A&Qr$u94>x`40qgr4Y*ei5!pQm%F*SM8E& z*klb--c{*(OJ6DOsw5YbWo0>8X{-{Iq9>$=%Bph(RtNte1WC^dg zej}s{N=R8=8momaWV_+VOd+(KTKy~YN`4}Djuh%zPpKM{9<7|T_f)n)ol@GlndL2} zsCF)=#?SbMiK=v|8fIyPkCLLX!jPtnaq{YEt<3Br7rNchq5`u8m6^?R)h@@(PkF4W z>`ly`J;mm0)J||$N_N9>Yk4I-D)VK|+B=0(|G~QePQB7x z^h?QKdQ{>HvrEZaRkz!$x==YIe&DKJPA{Xe1xlBRF0%uk1KOdH8ws~QB!(CAAQBJY5uG%UuEA=~! zn{NhR0@ z24StGttTj#3^ ztm>y-YorV1%dA zMI}u~OiIE-jav&Bw=$!Nt(2X2MRqBb*$w>{cFH#}vl*`pVGVulfyU~H3JNYeoduV2 zvZi6lTSj8M4yGEUT>VGInju{36M`~XV?`JGQk|@ZRA#l?iHe!Cenn0;aI#h~;u2^5 zG9u{|0d#3iI0~vM?oMX+Dx1Vy$}FfNv%tz~)kk%Cc1JkFmk;C*Ito$Nr}xQb7nE3) zb~$fd`)i3aWZ-xKwKQu`3@}A))pFG*I;)qdhx2Ik1;al7SVra{BhfDnac@7|4}wPMc3I%SH=O zPC3!wJm1XLxD0dR3CcPZubf72m9iv}7r$Zn5iA%BRX6+|BmDxu z`bY=&&;P8yP)A_bugFIS2tC-UD{{?&LLR&Qt^DplVR573-@;_EFJ=Xa&2Q1c&*3|G zlW&kGy%a~AJNBMF%Ch=$;vnIL_X>ZSdvHyq*IXsyirjm!(25m|mlqC3tG;Y1Zy78! 
zGhS}0wyMXf*HG8yYRgP#TP~}b|75UGtCr<7UbitzDb&Cvi-Gk0uEtBEXabElP;I=x zqgvygSNOkRb=fcXnJ6@7-yX@IGg0WrSpU`X*~x;9jY`WOG)3?c*xWSPD^2j17flsJ z)_;|JcB=4*<)!5xm?rcP*p{^XDyc%EzzWjxlV%798M~_1pJkmj1uEbdhhGYQ6YwKg z>Qq5jfM*{(U%~G+{0J60UC@1oGnq~Jc_ZzIAHiy+YW43U{T#ol$P?_JnS!qC3_;fp zzaNl(h#$cWX=;7F?zdTj?j?RT0WhTHd#4LG8C$tiE}Sa_%GKw>`UyMbj&rd&>Aq8b zeJ=XQXFKvy7$mUe+vP{|gm`9ApN9M%uM1~{nv*}H>zEYLjQ&gxTp+y3PJb%zUH}5N zPvzei2#46IPvkurLJRixC-P4jf}Nc;$+H#;7UR!MYt|2G8BNW4g1TrpjvJ|qZn!Pq zS}1(WUJChpt%Af@(Rw*?u`tOR_`gbB^vwINR#s!ik}f)PtE-DDTPq>0DUX!l>eq2V z5~XEfs()ei`OkIAuL~<6e_M1czg3pdmANHg6KqzFfIrpoBL7H^fbY%3|CK*}sSwMU z+jIE?o6wMb|405Qn~=tW=Bk?lSFv4b_Xvx`EuOu%X`y0sL;j~odFV1BnuXnwS1%Jf zco(VbXszy=w(@t&gdmpKM*ee|VD;`)rc7xgFR=?DtYsT{t6f+s7laGG&2XOd$FlbdVS)GmuoJ=F9R)3f91JtNZ)G1g?9?pmnB2*p0?@$Q%v~vvk7}Gk%DEi5?}^JW(RfzE{gK#>HEa5dMod>Jo1f zrXuh2o*M7vxX(kEG-F2$`M>OAotar=BT~``SmU}rdjVw}m?bJp^?#b<{}3tGDbnBc z1)7}e<;01p&|c};YHOL*CK=(ADE*cYJ-&T<8nqUVQRtgV?UU#&((dx0H9{LJQU|s` zWSH?Q@zSqi?4{X$IAiQ0;?sjzK3p?7PM918Cft9Sd6TyO=h8<*M$w&*QG|iU>V}Iv znG4(N(mUi7%xmu?H$Kml_0nxtuz0B$cQR(y1DUcodf?;~%)O2sKaj+SK4#*m0j;P~ z^*vm+DJvKaMb!61DZY#=m9-txqz^T%F#%uTLK+vIwczZ6auThYQ&6-Bw}7NI=%rR& zHXJw|*~rt`-|*HPk{@KA{L zoh7C_T+@%P8NmfCQrK1cI|7M&@uMeRGfct(ZsJw^Zi_Z+qC%JQEmAF~Vv_E#QYB2l zpIX{$feOhkPtF|>7B8(S01xz+=v=yTQ^tqODs91weF;+c2x-km)HXT#C1)3;^$;bn zJAB($bW1!%C;lGh*psu`Bw3~3a0b3tUn?4Jc7~~gA&m?Nw^-G=p*9qNBALO z{4XOsQjMVc!2eGR41@*zoED%j=B9bW0yC}E)b?}NrAH;HroeRo)f76XDSTX}FqSbz zTA&jv%@_T#tSxeCz!spc&&JohbQs7JqQ{0!z^=eQr4y|s%x~N+Wxf|_yv*xJk2B-_WeMJxl&QMZ z`i(%6zH%fF$u39_r|#+~fR~QSdT!KbwFE`4;z*8wBf6jH z-U&U!Tg*D_hc~X}OK)Kgdjx*;dDN2_vS~&b<%yq4L&3vRj>`An76J!Npt~2T7U@@vL6Nvgj$ID{H_~v7WQvz=C1w{m z-3Nm(8W1toS>-HN9eHLcJ^TRK(3F`Gq|5M5p!ZO5&E$fThaE8az&nDUb*3qEVJBVM zIGi{QF~|1yn?1~e8_qbW&Za>Ukt)XFlhk=*+8s%&;W&wFbFqhKJ}%CXPFmJBx)Y9G%x6FcR!Aec-%Md>!}2ML+yCQZ0{{9Da-J+39~78TBqV}8plp5N2@w9 zE;xli0rQKXc}1vM>JDML`_0Cyvkt;{ZOzsL858*o_GhEr(@<&JkXilo!3iWPrdYeL zB1&CB43=f0gi#SB(Hek|-j04oG4?q=HRm`^m^057%fGA>{2O1VmN=^{K0;s5cBH}2 
z(R>$`&Dy?;+1|$wFID{{FI+D)47>LeX*HZaiGfDJIZ*`uiVi;QjN2owaPeIJ{`CSA zyzZ3tA-5mpv+oM^y|BV}_30aTFxwh*O-cy9&vp);ZxOJ)0e!XuO~d|znGemN)ad|$A# z$@lUjKM;ZimU3Pmut|8~b)!^o&GJE@PcJy*tlB5?GX2>%CHqvq???Cm0`tn3A8!#R zT4(QpGt$Zpe|sg9guZCXw6?=}>;pI4KFH1@PzQ@N+sBO4W^&IT5H?}H^fal*gxFZc zR?qPoSi6D3?nRZV@2pTA-DwUQ^rF%fq@^oX_?EI_p~_gPSD9N{1$5#z!O2am%q_7( zb$A}m$-!;^Vq$THxE*$K+e#p7dF859z$bRqo$Riu<|${3Z|9e{g}R4*QPEkk%*?X- z>1IzO-X@iIImCBeh~Z_d*s;21#hH~EO`k72{wLJ4{-Ua^s!^PwGBG-4sZ7pQHbzqy zlQKNcSJQa>0}(4U>|d`}PzJU`_q?dgC}C<Tm15xUt^33pdzd-{|? z4W-io$QtI9^(WOo%5nN(o@N@EeEU7guLB!mOZ;t~5k9doHmksBEslxpLpngdoE|^U6xElKXXzoc)>5 z*6^${)(3Osv!4m|6MEpI)|!VMP%CwDU92H)lje}IdSJ`23JxF#(g7sx{?}1IgpJu; zotVDZE;rpK{91eWHr(yUAB4akO~Q+v7nPU>a-;3S&%!0S#txyGHG<-Vc=dW0mh-f_ z$K?IGp9N5?T7RW0D)zCo4Iuw{Z_N|B)MH`_jxthd#_;j z%s7K-53jDuhi?hNY|t4wbFa|a*hqtVWKfT=RW)gQkwa7iT~uy;BwySs>|ia@<;7nJ z{etUYh7@h73ay=Nb8jOMzG|^eD74s8Pg!hp3*@I?2oXZA9JWsgv%+}d;UC;7?nlIk zZ*{55VmI}5k5QuJqunjRaLO5c(3zzcN6*e9Qf-Ir#O4*i~ffwtA^I|&Gb z<1O~p>L1deEtzDzLaD7i-F0|bEFLEB^c{4QcqaZdrjDltTQfx${&62{+Zs)9G>@xLlzUY_YDx~TZ6R0_(sS5O3 zOD2X+nRr@kpR0e{#Vpb4Gh2nxUnoN|S1sQ3|Frk+VNq7=-}rjg0FE4-K|wi*1ES%1 zQZm#IV4{PGPUfl79V!}`*`#P>>fouMkz$v%tgPs4CuEnY?QMl=N@hw{hNg+-t}(T+ zOsy>5&%M?f=K*c+@9+9u?_cltxvn*zd)@0^=fkrO_p>Ig66pVQ7Bx}QeAB5#`^wMr z?Zj2Yx}4BI;SC!{+wdWs1F=S9jqO3WIMu?)1!tqFqJ+Zq;RZ*qb%gIVRjR&ekQkXh zV$iQendr|$lkrDixI-%2_Ro{8mepjJ-;G|L`|A+jcK@NzXzlhPNQlt9^xaRXhfqq& z7NzHv&B8PnN%Uf38K%}uO*vrU%R|~i@wkkfbf|u(=aqY!atEEB0~I{9jS)Szxh$g` zG-1nK>PO$d&hDT87UdQ

hQ6C(cG`$!|THb{+lOABa#6Mok|b9RkY;-@3F$WC|N1 z$j@1sJ#p72V}N*VlkovR<`Hw=F>-nST=DKZIJVe3Us&HYlAyrZ9ca3hX zX^n!7>7iUyzH7wrHP49k?_!%3|Hd;uxBS&re6m%C-P1S$Emu(;6 zJ3OQ;4_g8`4p-S&B&2u(wff8v*wDl*-nPt%6>c8wR9pfghn6(SAacv_4?tye8gk3- zsn09BtuC)@PA&RDwyoQojoH3Gvu#tl-Lppezl0jjyRW_?pYFC!REsE|A!(TCVm!n+ z6Y{IehoO}-#K0OO%iFX)cTkgU$@j>0F4P0FFfwAJ)<2_=;@m~clD5+;P|+CIBWv+} zcJ83vvm0^Q2>#&%+PK5qP=wy>-z4W8!(8;mGkw|V>i1<%-g7e6j6c5TaD@NY|p7ChFavZgKt1Kre-=NpDdbI`r9JLik@ z~aH$Bt|GpF0U zNUHqx*z9*YZ1!yXbeC-r?JZ@x%I3m&oG5=J*%;<*8kXsz|Bv?N$?UuQCvK&*-kh&8 z+>Lbq(GzX10dwqgLR`L^@T&fD?n$gs)BU6GK)Kivs4uSuw=mWs@CI9S7B;PLO_?vJ z8bwnJq{>3-3Ef_pX|sD!&IM^! zn?1`;f17j~`IvyHCgfo_%f)bZd0;s6)vCd){0768yAL0g41w18$dC1ChObOMqe)D! zofJG8E5e_K@n9^fF7IqEhBjGzxn=X5=ZkP*#R%gTRYqA>swJ0yx9lMQqw>i>$3*!| zXjwjJtA6i)EF^#U^z&si4ez*S<+Df*h+|9W)0s(F!_IGVS$@Z^iR;YI_|Op3DeTLX zEkwIIGh)zw+fp2@p^0EmB@3IP%U~gEWKNqIfK$DfnhXW!NDLyWNH z+4z_|*N^-0Lo4CfRMZzQ&(La+xr08R(;ipbH5G*qAFwj2TAbNpq>QbzwpG8S{Jxpl6xyPYG9EN4?z zCW*_Agbo%QlVrClTiAfX!0)e`O3B9{F^SHi(EVK&91@~0(O`Hq9{SVx7K+xqC=*cl zt6J(p804(zM#ZQEeBpvw!GOFqMOW&6^u z(RTSZrDaK6mxkM7U|*e90!03O<+MFb>x8Fqzb|%gX`+Iyz=`z#ZXcUk(BtS+ zENr#LG^1Y3tu-d_JMI#@Yq3^v7l`B!jhp#<^Mv<9DC-p6B3}6r`dkl96$d`V=G~$> z8|*ubEQ4>#5TifB$9hR0i~Bw@Uhn3?9Bl~x{|6s)pcA7O$EN#Z@X6x{U(K;;@?V5JM!-~lBnE$M{K)6LDf;cgK4JF>V%;ud z3V+~@4RN~-hrwt5Lkz5ga?JQjQBY@O@N?DT@j7FkaC{1tgQFuw4zOKMEJ=sZO=2t3 zXlBq9`(~B6=Tl>V%MJ(3NUikmgd~L5r2~7sV5Ohl?3fD;0=%PQuR07D_-%k+|67yD zvP@chwqv+;B((cm;@qdkhr?cyyRNvdE!o}Fq;A}$&ykv9-$l@-3&=V^M0@29hU>iA-<1sCXi?O(D%nE--=yWk>Zc#;F02Ul@E({S*VV#k&++e_#(IYZ-#5r z2ib$FDA3vKa(rPB7#@f;d!%@MuhH2%MEcm`kAd_M-zX}@$y$b{0Dt5mkS*DRl4s+a zYKkd#4qQle+sJ;(~j;ggPHe308ya?wV!{MkY7RRONu-y_JK@SWK8h0&f{^TfAb7zO;5(PBis zacK`PwihtH@fBLB0z=t9kyDyy;?J&+yxr6!`!4R9i}OFQLaeDb=5~J!;|l$Syp;x? 
za$kN3YG*l(H3%b2IWai-?-Uz`?K3_x`06zx;Y;HQ-t>~#@TKu7@BWflallC6<6abN z4j7k?zxD-88Olc%Vt4>GqCE8e4G7IQ-U=qV(z%zY@7R_np=#kc?K6rhcK<$_f1>?; zj%#DU-UU0@TLSENhY9CF<5F8c1gWlJEJm+Lb&qG9C^Xh4et$I$Bd}UnjjUskn z5J)Erwjj<&OP3Ls#}25cXpt)`Zkebs4=67oA~~SYxAaZ1%pBO4&ykx?-aAXPaO|R8 zi~0A5MXj1*pO*g0ueFCX#cp~|{Ob@Di*lY9lMi9kKc?)lTb2K0^6R7%r&r&gbGSno zPgguA5*myFovxE2+*%Z zM~t$?85)VffE`zyVxmlHsY6Wg~Wzz|jn~A>Ax0OGKayzDM>B{ztvN>U+GRvjmqU<2O zXYKh=o7~{`Q`3->?D++|GKboQv=NN)#KG( zD+YdJBuBfka>bHuh}`Ik#$m0P`Hc~0xW)2sFqjpxxvjI8?l7dYtL(M33iziDq*cId z`dIgN{Bqm!S74{`(X?;^wS)GE=#{Adf5b&j8%cy#wPdN*7Bv;yd^D8KpW)~%9g&uf z){(vWd!Ah)u0CQ|z33(EC`u5aEiIOX{hEx&RSvC?A(*vLOt|Qcc7QW+XC*qClfuHz z8@^Td&sf{kgdS0VS-Bf&#r6MK(={C%7f6$D2YGbqq-MzryUhsepDIIMJ6TS;vy3VE;2#VRT%~UV_6-HHoGFxcRWxd+jFWZG4yxKl!TU^SalUpC2GkGXl>_MC?Jf3C(wEA+2ZIgK!9Gqa7NOEAHN1_k!P zh;z&iMHosLaU!K$IceGC)b>Mc3$vhAlZDA~jlUp44t%8>Fz~^_m6f?LAAP$#$0dsp z>jTG#@=>He;qvtxi@L*T`6%@qj6KkDipS@hS@`2tr2v(gNO{##SNjk$1+ zhfd+$zQ{EHJ{L_bes-0I9F(si3tqs7vNEr7N&S;a8);yY`U8>JNU9j=TX(v!6;6_$y*V2JBCpapb!mo2}8(T$8+ln3he0ZL8Gww@N zt&V!+Ds}_a94>LBGY&U3OzK?OzBaziOB*a#SZvF#!JCpSkyk|8meEB+SQZq;oZgIn zOM1Y$OBcmKp|B&7zM-hJnxc89H{+J|2vK$vALT-;016=YQPtsP80Q+aWrQ!><@>~C zy9TReHKtHw$<&=3q*I=^$giyZ*S(4pwdc?x@HgTkFx{p4^nJcv?KY$FH__iTVNH_j zRIgxsr@@d368jfOCVGEw^z>GMVspcBE#zEm-G6w@f*TaiNABC5i|bzyZ=g?ul*7y# zctWXQ6D=?%kVGZ8B<=9;#?pv(NueXnY;XR8l=wamq><9}U>}vHx7)yr-UX$!iBsPj z&i_kkYAe%JlzXxC?CmYnvy5h<8+*#31FOLaH0scZESG4&5n$r}y+Cv6S97dh+!4k* zr@G|O;&0a$>a56i7G-<=_@t%Z8|s90>2D(AI7&MXM^Y8TeXspVT>zcVDwo0zTD>BRuwjuXB;2@2R1j7y5*rB%7l{VKtVP0l3K+LY zI0+jUiV1|Z3q=WGlX+QVf6yB>K9xDQuUsw z`vs?h_s$gOenIh1dQYUBHrnxtGe!T?MmBFfQ`~*p*u!&f+mLt0fPT=#yT$RdMt>t! 
zbp92GA_t1a%wLUXjLSuv-;82@x{oOR4U2Aksq*e`#%uhc^>| zEAZD}9orl;@wR)#h7dkH>R}V_4$&r*j}f!O_`Eg?aAqFxziN><9mbt;i%h&5L04c~ z7Rp<0;aqDt#NS%+`E6D#40>C$P<-18-X1XVW`J(yZNo?_Pv%CNxILUZV>)%E6>Bq9 zjs*h$!rd+@U#_lp``u|5NaTrzaNa&?UMjiZK(EUM5{WX5Q5|#`KX%407wwnZ{HGij z2p>+Sx(w<|!Lr*w@xnmg7DW-fyD?Hc9l>*A>n}{#WyxZ(jmL0iBeCJVW~q(??}f&;?;G;+U>t5j ze__yfh*+$JW>j!#YbEH?mA0O;CCKd#u18ws+tg zl`lRuR$Lj+ssqeR9njmwa5Y|PkbRAbD0MFSHaXDH+Mf@1 zU5AT!St}NG;ECSO7YlCDMS|ObtqD~V?u!L?Z3{2#^p=pKo0W(6;w}<@g7n~Rp5}Sy zBRSswB7VM$Zk|mp4f3I?0{Kb1Y?HWtkKvBE5d6Pp1NN_R3}|UwdKH^DQx!|D zis5-XWvY1$ek{0@(7agHzV5V(k+D23YCHNpMl{(ad@_K<{=opC{)caQNdFH4>HY&E zI)QgIu2OPGZRaCuA@`v8TLSOLzf`$Ny!S5!dy35oygR?)HgO_>XU6rpjY2$2Z(19D zD(xFvi8CdCFL7le?-jj&1`VXXat8&6Gp@lK?n&g^jCMcYLc1OfbYw$|)27(@w}>$v z`9S+#IAHQ6XMI0`ez;_7+~wm|%jL4w|8Z+{JM$kU&b(C*)M{4vf$}Y9h0oj~PT=ns zjYT3NnU{1uf*F9+5-cypRMnq~Rp|NI!O0<%hh6^idEy_*yfgRQBK9WpvHY1((W?`J z$P!~ZVO-g~L(J>M)7m{ZjS}yt#Fxm#cb_I+@5C>6mBAs9_!mH5{H$t&fD8Oqz^}hI z4Kp~aB8xt&dhRyS)4^|bRX#=`U~lFLCSXqib!*k;B*R3FIQ@(iEwf? 
zZOa{WX7&;AV*>#XkpVAqWc%=`Aa;i`@}Llh|DTXSc0$Z)G zi#zknZ+#8iKuOesroEwb+7vY%18hfHR?4bEBA>G~kNFxDr77msDm>Xfi$SAnV3M}m zzd)p<@tZ7eW(aQ@-ilv|HEDQrtQU1@{BbnDqAom@KfP6~>cacBnR7EXVse&2BXm_$ z?4P2(3m@3#Ch!6Yz3XP-=*kn5?)^IwT%PCf8`Rdzb5OLAY4S9*c$yebE8rhi(*DLY zeBd)8&FZga)VYSFq1+-R2j!68Ecq|~Em&|%Kv(!Cl`j9X7#n5y`*)mk;15I(q2lZD z5oQ})rjqT;ag=2b!IsH&+WDOiNNMLtUfCr>GHP#XTzm?;7=2%@yYeZw-#22)AlL-vpS^S9v8wJEpk>a0w<{-`~Y zbA}Emkie#g={$qJ^D4V)gzx#Z21HH!l=VP#&@(POy8Q10U|GDC3NHI*vA-KneY88j zoOhfk26X55^Ji`nA9Ux#dCpBDx(C0PzjLE_tOsAk``sw|_2f73+8e|JJ^40nzd?-a z#YgkkCXn1v(b$VWX#880^yWA6`Qycb-u!d^<~Z?YA3l;_KaOPbHn^QU)4)eRWtZ~n zdGQ!xjuO54@@4$Z>&2UWN%DFrDct>d0k0e_*7xJx_}tOrFyZjg;>pW+2XWJr239?z zMc)jbWqZ8g4{Wrf=i%GC?2XY6FHVTTtNGCkUc|A!kGzbJ;?H=+q|5kK{B1Y+PYZe2 zjaLWl2pqT!8F^!rc)mXh;OcvoU(@+OzBW~yna=0(>J%~S7Cw$oND-O29(`1_ZW;mc1i3-^#D%Nu30r!3XogQZaM}f4Iv{a+Qfaq8mXp#SW3y+JCkV zVKuU3P~GVX0(Wb7_9h|${#Pgh986`p%0fAn63)VKi$o1hR6-wu0LGqU1ZANUHz~<1 z(D`6(5A6>Q1-w1f35xQvJ#?U^Hd-m*+iECZeaYwc5QBpiRW*ZkWlFyDmg{bdd+9YO?fHZv5i!n61&axQRvQv)K`l3V~F|cg4wByieTm zcd%jq&u?Sq%-SBh?kyF;_Ry-gMCKjb*}DXsrM1@Wp?83=Anvlz+d*ZY-r^`~x3t!= zJ#-wbNGlx1yd@sJgLjS00>}19&6cHIa^DfPckq!(SH)8GJSpE`<&V^dknJN>Q&VhG ztmx}OXbwBr9ewx(~{*{^0ms0*RlR@UI^q|h$^BKh*rSc&IbK@$F@>4`!FUw-9E zz_GUYOh}%&HpOPN7vIlD_q;hciM+SFy4 z{Cz)t)Un%KFR83u0$=@aIykseomcPkgm;4q7Bl1@zs6^6_OV^og(Yl(tw29teFXB^;gqN|x zYwGf<{C!0J-Y0(tv=!xZc@J;F8}uHy-;EY|wDkEu^xp|mCElM&ydiL>0&@B8p8&h; zmvu}2#M(Dtk2_koK-wt`*W%5q$lH|%5bSvo>(MKhHTUwQ_JXn^KlS7%p@p9rEuxvx zESflsnn%C*vUxOLEN&jnFGR}k_fw2<3zQ|FERhs^`TeASd6X&I zMf_9us4(DGLiQuDgWgE=1j-itP+F4yAxqLuv|Z7SBohK!C%{SeQ`P6~9$fjsj;>Sz zmXc$sqO*uzplB-gqaLDt_vaIRWT}j_lIVI9ois!88z}>hgEBG2B(p3Sp-jLsm1(N3 zquxU8s8i@6=9s%>KvKX3cjQpgSD0w0iB2=oaV9#_L^Bh8YVG+jZAbk<%h3aC#q46< z!`9(yyu9vz=Z~fe^#7sr#cRbp-qv&#m{T+}dA|6pn5QO;TmC0B3NRSp+X!D?zJKWJ ze?rl=--mvj{@u;HCbVziey>lvXG?eQUR0pjfBzNQ-M|ga6;vpJ2ZfMHh4Wd}`Q{*N zW^2$co53mAAuCIMjdr6jz{_vpzd=_2vS0vGAgvlN?ehB@Nw~0i0J0WL^64Zq<{#=$ 
zK;b8g>;-}FeD{0EzH%)Rq-gZs%%f=mdGMkj5a@oB){1B5^LX!`1gz&aI^ZHTzxGJFqaPxnfzmlYUK~LM= zQg(`9M^-C|E__|gy9X0d=W+4IJ$NfdoE1OcgA=AL;ve_$*w(m-!}p2L>fbJ|SiloI zT=_DMUr^}39CM{OvNYl2fX(9Y0`5o-!%TFi|Lk|_zv=A5@fS!oZzyf1Z43Qmjd%?K z*iKZ#57h0h!_@m^H4}08H<{#zF{#|?Uk;D>>dCb|^l1puL*AdR7M&LIINLOn$6Es) zJ4=tT=RICx@@P{Y6S|l@b|V^hUdaC%Ijd6rChB3Q{{_59Rpj5T4Mx6u?S`g>{8GbP z5Kj{k#**l3PCS_!_mD0xp5q!vCMbJ`#$=7*8c*wR_G{dx@ePeDH5O|OQ}KA&aLwqi zFvW zzt{2s_gZZqsO5TUECR~-_iKSg+F_HnzoF5g`Oj4^5|2wu_d!-EmZLg}jcD-yh_NCQPx z-&WaoX#A(fRT_&mPSH40W2VL=jo})HM62+A)A*Uj_cgxZ)x=X87ipZOahyiCO$C&s z?N*Ja?25lvW3|SW8W(6R)Odx)?i#(3nmDZ^XwbMz<3^1yYFw#tfyM%jffDpo;L8wL z>O5RxfbTBQb~}@NyFzi-GM*AyI#oGlK}a0l#d~@iK31HhDGKXSEz+B(v%(3x6n|Nv zLhG#x$IiNK)*W}va!f0pI?Ypb>m9Rn{B?gJKV@=(WAeX7HcfkxJu(1jWrtUHJ;TNSE|BKSLkJ#n&H-1sIf%j zDvcX8)@k%>yy8t2pi84$;{=U`8jCf?X;c{&$@lRm#i#f2t3>j%y!D2@rF=@9*jdTP z7;M80&+scEHaz?qzs<6~niqzN%niKFhF$CVKtrUz4KjHn4-2D)D&B5yNftBTYwZ^;KG4C%~!Ek)`-U|m!~ z|64-50f7eX(YZ+HSYx9uA&)NN5{-=|O0FVM^oy0fQlou|vOAV)T&6Hk_m#?@mpQdV zwGOCGV<1Phx~MDG>j*X|bm|;d|4Z4kwB4;SPG`vdj&je}e2>OD$?wTniDp!2ObvZF z9&0o<<_r2&54Lz%;TK}~`+Syn^)dXdIqW@;b4ERC0?+}Zhxp{5R_L0XN_^Pl8O{|9oEKsV%QxkL~C=S0;pVauG@O@jYPuI})EPas9S z>Owg$E-JrB#1L#*#}|{UJoG;+>bkEuYd}2oG|R|W-eBDkRRO_tDHar@{nJSB0}r)@ zOxWu*qgjBawm@yD=HP{mn!J0t{0Pd&|U<-0EL~Ns)s9F^QbE+4=cb3Jd}_%u(ynm0&H12ZgKK`-Y!;pHU&Xxu}de};Wqsl z44q=Dh1~(9Cy-3SLX#Z%@ce~b&lYl-e<4@-2M-K5>cC*lAOAgD)a;xlxj?;KEc<8A zT^Jr^a5ebqLHx3fTtx6<0j5K)UVAl{jJe#2-|$rg_GalvMC4Ult*>*{zr;n~GQ#*dlQW+DktsSz>cQ_hBLCL89G+AR_^ zjsjUF+$vkR(PnQ0#Mi-XUC>Q@w2RNOI4p8m9uw(W#5g?e;@3oU7L0Y57+%K{DYO!J!GfPc zLj)oHSDAuXW1`7x<6i_!H^q3QQ zR)<;x_hR_P_5r1`1i`j91zQbT2mSYy*hG>J#Ayxg3U zcwz*cDSRxARCtjS5MBd9Z#>VTAWC7wGDyjEZUm^zd&#o{D+^CR;vyHd!=I#W1QW`V~XHfe7EIz6g69Yd zjs?j|o6FruyozSLT=L3Rwcru6vIXxN@YYBk-i<0XxiY}&W`b@R;Kt@WDOb~sH%7|U zHs?vXI`Hhdh+{UD3I@}UIV->`LfO-1%dKu2mB*k`HFN&_`2|x@T5Q!`U0kz^rl#d) z%SbAl@otontTFS(D7k9zWYLq~F-oomyu!YSr&uK>y*0M_k0T<*NGk@(V!|!#0P$2k zLL9>h%3O>b>Htdfp)g 
zIZc7wyC{P1V0LP|8#M81_KNoVc;AGYJJ8PXURwfL>Pytn36Fcklzq@!9CC-K+Q-{> z9|AcyWKxiFYf)R~a4_~F%sTAHP&`4q_+cL()}>yt8`2oNr3OT??C>-sRZ zAEsfSXmbFcY3&nT5AeQJEH@s&R}1^ayaT-RWUN6`qC!~Ikti0rHIfY-%-BenQsUDq z4gBUjEJff>ykf}2cMf9_Z?tL%^@QYG+}uf(QvCsE)V8TI*q!3H1H9|sJhM0}1)q5K zI}t7wJdo^8AaSk%((rh_woe7pjBFkd<;CvR_D6v<61@yW`LVa}6pIe>kzFZ{e8fS| znNceK_+DWw$(R(|ox^#cqYG6E zrU2<#tnE{8RrWffkp>_}i=bbN}Z4#h{$0U+{*n|)L@oB~ocu-TmXZ&$KA z@p>U?FA9U)TBElk820r%W&h?b@%SO0AgjQ?RfYSm9Fnkoc()3J&4&j(E9Q&+hj`b? zl%5*MK5JL*cEm?d5|Gl94kS(nkmAhJ?w8!73ts%?QDV+v-f|b^>@W615!TYdxo(@qZJQpRP7Tjf5%~-;B`F2*wZi@52*y2?eQH$S%mSE zp?s)zZ_cn|6=V0o(7J*$XSN#=AuOVxHH+xmszEi|FlK+8vBW1ByMpAyLs)o`br{-i zE6H@g!6gGiZi}ofco=w1o>gWd%$a%RYiK|_o@DIrFslLyGTDXW8}5+BDQE{i#a`QPxwL^$8P295Es%5j6qF6>8mQZn`Xvs+ZteWgkTHgT;A z{I0cP_z|9*Kr(fZNq$4Q&jQj@^@dn-gr~|d*61+qd`ro6fi^2WS?hx`C0gdwjY{Tz zXaLjm!rL;Q!4%H~$n@T%+!q1qiS824y5M37M@9!``^nlE)({-CoRE)tPs!sJfhqsy zT~n!zS;2o+7h2f+%6&YLo)_L1uLR^8-VYY;@(+~Uejq*J+r&@*=BeW-T^k`s?J{3? zn=i8jB!PSd^+3y!dM(Sl^5nEGfoniyr;Gdick@1cg?C zOzufig;0SEQHED*T%$9(4oKr!jkeRvBJkY$sq$;zt2ntpDx}+iB=stg{I=b%#=5cl z@m3*59T@bk^na-Wi0>E9(u!i4qacPQjcLc?@WzV#DYStSS%^f=hAbtn4vC=WfW}+C zQv6*&dQuLl1jICm@4w^8Uh|EWk7`ES(z&S|vHQQ^gCbZG3UF;n9Zjc$z-H5O|8 zu7P65cVmZD{8NBx&y7`uI+jM#@i0|DL_np5CMpA5|9kMevYY%|P%if4T zlYp72-J98|vmuO@mpl? 
zH8E5VM}UnGh(9HTq>R~a6o;|mqDbb+h+tu}d4Aw+RPhS}hC#q8-1Bx3evBt1G@NE^ z{+VDRgLZM%F+Q|+{;!zC!_@k92{*HI%#~3K=Xv6dV|-xmYQtcm+&J&2xn21Q@OzVz4zZ3y$h|n%P4TPb;he5Z8IWm;rx78*xkKRiOsksr{Ml2HZjrT?&3eim+C| zUzU6g6Y{-^BWM@9j`Q}tvs&Rldtsc~y}2C$A^spwv^@dk+&ai7<5tfi?I&m#BTw+* zUJrDoAB1TjciAx7Nc%zXjuVfjP+RSc?CH@+8xF3e+-I$&JqNrv;-N>7yV>5%58Nr9 z^WnmdxLfJSMe+~4eHeAm#?4~j54`W-Is}>pp}^B%>WXLD8*;pHb};0C*QQ8~-sUce za45_hDNNdhMe#^u0aEwP!byD=Fz`4K26caW(sBGs^7%j-P)A-Q{7O^!wHFD$x<&Z; zrtl{us_-j-6#fGj3BPu;c;H89Pgg_cE0{)vM}-`;i(NnRgu!*44E6&|pwXD^M%z%< zc1jyhRDPs80>h+v7<7L`Ib`hF&Z2uGhQrD(=nya!*wv?a%=UQbI)rCLvGCbp`83}~ zP^uA7hrYyM@4?h+e`b42e^kD*@xY%QCnq=4>B$c=zGe*ZMI+Bm%&;>0<=rLi; z1OK~W(tlKCWwysBhOx*D8;hK6b$VLmhhcTA8kS7Jkv1&9HxksxU=y`xlU+RV6YtW7 zT5P(mod>TJJAUF*5}~`q_QBX2b?pT0V%X1ovA5!SgMAG1yc9sy7TK7Yrw4Smoblr= zMK6G?u_#;^Kly1L##&>-&)~*dx}n;d$x=}kwPNmZ_{(>g(l2l#r^+`>F~H0?sZuHh z&f6(r%{6!nan12>?F7jWh{a0)Mfi*%%h4Rx4 zNY4-;QSqtaEVUq#b;Q0<9QKRCM`PLFpjvf4!aAh=-T8Or?=c{yBf3vGyTs_m5*y={6M${yDd z;OJ(utP8|9N?#bU173Er-idP zqaBNGv^7N4MbxzRSo2%C!<>Enop6&~-1a+ zbp>lwWXV`^{CW90i3OEURqRYf50ClA#H<7$e0ac_#Zs^=1SOi!!-V? 
z+L+mH;Lp3`jLt0DZFi<5MzX|H9a)DX$*kRHotSN_gGCgju#kdOmcJ42LYOpY-Js;m zcCq~rROP=6_6tlQxyu(wD``*Ogms1XL+(L4S|5gMXuqEK*YfpPhdi!$N985{l2F#( z7|yVlR1<;LOh7FpzC)`DCXlHq1hlVyG1#Y8|S z!ekdu{K>l|dbS(v8JH`6S2;7;#i2iO@mBs0gUy5~(E=vB=+wl!BxQYUut_jATEJ`< zl}*^Qqg{+O$PE@|?5R&w?S8RGC@CfYwWSXY=V=s!^jo9vHtuypKCwO*oY z-J@I0QXqx0PTQC7RbjGv)w_4@dt`=1ZTC%pS9&JySJ7|%Qi&e=@{w08$+9OpA@b}2 z<@_U%p34p@`vX9F)@ys%SIV9Vqz+)b9L`=o(vCedrY%l0VsV-g!`l87;fd}V&bnqK zu=YPS8&KqUZ`HY}n~OQtecz~nHXl&|U-54paGMa;=8f=19DI0E609r%uglni2xiA7 zlNG(o1Gy(*YM`%{LOUsNXK-h5XD!^L`7{Z>=Pp`fpb~szH}!-=iua`A5a~ zh9xV|&fGGfCka$&0X9#S_E*0vq5bDnVDtV^cHDN#<7A;1 zY9Ds^px~$@-~6YNx)(^#TR@^d(fsKDFfe2xyz82j|5t(ZY}5AWv`Cg#5RDh1oh4jod}015tJC|8$adfYGgko-x=5dGgQ0l4W#ELZU5^6#^QGyWVs4w0Fa)E z95qjAyh}U$rvPdZK%Eo*0FwV(fRsSFQAMk{hxA4j<|q|#I|1Xy`z-~sW_xrO zT!hmlBbLRyap53ci3llBr!JK%fCn(P(GzD?ZrR}~0dGZ!H*gPIZ|do_nAWG-8T&Ot z$%M97ew;uG?>=o`+g|Wc%Ru?2tkg?^`Zy)iEnfNA7cXuMMHi-U<92fP2I77tLCJid zsANKu#N(lsuCj$@L5BX)jp9j2R_>Q2^G9y8q{w)Rbv(S2k{J!8=jKi#AQFKnsY+%Pkiw1cY-O=VM}}Ecojiwi(m5yQBnU@N4~`@<)|siIq_KnRlCcpW;5HXC|ubF3~dNDDC_biW<4JTcf^_RU> znTPgK4gcysV!qXqJe)>92AKg)<^D7fZHUzYWuv*+(&__rmP?hpA4reAulU+(>Ds&D za%d4|f)}b9OLIH;TWu`J6a8CTQiqldgl^0r^mOf~xgG84GrSwN#<1pm3d_tBPqnrr zk9KBpb{ot_9aeKYy=U-_!E`gAQqkO<+Ah6=TC*TuvX2O*<&y(@HLy9C>4cD z`zzqBA|A?s+|Bk2{AA#DH5%o6JvLXr$9qq2vXVU<=75?SLpn+{`+ob1Ig3$&k>cC#OC6I)2OD;1Y@;JQBu6ff z6ma(VG|qm79N{jQhVypuP^4vkgr`u2;c9IWKi2VBZ$8weN@2_oUI|hSlz7x72;*Ri zZ&9Af9(V#usAjoL9Z5ijFc5KF^SH!4pYYZZjbQ48Kr7TsN~83c!>l*OZEq7y?^%3) zY+Z_R8=6~zXEc2eUxLjB6R$tMn_dGRiww#g2k$I+rBOlN_Z=)=Rxe61{170b3^(I~gcy-{_nt0XV)qsa{`?U7|dso5#-c`W< z_pX9KhpyLm4E#THSHa-_|92H^NNs0%H}X-3!D6-nd_p_S zS!g`v5z`WP&E_4&wq% z=+u0|!7vl+u?BF1con7qLP1?uE_Gh9dCW?{Cu{|?8gx+Ol}pXn%X%=j34FpUU=DyL z91L>~bWq(Db20E5dP1WBTC9ZM_4Fd8uzUy0379dUgNm@2^Ftvvt2gF;;1dposQ?|+ zise!}c55FTXo63;1EvmiP(_wYRoUw>h4F(=I2wlS$JE{n;szKyEcw9MFcXpL65vCc zt^{_uR8WrG`0>jp(hu52%rg94`-|%bR}^3wYq%(i}H}@FTn?nAF0|$KCr_mMLU3BXu1KI za2+;MA@2YxrC3hrWzS7R3x@;Y$1riwI3Ld?*3fh83EHghNJ^6d 
z{YWnn&eez*LWEIJax4N(7!Okl+N=mk3XzS^qqT!i82JJc1lkVV`w~hEv{{dl^c&+} zXDsV5w4Z^O!Q_L^0uHW5{d-_>SL3G3HxVf43Si@U)EQ{PvHwIn_!{*M9P|#V0CW~G zwFVPx&`zLt3+f(p32^T=6#F+w1n|yJ@M#HXv+A8$UC*`${l9JxB7_j(=P)5hxWogX zV?Yyj_!RXAns7MG6`k&e{? zm<8Yyo`$Ia%}%1^=HrJVHY-+OHHk}e?!3F6UP0HoPj1xhG9q1Yk+Ofsc;;?@qZv=;O7IUV?Edtw8snLBN%%7 z)d6d;G^SJ&lIj-GK@~2XdUGtDp_(-Wo(W@NCV(bPf+++|I9BuXfdO?d!UwPjUjZ3H z`Zs0o^RN){0?azlgrwU=e6x-h>3*$(PS*kO37>*F1v;qP#ihK__Pgi~Q0pc|77w!IV7=|G#crbwI0??6JqCrnK-*ci}Gpc{tf8D1i?QdQ~* zr@@ee2l%w6D}h@yT?dTqY{&-d04^XJftCQPV2VKFq9K-;CiQ8U8#oPy!em_x)&qvR zqE^fDNFsWrpb**DPP=DYP{tmMOv`I5dYHYm-rK{(` zCwv{I2DDl6%B++{%2^{}PC10=Re(N(f!I0_YujrLy4GD)t#nHF*ZU zcok00LF4u+7N4a&JF*RSD-6{iZl_{53|2*o8>-m%Ls2!5Zv;Ly%wT6hR{{gdUWDUZ zI8%m>7UAtMnT;6#aeEaj8V;ohIFtbEVG2Ow7Aux{t)g+06?-xdjTC$~61siYqdGvF z6~jnrY(1_Q+XO!0m1B`1&}M}%QWAR_rV)I?oN=gs>q!p6VAsN=fi^2~nU%gu;a(5p zf(&8Ec(iKJgqOk01Wh;yrUY~r@Ee#4(72q8;bJnj5wuB>3-8~F2I~dW00+V+VNQW2 zv`kVo;bcwY_A(|WBhk>wsssiUzz7#iMTvn=cm~D|ns8_V+74*K=V6GCThG{`8L0na zSa9_j!?kCu0yN?7xp--SHYs#9QxG#Nf{}vQ2QVSfup&GJ69<}b;$7%|pb2k>$pmfE z0VCZoFWXZLK{yZ|hw*?m>wuAd*io1j;1iOLS0!kZu9wsSdlCGd;1fP`4@wlYS>21& zz}{Sl(eo6S*kutCdJ5w|E)ip&!=%HZ0q94YpcanH#Mto?6c_l8#b`J%G>YNUFlJq% zUp&ALOVO7}1{k*t3WHcJnRUZRXY96n(Iy~6xEE#(#RYVE?^6M}flEr&+sUi|W>yX( zC9&Nw_3%u16s8e0q5Xd3>=%rNz;iHm(9CDB$6#o1s{}?opvD6`@DUi|dn-Xa3nj5M zc&G;ES0JUJOMq|u9SH(m1AP5)L~t51z&BSLtQd3+aLLmcCqSEZnMlX!wMx8f&S0hl z+zc}lbRF>U^Ju7`wF(jDf3T2l(KRn2)n~cHur){!XghF76@9{nZVBvJjrIZBtnOph z>LGQXmH$Amfehg~80)V%TLRX?#DT5@4qu1r2JK!)b-f4gvxZQnt}pb2wf zHiC8ox5MlNZPH|s8ZPebNYL+)0nUQy3EHI0B9&b7cA$ZQPdFaN1KOm|BK2In+jpXC z!GSR2V~qcx%{ncl>+*9Q>KJ^&Q!tI7*(WI4Pmzdos3u@Qn}x9QGt@o!gtecebV1hv zPwYdx0o@2}bpT)8ddA146+52~fBOodxWdg!enqz?+hBQVBXAxV#fiR6th%Po_eiTbiAq zn~&Y;`2A!Wl#?Js_$!Qe6D&l;cEMs2G@%3L0BFKXG~Wrl4dxX1<|B4GYA@(Yiw=v# zV$d0&3Gah(fvy041Tz729q^}1u!g|=04L^N7C2TX`~$`VA;RcxI8*{n=zv)bn((IX z%Cq^zoQ~JW^}t{Q8NwMbTR{`v4O0)g1lXe|5{K#zo|xl8UpijDv=^qx5F*ThVYs`D za12ZgXwxaVJW?Om8&w59;S887Koi~#;|5(qbRW+0i3WZNLrc5{p#4&00DQvRz4(#S 
zDWIb-+5sF0D>Kl@uu>uX3Gafj%GB4bUIAm4s#Ylgb`yn zi^TahA)SdQfexOHTjZH}zp)r>z$g43W(?>?V88JwUeM-qaPt{Cot^(Y0U7Z^h=`~g zQPiLbUxrx+x*E9UCbZTQXcxfeCMvobxO0*!tvcY2zv=FDGqeL>YT-8xm=8m3s}T4M z3{7612cFRUMxeJgAAK4g2q#ZQz@Q6&rDWil@J&sx16rmieh4rTh62T{%WRnDyMS-P zoI^P4fU#52Q>?fp1c-Z^nHx0W)deyfFT*9y47WYYfCz8Z^h}_C8X6ru5dH>3k(~oN zr>jH|8n>XdAQJ+d3$qC{9kOqLp$yQWcZWhn)1mt9n#PUIY+IohEfYeuu=rtU;X#P| zp(ULLJU9dH2=p9q=uC`^pxwY{VJJcCfX6jWxaxM;A+rhi1q{iY0y<}@xCm!zdWiQ9 z6gvzF6#(CZIS0?A0(P|riwm6UyMfQZI6&i`Y_u(RAa_ zG8oE01@N?{p$x{R!w}yC+^gsf(Tn)ChG>&z85Dx6S{Bl+VC^>KXIY*xLP{VhLI+!J z8^Nj(D0b1}svK46RnDr+s;nwkmAh&}Ren`rm8Yt>s-&v4s-kLDRb|zhs_LqZRW(($ zRdrSMRSi}Cs>Z6bRcx(wt$l6WTE|+kV6bICNDYQX@%CWLwc(ksyI-Gx&1YQaHu@S% J)!&^V{|7hK@QnZf diff --git a/bin/VulkanSample_Release_vs2017.exe b/bin/VulkanSample_Release_vs2017.exe index f1a4640fc4c80872ab1d67891aa893c998b385c0..99c7d7431e588069eca1291587e518ff6d0eaff4 100644 GIT binary patch delta 108650 zcmb@P34BcF+xKVABxZ=1AR`GO!5|V#h$Y05kxZO0AxdnuZ;hp;ny7t31k+=P-qk^A zwc1i$P;D*U5;TcDv{iJ|(mKQvd#YvL-*w+-hX4QhKkxJTJnx&2IOo3Z@44>l-q&-I zcP1q7V(r3il1ab1M=$3~&Z*xYZxp!H_ggyY)urd)-Y~1$vLpPeUv`jRb<3{vYrwL@ zxYqsc_)@wKOAc6e4Ar<1*XHyuufBjh< zUp+R{uG39eYSb-nU1x;Pw^G+c7bMj(>(UH5UA&%)lRL5|`WE_f)<@r?_59WWx>h9L z5uiIy*Sr8-hlak#aV-ka9fiE;oIy8Zb>jy0;(vrDp-50;47xc;h7ma2D5gSA}!DWJL68j7;#6u^+^&I;}s$YvtrzmjI3|7r><>i2YvAqns%U?7x z=#Jt?b_Y);_Xm&NhseF*vAbmo+$$ctUF4n*H=g~2?q_IzLSJCxv20>MN9n0SY(qdp z$uNk09?(;|HIV%SSHeKnATTEU@(?`j3Q9O6yK2a;J|@{Mo4l6I1K87nky68-o0CSYVtgfkX{BkaJ2Nq?K zGDAtqd|gS(^JI+IG6H^bX?`Bm(T}VI#*SnYOzk9N2J@IY>mBTbskOAj!5*2~NPGIT zHr0nnE&8)%)tgF^gMCmvR(iD`J5#-twESsSt%g%F_uI9shNKU_?J(#HigFWWWmO*O zbq}(}do7j>_GS>%r?ZYVdm3iP8`z6AJ4!JQ_EF7AdYM^*dm7fZHLxMUy`cSLqiv%daxzjxm4buqMk2qlkDd?d+^$-Hy@}JDXOwtzl;^1AD!0W2qp8 z71wPfjZ9&`)wN3Z)0r`JnBK<5h9(;70@)DS$#5>%z`hS1CiMtl@s_?)%Vf5|(p{>b z!oILHFyz)Wuyd9Hjo0zeWwZT0vd5QGUdxo86w%<6{sGKUZ?x31C;PBo1LIPOAUeBV7AWOp1mE#Vjx5*K= zU+ro&u+PKW8@>in86F>b6(Js#lYXAnLN=U9IOR|dq$`Iq;)>O0!wsxk#DK`r-MC4C 
z-o9jTsuh6;Cqd@5)G}mu-p+mCBOYXOWGn&+f_}iRinrnpBG(^t78m&A)s0HM$3>YITXQ@RhIF%EEl-wc*=+Gp1Eenwk$@faB z#mlBDs2ow1Kzahzff_w(@eF9hJk7@In>i=9@3>@FkgPm#C^xCWTvRf3d_*G(28p$wMCPS1QuU2Cbz#H$e%3ZPZskh&Bd3E31xd0 z<>lqw|E>5+jmi6~j5=MMkRULdE1;isvYAyZtM4@0idAE3LNPS+zSVaBy#I;RxFTDA zk;(2#i%GusjXtM^O-ZxvrSO%Lg{ZJE2HEKU!2G}TvqHT4PpY<)>dDI$f9&JUCB>TB z%4t?KOF2}f=UZi)E3F%ThvGNf<{BLDM7SLY$2Gc{xF%N<*F>%c>1G>6jXz~ai0qng z4bPI@)2!w4qN_AYKpQ%)BZ^(ZLD^-GPADq17gE0r#|{U|W+radI^6bhTsO(?tU_G( z@GP_!nTHg0P;6H7kVEF}wwMGjvU5jTfUeZOgF?P-cG}OB+P|Y7y7;_==e8m`s6b;_ zX0tG-?8Y5AW9V)u-9_V(47Jo=j!ql~%%{NTTZgJU$4O}8%mrhhDph$pCRO=CR0Zkq$R;ARAx?;7PKRaA#{;bSfRQ#LC7Idiiw~$4LB7$`~bin!h18hk} zGknmR<^ywJ19_P z5YkTxt_M3^cJ!~5g_PO|Y0NFNGX%+h%Us)DdCMGKzRxFYr!qk)y$Rf-f$ z2&9Mp=d=h$PN|h!B%}kCEG;37uve`yK|+boh?bq`Usvi3I){6Mm8wCy@^pB*GA}w^ zxiAuqvPD9l!&_`M`vGs8+j)J0_s!JUx-D(+gwOp+;qC)#QCumFa^_*=4av$P-?Ak|1JYy~nLxMYgU z7;(uKml?Q}7LY8Or&9qRcA+sqJw?qzQlO^d#$nAaO?C1>b;$BTRNE4MlwBhW@p~TH z3x&S5;<8Cxwu;LRaoIx`w?^_hY{!!*KMr?Bp#v2mzLVyLku=|FR#01GIXzk zZllm`daV0@n+hrJfTuKrYY3%5kChJmUm6T!^^c7W7J6&_f6*&F)*B-9*8cyZ*YH?x zU7#l}Qszy``T?K2! zWs|sU<(H8;u95Rg3-aKg?y^~+<*50?odUbW8_v;tF1J1a%Z7o56+2uCz%g9Yg z^i3jIdK|$9@n9Ed!Oljor3INt8Cpc4(Pr{c+3g+*f$IFxWPoQj?(4v_v|v8&qPZ6P zCWMp+r|b^Ap9m{m1omVjQB_%UyOFDKsYgickc?@f(fbm{L2XHVQ}#v{J?*OnW@S zU5-dE7bT5`=M8u`fm%4n(N&b%6Px(dCF3ECG3?86MZcU$Dn;&fciAzd8)2PIeZg!L zTa00YPb87itJ?{q-yvlMI}pBE(^s$u4kSU5r_Xk&jZ;Y*14kKTr3K~qpR_A!(XIj- zQX)$=QE5Sy|E^NILqzfrkvy$%LWyE~~VlP8&z>RBTEBs$t5RdQw}VF?y;_=MH^3 zRSEE*Q%O}qm$`dMO29IutoU-cQeJ!|BvmQ16<;>lx?-H)4@TR&IuDx*?6}K&5Ns~! 
zMmK*&m zP07?huk#`4_S-%|%K$MX?cT{JIUtY~exw?ZLbQyBQT~y+q-K*~Ci|HS2BD@i>bnSO zfk4vyNRfu*_>tiPY3fHdYRE}H(pexnKQh+|hOFtA;0q~KFhAe&35IBD-uELnukmQ_ z^CKNJ#Dtzu*n3TUiLI#lbDEc< zcIYs%meZ9`D@x2P?wcKF&?R^=MT_V4H`N-ic@N><>+oG$tD6qWh_G1vA)oX`h3XOMF?>h}oPt}}<&WlRI3F(J$t ziV0yxyu-C1+OFKSDc{TPe%Wd*ETPc!F;fiimBBM)1!7|miYXxuYofZXx=vTfT^XCy z)+RV)B`sT4ayQAY9GZP%j$l`gk%pmI1IUV2TlV5JgXNgyBmL0)*bA=Zj6%wV$nKO7 znpBI z*H(ooCnb=>9eAxnfX>%y7USM)>5Ga?%L3nka45k?Aw;eg`JhiYbX2cvr_qM&kmHWad*0v1e`4kPm*wIISZmua7ppyG zL%Hk@eM&C=J%H7i(zL;p=2VZBocNFvBxnJI=FdMm{Ymni`cO(EElUvVGxPcqEhzX|DymPyoGs zzH|g!pt%ot^HDlfJo!VN-xCX#&r%#|@8qmQv7rbLn2{b=PN}ZmMv;}3zS-2Q z_mq`^vpqqHdaHAIx>BZgxgqhfR!TNjw^6}TW&=P&5U7$NSRU0DK5Mtv7CzM${)Nvx z%5N-u-n~xxpIG=j;I-JSJWmhOih{3o%DAqB{sd9Z_rJ0Ds-ElSy@vWLE!E>?b;4O9q&!oi%B`_bCWy@!y7CTVWWA>6sYAmmz|Vg%uuH^$*gz$F{MGaON_c^EK^KqbfWxBC@qp@@w)WbzVz&pv8V&$)u% ziUrojPvyz?e^ZL%u=HuzU#yxY7p+1Ci7Qhq-^IhK{f3h+Y{ZDw&n`Ju*}SF*`v4Ic z$|Y``>}up1Y?AX!Q0=imS`kUD3PL|EnxaiKFQS!Ghcdw$<4~?+1=VRybj00AFMb%1 zQE<(?gr->O?q~IK@gD)Q;f6U%m+n0#=U)p^>{VR?GhxKc>2lKboPM%OUaGX6ofiZ3in4pgjw4;;E9{ z{jj-GR%%iC`IiSM8e~|(EVK`_;EQ3iKi@t^kC9PuF~>};7Y`#{IZDCgU(@@t*IW=u z-IY7IBMrXJW7_3VN*r;e>BUuuaZ!%5m8|@bQQ*x9lNCvkW6axSt4--0V>55>ZA~hh zWpF56u|^t_p0Db2f3TB}GQPBgiz6nm*c+WvCyIrj>S&cjcCN3L5}10@2D4>%E}q4 z)fSlFh`L~-?CNd8FI1qr<2xIu;6tJfI#0`JecQVKp;`AV>Y#6EYCsLlsw)!TgEgFv zGI-vL)^|+I5`dd3s)yni$^r#p@|-tbf)J1uxqCHB4$`@XWa& zb+k+pKh@-RG)hp?L*%%_4%Y;0pP_Q`UjYu+H0+mPkZ|&f14T93;hv#SSBjuYM!gM8 zS4t{^(7@D4Pbx___bRa`T$Axcrb9WOUVJeiUHLAQzZm&o2&f^{N7rd%;;TRz^~`rlj=sL;u|a z0CO);AP6BR))9Ba9F<2Ig)YY7ic5E;MuaFSup~7ioLX42<09#mQAbf4M###G|D{4l z`P*lIv!vB%QJGnv1`VBd+T%i{t%-DSU=MCgx;vm1btFS&SKIXBKLZ>FbOn^at{0IC z$PiJXjTmmD48+(*)9$X<(yBhnt~v_|%}cOv7jBw0oJP{ow!ySNO0lQ1F$bD%VxSQs zJCz%999E?;-gs!>BfX?^=A{*gxxctXA+qLWLAz#d|X2L=v`IO zz-t+a9d$GpMN~}6)Lj=OPi0qqM6YAFB-EaIsM8rJ8nn@F!~o1e`|cRW{D2L3HIL^T zZ)K_3L*ga#Jxyu=u|?@wk))64HWPYaX;Qi?%~Ww9rx^EV&~%t8cB{{x@p?rfYX9?k zNG-_z-I%_a-$(Xf%U!@@>2>=B{Xqf|sWc>)JiJCuFeu&FfY7CYB)pyBH} 
zID64(aM0ko(5{@pfrC)Cy3sr-x-P2~HB4Gin@x_o5Ec@mN7$tp1I@Q)SA^2oXN>w< zZ8ko-l{7Mht&R@0oV-S{Uc(oeusYKiDuS{d_N7=-4K-c{z7*e54SSLyy@VY&(+d2xXt9(fURkoTF-!>kE$Dg7dAUu=FTp zcRYNl{JEnsz@QbDug|g3)@(zwngQ&v^kv}eO`7^bp$kFew2s0#E8cj>*Bw|t((LCCTO zY)p$n^G}c=txR;Jr5vYEye&FJ42#m!3>in8E(zc2v*n_k_ch z>y(wz(U_yIk_|!Fdq#~K5Kc{imj-P4DJIoxspZqyT4)^8B7jx3?rOdTIV|GQUTH8e zTbp^&AvpX4i@fH=M_`ddal)dSSjUl-O0Oj@z`%ZL(<*g-U6|uc5atXtsi~d0Ihgps z9QVNR4CNq9peDTV3i2iYYB=?!ve)t&R(p7D+Go(RCV)+CJHZgFH?UJ}->o@8uleUo z2DT(Vp~9QRb~`>!dRt}D?Pdq1(xMX$jDI}FHn;02 zEve4VwQJIQE%pRx_MeDY!2~f)@mkh_c3^s*n1NAiB-%@D?cjvhGK zB!H!~Z*SNRYiGBg8M5At^#&!th%vR%q`uOYJ!n5piayFlB}5v==?%LUBv|z!7lNOJ zes0Znb_kUoUSy>m#u!Eh2e8B(!vL9MyJ_Q&UALA({|JC8nh(R+RNVT+_u#h+|eBCDCu7Gi6yf$ zlKwF-E`i?UCElFg>a^!jI+Tp<4kcp?TbUTf>L!jh*1)e91>RZA57438cW?= z!pXK&{5zMhONq^$ovG%MUDYtEg~}?V44rO^qs_3bZIZOxzCEad?Kk+cg^ZrOK&n6De}GkW-F39nvUQo7Y_OT z!wx02Hq_JW*-NgfvoZ))ios8hlHw|1vAah z{CR?{=+RC3=mb05qrAZtDS*xzp<1cEqr_6;Tc}@N%dmUw)1Gamwc+f~o(^el1nZF8 z)JaFmxVc|o*o|^i@TE`SJth*f!jG&I$T&aplZNQfj0!EY1k%%w?A4I&eq^9PA~{lx z^6}MU?5E`R(xW>p#MVRl>JH1a)sx=6!?JC$(%d`j6Grm?Qf5zj`{&UU!;Smvqo76TDAPb8fhLMe!zLT;Kw^xeY}dN+;Ki5O+8U+&~lT>6<$petoFdk+e4 zei;HP1N{6H7O=ALh&pB+`=$3Bsr3UkOwN@)ddSM<;l|;A!Dr(m)-HWr^u8+GKSUv+ zCH>Xd*r2s4%vFNlB#-avvMcFRsv*|u#aG$zKF6gc_gHM-IZc1N2j7b*`ZP8gyjFd$ zzNE{bg|`xP^E426bZm1;Uwxgg?t&|}Xk3W8G=iOR9FW7g`n`*__r-U#G`yaJITa%a9PYS{NJJe~0w;O;#iGqV?6A@Lh)}kS*{X zh2UiOklB185CftU>c?ff>I|?-b^6@^Il$~E>(yoQz!i0NV1-=WRZEDoHmS4gus;W8 zG#xUdyUu+X!$c(+m|I_o!{GXi$>#4q4+a zLb+Azz0LFP*R7OyLNo;Bnzj!H{}WL_dO!i5^fyrQOp;RokJw}8VH40*N+SnNE1J@K|_+Ch++o;J>24pB1Sj4 zd)ULQ4)s4j=PRN7Ac{M93Z7H%zJ%qk=fm|@Hhpks>6Z$&b?^()*c+_jkkLj{1yZ?h zCCeYOPnv$2^&C2(>2E0J>c#3>7NXz+JaOzS8)&S%ar%N;fvMRZmZ<(PFb`nRF7{@p=O%4!oa1;(9H6aiIjXU8S7GW|Vqq z1JBnTZ&KQjAogQ{G{P1NGJgl<>V5$!91yp1^+E32m4PSY|xI|{)?!Y5|F%5!-0XD2&(udbu#JMwsTPuZ^93aLy9R%LOhwk9 zBk|YiBkI@tSm5Y&Qja5S)9B%CnshZ_6%LDj7zI?R0f(^-i$QA+ z5-Mm>;g?fv!268?9ZJWf-&x-=PsO#x`XkPz=VE~ewVN8<;D>tNATC6C@miwzlbFx< zIK@U`qJD8qgVvrB%2o=hHN^?>g2IjW8bm9kAopt 
zRgLx@z-oMS{@?l+LTNQ96MI*e&9upGxqOP<9h+*}dn#cFe{9#s<0t8j(|$&>=K#g)}e(|*Bqn3 zk>Kjo@7V20%|hpYOP*`Ulge8%-j_PgT0Ij{_c+Cn3+((hxXq7j=rhfvEk~L2nR(Kw zYwVwA+Dnbju=woGlKnKBoZVK+`Hrp69%fp1mW=-P82dZBr}WfW)@^dfW_2M`2j3Do zj5oq`>RBvg`1-xOXW$!ihrK%46SSQV$WR{A*097WJ%c8ao65&Oudu~aQse89Tix*r zErVdi@0j{2_KQ$uuG3ng7R{Fr`J$O!&hAZ#4qbTSU!ocBwJbTzVyD)x3uP2d2-5g% zaN8f)kg3h2tz~TC)R-=(Pg7KO93_e;GapZtG>T%Nl85*sAMTJHgIUOevR?!-zD|feg?h*=Nm)zZEe#{O&&%j5$ zgOh0*rS2huqWkR8$9Scl!GBSt+uy0HOsb_sg)1x0o(SG0BgZmfr5)V5BkI~h#7r6K^o4)sPlT^z?M59e zApW4n|Ahb$*gD9_sV0yUgot@}y2kwct1qMr%egTh_|-JmkSTtoNFXjh zV$hJ@-}$s`6v!k$avsewmB>$i(U3vvT*0LHnJ+bFn2%A12qfB%tS5y3&@n1{&uA~s zP8A^w(2K1;$?ndWEUmo8#yoGSXFiEJk?RvHjegV`D7zs3E@#T~ou$}o?BMfFYi%q? zZfwHQ8+x{!-F?1;bl@tBo7u_Olzf_3vh117yWKlMb1OSaP1djcc#$i&R87ZGOR5Nu zpuEe-FmpjYlsR-xDDzpLoRXY5v6F6Bvk>m|w|xSxGZq{&Vn zU=qMdAK=h{^E^TD#H9kRVTTWh(*WKZkjfGPtnvXhUXz?W-Cy^qct;S=`H0Kwg%Q_$ zz!Cvu_<%AEsOhtEyZ~DHfGrx(fEN%L(Mte90tncxE?v*AzR)GS^b16s&zXvlTI$w+ z5E-Rm`f=7hr@eISYxaE3gs|;jNO*sviZ@z#!51!Sq_aK8`bZ5~_1w@}+rOkrE>^iW zf5GB%>!&@VhhC+hNnN?c>%Es2pr=idekQMFa1k}8*r&rX0+zsci%0od@IYwBizEf- zdC~bnO*zk$+$K`gPi%Yc%x2?{BKu0vbWt9oo+4{;3eMMO*~wSF%F<@FYF3Btb2ajv ze<$x8f$#ka=9x7~`t^I}oi){1pF|5TvFBgxE|tB?cD(qqw6B7t&W@M1E@iW3w{r4^ z-(5zfb~=VPs56NLRA7W?#~P7cgU<0nm8kX=OekkE64g~2FjWAVYODZmFX5#wQ+-YY zKE_fEiea`|T>z&9;LKJtG-5R;_{M4RxrX%-7E?Q@ekHJF0^=et`fIBB zyJ!%qjk-Mo8zV425I;j0e%Pit9i_0{?1wo$gGZIYW_saG34FSYHJxi~cm^{rR77NV znflL5Xkf9;tvQ2Nu|;!Vk$$+q!sf+G9lv7f^OB{MqfD7MxJJ+gk(CSdENWhiH2F(r znLkL%|D8>p-?8mg^xks*4Snt<3}|V%G4CjK=l62D*~}qn;pW;{k-^=p81nmdB|9;{ ziIlLL-Jajxx_dXpP5G9l;C&avG@Nmx0)?Lbr$yR4g)NwEb@ngiiM>{RcOFm2>3KXi z)~Z*~M^Wmu@c{(_=;s3t0ibz7FbAl+7%zxcoFKuw8ghk}vC&<~YO+A8Ylv)B^R;l6 zeXkkPNFbM-njzx~S;)fF@U?2iZ3Q{&jQg>OnP9$;qYT26g+2(?0!snu`Q5XC~C8Sg=aL`uGTZ}bX z+SeFHo^#KzoTY7~xu3ANmku_*ge6S%+7C>>ELOU?i?v=hDdHxU0o6O_{@vp6m)Qr) zN{zM2H|I2)yL_~i@-h2yd4h4)UWn$LWfs>?<5yUQQX7BIj=9=Mjd!w#u6D-3BpP;x zb$sbdtc@dYmm@K~Qi2CVOb_?N6%UWz#agWDYpg?X?g=($Rbrp1?^C}`Zy(`Z 
z6q*e3{o241s*ejc>C6Rnu`~SmG>j7b5=tkJCY4e|6PJIpr)lAEqY+|t%5k=&Fi9G@ zjTIO6AM(XPO=oV~{_cRb8A@tAT9v_-=&Fb91U=#;FDYU{4Tq5{)x71XDET)+&=BPA zq`mJ%yz2^-g0F=uf{)tCo>|>c+OwA}UENCxILHpKPBzZn0d)(?S?!lYjic#a+Ra+M z+`0aow@}fk1qpc;iWN7nUT7!YoW>Ts+}s&~MF+HwV`zT#>?Gbsbef7#G|@-%L@RJ> zC&x^vn`)ZC%mU-0Wx%L4IU=w^(iSB!bqd$UMPoE=N6|kb1gtPQb!yc_9%`}RAj6DX zQ}>SGib!@+VCx0OMRhc6p};VsLs1pj3j*V!in+qLAp)x;EMH&_r{K8kpvIX*B(iC& zFhX$61ja=hHSEo=c_i`(>nN~ClX+aZXqJYZ6QR2#kxuH0)_%+zi6D3v9N)xTtcrh-(Ldp?c}mBz_{qw7e$f;edXlVg%kIp;F1N$ zMSC=E0|v0DuknOs3M@ikT(nxlItolCtfjziP2zFoqA7&&A6Yq=B@TPD`kofy@2>;I z`~FQdh;awcMLAOcNN2-5xzcCbSh;7S)cYgWd2MH7y=`dOhHYoLYZn<;zK8pp->`>k zJ4@=DtkXK#SdB!JzhMRIdPwaivPB!(v3u(hq#q}+xb@FO+}Z-)yQTl$(?vMh`|I0C z4JL5D=4-8`BjZ`pYsu#7#7{dQhHyO%tk$~b(zG|&$FEH_Kl3iU4(}IU^#Zup;n&%q z4RKQLIOxfi8jNEV8xo9@-Vqix+n8ulQQw+`dtTAls z>&>0lF%`hzdbw&J&dX4((UkshyRhs*E%Y%TU=_f%Q5uj9fW~_t_yB_dj{1P!ng&Gx zL)G7h@gUw8z)-dMbjqB|0cl(ic z8j=ewVLYvm3S00P3?K|*-C}t63kjr0UMP4nMOGPvI z?58OG6O!4vO%o;io6Pp+Bfk%9##>9H?!1BfAzz3Rh?8$vE5KBd zpI-}Cr>|w(-)d`qmze4rbI;&;zP`SVq+iP#ZtiT1CqMQ%dwO#liFuf^xtsCJ*CC4j zoE?WK+QS}!>QADN_pxqow~;=6l}&rQv+=i$5T)*8+um-|E%j9zNabRmWkIwSlXc|1 z0S80rG{5;XQ##B_PFH?ja!5AglOvcEV*qpng@=W{GdO(xFw9nPo;R76h?CU|Ymfp; zaKJ{k_?kmFfq08#_RZ|_JMEhlucQK_oF=td(1%*V zQx>)2&!J*6R1|_un?}x2uZxV9)lZ({xwwG76vk&R_n#5PKgo}*7RYcv@*NPWLg5_A zKq?C&krQz5(sbP4=nE!85cPx(CuT^6nv(DQOsrsT5B8bztcL9LBSwLg`;k5xveJ)S z9Ka*=p&x0XAyfTGaUH?DoMX1>Y{n!oWJ%%w=;{vfTG} zNW0%+Ew;6h?yq4(wk;05{Vs2Y@fIO=llgJBAD&~^wk1eEEoM#Lw+6kslrjbK;Y|x@*QZHeCG?J3vKgh& zTav!$Q6N5&Gqb{LIhe0KcVP*0e9(m1KkAS<5ENdO!x`&eax1Wmwh4ycXNdgz^?H0_ ziPCmDBkOucZb@*N_8rw7PAfPKWh}wu`X5sMqB*qN}9KX)Y@)az~BS zS!9?wwW=aSc6U#Qqr!-rMBLc%nnkoN-y4uq8~@{+WMDnPo*}lOUSk{9e1%QfVU=nz zR=6Xw&MUM>;kKN87Z)XCj~YFVeYK;r=@K&OsJf^hd$^-#&?a)=eIT`SKh}F^r~0FX z`?z{9gPwASAR%bkV;Me_w=>o$`le`gU~gU>A{|tz5HrsRQFnjD`aT!57r@_rHDHzo z>=S@Vtu26a0x+pr0H||Y#{s+suii-GVeH`q3EGcnESOooP>u=YRbMEzN3gLUr%3Wb z_U_01P0LnM{rPJNd-!og(@ES>gmSEGq^~{lS~Q5C$?40zK3&4P>`E|(!=VN-_QI}C 
z^?t;%yxQm$QAaKOR7Y^>O7``x(Z)&Sb!Z!F_DLJ5WdY0jq>J$$RARpT{ma(UqlsT5=>}esj+sHQVX=(h*1)t*g*s(n^lGo1e@0lcRv9r;8 zryD<84xhbS+4p<97-!M_*|(WxU%b@W$$IbW5K)~(HMjoz1Ur5-du3k>sqqZ<<-RV` z%4Mu--^{w>Hi^Aqyj+!ahRzq*-eL>N8z|3{vt`x8hU$m9<>)5d`tnK=) zBt6(0mdph-1LcRMFuYUS!^6=9FJYJqypW?PkTh~7Yw~3)V<)V*s$Je;Szk_V9*Oq~ z^Q#`23uXc%RWq=7fgv|KUv!7T?_ix%MdkhT%g#-{o<%hPF&k{-6{iekj4IBhWNIKb zSjz)>7de_Y32r_)+0~U7@cBKdjtJ46{-Pcw3XBi`R|qUiU|e(z7*&?q0^@U&i2`dQ zFfMvm({=^TIYPkP1nU7k1ZNZ+pRYJIb@|k&xZQki5+<h=h3k>I%KFEqM*79p@I!d44xxWKsR6fnyEdekes z-61qvog%OVfpO9MnzrpiTMRI#nj*NGf+Lx_L{nELI22f&8Y!?py7468qLG@qVu1}M z>~3cs*RKS|MV&NkpTK4ic3fbdZceU_%gh=#5Iqhev7ERqf}0`Kagpi}NzzJSn+VGj zSh~Qts93`$uIAeI5H?I;O$5e8(s~CwQydq0`8gOuQ(}7zh5|`2Rc#)Hs3_f^$zxIb z_eyW)vvd2~NpqYm_&{svDPiQ@@ zUPcQ=n^~#{5SQarG-rXrg6RTE6{M3dhuQ(MyU&Mk{|8s z_I&32rkm7w8T;~^h_G2`gUN8~;}Eb#zlvS^ra@S$@N;!|(r^1J7FE(9td8cF`lR22 zRct_srPd?`1}gaE1KQnfi7cn2sgnU_z>gjAJuj|uutsbX*`ca8<9SN#^Qq{l0mVKQR zpN2RA+(AL*@tm#!%m+AY2;y5G(N7~Dh#{=1UT(`%;$t7sS_5|aRFnx|wGXJN0egHJ zwg_N`52#2LDRBZChHqK@)|o?dT5QCl5A$uX$8ul^fK=tvN_c34a2n#( z;(oK?N1ttrAs_deJpOR-ty!e_pr*LDP)t1`DaPVR@JRw1Sq@Ca=>AI++I~$j`F+M8 zE}X`WAKBGAVWOmS55dvMGOwlg1S;^Btvt7bZ$67YU_+?ayZ0`3x$q7rCvi9lWJ4k` z7lLAHS_ZdgWk(lu?wO0xU;8;2N}~PG#*~nz!dUj&iSf?UV zOW^ieROswt@V}M06GD8!6>nwo4-V48mNsks6(CMx_}Os zYpD+@eR>W~6W+4ZTrsk@NM;Mly9YJl609%S#$myr0XtuAF|K-^5^VOaf69mJ>nuem zINsZWF7(P}z1A%IRJU4*aAO#Kl0-*av5!yn2)YVY{LEc`EB5!PuFfsOeM0Tg3h!Fx z{Kf0f+*OpB?o1R4wWJvrt%{{uDMZF3F=9%3o@0E(_=do;1;$0^yNaqaezOC z3(O|4O8UNw^KVkCngOcd$>!POTW7rm@urv(;GSggQqx8%lg(NqoFF0e$x3<5hU zFfK|Z?C8;53Eypzq{U;{Pp213i-)lhKU|eUN3%X>>Pw?+yC$6(ub0?JcI0f7bfzb} zdiI92XaxKDTvKU(4<`M1A^H*uEEYL_z(~T3fbhYmXYl34EX0Fewbc(n@odDJ{#2)B zf6Q~#rHwypWiF$qgFqlPw&?mjBYKW%+n$_%>gd0S#-H zJ*lCFk8U}LYpD8VQP6mp#+Ni6M6o5m_7AE~Zd$zD6vck|wZ3x^OV!`70Hy?B0l(7jBG zITqL~PqCZlEo~Q$6)*b8_a9Mff(>(gP(n>thM3eN3-oky%4_+UKZCSAIe>Ni?TsK4 zcdMvF1E8ZWyY*YW&^wDX_y{IfJR`1TvdG_uMlK|iX;J=-dOT9u{FPr4{2s>&e;-;e zpdXTfkBEn2L|hF~RBewQu}8nRl7^3Ftu8c^=1pJ&FKn``#<-7coPidvj31m+|D4NA 
z7ZW5~4(om~UNYyf=@(l{7hhoOFWS;i=1_iR#j9o74IK(lf1E)x@QHiO1u4|1Dp|?u zR^gw_{Xh5n>$rdLxRK2IM+@oAIV|Ik=2G<+SneOq&9%iN_^udKT4%6rf5b$7JObtA z)AwknvVG+#uXA9>{UxNHmhbPEKVN?t#L1Hi|qG8*#Q9fk9 zKoma`s3GZoL z;;rYicaDaP^dofz@{=DKtRcgB^G88DD{J#;@AWeY8q?X&923Z^euQ4Irj&WbAE#{s zdBKldYc7y>eq@ZWu?eJxA6e3zUB5K9$r7wN zh|NtJX(V`4l^a+zibok9j`UDJt$M^>Qim8fV7yDW`GM^%p`}+Q?7;ECMXH>Zo_;;wNMy(gyri%w7J)4chR|v5BzM!4g zyDtO=rz-o=9iG=UhVAaUlFsHmXv_b{U|u|qp9%GUDDLMv%yu;^^EyJKb&RF6s33@g zkTYxYa+!pQu}`$Cu_#mbhj45MrA%Leg$j&|Ry1aRUbO}1cB796@|xVI3+r%=8NVC^ zTce+6$FIF0B~4)|*E>o9Q`p?=w%RwR(CAHkYFVkLO3$#P*K?)!Z7k)+`1aL=XBmRS zs{(4+Bx+N>RN+M5S;G5H170SnLKCy=DTMIZOm^l*N>FhsS&N0hp=Q>+vU8A=+;l*w zo|$D=Hg@JvBygY;<0hI64hp7-!hNElh~+#16sl7MkRX6U^&<`V_!+KYtC}u=Is(|L zx;5Y=b&BqcOf^OTH*4}BGSz7S_>6RuAc|C@Aj$<%r1sTR^cO~)QU5e@E4K^aj2f!} zZG;h(>Nf&dDS%3~x(0L?8cgcj0(e#cCabDOiilpF#LdJ@zJhQFB3dntWbfbXEj{SL z?%a%#bX!@YTVth%TiBvo4W+>;Y}2jKE{W(i9PSlX%6M$O{4v14^|Hcx9x|_`ISASP z3U|pM3n5jDli0Oe(Nfbb%zXQ$+FPgtbz6RV0T<*?2YkukcF&00$uLiK2a#}vbqw;E z#{3O~BqrTiBu(zd3hu;7)7~MYo}2s*-yT}RLn&EeEk__wxx!k6tJkt0s>lX%k+hOa z--eWICl_oVfSy|0Hd?{|j)K?nvi2N}4(T~ki$zMd=x(&M>TTGb%r4x0z4q4`-2NQu z%J52xqd9x|UeAc29%TPJCSm_3&Hj!iiCwu@5E%?PrdF<$c>L05Y?A3SI`qY%f!&#M zKQ;10qXxwIAq}|QjaA(5EFDf{)~e^Eu4mXQRn5b4I%5yeP~<(2guFh3?8%s+ZtTEL zR@HU-oOvA(jZ*ub5JcnhYWzou?(-ZEPt6aX9d8fcis#|~ozFkzv46z?czo;gh}E=W ze4wl70iTmTpAhi~8~<1B8a`QN0$cc((~uGz89HRc#j=BGyXGp|5X+i&3(`p;IW{hh5#?3{g!X9ci z{keq&W73s_{Cj@*(49Uv0-E|l6YUjR+6$GL>R<6J_n(E*UI)ATPk$-f!O|Y~m%<#( zgG)(YrapX58r+u+ebiqH?91N6<&!?_0WRbFu)f}T(x2&`V|sl!JMZl*tr7QY+UYw> zsoMQw+~1bhe6D{^%9ja^@TBVXouu}fV+}ce>rFng9#LQa=@CBvjlV6FiH@xhr9qukUGmfo)X$P`|K(8v^?jvx|MHxn z>#)B(&8q3MB$HHaD%|g)bOUB!8P>R(&9Uwq8j>U^`E|jnIt|=SAx=j^U3}i z_H`|fw}!q=T?EJ#yeLB6s0gwxZ+p%K!4lhTkG>{8Zum~H zsN= zqogKDo)xw9d!+2Ho-{M$b-H>s(DjQho?B-4kLcogDn$RDv@_B3Fa+|a`8C2bt+qZ% z3e+6?YwPXOJDok%>*!yTOr430@O)hd#K)b4!%`QH5t?IaUHuK|&nuo0q55VGzP>_f zgTMJlP1F-<(=<@>YzWo2cJlET4qUw!uG8(so1eOhF^a?LsK8g3EiHVW3(0dlnBOwTYGE~dfE7Q491&1+IR{h^aU}~$#E)G 
zY^US|(4gY+Nwq7{P*-X%!r#C2R7U8V8JiGYkm{LSA5HXvXwRFt8b53SVMway7dpTe z678wg0K#MvZcg#^Z2;lsD9^kG`W{l#cAn20=xtJTl;=?ceRHY&cTe+3ST_52&&o*s zpjvM=!$X0+gfXdX=i4o$z($^khWZBj{+^Bv^&=B*SwY-|x&QjYNCfo?IG3NE^eA^d zt?MRZTwOx!7m07yfW511u@o<3mT4ZV0V z?@==?&@4eT@)7H_Fn;y{9R%?37wv)B8t{b=s4IYBu#`a^|j(h_-i$LYNC+R zDAG>9SOnsK>+C$PD#}_pdQkMFXHcni{FM{>FPZ+6=|4=OF@r`2fR04~^oQL-ol3(O zn_y*0c>y2M!PnUM9R7uHe0?M_B_zX~T1wjjsM%S2FqiVOkQz8jh2`~Q2#Z^S?-<(g zZA^PK7K1{l{LmK&yi5m2f|6Q z*IbYb#q`m&Y`%Yso7p@*xWUpCbdrH$sSYj97?jn0*qBuxLNoQ2W5~8Fo(|Epoq%Ks z$38Gs#Spss{d)Ybkr||GeNxQrM1Rg=8Pl$T9wWxk~&>q?p3`p?BxV|Re7f|!N;(Qxs@2G8a z!f}hUQ)rP=oF=&o-g?GS2`n6Ru~p$uX5g02*6gRRu*T!6pcM?0tUO96!nbgF?kq>n6ykR+emCKFD}Hz2d%0MC z#>ZWM&1r)_EN9<}&+@M|hh#1P*N^3zYloO?%lJ)>#^;^m)00Ybukv&(L|P-!Vqg#o z8j``bmyCOy5U0@JAS0I4;!!MtbvYJ%WHvt$6Zn(JMX19EVh`qCgE}YA7oJZ#R2rVX z|4FA))Av4}?tS$P^?{Knr5)xP*`@mgLD zK^Kh-H5bs=tS`sXrwCEqqkiPU;l)pC7iP$Dr|}hMEZda$^wAf@S;UmkMrlyM*KU5M z*8{ba1kdB1I+u9fxpa|vlNzEXf5#i$>Ss0glFOp+sPX|(0#MIrz<2=EcWj9w0on-l z;kR5x2`5O9tm#++MEzCxZuR>%fv=M^OyU~wDUvS*veXxZUVA>vXTt^oO!NWgG0LG8 z+zviqo&ZvPz}FgJ@C7kk08KTZpZdB+Je97+tBW8cpNa(~X0piCq zI!fPDYIN4KDM}wEO|0eF8>Md{Z93!mJqly%?Uy{&qxDOqwr7O@tYFX9X#G^_*&jSL zt@`>Q*J@IB`949O@`I;mV|`0Qzd!Z-3*=JIqn=fbF-86Qw0OK)O;30e3;{<8cfdPX z-Q@8{XkR6krg{hbJHa19>XJ!;yHs~>#l>15Sg z{||3(9v5}>{f~1mjG_XA%P1}&Dk_Kzhzp1)qm0hzjH0GeZe+PsW~N}|HW;8xFQ;hH zYiUKjTWoVF6|_u91>Dh0F)J-AUy5txQZDm-p8J}CLErDsAHP3-J{<1ro_p@O=iYnn zcFtY+*Js7V2o}b_*ee!AV3uCbjlHaaH};R;5q3xRtA_k*g|K#H>3q(2TDnF4qP!!U z#;+cg{syO^gu%2j#$Sx+#Gc_(G%)|LzNqTN!h;X!Am^UjPiOdX;oq4}w-G@9lhB+TQ2(kBr=nPr|6vA2*~`d7co&wy4v9y)ut9n=uT_u# z?Zd^6p)9QbU#u2bjy^hH-Ue-@o3(MP4b<}KptN2?l6L{T2P8BX-JeakXklh;hlX54 zd!rY`1+b)6zKYIEkXC&XaG``x_sNXHE$t~NDRdQSW-M<~U4}HTlzV}lr=nTQUI(c) zqli+eh=55%yzu^UQ4a!O41iKKe-kl_quF=tfLPI$wG6C}OhS6`;((fdKzz}a{m%A_ z)!o>K`sQE;)jxwAwQ-5Cbw^`(@iURpowf6Q8Dv&>2bsMZna)VaO(uJC# zEAsoW0senLKLIU8kvP}p8`zDw z!-pC-9#BZ@Y|c4*25MY+0sHR1z%qr%3)owIgLMiF#MM0R6rCi3s19@Z{bj`^>}I98 
zE%8>S4(CRyLv8Xzzd9SUc?z@05-!(D_=GxMf)862hzxEtKcrUDQbnmpO(K~tN;kV( zNJc~&o>RC=x@LL#^u%52@4jE4zV|1x>%8!5@oj(Bo-g{k=#T#FD&u9_Mb!Y7=zpG; zDe7w*MC*ZglIWprA}ASXG24oICbKxk`xc8CM%3)Xmk7=GbMdK>U2j;jRhEE0*I#t$ z$y)KXTSeI*=pm0)ir~Sl8ULnIbQ;V?^N6it!C(ln30>a-_2r}BrgSj-ivPA*EJ;B! zC7Z>2DQrLpM;GOmCGg_1N@yd^s0S77SEa3Tip7kXg&lqBEJ;< zhOlnIndmKS%5Kbo=t_wh2gP$kSWLU?UqIo~9*^tE5fcIzQ3tL0K~aIoVVys7iy59E zQREvX4W)VT{7Z^wVbk0{i3oy9=*u=2VG zO*3@LXOJ3gPlaTwQJWs@O?P-jefVq>cXwIM;uCE8TIU0k!WE9Ccg%TJj^WzUfJLfXorVmL(?npc|xAXH=w~T1Le~L z9WSAW)o0!!<`t~3v8gP%b7RWx2#h;)yQMGs!Ch+B zE`p&k(A*)0bbMv&#OhSmvDq^})R2kCNB}ZPZN!oPL8j_kFEa5&rqenRk9Xl&uvj{Z zMX(kkel&|0KaXNP0ct#&b;4*0&8XePOM&u!|uG zYjt0kD^Rjjf-q`q)5X&dv84e3k`KHFgouA1Vqsp8<{=_tEX!a4;*GH^J|Lh5HK~R8 zX)ILh4Xeb&acpYSZ%$BGtqn#M*k7e~+aZpOgW59mm}oqnMFupYs5DV97@G^zc-FpY zLpMa(U*=b@J!E0gA7a6H79HRTa2kFxh)qCc4aKqX%O}z$k$CV&3#11NqB^Hf8vQD2;Y z1XOE7wqlALJ3)qwnZWu7)~ks#0U>J(gF&FbF zNXNvgiEOA>P!fVJxPt;Gu^wJQmxH;mOhPO_CT2|nLBAS;%Mi519aL2}C=5aECTp=$ zCbI!8eLb;8_?P+BUo0QSaqUxc8j3ZOS;xSdV8a4E7Y8Y(FB4ZMgO%E#MhNOUMGG1^ z1*~unQNw?NFaf6^&cs)BgAx(sm#zhMP6sQ@6RR#Oh5lkmI%~rj6Km61G-|mbos9^r z!8Y`QHL1g+=na7}^ih@+=;I+|SQfy=D-`6z#Fj@{f>+SRMqJ!>2eqAAC+JlKO`8f? 
zb&33`$c~FvI8rE9A>@b_^7mA>G!SP}-67W-a`D1rh;vVT@E98k=h9=CYM^rPV9wY; z%t<|nuw%aPI1ulO4<2Xzyh7Iab8!(NdN(q3vS!6_vPy(bW078XNx&OD4HWK(%xSDY z93&Ov+Yc4<`!uXC3$Q@cPiI}aL|4#2;2a*KK94p*^ON(^5bv9Oqnu&*U5%a_9hf@d zHGvCQ$D&W%Hl58v1s8Rm0ZU5Dn;O%Y$C7MJ!lu*^Gi2l0d*bsau~NM$_C3kEvU^20 zpJYGPV~s@CTo%PA-&r-6J*WA)u-ouN7af|*LO3e!Tm~BjCn1wvVSg4y&1Xi&-y0)l zJ;&Dal8r?Tv)D_FS8Wt;KF^l%i>adh0#?YcZWOy0u-)8$lX!6<8#3AUIrC|RAHy#J zzrpws?9_VZvjM+X@OvA-kMSeeb<8IA;P(}Nzu;GmAHgqj?RI)??T z16PVYOW5t8L9deto%>{ujhOJB#E4){Dq&)+M|Nc%wS`0hUCw!&4{O z?mDqPUooKQrdKd5Rf@x}u+Hp6(al#dP3OD%h~cj@2fuox z==kd_m~sE3!nqb^O|}ZN`dz)m@Kx+84?kLzBp{?b{%Fy%)y%|r>d~TuZ?JDTpDFR@ z*?sphpQ^pg=OBKU@VkW{!F={JpH#S~@jH*-O*ia|1I%Xte!cK}8^3k<5$xTA8vbIq z`S@X*gM`tG-bW8HpWXO9i*GxA1p5H`7SPxGWufB5 zLe>M{+Y8x1IR6$xcJ~w^q%FfzdCq$fE9n@D%HL!AS)*^EB*q(ahty5qinrcp0;}ZS zA0TR}$oznn(31B<){T{j$3J9F-gJ+^eE`<*j?1;`xkPgm2dbxxnoJ}qkkMqOj22Tq zVkh{JWkvDpn8NuJ*<#NpY)Q)^Pj%I8GvDqI6F+5bqPErvUhHX7&K(3_5g}_I^OqgA zqIW-KBROA@E&lwRjo|&Wiw3P{?=haer09U%iv)pv{_xL zirWjBnuTSZu^3ZHVjcdjMdF9eES|qVMl{>P`Ub^%;r_HmJiG-;Ip=2a{1#>p%7yX9 zLnmg7sJ9hebmnFewUsRw$wh2pkPj}@z$8{KeE!Z5pA@m_LBD0z!m=|&STS1^#JylQ z=81L1tY=UwFWA<3LM>*i`J!jV3)?WM?UN}=wxOdho-2Od1|`n5458c3dgw~9bGbnL zw2`%xFH6?y)2zymHplRAlVfm*#W5y$OxpNjLFJR zrre`uTmWKmOzq2vbySImKg1 zuo{)YvU2Btr8{@K=RHoV-JN{R%_Xx2px7^Ebf@K13*1&`b{)oB)_6bkkh;_#$>Toe zizA$Vzau#`0a{LPwxPG%gVjM=?ZZ}+Y93U>Wk@NGWS9n1gS`vWIdiqNcf#%gGfp@a zs@ahiZc~0SDYvW&+SWf-MY{x8i7_h2OE1*R{*~cf**PZV5T2a(GiLw7QXIB9My0G& ztr8-m!^$qRH)9x8NvGHvGAL6_hvLm z&b^_}Ek-Lg=KOJm;yjHNUK*~j*5^J$%|#iS3TqH6()7#d(Lj&QQF73%%9{aL#LN`yfAIj^0#G2PG#>24gvP@T$N^gGQnPE9IdMvw zyPmAd*sQrLtC!$WF=YN!Mpmso%Iw0DhDxC>5C3N&+|R&hSqd5hNsxT$t70K#YwdvTxCXAi81Gv@7$lQXl&Awoi6olht-E+t(Md~=aoF)G3TByM)D)o zmc$?DHg}bgA=HByBkko`KcRiX=r$5XA;X%L;>pPWDHSLJZ9e5K9QT(C6$@@i&LZKs z4QaSp)w(3-%)Rq-V9QpG;GsHltF4+Dxyl7CdGew-;(qT<&3Z*s}4m@pWag7FLaABCV=?Nkb~CH#LafpI8-V0Q`N=KLEOK`4Q_4P>?X`DFIh zibC}i#VvPH_-jRB54MGP%7WC>t=;ip#m+lbTNZgZnu)fCo(_gJ(l*Y78GIv3!>aT( 
zJBNhCoifIU(FllrvXRvMn`7{UJ?N?0LHxZZG3Y8oW+IXHt`f%(C?J8>TH>Ad!dpmq z)i?$~TEiP%t18j=r6^a?hU%9OnTcw_Xyi%gMK1!Kh=2(fj1Hj3y7AIC zti4$zI4R1GY{W7uCtRs%xh0vMs1<@0{LQ%)uFq)?n#eNR%K4)>F@fRad~4R6s-MWX zQez-*eq}@Z1E3czzQvY2-7K2fRGQ1tm7+t&3!`RKxj8t+krgt%Jh_bz%QuH1m09_N z+NL-bBs+$sSd}g7fNOD}govye@*YWsanwJ?zXQr!g$Pce>Hk9!S+3MM*sn(2K#I}; zh zl0Piosu;t~N(+k;q#l3Og?vyFFM3V;6Wulhw?#F)hnck=vv$7;|2bEU#k}#je zK`F`}?EzZ#m%nJg0ETSZBJ4^C>3~KdhWIAB`Nr#b1`#W*2F{eLWVm7Dqs`BuEssLNTi*k z{MGd%sTPl8RccmO6bj>RmeO)aA@W={q(4UWCGmb4<1LA^)8>sr+ZXTbX5;OhdeLRp zE`9E+NZg_fuqpLz%E(}wlI)MC*ZrwCj+=&%@}a>%0ET?nxQAKsrFFx87p9FPL&*Br z6jKy@lw>rNV@d;8CB2b?rgISzZYjqkgJOqSfbKG6O$VdS;UQ+xW)G&dU(OVJk7MzY zzK7in8i+8Ek#p#{Y2v9e)-q_07c5|!c(sfhX_MY?$MAg1lj zCy8|j*&BTS6fyV^OzI{v3r=`CqL>{EA6}>`8lP$QqUPVfBQaxIS#mw)aA(bs4*n)Mn7Vo0Yy+Wqz;)DnMGvm14eQruVT) z5m`1g5>H_p4z4A^IINoR_Rf9iuil4_UIlMf19#gCxTX0x#&x~gc8N?_{Rp7@qCa$B zbV_ZGJGCmhIleA)9q&VjNI-ds)U1;9akLe6lsmNq39{G8+#fGLn7JnI|o=jxogNZb#Nu)N7@88tYaLT%_llLE9s+)HzJM@2tRaTRzU9E>y33mBG9@pF# zpQ`FRcXDkXOm5bF=#*RM#QCN(A0%C=?#i1rU`a1fg z9D9tO}z9+cg4Oy1Xg`*@*?>aM&|gdQafDNpm6FK6zl4<>O}5!b$FVFu^( z50KlZh=}8?r(xNHv2qmg)N$4-r8CAQO~PyvX`sj2LFGD3p~4yp?O#2p=f{$4(FwM{ za5zlv3xvS!S8>t;gG+UAJ6Ics`Z1$^D|RIvRvlvZ3HCc5K305M#k$y&Ngo06hP_W3 z-4@9Anz69{jkeHYDSN>Rgh(46lz>73T--F5{^6%>tE6eGbN*Fm7_dIB=T9rMmKY#Y z;?skzPN)nNvp;qo?{*c?reIP$95f|N^U<$}Cy@kotfHDdm6nqKSbuZgC4Fuvz@!ri z7c)YRI~cli!^zyXH^od$_Pv`4=dwvU1zet z>X%9p2Us1`gNbL1+4bc(;=)idQrzCk!kUzhffm5g3Bw?`OoaV;l=$Z)>o5vtJ)dg7 zvcdfsag=xc9;u4J5$(lDHD)iS_nc*^ph#%{ZoJ!6i6W#OBg2!)>^tFnXc#1kzIKTx zet@3$`;p>}AJ{1VVrbFDAJ}2mWN&K{wG|PClt(D{j&|GQA(Kyhpn#(KOh zKW-D1r&*8i4>YJwryw;)eP~ojUzsM$^{{$6M6^A_Hu10Xi;kYb?m_<83!?g0_CnK_ zzjL{8b4rR*E+76ltj<0vvd^-yK~MLf(#LM-ks<1*c|~W>vhIv8%oR<4XBM975R-q$ zl%wn=vF>-Ax$<8ob=wVR50$Lx4&{pEKd^mdT@x|x4;IJI4i?HEY#o321=0H)yUYJw zD0-UclsH^8)ea1@?H;d;8sN?vguh z534`z6OF5ZFlCVzA>swGpc)#^H=i%sT#drz1D+GXm)Z0d%PP=zl@rJ#_D`?FOr5k~ 
zkCqGPWj3t&_gPpWMlOO*2dj=4eH6abBTjMYGR~Qu&lJmvSj8v>-b%9_AfwYMT-`Hv(Ndhcv1E@^qoiI#ihSlOCI!`@W0MBH@_OieJUrf|3Wh+Q|Boo|0iq};?|x8{9{*4@M;j{n_D?7Iak!?0H3?^~=F zU(`!<`3JMJk7LB1e^`g6ZDQ%ti!+{+vMxjGSaJIwmS7I)DW`pMA4~v{(B*_pjRgw(7A`ywUJJor0E92OiaB4~L=4W-FlvEnu}*>j+9HY*l?=>7gd-=r>Vbq`k; zK(B3hUWVY+qnQ#k821Ai@;tDIG#D9k-|)baHCO;(A9!G0H5j@#!zK?b1Tf+sXCRx| z5wHybs-Hy6u@ZYuLzxHnQp>jZEW@} z0nOGDbB{?0C{srw)V0T$xSoj4-E6{kGpC`wC(+oJqT5~8!QMv3*roabJ4G2urI_Hx zL4#2*g@DK|V`A7`9R45!y)Q5)szQ3SbRGYA$}qVB8(-Dkr8^u7IMUz8*c1pOwkMuPIeh)JCYkD2h>X1o zsMEyL^Fj-(ir>!&J_6>jGu~fai(sj-Iz9I{+V2-bV+_WV4D9^FHb2@sBsz3q29b3S z9sTGeao`?{jqcG^_NK~AXi(9%Vu~@hfchgGw@5U@_jZ7Na6~plo%*kcbg|4XC$UK( z?x?ZYpR80)g;a^Sa{xRzm6^~sQV_Pu`ly@g7nQr%5ysz-7H=}XhS!f4eL4S*_v|79 ze0eKgFH%JM^4RflSeId;J=0}~#h2Bo>jP~KbhEU#5j(#&ccIp+&ufF6BQTT#(MQOU z1C7$u1P#Nf>#l*%dcfVAh>gBHw8NB}q_ru^f}+Tx>@;KAfG5H`LCdYuj{hu|;lobi zsxR-Awhn2dlaO80S|FfG!u63qPq*looJ4y-k}t_A=SY9m*%;joCP4K1*+_6W794tY zH(NDIa@jq)lUU@(=QUp05h&19QKEBjE~4Wq9d7MiiF9gqROJ&2q@gBN6V5}RSHYs6 zgkQ}H#!(~hN7n{Mxsz=nwF8CT#1e^^S$T&tOda7M+R8)9h59bTxkw7bBj$eCYt~UD z*5e(ysk8V+$J2OqC+RorCSqnKjN(F$_N%8h;yL4_db|T)(Mh~nk0-PpB~hF&li)yD z9sxqFu#{XAf7auj`KKL3>-xNVa->f`Q~Z93eDG1 z^O33#@rG)?D$N(G`OauQU(Kg#z9h|em3($ZH&XN7(gIs*K01sBqW?jOfEjVxP*)LVegTXuB>$PD=`+8o>EmYR!(Ecwpfc zy2zs8{(LNJl{^NIEH1r>H9hdO!Oo^s!4jv6(gr-NSvnN5D9hP;D4%O|seF=rnPV08G+@ma)1;L@v%#nSycq6Dsy_TMeWU);Z34nHMDCV5u1wv$ur5Csn z7P^#tXfai?4xZnJKPgHMHAXXxnQ{nrLeFSbwAUl8%ApkGnfSo`8f_t&mI^R|(YXhZ zhTRFaP4)TDT`n}$Sx}LtBQF{Ci|^qgU>8n3yZKc1$;ANjLh;QerJQ2_cg{thMm)S{ zHzWxyGLI`E>uL#n#1oYrF-m0#rDp-0DU&rjW{P-pI}5Y_gGyUXq2;EJFn*z{fsyiB zxv5A8ASr)sJMla0Sd3B~TV8HD(Q^CRL}?s3ZFZQ>;djLtKZIp;fiYw`(LuiI5frEA zl{{!QBqTuMI!!0w?u@%(Rd&3C5UcXJUBI=`Ql~Y#T>aKb&lG@@kpQAennvRAVG)^W zSGT5eKZ9O0pBC?ODK_n$+*28GW~b>LS+Xf)RkWdZ!bH6UE5MO;H0&~J8K@#EdS$7m z427CftO(!{_Tk|q-4#YYDpB%Kr9gp9}?>Wd6<0_w(LPC?+^2q*%9!sC8ylql5+_M zS)W6ez<@qCkUlV^e5j(XeBe1Cw=ATtgEsEu>^H&LxQ1AQ_ZPA;zBXxi@c%h4RD_oF6DEx81)Q3+B?bI$no z_tobvP_XQskk7ct40oLiC2h 
ztg@V>9nB=foVd$ImWd;>%QEW+SD_!OK#Fo_26c&2;~{gYwp|CU7LJ5~ooArbSP?<> zsC-|ZD?d;gF1!#zYL!2_Yj*utfr}gvnwtIaz-orAi zbcfZpZXT&?aBnRjR)t?MkNN+D!IhZZp~p1d&m8e)e;A!Qac@MK7E|X;fh7Q(*U}>YhV&oMlr~ zdA^8@UfO9ji_3m!xU%Rvnlns7I2M9bp)EV4Ml^x2fvXfM)2$<^$I<7ufCs&fKCe4I zQj{6tW~~JebNZdJD5tXr;29D!q)x=OwjR@2^>1jy!<5hdpf-#i8>%WY`(xbrmJWzT zi#d%k#q4QLNSNF0xoDd+%VkSEJg=E8x)eQ1t)Skxr1tCDs}MGNVO-6W+DYQ ziy7UiQ*0)a7>Z6%9!SnI;FgdPWP_*EIBwu$1WZ6TD7(x6uaIFPrU`G?JaRqW4jziG zvnyuLvghnfhTQ?&R8FiAr)YLA^!p>T4pr8QeM7 zgqd9PH8F?W@M~f(kxp*RRgq7w|5dS(+%s21 z6}iP%RyBuHa79FtJM)T2B{%ts$Rao7iYO%a>Sa+uZpCGBmE6M1BBTY}tji*q+|#9X?E4bO!BA(o|YLQNEOtr{w z#ryEtJ48t<-h~f4B`&r?@rUjZja&1kJp7dC)S4Ul!5_pEt@%;j;D@55HvBEde>^9C z3`N8z14XkiK7>y^CnkpRcey=4xWafAGm5OXycfUvomk(Nf5!P{F)^Ia=C6J!4uoTm zm$~R#IA7z-uhtVUMew)yvxTBtN8XoLREp^x`BMXN^&k%JVGoY7tbk^n&Sp5Ge#Kn+ zI}|&^{H@9!RZkmkm6s^gqSQA#{Qjwt|L!=S)`@pvDI&2GU!Wh4kqr$?uIq+ltNHFu ze5n3mFW`0T4R6(%|J2QZz3IS|Ysm(*hn&q$BdX0Y?70jhwNOc?+J_ZIMDm6#=x0n_ zJk*c6ME@ur8q^4JJ+O~lVn!4n7WAbT-bk0&8O8So<)RIH@UrfSRb6;Y$lG49e%L}@ zfJrc#P^zdr&cemLE_^}I>i~PA<=+*{q7iMq7i<`Ik=2cMIhrrw6??_(t~_RN1U1S! 
z_MFe?UbXuSrd2~}a(Gyc>F@yh;-2D{uDrRu9fcfL-+qwb@c%;aWjDdY?P1ciy!R?t<1# zDYqZQzZFwk2-tfoZ7CMmQVRQuCEc-7doaewUNI(l#TeBWgMohQW^F|F^vaK6<%VlK zl|0hCq$HlvzhJowy>;%5j6{v6VKRlbW(78JY02KN4wWekCi9M{h~a_!N*~dq2RA)| zGBncxqgvGCJ&1jY+CC-!nMf>IJD9X8oDlv1_BtQ4DVEz4B5_}!T?E9@hR#SyFICzH@0~o{Y%}&voX*mz4DfD#$cWNAnA9yi=8pN z1?w$N#6Zyzj}y@`Jfx}2N*&?tB{p^CO-%)ZwRDTa9*`*=W;@>EE%}RikkU`xNMbsP z*4=sg7BGrGD4k2Xi8HaheY4gNq+#wRn)l>kR&@<53a)Zn;+?t6wQ8*=6B_7kwe!Uc z2P3Vt+Ih*%XK`q^HN9BbE|J}n$Mem{#P*&%vh}d*$RHh_%L~@iezmhWO>u9u z75RO6432CNBi#x*Kgh_j{+_r^P$aSiQu!DR8ds^EMNm9%?xw&aI*WesJW^ci#SP-Q zc;3_<5dKZ42TAv=A)VYpQVUV;!*UnFW{vA9orEi%cVb;drv%;;HnDd7Ad6Q!66GOu zV5LSDXC-==p4(hv@f{t-y9vBCPG`zUEB;HQ@R~?LTBNXt@5A!BvCA)GidHR)C8cI< z??WN6IMC0N`X>f0_3IJhiGH=I?~Zto<*b@WL)?*Gi4X@VQgc~0;ps9GHS*dv7f}=G z*JfIMbc-M<2|+%oM!Bm42}w)VMLeH~mgpw+MTZ9~V*m``TF!d8N!dG)khBa$i$u>O z+k!NxW;5Gf*gKj0}q#vG~K~Am_#N19?KwBNyZ}NY2(qoEIen`S3m#SVg4P-T&C# zI;O?Si|)Qvo6E&&aphd@pel^XJiC>zT9Z(|Oo#(uR#rshi`Yl-&g|`E-ZbbQV3>Q! zME^V|+8BAOzT=`vbploYPe<>%jGC&S{4T@$n524H7<9;rXX5tmIq|fShw}F}ixoz0 z?{xE!ww`l3^BX`jaZExhb`WOg!RN)eL3|3=-xNiIc)O+-e<#ji z8CemLEjh3LT~rU^{U)FLqn6uKK%%W3ky2&oF_D<0iG;4qlD4R_N)($(Gm+SkftSFs zR|c+V^klIqzl)r~U}o7DV%=b#%2U1&{|@FO_-|*$uoT{|$-@}0tk{umro}hL3m+em zkplcX72=H)-mBf^vruB>M&u%Z7}tvYJb<#u3H{6v9x57{cw_Uq-@Ni6QHF$L%Pq>% z+y1;af||sNh(bhGn^E(F{h2&UkhDKHz9Akr@x}IUVF8i3jXG2}x7Uz8uiTE-QkA*A z{40X6O*Eq^cCYAhnFo&2M36@D`ya5?3RCnI5hX~8B)2KVlvEYr$g@6DQ-(qBcLu?7 z`4<`kP1-Rr-ASsM0prCQsfoVUS3?|5An z7%9DJOE2l%;^A7A-go|_#rNLK`q=u6+l^2Kbm=|m1a}!;|3xaKGHRvnRY0g9vXO`-+BQ=^ z{>GnpgZ3Kk==;;6?_z>6Wb##u(-y3pHrnGSk#6K9$+$r-ube1hPs>^gN;d3d4mTR!FC>0D#iy!yg~1^x=mOc@dIF4i#fCKropYu^}8vLE6>~HxjsW;MUmvXu~#2 z!>|QnXd%Pyf&F)(6ohO*^?Xo0^d)Js+-f?f-SC|U?L(j$wMIwLa5EI-&Pxmrp)fqX z#*=3dgymosDR-muPGxq+_N0vVZXF(OobsITGFCx~ETrfDPRC@lqiZCh^FFsz zRjrK(W}1wapxhv%B!e~D8EDs}Hmx0SAgy%ni#EkW(@Pg(r9xIK+_H*-of!^!g}ygw z=~YxqjOsw-eP&-Dyo3yWPXRkvS;Z=asHJY*=bI>fH^E_GGku5}r!VxlWbz+HaQ6_}P zSd^#Y^^vf>Ta>fL$x|jv9hIh5#0tYlgj;xzq@>|jymNURlv0$Pb+Q@tbIoXyjbN7{ 
z5&+pcMD{@5)7}w`c*MOW;Zj4a?Ui>!kfxG^Q~nz4B|(CA85jQ@QX6fesjlI1PiS>T z-Q0iqOP$;|(-M-o-}n=PB&8-Gby$tvQ=9d8@#r8PBJf;aY%kx-l)W8PVW`AiGJ9LJ z?0v4);x9i^_HNZS{xd>n=81tnA_Y&jDuvC=!+ZCI0LkulYkA%7cFCDKjGoq_N=6ro z_so3#z}$<(hwO;J#}3KIHyR%j=rT+_bzd<*3V^2*5=JZU(R4Cc^{_OWLLIY2J0myr znJ*c`#(<l>IHc|woq+}ApP4TtmAtz4}T*NNNG zR9h?X>Rc3&N}XBK$ctj&a2}EJ1euyhC`xwMt@eDX+ghg2BSFxXOy`}XAZldP_{%>O z9}VZ@8y-JTln$%os)WClx7UACRl_2-I(#M)th{gFXNc-yu~00w^47gqV0$KQpiq@v zb&G8b%E^m^&018+L8>@t<&W|YRS{!DY2=>pPHLH$V&n1pJf!bQ>iH8suXW9A*_(Ec zNZEP(+q$yT6-DQjmQ#x>(~1&*+xSB~cd8gRf@4y6PADUI4!_4l@JQZ4G#$rVOgO2j z{c{%JSXE%of)F1$UM@icSBLI%VV~2PTCtl3sN$Bott9EL)c{2Enw4@ZSvX_-A+Cb?pSJQgIs9vBpYjQ~Zd|HL_)r+?QHaK2x44$2~NHwW3HBD1w%~YfRS@HZsJo}+H zQ5#leT7dCMI$?=6f7e^LR^N%RA{xWbS{$flxGk|-vVV)e%`69r?H*ceU|DL zdeO1GQ}fDldA@5LdheQ%%UUFI$71TeeXTe?7N$kI!fCe^XSBU%mfBD)54(G&m9$%p zIx48lk7hQ&{7Roo69H4)C2PXRB;oqp3fb3gqrTRvY|{FCfhCH2d#EP!s{oKP4`fU9 z2+{Wu-o5!NhigkSPakuastPyKkk`19jk4E(ID9S?gU7iGcCEqupS2 zYSa0e{O4>z6TvK|0O`vy8`h;%@A->n8B89FT{>h8G&^TS{DPeykZJI(J9YroNqx6T zpR=G1s{NW($$Sp`_pHj(n4gDQf2I|TN$7JMV?0(2z7;N|12?#Dgao>PR{Xu1Z`)rh%Ad=iT2cNO`IC$O zf-U!n@^}cnmnau&M5HJ`w2wqN)JP>oGx!HZ`csMRm-O0Q&H0CiA`EvHTD4xjQV9{dqB;JiL-XNAv;?b;Fd_IYnu)(`zL^0=a{x~ZT-#-pJ-LzHH z5WC;EV#jpeA_xlI+M|=r#HeX7KmIUVygrS$;~TyeMbr2^-ei{OIvv*H(x=4Y>3jhD zSbRO5cVs!@*Xb}K&Ko9L&){95z&6j|Kd?cf!%T45plISu9>n?T_ljmc$=5J`yRfLi zEZ)+OpIKWJF$cyKe&o%fxViik=YNZ$;7s0tv&V{BJ;y`a^Ob8w!W+Cxw~rwwXf--? 
zG&6Cm*RU6&OtZCc_4H^At9S%=JPi>9Xg^aFyun*FZNHYb67JR_E-)*B@4h8=yupq3 zLu(j4-!c>J1anf%A6G>i!2*PW5#24I?1bESSGL6|IXuoGfy;I!!1ol_6Iu{U6aj^nf zO*+!wFIQx&<;lFlBDSvO?Sj6CR;fC`>{}6c)MdyQzpv%-!LAo6tvAMMX}xk)bbE`N zqCb()6hN{0E|qF9*=5+W99|{O-|QQbftrJ49#%&U6K}o6yM-=57~SlI?K&FrvxJP# zRJDa!oO_G6WABRwZ}YfLB_^r^T(9gh1WFv+RhJ=Jl6F>7*`Z^+Ge%5(8zcCad5{K- z%d}}Cvv1BnJd@t_*+*F>6Z9*{4<;r0JZLtMnBj;gAj=bgSw96-c><<*0!p<2JtlCH z)DTa=IxS!T1$3cAx@jR$%Qy|AJm?`BFms9fRp%1X`yJkK#P(bwN4JzZ14jUtE^eIN z`>yBh=Y@F`T(P}A(r_7^K+bXT3|)@PKU34g4k83+VD(-K?=9B7gOVMe>lQl`2F1W% zR$>rF>7~b&lPOO51eK(QV z;=E>+X!I_RXnYq#C&j{L=ZF=!>~)d+E`P1f!)`FnM&TJp-GUboQ?ac^#G9{+EAR65 z_6Y##8(V@Anl3+$;kbDs@Ns-&f3GD*(LIV!hcO-r_W-{jp8Xo7k9lOd$!8F9ub$YT zB@RuikGc=1^TBtCaT|OnG+nHWMO%QCK%0zef(6$kHCExG;62{Z{vOqUV!BlmCxLP* z%U%gIXmOGeM{WoU$K?(9m=y0aJc)Sf9wcZ*!<*@e@FTXTsi(itpmPXHNtY3HFQ6y@ zGs&fGBqh2QulI?T|KUOQ4=`UPS&mVoVZDZc{A~jh5-@wR*7>x@OiqLNYbZx*P=>ov zUamp;-7KlBuR)1)qXb~8uTl9{Ls1B&zD!9sS$zH<-kLw2E%yC~NAQYC;_83+udSX0 z<5a)bbj0*S5vf+EjdFFgKwNyEFB}^xg>`H?W?H^u*XhS!nx3M6Ch#3q_5{7lH8)B6 z5zz5C0q~)F%l%-cL;ULf9eqh6x(GJC=N%@yLrIzFr& z;8rE@x|51KuMkorb7(BCCqDyMtPq1f=8GEyX3O~FoTB1mp4wwGgxXjP7@AiB5>)41 zp(;0J%h>8P2qn^30xU2rR*0dWVB}x7Ld=J2U+U&CM<(Ii$Uqu0@!n8MVr@jKFWjZO zli@&9h_k*2;CI^c)ul#Rsl$d4uki0d4^G3S?;DVkPy1#V3Y?V z$BohApc^AhBXQ2;PGjmgk@_i5>9jfv0GiY1g+mH(z(l_S0L74xEF!1uMH*G(#P(0I z<#nP4`yRub%Wz_eRviPew1e#QS34!i5_ch#^o1Cc4bfVJ_hp0+-4S{t0!`SH=@cOl zlYxjy8Kt&RA}(I6QI69nC#x6xNy@!;xJgrtBVY_MhRGOKcZ?{+05A_gGqzMKhItO5 ze8xuypOX+>jC2BH9pchwyyLV_KqJTH%Csnf8}OMB*bzsJ^n(J!;J}aoP-3ZmP$|SS zMhSdHV(2vtI)J9==6hlEqVufiL6U4pL+L#fL9Q}IH~2;I+~>SQgEkKlXS&xM;y<7B z0sOW@oc)}43TZ?PIr2zSl)&!u(I?tM6kWIZqTPBv#6EU86|`8W6~`Dd^eKkb`G|rO zRl#!qdSI-?IRzYSjYKbI$U?YW54npMdicNqiF9v)Mkp49P-F3G$7ETq%@Il!0Zcq6eZ#VFS&{&{YodK1UDd)-(goBIE zFu1!6L5oDlMn0IovQ$jk$W!?6rQ$yuc@hs;Dt_I_yVd^y!=KCWjA*`zkG5={kETP1 zQimI%xahqS6w32T(BguST+JJZ6#%FmQi+vg_|#Zw3|vRvkMJwH26UaO3E8xhd(WeyeJ(To;at$rVc8I68_Pq5qPHb6c*6{gN9W?g?B;5idU~ zf&C@w5TH_59-)zYrY^Y!mg4xJ&<_ctY~Wlt_-3M_yZwULk7OcCp@y7} 
z+3H4wD7vP=cH~vyyFBjz{!&@dob&cc(R4FE(6b{*OJ*>9fSW_vl)0Ii8aF>-p>1_c zOUBQPAEt>0Cyp9#;e+eXl!!lw30ttM>79Mzxh=epzg_~D&KFy@@SeQ!qN4L#_%z0+ z+eK;-Z_Y>Ci)I$_K*pDUFP4Z{tgY&n=?r%8Qrcqf1yU4sPQS zkHjt3DiJ3t9;ce5n=v(6z&`T8&leKMJD<@w?ubrMR>8^wWER0M$>6GI-C%m-PYHZp z0(U(thHnSgYo4YFa_@NI*v^g3M@!#DY-xi=Bol~eG1C`{GuwHnedhv7YcJ9QupfoF zTTCB`XJ|oAw85X8(tmwbR}WHD{0QVNh-r@2eMnB}Enk|`=9Z!=3J`XZfJ#6Wnpc(- zF`Xa+j=XFVNm%JRlI)k59b(Z}Jj6an24ZT1AUrw?UYvO{9^^a=kAWiCVair?cR&`Z zzj7101j+Do0HBRlc~$q1&r^oF&(;cW6{U`=#1!4%fP>xN$su+VtMo4-7HBp{oC6s8yk)J1ttqF6MP_6S2e15XexnXNps(jvRbaL`Wof=$WdN@ed_5#o3|13otwzIz`1ZO%*Lru|e0ws+8l_=!JI}ctBh2g%<2aJLZM9PD6Xv3+?=1Ee}cG)TX*rLmQ@{*=4@I zb>q$V!h1x+>)=IhjT>#J7h0T#cBf};UNhWiAy8|1@*kj~eeZ>4BQ(nYqh5HwcawQt z?}gXNO>UnT+BX{7axXO3AT6&=OKMZa5pF8EbT70jH`+uEP0Rlr4KKk9Zxiqe5cdO# z2YIk+I0lIWe}NmpuawdQAX-EC8^`vg{~32mV>}2AHH7ax2qS=Cr!<;;>n3r&tK|0! z4@NgPiKQMA6&k`T9)tiwaQ~y(H%#;y5yA3IHX7an%xmIq;AQ~XK+$&2XUfLpG88|F z6dk=EevSw!h8Kb8!720LtkiI3QRKT;Tz~x9KHXdsEQ%i9ZVF0Jrs6Q zf_6vmnHmYB7tW;&clH9Fkg0UkP`>eEWS@%l=pq#rG_@gVlftu`(Gh-Q0&d~b1q+mJQ>yLIdlcPs^ zECOPRBX{IUK!o|t>3uvc&IWg!u83nUF4f;Dq5s)QO_30z%Uzu`U|6CLns1&rmx>|Z zV()$@((^b2yKMmNwZp{XZ+RPgLwEpNgO;EUX>u2@;lBSU`eNx zr7$Vqu?5OPZQMD74~#okHOB`z1m}I%``~>eM;=zZ7^f)A$0wNru265}$h#%sgiP@A zJinSRJOL=dG*st>zyl{-b7C|nnH(qCeJM=4J{!fU$)Dpb8SKb2+X+oxjB1->jxMDU z*pa7^aMIie<={JyE;oQoP@*owis_oLbj38eJTDut$V+d^^SgQZM9h&_K#rnQ5D&i9 znlDN76~c!RwP>z;;I*$L3TT3F1L87NJx+D-;S|(?KJO*s-DwEeED;t~l zK~Q5YXsQf~pdhC~+t3#{Lc)SI^iA-Tn+k9J{bnKrYF)W$BbW!WF3WzB`UJX?HM0bK zoqX2K*Vsbv)JW0@#b25Yqz7V|fM)BS(~tt~0?=KCiC~%#rUC(rB{Pa@9WteUohCJ? 
zZ;?k{R8L__;1(IFvlhvopNd#!rM1}++HsC{i$uIH@#d`b1C6Hr>3b!8OVwg5GDO!& z{3!W%UInUpB~jwgH^`rUhIm>Eyqq00are=$)RhAU)^Xuf3Eh`T>%3x#*4_>_Xn}|H zN{Htl!eo>GfIK$Xo7~un#J>I*sBH~g0FRR7KN)KSrFR23N@#uj%~=$ItI(DeBT1~3 zGg=sP?zu7==kC>ZD&^e9d$ns3rr^ro{>g)L7Txg!wtWde*!tIGuzsuGod`OJnsU#P z^w}g-7+MqpAuiB~p%2eT@sNEw&^4QK_%WD4`Y!?#R)b)q9)p|4Q*NX?KfV;{PQ8xM)%$Rior!jCc0?Kr+BRZoC zu^X1@^6~A1d$Iq5nliSGRP?xQz$zDsS=2Jnr>al&gXK-`5-86j07}ruu_FRsDUKbH zaLnixrw>A6k5G$VXcbsl8JudS;xwQq7!zjeHqFR!6ip3V>F1dTl3djhLxgW>7Cp z0rsKp=VbpHEJ@(-@8ej!c}U=Pa~enpvqfDJ^*toYbUTp+<|d{7!5O^Rw;hH1Y zE802+qC-BbV~bqekliE$%IN4atus$Xm_;cwC;nyDo611J3;-lDf(SXKqyiYpk~A;U zgEy53Is>10%H18VBvw$jsQ|bPJ>3N&pyTOs8& zD%%JN{q00zm3rt3HM<-2-?~?xr13xlIb$_db#vm*85o4*(eEjSOKHGS0w+Jo%pg*X z(njbgEr*JsN((iDkjt!%&7#(hQTJVf zlDQ1V6iKFVl163%k&(BdBY&D`Y^3BI_B`r(%jI7%j@U^C^#dsX-yz}8G;G#I~-auW$M`q5xrw-VW(nj&yb2Mv7?&P;5|Fm**nK{~*V7Z|AF z*?NJglOCqVUPUevWlGoG1vC#FsUgQ*F15o^G(*RhN}`j8*gkkpl0J9Sr~Im@`GWr3 zCs@L+A}AbRa&C1C9<&GQ%rXDMUy6rjQ$mg-&mR!5v(UrNN{S4QKAAyf^uC)TUs*=U z_$zuJgj%H#NbNi+tfLs3UKdU=e5gYFB{_h}DH*^L3EYgo_)qCQiwtXZLZpyhq)e}g zJH7X)9u?hpz(VYN#3|3K0AhPBO}itnlA<~C&dAuh4jN(=d{DUJFaA>|DJHN~cu}=a z6P9x&5kiVZ(p`QEfEKye0XWt3YUXYUOK!+C*Gt}0(Fw* z&J&wJKHfa#a$zIMjSLgn%(Gs*{Y1~umT9!eM+dyIJw_OMVq^-IF_j=|i9R5r4)cz$ zjX&MK)t2$HpFW^k3C*nJ6;r|r=6u3&nD_FZLE$QEFZLYfrjS!j72a;hZdy5-ksD=>iK~R7|`hgzkJ+vXYqlR%gj7ZQJ zJ=2Xcs|JN@DEmDqcGcv@N!a8ebGoJEWSs{`ZSQ7f{)ZlvuQims8WdM|joZOBRNl}~ zrq-bR2o&Nsy9Q-`%e!{czznRx+2+RisRn0^My5>-$|^TXmoMDq602eS8|p5&OgF+_ z4??hpaMFYD5D=(j{lA2UMV9rI5Sim*4@R6Dqc1Qd!XXVoco3Sn5$1akKGzWDdl0U5 z)6(co3mmG>6&k`=4??AeU^irVFrLyddU-H5xJm5tAPmzGntBjkawBk$ghXiwf6~5S zO7m$q!lNDpe+}V?2f^A6{po+#5T&EN^}|vc7yXR&@!7=G&}t0SieWO;19Y8+rnw0L z2Kq6a|3@oz<3_oT1}sqmG?WiClzWH?6uVRa9rfV+f<^-}zfz!LXzeCL z3ZUfxsW0ZzWFg5{Be7c}k&_mJc`mcpE&l5uD6{UAn<3Mpf5nR9U05-G7j z_ac_XvMA=sNQvgepY55a%n3=qA$CSAa{MViFgzS1YD(Y^;2WuPLM^<3C^VqyiYd&Y z+eS`8(kDR4h(ye1wU}?hkC-_!CVEc9oT71g;p?6OcI6@JiJW{U?w%| zIc>K-w>g&YL>nt*Y*^HVZWW_W@PWa-D1t+dY#1Y_PZEVEa5{a?X0i1I_VK+nNt`^v 
z``8UAcF61@Fht8Tx#{XrlmZAf*-T>yOgf5gAmzj!b3dGyW(d zi~<~hjRYTGqK|SG(pO|)OY{s_R&=RUbpK8yrY1&Y;I{}c@lq8JOmU)v`%6E$5;V4I zpZoMtrfhi6NpEAEn*g2dlD@3~VTEeoSG0K2y+j0^x}lTA=_(#Ec*yK)u?}Q}R=XXG>9Tc$7!c?VTW={((;z_z*y_9-ncJ z!C^R4c)2M=lci>eLE`W^3Loeuis>9119^Pm5lN&H(8G_2`lqJ*;81wtTKSu(%mKKCMNBIBBf*X4`>LL>jA|aAWfaGmt)(& zW|SBKwnb<|)5;o@F&avn8kALTlt;j+7AZzU`KPIe$~-rU{i7P3Kn>@34Nj^X=U5HO zg&@hymo+He+$ardm?_s#3TjXq5{mqXlXYlsy38S9g4#Z5Y-cKgf1qqg&xM+5WI;_@ zqT#ZH zU;70mJOCTPNf_k&%CX|&k33wLPAB_ByW=G)FGz>ude+7Auj*R$^Q7cALiW8Bd)U*E%c~Sqq zeh09?@5hNvKk;^UT$2Qzu@ljvQEX1h=cujqxkn*n_=?x(ek8wcvCPv@%s-n0BE3LF zJ-z&{OML{#xzs!-{*-)=KWlE_&+*&%bL|!U>Ax9&PJDttzeeA4siAT59@;@5LDMui z{YV_JaqbvW*RFPV2AZEj3g7*G$3-)4JrHLAU{MaJQ)2Go=EY0KoV)+8y?2j`s#^bm z*X%um$VEm41w|bY5EXGmRJ@Ohiq}XM6X`;dt!m zu})2CNoB<;>v+m^GAm3gEGzQH`+e40!|WkCzw>_HKi{+XEWXe4tYtT?f%%ht8dBeCyoH*kFI|#ZirKo~0KAQiH>OG3?jPxDk$+ zT=;ji5dV7Ic9q_WJ9(AxW|jc-PGi)`@j~*iQfeAne|L9Z3^PHk4xvX3QIoN_MGEGL zBsXat7q6-o7Azz@7TO5?dMWy6o=pczYA)m@0c+ITJ;srKDS{Sv7ac(K|L z%pjONaAjg`jJkg;_3pW(doR%)t)l3AFIj;2Ivpx`@1-j#i_3?06b5H;sYGM!a+rzXBLCmgzHiZ?87eeJPW%gLvv7 zUc@=}RAbZ^5MY+U+y1-Ed6;Tx`QZjS9;WPv^aD)5_A6*rykiqjQhvn%hx&R;G8OjV zltF99jzm4yTN1#dHU}B|z`Azqn5?yT4{D4WPnOqPDq#-I{sb9c%g@3I&78@5eWAtn zxdtb3^QY75yCd3_B)}lv#QP&;k~&V*;Nf3P!_oFy-JQJn3vHXmGkM3;+WlHYCV%EM z_9)HM`IXb!o!Y=Ly!1=0T-!66YiF>1|FVhCKclVEj*Q~poPqwxQGEGV+D*MTva5~Q z0)dGcw;_CQn1+{cOr>KPtFVWA1Qq`JRQ~f<+Iu~|ok9ut_CnNgP?LMVQTJ*B)hqmM zO2CN>K9U{v-V}cJYb~W0ABnl9t{?7FWj$zq2B$0ckkVR<8!+KPGxlzXmkL^LpTb9e zgExE)n!<1YMoS!QCAHP`KI*!cu3dqH2;?Kys4hvcsVA=C@6bt$@UccvQEyM?AAh6$ zsb?p_hKB;q#nZ+ryoeCD#)5^PKTo1u^{ClTllb;Jtw*0Xgk<<3g5FpU{ZH&}@m=ft#n${;y7e(pt-F0Au^_pTPT`L;K%8fsg-AOUfWE>EN4} z49y=nL_$bQxEL&`@YWOfo9EzSS{5bH5V3(G7r*Z+Tx?4T%EhQG>EaRGc0XE|XKr%w zt?^J=YdHciy7=079`PMqbdTo?&tZtos-yJYp!X z`~fGoWh40Ce$Y~5#jAkUqF=CaV69~|8A5qC9coMr8t`~M)_pya#-r=C-D!)`P=&0< z#90ahcyW9k_1_iE8XEp~KmK#QwnUpWgwMaAtL;yJms_BN1@ZWq ziD+wCAegq{%YbiLGKTOD7q!H8dGB9oTyaO%e9I;f__ezJeLm%)_E_Y+!M^5^9+nNC 
zIhg-)QS062$v)Hr|6c!2R{y9TgL&VdaTmMQV1C`tsPV5w{P~}?%npAk!Y}yKn9Axp z9t72n{ykO1O~2q>Nvn%^=U=cJXkro1`9&MuT_yluAhGKt%tv@Ky>FCN{X zE!KMW;%geTA^qPPfQ#DIHF&AF;-Y(cPnD<{f0%m-C?ct5`Lk`@~~ zGnt(FYXeO57w@^onWPiH2G}x^%)hvVk@!mw9(@@vmwCj_hhN4(S!?GvU)DN?m)VgM z@1@2$LV=y%dl`!}``P(Zq;kDf>5}DF=_5#Z@j6afkmd8f{K91|R(q{4kGP_ZH~q+G zUeUS%Zoi_%g{3E$<8GMG4ewAyrtn&4@qwAfiDM>b17(F?ig!OzfBMQ7y|gqX(VM0xaRW8z}8S!3c7@qd~4?-Kuu z#Q!zo|83%bq4=+%yBZT;5%hE7e=sEO#>5bj5GMY&5&x~?e>?F%QvB~A{#S`?odg{# z{&yAsZQ_5t_`k*=uN3;L#QzQA{|xcpCjO_9X6wXs!Tb{UQoUYo8oy=>X6n9CF93?7 z7exHH47Z3`A4bfIx&soVhUZbr?01lODB`WxSwybo;Vt|% zO;4Y^v%rMq&Y%4jk(~Gt$cW@Vw-d}d48hGJZNXItN0Tu7NGNuit%vmm5jS}vl8;9u z509Y)Vg@wk^O3sVA?08Bkjy?7G5h0)*`FZme&Uk{R0?<}V)h5ZwKw3y-TC}pT_4!v zB~i|2CnI{|sXUp6Lj!7s{`(}$UJ$WhKv@Alsp|<{CsKhZy5}niWmM1P;TfbUTHT*`DAeR$psO)hbTQ?@?`9D@Mjl9%+5UG*JPsn{sers zJS_y$|5cI}jY*9(FHhDZE>G6uE8GAz*5gelKif+0+53`^H)|lNoNRedB+3H*;* zeAMf>muVd0&k~GSMSeH^aGf>#4P+3Xd6`bw z)>RT+^P5jyWDx0*kogL$^PTn6wc&&7MEYTTbq91A&Iis`X>@N)IvNYPZ|)|{deFTt ziwcRvmw}{UorCx_N)Z|H`wWLV(&e!zpn~{x@JK5pI#^!2Q8GD@O|px!0>YWixK|<) zWqLU|6^Re@2z|FIzzhL0(_Qzo2aws2K^Qp`A<~8M&C+-S6*SCXTo`2%xm>={2V44Y*krIfb;7mKiK zBfSkr{FPA-3j3MJ4;^;{Db@I{E};QoA<6bhBV}Z;O41IZXUm#-h_=ew*oZC(6$Twd z7lQV+pyLLdF4*wy5j+4Y!(EP12dq#zNdIt4-|aTm2@4^ z$0S`&bPZ^qQ5bZROdW=z(BxkN(#t449SsECWTLBZ{|2-e%&$l~3v&Iey&u08p?B_a z2>ps(qUfo|HY1^K3=YBmAL6$B)7U;*;Vb&+<5oS!y}FsM?$ezUpzAB|aGHNClP~-H zJo#|;jGs@YhTY`nvnkz>-%Gr^#IN8aK@OHkqA<}AxhhSyyf9$U7mcxI0={H|fIJS7 z-)C>VoFYqbgBX*q3u%~+GtTDt^rg(5b?@uqD1YJVmByy=jm2XwBoqeIt|kC z+h9Q~5v#lZRMQYwhVOly-uij=H~8Mvx;}@F$)PO{Mso15pGO7UnDYICa#AqxqQ*J? 
zbTK7l`gt(~^1-99O3It7(o~C7i?Mx33>O#C$&z-E?&LQh$D~<7309dP4hA!5WuO!n zL1)btQGkQ=$|)1ZA<-*AH&qt|DHNHoSW4rxj@ z?)Efh%k?$+43X=I|8uE=)`Gy60n({Sv^_TZeS6QA|=A!&u z+g18`i^ykb$~WGSudm1#6AqEDdi}bJ8_fMLM^JH(;k~HR%^$k>i~AR7v{SOYsJy>jS6Hd=ZwlX0cv@9Z)E57OCy;&2|KDcG zg5FWMSK$tY_ba?np;=+oc$sgG!UTmU#!2}mg(;#S#@Z?V#8{bLPDsB$=}J(ODHGNx zoT{*m!Z*gq^i>K6D!epW%J(Q-r*NJ^yTU7@q@GvdTR+q{147)e4_e=quorNvaDJ`sAL;Qhv`=iQg-} zR^e&4BA!+XzKn&<_~)ib1HN>-O5ddN4^e!)!UiR8r}CvM%vQKWq5DontWo&5!d}Y2 zLB*d?Sf}t8h1V2DHgkBRYKdjOmQnUDsg}!@yqi6wGImu8n##~Y@nH&sl%A&e2Br7C z!f6V#6dqIfj>2II(-hhSV*IUB0$0sBofHNE#qsA*9_A`^ zDg1GmEZ{AL`xM@<(zhu7am6oIe2&5q3OgvgHdNZHQ~2Rfb^Py9f-MTyC@fNVqr%Y& zy#u8oP4V9kk^CnLYZN}G@P36w3TG>H3~|ehb|nZ=cxkYdf2#1X!o3PB6+Wc!PKApV z?outXQq{bdax_<^XDUo{E24wKOBu4@ZxtR_cv#^sg&P%?D0C_ur_ip@s_>UV((Va` zH45D?DxzGWOW_QK;}oVTj8@2es16U;clee%Xxju`pzo-u>M!}pRwm^NtrGc2o8B|b zn*{ucMVBadf?OI^!v4%Q~Y3@o~qC8qw{ldxP#>I@*(kh*TK$H(qLw? zG&mz~$)fzcl10lF+U74`R#LF6#I}5aEpdgdl>U_zl$NY0Do9OD4dEAh;%-!zA$n{6 zO@iKGT5-|*(JK~=DJsY-DVVTq!SWPak`#?vvSj)E7UY2nk`ua=yQgjVa6WaU9?xw< z^dM1!JHNQ3c?nYV`x2xaHVq1aoUahuj7f8+-!Nv(*y+=$8*;`@&#{dgHDU7D%ptbh zZjo+J&s*wRQqaFB*|u^OP07wiu{TE+F;$rAHG%ZJz{!6 z$>h8{maiyjIYWLy@%$zgs(qkiJR()^z#m$u_nXyne%B&b!IDMG3S=3-H$at}?#e4F zE*QONSw6Zqr=Ykbb^h|DuGC@|u2B$V8OsXwJ_B1Wqqs0Hzo2N!^86J`3YwR&G;h%| zUf)yive?&ZV_vqFp+)nyBsJoV0`d4d3x7ji79idg~PvbTsWWBKC$LY4zbF)LxIY%y>-QYhF?%Q%#)c_DmO{r@GJ~y5I<{GQ${hFXPwTEfZrWd)pa=Khj!All&UtJJ{+6@( zXu}e;mrbXzP zGaBJdL**uWvqU-ueB+36E)`=l=MGoF@ACPRnjO7G1;w^F-!#iV?5?+W7nkG@8L|K) zY3`y0$)bSNc?Am>Eo&OLsRhgOiMi?K+Y^CK378DnhPz%`^|4XW#*ynex zjt22JY_nG9A5GGyxa-x>sNE-rM%haeD-_!PE_v_05}k^#S3EvrEY4``uSs;gA<h{x51{ZBH%=b2S`XswcRm%>_wWnW9jDpbKPg}&+I{}s~J&Ht+mE$I7N ztk&n)D(U!tW7kb*t*&omjXa;J8lR9@_PIo>GH6p+c2dgiim&$Zr=+}6VYOHC)n7>T zwXj3w^L$})i@}qv1TJ3zs!4NIjl7YX(5sJ<=uvvCljLg^?^Rf(@>Ol>4ZlKm^}D?P z+Ku{c8s9NTAH4sCn{^(_OBU+!`xh+IZ*0Z0U3%X(&3;b%%+tH?_qg-}x?BE&+UN(9 z&hnQnFR>M`aJiNjl@#QsVi8=t<cHC+?=wbvE`uIz%q|1JxtSj6M;r9W zX&ZRjTtdNTj}Q$h?n#IQO7{l&FZ!A(3LW}KdIz9~9v9({^hmzhU@xHcntBH1i<%h} 
z|C7ONgPzYIVYx|Oa+62YGR&aovq$BaHM3}e4M56qU>XQ@UfI+%M#V^_=x9N&rGl&R z2Kk#-=!x!n*teks3O-NCaI7d-h=r$i@WJFFg3nXJ!@v2*1PvTdqt2|&^hllnxi3)7 zbHSI%dtH8ml(6KQUlGsZST*=8AO&N=2?jlK*bcr%Xa(vWz3R6|ddI<+H|e2YeTP@R zL6NeQ1}I8Bs>z-!euG$#%e>&}F&YJ*gH%xcWxt-FpjRuj{CeVEm&>3?^IZk_vK9=! za>;LyTBa7fx0xPkq!{$bUN-T~^oXUdYdWr|X0;G_ffPu=H<8V5*2jh5=>a1Jqu#{^ zzk_6tLC;A|fne0D@awr5DQaX z75>Govw9>@@OelM4{N5F4Ze9Flm4-v0}Xn?mo+Px?70kjl%ov1n3$;jjRo(!7-+Bt zycaqYe0qe120hB*0^fX!rGhJd^4lY-61;!(tU z$~yT=AgTg<^IrujAvPjV&jCFfMXJiy7{5W9TMvM5UNGr5Z!0JC zY>-p%*&_$7276@L7NDo5GRkl?+Fvj^avZ!3NWo{2u+*SO1(f}Po}-ywR!e$R|2-W8 z3qAn8`Og*=Y^*U&`WevkwT$mqsJWgPcLqIQMMz$S?a#9P!H~cMfzO~%u|I}Uvc zf22q91Cjm~^f}mURJujM)fmn;vW8$(bQ*>^Wpo+zj0Gn&)3bwb-hz}rvz@=CiOOw< z#}+ma6pRMV21Uwf1#crI1fL!mnB3NHkmg1g_%a{`pB}aB4y#{}y3PaM2^8(`Q>2>X z!yzOVLDU$$P4PZGGI+vp*ipd^;BBOU;L{_lH0Y7ND)0^<1xgoF845P3%O_|l@zcc9hFZF_B&Ao+z7A%?&2D7NT2>+?Z!O(LEt^cf-g&O~nUf@jk zU-n%&YVzMSbO>i-M9;1<=;0X>43-KXcz zEbr3}ueFjFhjW3)>U4JOLs_UVjV=-*5!27=Ed8Uvv~bAH5M?1P%b&)F-mfpyoo4am zGcqirP~I+b0O?t16wiJ@598n6ue(C#n_2D&oK2q4ABl@0xp|-qyf2g)Xp&Vz-xsh; zypd=JV(tlk@BzIunX5$>JPsvuj)!&2aHECKk#y2;opMv~AR9Fu1*6s46MRUyw3>~q zc&JKNUF&pn$3pPFP)@Wo{YYF?=Q>zZ{SjadCXkFvX8cH;E=SOCEt_1RGOQnvx+bLzPj?|C3$nRj%Ll`g&qaE0;52j1JPr! 
z1(pS)3ce8FCaQ(t@TD;;{PYE*VstZu^{8KkS7#2$%T7{*caDZ{Ep;1YsG{hP>)2_3 z9jQ605vf3`FVzjtxqYQK;Q^$z)O2;C>BL~vGEVW6>-3J}pn3pa)wE!y_79ql$}!dy zO-Jr}*XtJTl+Is!K#!VBy@#G+4#eic_6P2fAI4Av8@};Ia;kPVV%8~9UiNz3+@2X) zxg2#kPQKuHqqJIsy+Y8ak4VF@=&m=`*Put`7@LoHD%XLC=EHw~F#2a~5BTR-0+ow) zY}Wga#^EU-O$sCJh6)(RrqN*Yu~~>bO_i0CPFOj{>v4<~Jj26H+57d_L3T*WzQp`x z7(qV2D6ORt0)6UAYQ7$Y$maym(^i8^qT| zqYPiDmyY|`CanI0h9v)tK~!4yymtJIm>WXSX4F1-Xxem4<$mMOX8#()u&ya>SOa)p z@SurW>OZR?Kj~KEhq(?iF=RtCZo$ZbV|W(lD>13e;Ar_DSdIQ#{7Qx*nF_$u5@|53 z5C&Z+0*|VcgiJ((!;=vq=@JhW2QuNIkSDhF#<8x7NA#C6QqED>}DXpb4 zN#7Vm?Zp&hUg|X3`Gc~lC5?gV-cmW#9=B*3m(;bxT`aCmP+fi5|)b93f2koDThy%V7D_nWM2HTOIaziPQ$4kenKRF zN4_Z0iKjE9L(9SP62xK?oXCkc$EMot^A{|{FAR7(K}a1l4IghDGYX`eWj7O&G&fK- znF?@1mJC#xZy;HDpx%uns|b)yAaiVUGZ_ghg$x%L(%^(qV<4;wR1^lQ17z94;DJDy z(DMYya)e$DWYMrI%g7OWUdXbGAuB>lx$|)5j#JJZIBgb!yo4g?i!1_PzYsamX5cwq`IV&tkR69VvNn5mN`l^bhVi| zp_W>NHG~QFew4)tSs6JkWobA!NamG~>&Zc7!!eTH=I=kQcS<6fy{(HAFB;|)<^YkA zxfI?rfw$rMjCRp@nivOpb`)ceAVz=3Z{zy3cC{mLf>fikRY;==qhbR8lPhu?3WBFn}s(CQFsPO4bgjukS)Ar=#Uj&#SY(>jWx~Dk4%6DX+B&WL@)P<1H;uf8TAjsX zLmUkTo9ed|Aa#BQu+y9(;)wDrqJ7M#zql47MrV$ouo~bx-P}lgQ~Zlz9-k zRIUrq?-#}%_QYz%Et`~A6g{+Tf121PrOC)y_ZP-ULcim zcnP1kRi8og-odSUS5a@fs&}QbwFfwTh16dUr116%u05p>98G=%Lw~e7$~`L2UToGR z`vrGM{k5wl-(!vBrvfSc&>DW{Q>s4R4|H}()u&e3Iib*Wztn#pNTIx3*2lC?@(Fmu z3EAtofq(myo;r>EIskj4mA!gp=L!%a7CcE#5?^nS`9FF{+8h3`9B79tBtLkgw7X^_ zU;MP5BnE8-?9yWqD$l0uqyWio1`tibGL^h;leB;2acS>nE}t;PKOxHr+9K22Jt^%~ z0?D&OKnhGuM1|fUuMy^NmySHTL-H@~;O{(x0Zw`j=$%##nvDvs*(r680x2Zzk^B}Qg%gTz z+{4o<^&~M^qG4k(?ut?4IaMPSz9{+if0p^?zApJ%AcbEQKlTm2#b>AfeVuJnzPOZ~ z9!F%pOF#-6-jn=gAcf)YOWyrH*Pd1W)WFVOtCW{1JNrM7{Dc3L`C>no{CXgT7Zv~G z$9ysDh(S>aJMGj#P@(K>1;RyErFfAU|0@3_?R|7yN+y0P2gikza!_12CHcc%sh9nQ z)GG#(y_nJAtY66+tWW;ytY^B1M{LvE3MWe8#5<~SJ*v>73Rit8jnto&M)sYPeExT` zkljEE#}%LSz2raoLF&iVOFkwwl-WvBm@U6Iv!(aK$wd;2tZ&Dk*@pXSm`0x4rl&5X z(UMIqg0W0J?Nv^k2T~0hfHdIDlXdpTRTeN5NIJPd(&>0j9uny$`6{)|ntX5C zYnq7<-L7}(O>=cJ3ZsIoFi0U5*gD804Bgy@%rQ1H!o*i^*GG|;Z*A8niY8;o=sQKR 
ztIRh8Da#@td9Xt9_bT-Liadprcq;U&TEegt3->7B<3P%H5J>KrJK={PI0i`Rw*t{i zvFrm76oz${h7JQMobPPnyLaeueW_~4-;-6VK}3`A(h-1GTirbW;M=8FTAQd|#@m+sX74+Q zW*v~~dl4vnoT7{Rh5=De77a9pYdvH;tiX%zDBl9Sg^x7KddqfVY4QblbNZWD5M%Ax zD!ikN4D}i$tGsj&pZA<{ECY^hRF2tILBoLLSQd~9$N^FTP9Sx}5+H?}Go&Na21`c< z50Q>c$LkeI@8dDD+#fR~-x;sQq!x`C8IFtfA-olx7E*It-_zMTDiNVymQ-p#K?)l1 zvQ)~naEj#1fyBR~_?W@rtanK&>yh7wbx%(bXYO=1El#Gh@CLOAY95e^`gFRKcg~UgXdo55TJgR!xyl(PHe#l!>Acp+ixcI1!-+D^<{Yz3 z>{G-IXrnlm$pQs*A`B(#jV87WQ7Yj#%D@{hY&@vhqqF&?=k-pLDrcKmEn5(nnTW|)7a4~N8+hKgO7A(34xh~^fNEW-nr#n|TIYb`4=bdDdjDM6 zLiA#P3jKjZ6 z*SWgT#I7PbblDXKo=+!*?8PSb7~)E$VBq<~P;fhz;N|9s2ZRC@kHyeAtx-HKY68+Q z;Ea4m5X*p06!b07_bxTDp37Rc6PSi8c)#v2d!B_rQ9I*3K*x5Fi2TNeDG*+qw^O^xj7 zE9D5S0#f)GNYo`D9eN}0mWN(3kixS-nxbQ{v?>z5;zkbHbF+qb<)PC49;w(iP=VE| z^kS9%4v-FwW`gs`9@Q4lLr>DyI$NEz8lXl!g=V6IY}4h0HTW&Gd^ z*dt7}_k4m&lzaK<7xZ|yeKp1wVlC?j?>VzR^B))aSBI1%HzQA=Q6}h$|LTk zv?AWkwnCV#43UD7|6QgW9CqtXY%XHfck=4U$j5XJVqxhKEUYNl)yf%^O>NoKCTH5P zdgOCHXks2!Uj6#kO#*)E&w8Iu&4i}?%|)}|Ib`WR*%=EX{x!rLlA%J)d9u-z2k8d8{Lg>YySUlM_<2IyqYM~%d2Z6$ ze>V}M5ZmKe_kWr28=$(mxr`12>dNMF>ZUWn>}BNr7%>`$2pkSG3i!-@dY3tlf1B7# zh&fV&8Tpu)AQqF~+Bh?^EY(k!K0{w1j5whtzW7$mnjVdP7ws5+_gq0vI5mR5zfT|7 z89TmM8czF9s4@&Z@Ai`3XN>J@eDnt~1FIV2u)MZo4vywCAwsqp@=TJWFw!ydhJ5_t zm-K!^#BTvSq^UR?XDNi_B_W-f`S_so((Ca+3U{98mtN94xha3c$5PL!^82pS%-_j8 zJ%JQ5fkef`hp_nkFxDj}lttrrGh`a>rF!M`UJF~NRK8okm-(Imk{@k*v}WD5b*_)` zcC3x^w0E_0wsmJ)F(;D~Rmuq$oGAT4X8aT=GWH2&ebW1}I9xM#sE_ovtF?OCxWb)b zIL)WLWh!r(p#r{&ljC3ZVUo`dA}Yrle`L6URA>@3a=>ijT2wL zFKCor*u@zH9&c%y@Ztd=ol|TBQg}_%_$>$YF7xTV9dJp5NI{!VbmDbM_DxffI^|X`%92PI@rI##7b}UmU8z z`ghTBex5&hQ198f8i~gduP6lr&rcmh4`Hh+8;j_`>7!`Hi>(B|46+=O!3#?F^Y9Gw zwlQDxU&?6I>_lf-gP0x`*1;htBl=R5J^Iu=q6FXNYYwQ)oT%};(`Jh*zP|+KmjJR1T7$- z%G#!eVTnKl>zRWwmmWrs=jyHAHnm}%5OHmmtemQVQ-7Tx9rvw-N(7!vWT1@fdD6~WAcZ~iG`BExL?!$u16CMt%$El408-crBuDlu zdE3MgmWZJdxz}3X#v4`};*r-l)Khi;l&$Agl{^pJkRL4#)hv*PFBD3H1&evR*Y%_{ zI`W5bDPs?>lzI(! 
zOTG5@sImuzv&6zM7MC8+qD-Aw;JN{MbJg*0`OK9}T~zs2mPtDtNOf#$4`Fsp7%4ez zS#rA7&0;r3dpo$I#Eel7M|Rw+8~{=XTjk3brZToe#W=;buT~ph8`puvJ_=g=(R@byMYv5S;MtMdc0W3 za$Gfb)qTpx`}h!~iK#ss`HP@me7{V~dqC!2`2fF@bi{G4%J>Rt?@%Z4k4zD5p+cp=^9m>YN5r_~Ajm>M#yh z8jyu(FWgFIl@Cds;}ufpC!lDrq%f9*j*3rj$2x5E&u6Wj!D6DVQ0={9qtt&NNZ~uh z$KcarQKrsxB~G^X&3U4Yt;I2B3_W_)L`ljC1 zosI8179rLP$B+kCoq;ke!7}I=?QDjf$2QCI;(D7|Z(OZfkl*_Resi}3+6gLRkP47D~sU2g{+cfXa-6*^1sW2Q%KvJ8XJXZTu4Ry zM8xyTPIDf4TB%ID*E@RG&Yrh4_7P%wf-Km;^ZD=SaU;s!*4T51#!`qsfVf6wYR>!f z@HgJk7l;|m{;AGB#3-QIie<}z6$Ye%5v_PaN+(1EvIHPK0JW=hkRn|8L=GC$zvQ5q z3?w_(AIEYsU>=b2Z3p5G+8Fk4C0OxqsbKk38i=u3SUWs?Y-j4lg4E>*o@26@Bn!?r z7h_5+RC0d$UA265j>zSRX;lchjPiTzc|FJ6-JsLN756G-;unuuYhKKVmUJ=2G{O`S~Xrsj`P zy_ozr;*k<%a482?Du>E}RL~|MbxI|Wns5)0n(zRSLfuta;J$0pkyee;ksC}hUn`CB z72p@yEHf?vq7m3!i{$r*O2d1@Wgq_pqyjp%k^FEV1t*XkdK*ZR^FY#zh>-T|5jx-f z9-5kdo~ofKeu>g=WZCU?HaQXt;CfKsAmMj1%q|9EyMD(pG4&aqBTMwPxj_5NMz$+cr zQ)e>~eL4ZW(CLTT_0rj(-hL$`Z(IybE9y_D==|~rSl;k7dYsz4V_z(%MI3^*r_ee& z7!UKTxUCq$qBmj%kk{(+>6Sq6NyG)vC2SiT%wqGyaq=3*VyB4{;3nPgkREN<+4DZT zv`RJ}&&FlB*Re8xv-&OU+yp$T^)G>n?o;(0CZ`^X<#VUPyicl~wtW4g}HL7y;b z0G{z6N}hjPtFH^4JXltuzPDNIo0=4yVrC79#$W|QTT>~}=p&3q+>jwN6YmQ?gV`x& zK6jlS73zX9WfWV_Cefx;3PwBUo7o}fYg_Px@>$!_f9$+SF~gjeTflGoQ15|{R6qTp zo~jr2A*q|T%TX{EQkg77P5-D6N1wpTHuU#b{NPA4%PzoeZ0tSog-;!zop@H;&o4VT z%FIgV;dW`?CYhV1O@v`6mKzl7AoD`8A+1S~JvSAF*CYS23mSv6!e z1_#?*XJ$4ySZ9!>Lgpwye^(oX*$@Upkr@mYL6!@HSq9lg$SNVrFv#{oRxuwts-ph) zIg9^&&fXW{!f_}}L&{`WbH|KEJhV(9;e&sp#>C-gq{Z036<|8&C( z{27ZP`O^F`%UyTiy9x^migK3Acf-3*>K!6tWNPLjyd=J4ej)$-q~5>n1_;L&ln6EP z9$6kYL&udIo5@Fc^~h*?(LUP?CGlo<|C`(Sd#CgP951=B@fly}nWo{q?U#CcUU{qD zI+`WQl6)Tu7@b#Cw5Xs6pCkBE@64xtsYi0W(j~s@P*M}0muUWF0lL=Xc#(fJ?|KH> zG=c zs9kp99mR|0=Pi+6dKiZfTHuox#eA+;|5Y2y_?$(041ehhy}foT<6UpTCn`ct>#y*W zr}Z>#8RJhc)f0IBmwF{ngEJ9hn!FZ{>|cLIAK&ir20b%`PLgZsY?$$J;P-EA(A((o z)yfSC3vs-+(me6y2HV&i})Lt@n&1#x0m&SgzsF{JMm#x^mN5^<_})M zSLKj$@QTzteML`g(oDIkrx0`FRhfwFo%vH&WezYSC^6)kOiTmb*pg2H@8K8Y0nVw= 
zlK$wt;(}&ssLTF&*Yss36uAGxM!hQ$kNs$ViySy;GIu5X)MOT}hiDRqXl7AqU@EWF z%vMqY(}$nZ%&m#}R%v$9&5u8BHV2Cx(Y3giO2j@t+8u2NEqJ3-~c2>3D$|?IrC1KBMSL;0uvhPYIoBAPy*Dhj2V1<;B)wx7u5*3+&YaH`B11 zj&KlS251K`sv~v_fVKgDLCghB7kkGLmw@&HUyqS=E$~_FJzfL3aUU}6N4|>aft)a^ z3-W*GhEWh&SLvN`1F{RUjolc#f((S!h}Q4%xE(m(#+VJX3%DDRTBsU$M$xu7Yz{-r zAbsHSc)Wy<^nrit&e&|w9^i*={Hp@v1^RX?6Shf&r;rnNMLZ6gFcGm1G$DO?o#cdb zlNg@F=m9&FX{T~^54BnMt^w@<(#r*?3*1B`B5p#09XJ}19LfehiFg2Vx=QsT ziv9-n?rB>Of}GezP-ML!DDgR3?0IF#2nCe;BG{6+PKe`_W!O# z%!kg(ac=Af0#gd65_p9Yk-@l+*RaV~Z1yc3j}0y$w0q7$?eScFJjQw9v4jQ)q*3cLYv4d_Z> zH6rPFfS)M39{BEb9J<0-Es8h+!Ae62N%{xH}Bm1$?y-o2fv1fUhmZHqnc?@8$s! z;lidfBv^q%itt9YpK;s+6N|A(1hgI4bp>vfg0=zwbQ|uGg02SI@56l;(gEJQ0f*=> zXf5Enjf|Cnt^i)Q6(jyv3~FF#CGEf$_$K{o;8I$|X)Xge?_*2HFm zwgFGtOw4f=^9t~m?pWdr+6BBm2^$VTJAtnvx@oj~Ky>XP7b4q$6@!rZ8k!C`YCP8a zfzAe2Ohs&j95~?yEPe)^4cvn$4l`in43jv%ZNMiHNp9S-L|d5(DdILf(9KqXSqY{B z7@8|HSb<9v?E)TEbS<#|ELnhYpA+qOx{SCPb_gTz40;b}m>F-Jq39> ziOtD$_xy}HX=Iitw7rH zQIK;JeY!e_DTrvY-pD#S?LEbv9pcF=CaRv@uOC~Gm+EF*z%D&i8*hTTA7mr!qP z@Yw)4VL!ww(8f(av{9&bsfpDIp&onwhwZ`5X57k?Z6N2Ml*u01rEl(n2aDaJMd$~T+j@aS%g>y z8Xrw!QDr7p3EBqyD`E|34>0y#9J(Y2x~JkF!{!lue}%n_NXKhE(6UMmK;X-WBscDP zp}jC05mS(t@W(ZnGe8^nx6r2g0djv#?>0Ae+02k>1) z59nIp%@0W039Lh;Uq9n+7up4L7k09ofj;4SL>7#S0e!n&2nS-5N;Kqz!`Gpi(!m&a zsu*{%&~BEKh=tH0tV3K0n(!jx2GGVGEVQ%b1maf632DblHE83`723fP{E&$qhdkH~ zV!^{0bYNV-gO8cm70@1F)MgwJ*r;LywtWJ%r&BB7CtD0hi~?(2vvJr# zla2(!_Yfz8Htv?89X5UcYGO{v2|q?G1x@%F;wI3>4KT(HGqiE05b*$X2sa@f1x@%S z;)xK<|3titSdRpJw}}70barl8Id{ohd z7ZiA8}$3Po*)X>hFI}qz3C#2mnX6#Wh?wX<9GXsyK|LsTs!TS9hO$yq$ zQHD0qjQI?s9CE^!5Lbf6rhU7!go5zE|Qh}ckv-*C_sz@BI2>}Cf(c}~s;mB4N1@nZ#@YM}Ew z4AKbnJ#g>$IPO7vfUWCc6topMy8*QX?F7Dh2?H5hIXu8sSD^!%c5k@Veh#s(W6w48 z6E;&2zK$3Rx)%6_rm+;z#tjy<@nQ@fU3(xW3<}cNG0;|EW|+p#fX)V{V4H%i9Y#LT zx9`EQbph+Kk>K0sK5Zv$0F<6kx;_nM0jG9##~770v{f$u??V&%jCa;Y)KXd7vzLxhz+0#*B}Pt+Z7eS zzfZ)@AkZG*eK(+TBnMv0MR$N^v!Htu25dOy|7s8e=Ac=S-~c-3qSl}Z`#Di#?Cx~{ 
z**xsp0Zn)s(F?jB_~CqX4c>|G1+H6+{GcmBzu<8NW2VD*PupIV5dx4YI!9HkrHi!nqXuQdtVfF7<8<3ga>JE&02#$cZ z0rx(F!a#d~y|8%J3);9i*0`XS7S`rIhRq}0@H`*rM9c?GxDv4hG~ok}q5s!_0l^+p z2^Bz%<2Z%f3Y>&^9JFx}EG?ML+>GNMa>881NE-?RE<{WQ?E>yb%m7^t{O}1)oJ|=Q z!fIRG8vA?;JcSbB1;j$ogzQOp3z~4~R+-tj@Rb(H4txrO6gq?x5qE$loP~G*v=bQm zGz~OV2IyNHOW5`q?0|AZiHJl*FKEJa#EYN}i(SRS*nyRp8{$wHa3W$XXu?^DcF<0u zpVgRyXyD6;bbaUn)^Ee$g`9BQb_`6=*+BOT_(vy))j;PCY)7KZK<{(N1e&nzE{qBM z@E8`dibbyjc0(R-W`v(1rh_Kjya(L_n(!}(BsZ>QrM0hpUc{jsk8flVaRG4+ln9^u zGnSl!CVUZb4`{-d5lP1boWEB(XIQf;*1(>Hyaqai+FvzR2byp?VgqQx{fN=sVF#G9 z-;JgNW8aT?{(#0NgJyq6fByp`7PJGH`Z`*ObbvL7Fz9f4Zd~VTTmwsMVedMOzK1?x zHR4gwgfVYof&*<_4?D^Yp>H89VXL<=@Dj`dCxMOxO-Soq6F?i*y^8g)#Yga406Af= z8jZ~cZ3h-(+3OO}{w1!uSRU(J4oleoUG)EEC=oH`D4H5H;aS92K-UBH_fZ4TR^XS4 zt_OzI%GR<1L;fj`4lA(3$EYRrZNSxtVTqUxf$Kj(WrzkAeu}{h+Vv@p|EkZ>uQULG zv`)1K35NBm>?<#(+b_|7kQ0vi2FG_2MlbM*%Q(J4R|1Q#!Xdgl3Y=xdswBvrz$xu< z1oVJD(6{cA@KChQ&h&7b1rZU0B~VZzEJX}U#`-DXlS*C*Ou+KTSjg?bNnLc70y-Ob zxhpQMdScvQ1>>EFbTk=PFVd>Uafm6N>Ub+wGY0oE3p@cj7PN6iBdv72J_YxG zQ;-0Hy@KcfP1wB;Y7aUAI2(~#C_5EK5ep$N0ahcD`~dJvMV|p)*H>ptppy;!3*siS z2~4qL9DpV)K%~2Cg?2ZTz>uMRzzzM-|2UZ>OiaT{Qe;R5PDUgh!rK*H3Ou6d8sI5J zaVrnV`%C@Jz$%I;jPP|u9|k%G=q%FR+boD_U`Qzk7(NIEL2dB!mvJd8+}=!9n!9Xtfjf2Iw^z=5IJz!wq82@kMA z(S$pO!4Pz6fL|h#4jZnsVTe=!VX2~Nx$~`vq*DU?0C5xY(u(uzM(S)UXeaP5hzELO z{CYzr%xt(wtj{-8P z997v>xmC`p!YWr)X;oR(=Bmo7;GNc;(K~HBlXu#8X6$tA%-Wf~vv8+tXX(y{oorX| zF6*x7UAA4xyX?C%b~$!s?aJPjyUV$&aF=UW>8`R}<-0cTs@w(RF07~|*jBo&Y+L!Z zifx;>Rc@==R=w@Owp7oynr*e)j&JjBtJ_w;tzjG69=zSUJ$k!sd-8Vs_KfY0?OEHi zx94tmZZF)|(n!b{FnOL!|#d)-ri= delta 92146 zcmbrHcUV+M)bMv^fn7yd6qX{0sHi9w6crQ;DBz-tq5}5bdjn&`MG*zBtC-P@HPNWC z#9o43Py~(GjlE#MN}^~i#J1m=nY$~=`@HWzpU-o>JLml7Os{wDCG~N|)Eh2YEpe%8 zN$a+zw*RQ|9hQ$4S9R72)zYN$H)b?##o zmZ++$9I>-b<)&7td=M8GsYzp08>Nz7sC9jxVzsIslMk>{6|sAeoyy>C^$zz5cB*|K zFIuBkO;}RR+db|_up|r#DoCx;YL`^2Ok8lerjG@Y4nSRDgRasLK2cSE+IuOb}jq)g|ug7bINm^1YqiKo_;@d7sx3hH~4+Re!D0-VbA`O3mIzH6nF3^tL>^_M}KnIt_^-sgM0=T 
zsX@H$13b6XR;vt|j=^UPndJ?c9kd3sL2J>kuSR;?`{28^$uxU^Q)k9V{}?inbjy&I z3;Kj=YSsIbD9P!NCnU`vU67?>h=tV0Kgb{z7}E0m`k!QRu>CWb?K7E`f(TYJ5Ml&` zWFH;C0!IcZ_un+j4Evk4gfdN&t2J0^KJp_q97f~wwMmvku!}E?(ODKyZ_ussC088U zEDADj=|i| zB`Ww#RB(<#x?+&dTl907+8{X?1{>@kG97CXS8cVRR4z5!r`BX~Q|GsvsmgCRkwwL# zj|ZKhAT1Tlv3~~1{#FGtMB4~kd7YE^npa;QOCoXYH)T4H+6)KO`rXjaX%Qg!kG z-7%woxIrQ;Ar^hEo-}oBid*Q(7}tjCE_X5c(X|>*aV5E~zIdoBdEn}gv%QGMttYBT z2D*i+_uj!|wOcdwbh!WF))T)}lOTOZT+M}~=v(0zmBO)zvgiltSk-a*`>UE5-TUKeI9j?E#YwH2o0H_g&|+sp=8O@XZRt!#dGytu(yJkzADqYq zkCp08H!vCFSxub=_bg9${n}?N%ROSG^G2z?));s~zV!x^!=8=R{edX)4DxYR#-6IGD0)EV61$o1g@7MceYI9YRqd|&cea~qv85i!0Z)nM)@YY@vz-S>r z0cAQZfFd;HNq7zH|Cp4I=%#-4L`?=nBs$J>0PE5Xi++uU+VA?o0nZ=OlKW^*bJUxP^1_VGTiA*O{f(rH-*TGro~{b0yoUC!b{vbg;Sb;5l$`PlxwqtU`D zGvw@G6{*^x327MPRNe3jqSw0~Rz3HG%qWQRa|YAFmm|$>;~>oea^+j3U(9b60=_C< z+d#+Z*P*pXgCMMmkve&1B8&d=7e)E}E17-Xn`Cv!(%FFr$SN&s@v1MFH0;>KYnm5g zBLwvPI-xDcw0l2eGP7f&jtxN@BSm{k5J#RsmV#cV!Sw9B+LF+o_&>R?9#y0p+$F6A-e$v*ZTlZjns_zkGcV`S0IISVl| zN+yU=%l9lsB^G_R513TwS})T06r%I0mWWO_t-Q1@kB)^$$K1^`R?34YK-S6w?m)Su z{po4U9O7@V=+C}W()RGZQZ#mCRo5Zv*teMAZogDm`c~2Z`VNzQ-GZyHc?=hDN;sbHgEfN<$)p0dM7OLL``Pbv*N_o!ikCYO3Othbaq+##Q0EHItRpBfwXSvMyi?1s;^B(;~f z4P+S|yVEGeZX(Bedysa$`)eNFfy_>?Lbmko?lI&Q&&D(IM#NYah9O>6oyPi90mXLq?)mK0Ls3LXxH^-y; zk|F&qbr4K*p?Va`F8g@j6`LhTzk^grvAl~~xfkk#d9}X_^ksc~O()*4gc$vUWY7CN zsg2gLcrn4RCqz;f+mLtnIO1$WG8E)b8**77#cQorhAPMs8?sX%rx{}6A+=Q)2OG0c zFdJ=Vsw&7yHXrAnMhaw(4f&ia;yb`*C0Zc;ZOBapv9}^_at*<>v@yTsk|P6K;#s-m zsw%Z|i-uo3?b^XS1fOeNOLYEY4SP8tp&8_HcNT!0cvLROl^5+^BkOT8H>^P|~ zq%m$Flkg#}@yll z^5WRUPrru8!|3lPt3^0IJRBzY;fRPTwjo6~cw+upDGJ0-o~$6A51ST;ev0-#0T>KWNRy!zNJ+!3gtY?f6#7(#J@QC*nL z5N2U{m}B5(Up*CO3xru0W)||=sXw>0;)xg$f{aomTQ_ub0aYYdJ$*{TN4T5ve-(X1 z7Wj{mdV9u5ll@|(NBvo@PkJHpD^8BP&a-sn3X!F8@}ol{*Iy~Q9w*lqNVE;fQIL8z zE9C`J!-lLm6fMk@QWjJ{OT0A?FF??s5^C5pwrK#Cv3O+~Xce z9N7%V-6e}g`rtPjvTfuj$4(De2-11E?^aT6RET2*=7vR%?7fu?9#zAKZ4^M27$LX% zg9n^QtyV7{AU}_a_e?wj0#*mo!(lXqt&IfmB4-^T&ZDQ{FNGv?bTWSV_WLotG<6GM 
z#JT&93>>qLn5Fl~7;Lr7dM|Ko&Ju~NPYldHzY}eHT}G(G{hn)~sfL znP)$Y+YMHelJCMy_gQSrIZ$oo9}0NU?aX9FCq%WOTEk>sfB4=vY`nlS%3vzM*ck6G zu(iyZL0|&~#zi-`3M}dyW|FH3u0%QNZONbWdDQeAVuHnrDOVeEQy_0L zM8yl0$0$g6nI!HL&^ZpVB-*LusI8>_v?i5{w_7V{DMTMC=qT_f*W6*T&~;(RpZqi} z1n)RTZcLku*F7b%)B9^~9EDt#o{_!KwiZ7k<7W)1u^Vc+9RCH0!iI_myMr)+GzP+Q z_Bv}m=l78JGcMcBEmEmS;ml#xx*i4(`ge#|aF{~zB7`2ssX0^ld_bgIN?O2Xpldy7l`jIUiiBpTjaI^>dhfD7hO@X z_GK_{fi)?EZDW`TLYz>BdwZOR=~RYGSIn&_gPjxD)7iXc@=*H$#_C9=o7VVm7EmsS zSc1Y7OPj7)k@QM}`O(IBt|z@xTjBk`kQJ$W@b0@LcJ}(HnZJXF<*=gu@V8;~t*J5U zyKIOu!X68ZP0o~5HywnFC12^%aHShwdvm~7$ekPoXMUwfm})NwzVk=8ZP^b)CH zvn|<)w{F(wnoC4|nFaO5qVLRo!fJllDY6Kb>27nq>odC}*IO(`DJn=xXbC~JW1>V5 z>3W?XX3)+09c1H~tnGTzXYNM-8H-_I-pAk1axxSCPy2)>yBVaJ{&7*kmgl;!EQS&J zq-J`g_R}dSN7-pHnV25!dTtM7#Du?X8KH*?`+ItCk2Va7kK`7hErVX4Cct-PJLagsWIMho(zyeu>K-hAvMI2=gIFZ8sV)I58MVEADz|4yts?55%BxP}J6ZBPLc2RW~ zPeJG;(MOGt_Ylzy_@1;PpLcRZZ$lOLK+jJJ%R31zSSnP~E zc4y@gb0GCnUz0Wb04OGdl#mT=FrD#B75*Ox00LV9G8W}V5KXI>cQ=`r*lgWz_R~S# z0_oB>0}akG%ozdh<=~F0&!5T2A0Py)*Tdk|2>FrR1HwEmQh6U%`0|{mWZU<(O%+)c zf`%6)zuLx2SUX-6UywW}OBCNd79)-n3gpQ+E7D&QPiih}g4-35-piV4{{9&> zKRhDKm(^;S55J*Iq1GhaXHPHuGLsABGtlF*s`eF(H}C7AOLvDl2ZIyT`GoNh&=O{# z$s_V{nV;!*=nG&#$d#8#z|I=Rqs=@SpGPUl_)Y=5 zc?E%3o)Exo0oY~Bi3^DjZSMJDG5FvkVGiV$y!kWOk*avDBV%b}Y`>SRqeDDu@5HJ| zcxnZ8afC(x09>q-9znNH$5lx4yQJu8%*Iz7__tAEU zJre1pg^qnM>rB&cC&4ZP?10}FUinsV8d%OUn$wGT)wwQ{E9CI<>iGCYa)0@xn!!Io z@kK+|#mX3lYbe@Cwh$3(H9gZFxKF06s8{nLY|O~-pvPe3#{ZIeOgsEODOfQKS3O4> zt{km-0oye4ms@1r%2s%J1}R$kJ+6G8B&`aT(&R@F1{j^BKm)!J7uy@8ge9maB6 zh*IZ^5|3x&zd1HkJ}a2VLwRY0%Ka5!v;gAdtpYeHfH=9e0&M+_TNoi{3gBk}m`2D| z6e5!o{JDTUR6w&hBt-9FAZ8sy2N7e|56Z0t)?Hv+bV0%P@_|@Pu{{J9Brrbz-OMof zM=q{zirX(Bnl){lqSU7E=yVoqm z-LI4OKL%m@17zBd;W+IiIs9W{dFh%cpcF(p{}_OqXOpO(;<5S~S@l!XhJ9EcpY{qR zKZ6M^n{FIF$%gayNi8FEF43O4im)LAzorB*-TnE*@>6vzZ6!6ux%{I*vN;0o#&byc zx=7E{>0p(&(GhSI1V+lv;6cXo$m(_LaQh!fo1drS;A7dW?~n_QQFy>6dWabT@haOH<7-CMtKC)ZDaIb36-vUp_|)H-%%DUBr2F6HQwtdUBD( zY>v=;gpNgaI!Bgl4#ypLk}I2sdv;)=zb>#EQXn=s*pr$`>7?tHj+$qSK>6`3*|4Pn 
z?y!Sg-IA#3#YAJzl15wW;@s_|@77^nxn>XzIse}n%W34!)&k9gg`jyO`+tMmVj|u1Z zOKzsqN460l*$;->{EVsPU{StgV`b9f7iZFKR~JpueDLvi9{F`wBOJbo+}qVsGm43v zPLpc88wO5F!K#jCyVh)!3)NLsNltq}%gkgB6n>SuKTR!tFot>9)6*=K5R+;zcgmw>J#e-#}jP z?cBY>0mWu=!_H>AhOts)5cDdEB`{RiVRT_0uPI_f&0y|SBF9aK#+3HT1v-M{-R#MC zsN}Q=P?n;3aEIVur4hQX3NGD6e%sd;$7hp|`@%IH(!ktbCrQM9H%&wKyX_9rXMafL zOUt04vmT_V9~hR*ym>)CF|i-nzQ2~K;u_X=&CnXbcP4M(4n_z?5zQJadVv?KIi`iS zDd!99en*aRQFp*tZ*o##&_q=7PXaq2FfIyGtQA7{1tDx!t4u0+s^FFg4rI{!6muH| zm&0UT1U5=wT=Z~^2z8dgZZNE_z}g9niw-MTcY&2KOf4`Uli;{)slus5B1W*e!krAB zCGR@$v~tl{1zWe3CnA+$`vrDhU|bZfV7El5?5VcANMM@;#zZE$s>1CR+%Coq7hI~~ zxTtirNb6gHon~0LzPUiBDidY!Yvc!x|VSd z=J71~Xy6zZO;WIC0%L1V`H;Y_2#kw5Dp>Xko+TF(0+lpVv#>2BU{n+n`2TA=S zA^5;5GVF*!a}Bn@P~haZ$xSx4aUDFlHNy$d!3jL+M$R4JJ3C!KyDvx zfQu72-||>}yeff=J{GRKISu$fe-ksfHfqxPNG;rQ1$lUEv@V3HIw&f4JFZ%}oXkBQ zh@;}c&InxGjdgq%>#hwtEb`Tb;RP6OPFlDwP%tnEf#=JdozyONmP zn%?dVIG7DbW5T{9KZd;~7$q>1>3=4fxs9uA$%G8#k4#_-sw)(UWab$Ly?Jz3awE5v z>1h@#sMrkoR0=Q5du>=LfxG8GQGZTZfeivUY6U_8VAH(~Rv=9PtF3^CVj)ccJ>)?G zm?3~3^7{d>6Y_06ObgZ%`41iKLCqXHzCb6pNbaO4+A{tD} z=MBaBBr-8?7}l>QH}iUFx=aSjZVhR1YF^bm*p`#8?-Z4@8Vt+qL2D2{yyhvl`JUWA z)lfHuF_{YUUQO4vLJjbxB_uXKM5AIl*PTSlZ-56cA&2u@YRs_zCa>K=zJjPEi!>+* z(Ntxk#yiNEf(Cd_7Fi9V-4j5Rx1HQ7XwcFvi_N2w;W0~!pOTW1Onql8JKkmIulr3K z1JMePk?zht17}Tdur)Evf6mH#I+DoKiJqf6!eS+J44is}bEM&N4Kw6`gG& zUzknqpRVdX54NY|Ixq+t!alK9aTSP3OqRZqc%5ln(~GY#;Ls76tqJy^Rq&jJR`FX0 zu@=g4foC?prbK8zVx7`t3a?VJp`yANpT9>lM_(ulegYu@=G9L3xoqSdvaxU% z{;+~{Ki2@yUr6Sko8v}S@orelA13p|Y?}sq$Q<{nPlJM+Rq*MK`LGM zGk+U&b*@63G6wf$=DrSY{Vi}C%vENjvYY)?ert6!mHcz5A=XBax|e+`^#NhA z#%OlxD43hY{yo`p z+1~^XIWPh0TUyEim2Pev%-4qL#@mNN9N>shR8W4DAH*VdGZX>KIdj5haKV|t_TYk2 zGk~QWuzM(#53puvw@_LwIJ%5EfcBH8PgSX4`=Q}%*j+@bBwu#Ns-#+QB;9zWeB?M5 zFOW-EO9+0y}lb2UQv~8d)3*|21r2f@5j-^myFyz7MxN!2_)n=8m zh5Njm--h`zyFf;;M`GREkbe*hIbOJ;R>uYxnPd~&RT(J z1=u41t-M44TLqw%YXZOqwRkx(8>fUuNWWhwb` zoqCn@2N7Azn_`*05syT&{l+-WNT!{+npD484>w35aW{K;-RcLTl%M|V0VR-=H#=&k 
zFm2{4qQBJuH=jwOZbjqVCB%Ho7vJnlw%>}-{LmMGEi1{pTS2({3{v}c6Z|@h^u6t+ zab}v`Ye>rNn%J)!*>Jlyo}ER`-L9iq)CV+;){rl^1Mr0qQv1#@{9OoHd}q97UT@IU zSxxNkHrGV6-#wR;sJlT}GmU(Aw~5!yULcwY{S4pqVFyK=TZ0V%5$%p1;WsAH_*WH)9V#A^SG z_I!x!K9MyL^F_A42=2!lkPV0`vpL0~ln#zkuYV~wSpz+`6aNndX5 zYgKNIi>4^n9!}ta!qNn`1C9yqy5K+t52zJ$X{=K*n?0F~3T&6axF|p|*9V^A0mj!H z69kqiFfKw0_9cyDp={|JBd|dx!ExEGaFNz%5j)o9%GCuICNM7At6)_H#Iz!Fqdq7=IxuE>+-k~^ULV~$9J@tf|N96pyg z%ibaDLDy<(z>qv$I&`?-%sPIH2uof+(6QtN@u-iRLk7rj7&(nhm8-d{XG6zXU`c)n zz2yDQYSv%Mucnioa!WjL7Wqr|^2i5*WupJL5a0=Y8maQ!+heKFWi~0Rv&5v6E{e)0j%GhUYBfmUc)oLvgZartc}a6o`w~p7 zzQoG%9Ra-a;D9VoP=L2q;III$T7k|A5SXf1SSf(LR-gd@Cf4fPT8U|b_})r5DJG^_ zfgS>wXa)WZ6|u~+0wDs3vjS%opuh^a3!tF_nB=VranDM;i|0AvY&DUo0Q%WVI<5-f zFLxy;Mkv5cYfkJHK%o_AuK-u9L3}TOU)|Lvk-4=LVyh?)S)M3}1y&RG3Xp3x5hs9= zR^V}n$cY0(b(i zQF8YH1!!Wm@T&mwtUwzDAXdP%KoIM#gs(z;6jNAP9w~r1R^UrBkA6SuL%6QIqCb?JWQIBCx6W}dBesw9OM}w4(m)~Yg%(U8`rGQ*FVCpu|+=wfJkY7 z2`H3FIGf_#4=Gu=GySv7!seYv-NU=OS^cZZXar!D&0@Ecm^=2UibdH~kt!Vrt);5&H8Wzi!} z!r3GszjOx#)CzI}>B>ZSzdJTno{~sHUsuCvT}kiPk?tE4;3!rY{RDM$hZ|qN+zv+NcG)aF`4KIo(o8jh|K+e7yY;pkK zX<^@9$$THrUORJ1#H!|M+bfw5K&W$R2d`wNv-K(4*iu%l8UV|@n0Wv|W9Hz}$jln- z#D7!&R4y8%VCMzq$FQveJ1H=q@P|zpckM+ge4K?(7|Ftqf8xhI$n;N@af9H!KYSXD z@Zj#`)90#qb0gyO25fQK=~Z0s)HsO7Yf)`nZ7faEqFT7;FuD=$x`D!19QTzp znBLN&0G~Mxp%T@E&RT3NKuU@Aia3M$bvRa5KN|xQ7cS{jfoY=FdI_jDRO41 zElqU=($0n)RggwDL?aMC8?uri?V$U;Hq4r)$4z+Fe0gsbe5aV1VMB5Sa?6Iqds1IV zG`;#hZ>1LVIY_W2QhEj3NkLGfdqIZCPrNPkkR$4@nG97h1Zu0_um` zYJPA+gPqH@{B|huXbU>g33*2x2>T{m+5)Tcql(}gGDdg{FEduohflh|V4GBinIIV8 zN}$@~vA5;SFvFi@sJ1s&z*GyT09}H|OEDkz2i(X&G>p}3!g)+ne-r$1qwgsK*+GV| zDb{v7I#Gub;-)}i*iOg1fvf_Elag;5@;c%8=9?(ZRn)&%tgkbRqljk(_W1QTQ6E?8 z?t&tm_BVx@muSHY8q*>0y0pez8=|{*2>t4UCS$2Dom>$$#qoXV)`}>iVnjbS$>RsY z4U)UuwHN(d5hdfZP3YuGXmI1izFd78(g&{;$isWGUgoF_P8ekK`5>!?cRfm>(@Cqz z>e?`>bw%wQy+T>6VaqT8(L`4i;&?p-1nel$Qx#qAimI9Vv$(@qP{(>a$^I`D_fI;J z-WdYOl3xUJKrMhQd6oj~>B@a{AtW&z|1AWm)x0AIZ>7etOc zUl3~rkt2I6COV5q+>nO}z$Abh@<(S8%LXD6C32Ji`U#*!zNrAMgaxhaFMw78(E7^< 
z6ynb=Jeq#8S`a>h@RO-Cz3qnDevKQD))ug=?v0B{{c!R{_8WMjXlt8yh2C!d!RskXCaT= zxSI?4Q>D2)to*tD7a=GxyU;%e?(p#xwu1~YVJ;>u;nJ%hWhr1TNI?uVL|Y2jG}59! zq4;Ai=8u_`zci-1Js|}aiKQT%x_Y4>D`rE@gJdMJu?{BIM{3d&UXYBzjaV|0KZ<0m zRgz)<5z)$(QM%9Fe~=26Wtr`Q;0lE+Tzsp>qW?9B9n}HCf$$4In`UQ3qTg zLC=@Kf2;_sN;N1IT|v~5#-J^p2>Pp5{iFIeeQ_61T(K&C2L-bVvk6skYtUS>YIX@P zl=z^E<*mZ3`gDg6GO1lZAR1H!`Ro6D5BZcC6=XEGhyEX)2b`7@D$*2Yk7zYtu<{9BsRS!7WvmQwp}l-jP_Wf=b>Z1XUKa>4 zh8;KY^*QjBQS1q!eC)}MyU4Qx=}})aL#=&>XjoO`AN}AhHx5%FTNFD$tA)q0BZ6-m zGE)Y`NO`g8kCSu;_THWs*y;$j*rP;!V_`H-UQw5Btcqsgo6)qUAL@)VqUjWN4~eD) z?EW;0dRBuHof1Ves)4&^6ursrS0icN>Tpksq%*3c8Q39`e(^`1)TIVggZ=G7;tt3ZC?{j#2GK?p_-_sejvPl2zhd` zOk#i6(zi8H{*ZyAV>9{}H-sb>iGJy#{GkSjrKk*pxk75O9yGsLHFs;! zfwfR@d8>PE4f7agDv{)R>i{VbWd#*R^=x>oVHEKfjL9dS6^XtTzWCJ1zTLkmdH)2 z)A9kx&kMZB=Ph3?){hMFCA{416Qb<`&=h>Km>v#59r2W6s;vX}I>j`h4obk6U(juJ z&=5T41+7#U4Z_+NbV^<1Yq$uD8(5T|5w{cKc39l@!L1OscZBN~ar;T!mW$gGaa-U= z@6<&tJUW51P#*9cvgq6K`PVY7ABbv&Eve1Q1ZGY0csD+FWn8lgFl_AhYpD{rkrm)3(3zxgv-u0E`4Dm|k?^^rTCf~cWBs_tV{8gd8rgEfmr@B@6F zmJj&QdG*l<-1R9f0iTrvpTYuF9V6vK6@+uow)VDIMH@Ci4O}5aZs0o+^kx~>yAv89 zkMdUcc5k|*0s0PyKB46sLR2a~p^X|McXhG_(GCrfw<|0uxP|_FZ2?=>`IYI^hA0JV zLgwWbMt23FcwE|)Iy8d&&rNBcMr4&aO?bV_58H*P`;+5Jv1tsV^e znZa~UFxrAIHKz5OfP4nOd(qWRP#CVMINmftQTS9N+Po=Rg##Kf(u=-o3dH3g;fQJm zj+u&MRWtMo|8|Ei3_&%$7sCrT#V`fRWnEU8y!jhC!NL$!-^8bFaFQ#ml1g>(4U9X* z10w)-;Ra{aL#EY4jHRzRA@l7Vq z2P5zu5~JBJ6SnCh&2(xg3TzY(Z7Bg78gEOm@P*zw=LJ&b+pmXN6%;!AVgVC&*~i}S zUGx&utD(@Ay4|LM%~3RVyiF%JM@D?~7R_yr67kSm)T;&R-n#;SIgu@pVQ&nAn*e<+ zp4>hv`2F)Guh~xH71&pVMpr0qr^M|T+@Q5(3)e1j+ahj1i`yy}dbR~>>CqRQh4T2D zyqRS|(%1xAb(8vrp~0SKiy@o%(a#K~F+1IY>6~o;CtVSS#;Rw1#E0*@kgA%)@_R?3U{ET{vp&->N)hAphRRc{zr?$Re3}WP0@7M~#BmqbQs2wCbC`4mEfn<$A5TmRno)jS!?Hq&JsCQgKCzp(x8!G#CPJKV8Uh;~>^uV6X6{$D9D4?8GuTL>eY&8^6}CNMmD`bRAT%PoTLd(1 zz_!`zGstZB!H^kdkdoj)O(y#e-~0e(B(t51_^d8W*ol2wwpyl)Js5Vck$GZ2^L{ipN(( zQ@f$&D4!nchT1x>{lr>!l9oMgiT_BqO+Zy!b}S>S4U3WtcG?HVg&qdzyu)FqWdIm* z5D?EL3=lL!`^t8hY9^wp?G10(ya~Ri`wBFCpXCNSAj`H%j>1v}c2x$blIy^GNRg0M 
z=o)%3^KlmaF2Jp^S^5v6XXBAqlT*w%`_cr<5g@_H{xV!oFvx`&Amn>3onhhRnSidK zJo;w>@^V@XQH8p7CaHovAdgl`L{HF3TAGM{(VYjsP!(RWn>_J3-QFD<%8Nhf)$XX8 z`T|6tv^$u|6=rs_>5(v#noHmGKrQh&4;s`HZ9+N1+)G%QfH@s}_oGl=@|2o;p||K5 zozoi%=otN_H%joo0sR_$_BF=bDZm(b${_t=P@k0_a;YX3l5R9eooUc0)QYB#L0W7$ zMu+u5t;)ZK1x2BJ1wK?wxA#H2&{5j2FBm;S=k!Iv9@lL~UA`F&xK0cEqEKKW#vym= z-46wn?*bpZ2_QWe@Ao4!FH4q&{*>ve3 zFpy0T3_|rhS-<`-0}?Y(dWn7>ghDmhyhZ#wg8GX>~CK%J7X?L+%%~A@?7c>fADfUDCk>5sr!-V9yibU)7kfi@N!- zbfUMqmt%W)w*gwFZthMMy*(6pscZa(NKR|7YOSD}!6p+_Afpt`Kg;{wMJ93PtrlKw zBfqw(*{UgPxTX7fG~wlSc(X4W%F?d25gIV8&ZBv&g~q~8euK!s7V511ZvPXKLA={| z)moRwJ%=%$#YdA4Ll1C~|K2TszWvoA-Xkfbw}+s2IBp-kJQP*O_4Z}IABsv4p0bC^ z!%>*F8qDnr<@Gmc;0W{@+wG>lBLOSgmECqE3P$+ZPWs&_D2OF6H!hT2uhGM!(7W;z zc0kB1=~}v=H>!+{JLt91$bjdbrha3P8yHXDPw;;Y-~f-z9%QXVtk{nb!eE{#El z@zQN{(O3v(!Zx~RENbn!2e#Ohy6pF@+O5u~+HtU+tg?;OyVlxM51Ezrm+Aa*Xbt|n zmDU;$Hg0UC9mb>jE;S+7W2L+p=@0f`(c=`IKOO~D+qf0xt<^31bUx}q{kRI5_6^I= zqc^~F%aA{8`DKvGv2rTUw`ZTX2LENg(hGSC9d!Uq!iGYISoHKBbf%5su`6vd0d{D% zZDC!ZH=Q^E)puS5I%^Nq_Ac)a!F@q8{oz|sfJfdRmcYE5^@ly4QiqAi&(sVe z%Dcn60R6i={1s5q9oE)W4Tt*ntutIJEb^{-t`!q|(IZ%59S_C9)KWgU`G5A08_Rf& zxl@VNW9T)ff|PfVZSKZer(a)bjupeKfthv=BnGY_hi)C)Uixt+5`73 znq9zUjb?rLEuuNO4DZ3E?V(x2<94IeIW$sg9}Zt=Gq5Bh&%_<=EOhS$NtSS=}y;(LA>%e7=L<;;CkvLg~9}ZF_$Rqc@LXe4NgIuwZZcKu4 zX!HU4C<&U?{5w>W3_Do%0NL_qGmI`-5T#Ivk!UBn^f4Pm3$eQ6H_%bZsG-LU514_% zWF`~6KY^e*d9oU|Xan7qjIPwL!<3mwv@HIyNk6FMuK)4k$Gxo2-ZcgF#cr0p;D@b) z_~#y8!v^YW4Xri}h1V|6lG_P}683>IHT1eCW$j@mgUJD#4g|5~RN`8iF%8vp>;KIR z^sdZIf`;b)FJ?v@Dq|*;nfdJ}8ZaFhP&qnrI)cyZ(tsH#l%AiC8iNSUK(&yLiH0LB z&6Kp+T;{!Ek4kU`hgCRX+_+ z$K|8@Xs!uXh)&xnN<{$`?3h1@88Lv^SNKG*4}R1{djsxa%{ zspvXDk(%bEB10L)JQekv%@tv@Q5z>LOhLgehY9Zd*(e$g6u@)hvK1s>UcHB5FblW< zU%iH0PDFA~OyA8$fmQ<$yE$N>{v1>dA=-Hk3UflXcn5)o3eCYe@I309<|<5!;n*hg zQzs3cw$J~8c27gyoK(y&o0jYA0g=rJ6}_1TR=;V^!y2L5T&`(17qxVLx zT~1R|X+EmyWK-+Ehe`HP=JT_P7R`sKd{fl`l~0CHwaI{}e6kwVABH(~22*`f(O>_q z>ISN}ipnDsqVmz|>)%mXi|Dva|Hv}SN0czz0 zpQBQ;_D6VzFq^6VQPGVHQ1db>J5UuXDxLH%)mGRpOOT+hmeQ#bB&XD7ZwF{{h32t@ 
zW;=bbX`DbaX(9OeK$k5zD1wDKwu zk%qMks~dgcV38-Q8xud!H3Zh4CG-$M_0WgxX9V4KKz4M*DmZ-7?;R~!iM|ugy67!) z1!f;xg*-77-Ssu73u|*fqF1Om+v_J3f$;Y;X`i)dHNH4M`{P=)2w{0XUGOuU5x6sr zR$7nNVpRs+w;ttT&kQ(S!&MQk7`VE@#bm|lNOc^pb#U!~>mXcAW^NNO;aURMD!2~8b$oud??&W?&|@0>3ydI-=*(Zx67+;RZ$fGQ zk0D3GK27#D1m0(#@63H@XHX|<4Y`l(*yqX29bDuK3+et%=#%qhc$(Bp^TZ!BHRes%~Z^LTH68g>+AV%3)H{G-SPVb3izXgSKSX&w*-+4O_CO7n_Sl+cv6p4cj5X1mloVi=JGRA z4S@SAxc-4l18yceu^y>9!4(PD4!91$#bmoT2;L0$<#26+i^K|@Is;h9#hTn_f zVzT`(Nd@~l)l&HJBV60y+P^t_RzA9i(Qz97JE}sbpFs`b_trD04cuOwf!cd48#GlB zER_eGg=)ob`t0np=qz$P%nx3|M$_)Y)O-$6SOM2O4_*(^q32N!+-hAw^D)5TtX7YJ$usqSI{i4UDo3I zcdI$^8#P`x7I{F#m z<===*!ne-GVGz3!v=B(;rO zT_anKzcI4axO7wM0;`Ghu~N8J&YHl|5ElHGA#=0~o8mX5E0$v4hVYr=>z0e`YsYXf zW-vP&2215|LuR1Z@`A;4s8~0P#V}oU9}RTwF^*4_`T8n;EG=JzhB>z^BP$w1XBDBD z&b{EAB}}O~mpF!MAE3t0lgh~4$57ahU5@`8OCLUfsq?Y1)bAk-C!V8e>xZ!WGi@v# z`Vcj=H-MikdhRCj(kW|lGRLk^M13?{S53P-1om{4rKfvPo{2oXRk^{WLZZJoO$4H;~8|=-#6`l(@!-w@H#i|0ZT*-$G?E<^gK(hL z!0Bf=WzIfa?-wPVj%8o9JTDvI6HIImqa38-c^$AGF&id2Fey$a;3G!5V{AUGhi)D_ zVrzn>APd>;M%jSjNu_~#_}{_?mW9nC^WU(~X+_wJ;ApwQa?%jiugCwDKc-2k&oh}R zsUPIVZQb0N%wd5A!vBob|Ct^CN0h>pD1CJV!EGr`It9o)( z!=++F*!@W!V5Y1e9r%I29(LPcVkY6B+$2~fLMd{qZnr7eJ||=RBVc&)X#S>bOZY7bgQF;w; zIv6vfQsK&gD+|sbE{FSCxHiMJE7m;KVyl`^Lm^r>jm@K>YI=f2Rt1cb{|$DkC+YxP z><*`RV=uz}23$pkOj8sbK(63#lqxC-lT^8~gFk}fwFiulgg9$W&VWa2(y|-~1KLYy z2484@tHWgzZIn6%L`u;?(z3NcM_9VX!QU`z4<|5+47o*a26fbF_LV;Sn6QG$jbcsG zTb)bP0bRwYRGmxgX;3S3R zxxJ`yq7jsW7Cwchv;$ivlt7}P4~l})iG_zNQ7#5kW|SxV@cZ8~p@e4wuLJ*|IgkK3 z;Hl&Q|4yMZgKw${xbM+Watq85xTj1AVZ8&>lkyiXM}VoDzvtNeNQB_j|sYC+m|*^5BWB#i~upMaqBkPGpEzoV7|Qw^6Gsk1i1 zLN7wE3WwaGwP_;?qDMe^!S`%!HaC>9X<#!XhboLVL?OL^x&eQ0J>~8A`H;N<&xRMb67#Rj|%07`>G(UF&S;EX7&n?%p4SH zl(wG+GA1)YF>YX^2yClPfLh{bkk+s+z$l%Kl#b@WuPCVi4$!As;wHe;XV!g0O64MQ zpQ|HMAHbw0%9I)-C8+Y-`XJkwRLPLpOB)G+j)=%9(nMyo^^MG!UPb9hjG1lyjG2?* z7+%|;$czr5MkxZqbdP~Q6eNHB-eM`%vz&oEi8%5eN+HQ1;s_g*ZI4}7LQX|MA3Qsc z%`GDOo67vTFqnUYCw6Bamx^Ey9)s8)rt;&y;e+fW3^1ThFiJ0=Yhp*PVZRwg>VAU< 
zj1l3|{iwM)O08nYm|@&zGuEmQ7Q6zK9B`CnZA5B{uPVhkQaTmGo`Rg0v))3yOuUd` zd)^W^l@*PIL|ILWNS)PEl{6GKXnMqib@H6j+bDg7O=;cSWc4G0%`pBSZEqeIRrUXm&z#FBF2mq5ii$caD2fXzDvIc!po2l8 zx!@X=8l@R28o3Py7K3=HV| z_x}9z5kEeIAO0PKt(`M@ z=pX1iyQlNvf0$!ibUue9>npJ3(fo|5OOviZ2#?BKs&G2iR% zh`NFuD9GB;Ib`*r_#Ah%$(HvUmWPbITw)DM))xmSVNQ-}Pv6zzEf*M1GlP3ZN0>2% zHnD*6|B&(i${XOU9vlzBgBV8~GP*-F_ISE=oAKBRq1b4Bqqg1XXW^Y%N84reJxPM= zMs4fr!BJr`n+WVj50i>-T+mjkw7n)+?mCH^R4+ay?rEj&dyzeL;N9xMP46L#4jCpl z>S(Sz#I$%mdE@FlBTZ7%M^Yz5&L+^Fc&0`C3S+}i!KNU})Ax#dEM!@E!@ z*?(hZJXE8kR>usb;~Gq?SFmI1!71+?WX}UNO5Mg{XeHjv8`U+b7oYNOno{@Jjp~j+ z;>!YQ9jQwr(OQimzSsOL3kQQ-)Zzb;2#4_LlYVH4jv*kS%JW6S+WUMBJ$E>--y&J~3CS zn^H%e(DsTSoF7ZW7p~oqjYlcXv>0!xLq^Gs*>zt&ALli9uwQ$~-+Cc#NIuW=n)}%= z-Wco8JpP5(+%|#hCiT#MEI`A3%MYY&XC7HU8k2DiFke2A=F4ew|6Pb9?n+06`@*oc zUDutR3vPqeztI60wQt8Ah=+{9Jj~3_u&-|AO&IIpj-r>iQ>?$y-T@4S@fux%&tT3A z!$|XNGWHxrq~L;XjL=88^h>VNp9A`zm%C_^baa_Li>qB;ER9USUA!30wQ=d1GbSTA z+2agx>8`+JTf*nwq7V$QAYsvg_}t~_im&;ZYKcBKZO_P-=zis&x8;2e4&4CcpcQFe z&X~jYv;d6^aSa}cAxr_k#8_N-Us|A{DNUDQ6?xEtTK>j(|`@m`uRfd=$C0?->1=|kewHXlkncussu$8!rEc8Ho*HNO?=1G#qX z=WPO5Z!lVjbDr zqj^pv*2l42axL%E<51Wz{-T%#OcVKoCGBSXP$Tv(TlR4A!;M*M#=0!y&opHZxA@_R z_;zuEUMB8DKV*FJ6_2vAi9sKCqg=!lPGduix$fe%R@U3hVps58K`e;{=kg#Mb1~$2 znvKn3JKSQ%xbdiXVVY&d3SMnvE!pM9yg@MQ&(^l*vB7LJYqN}>31-*W&PDumGuD&M zD=2Q>oVhT!K8sIp!E#yLgZ#%9tUF`NdDE6`D9g&?V_LFVEp9!a%)}?BaUbC!qt||3 zN`Q`wWrWj9c@H~_VN(|t&$1(P+4W3b(wfa_{mowLaea%E^ElePIf1@eeaLvXoX3T* zF|F1uKuwEShItaY10FIW;=E}yFZVA+72lQTg1<{V^Qo>CJzl|_(tQ(;?z*) zG;5)z>wIl{a6U5gvi9sPwxu6m+<}F&!hZbO4y+CPB$Myxz_zt=nwTjL|2yD+Py8Q* z|ETv5cVrjX)+oNv!Q5=gZ2p&n-N_F2DNgIe9A@@rZ@#26i(pST=Pz|;(QHC*{(Wcm z6r0kRFY3a&v}oCbjsaKtjsxK%wmtY8UD#k(33{r&CI62ALd)UMZ5TvnMO-X(EW%`J z37Y7`GZ5mDN2CnPTvTX}Jhi0TXfL{DXEg5_#`cbGcn3+KFEwB!Q0Gex)VZ%67XAQD zzFE;+Oq&(agF-_pdQk5Cn-2|ViSD0-u%y65+x9DHW{kOznV@5~*#4bQO`2eh#N7$@ zGd^B|sHB)z%ft24OB z--kT`cCwy}ZYy%bKHA6LBiVFQ7T6g+_B&uF>Z<}IdZmxO0qp+A^mFlppQE6~ljqdT z>Tq{g7C31+kO}$$jDu3Nz52DW`kI4*02(t8QSMh5-OQzxK-E%#{5(-_y2lufQ19pQ 
zv2T;m1=|w!1^_(lQ`)H`Khc$SaW4}wb{T(lpahPkTzpDl@W2=+!1nHnkL|Dkm%7oy zxBU%-qBP2k&k&?|WyI@WV!Tt!It^C*4rs<5!nNGr6mXS~g-cofRlJ2h9`0(yh@Rx7 z*RpykR!S{v1XxPNpjz%M!G&I@(W#cZ*vAbvn$&WO?&8_`|Do5Ap#Fm=hpmc z1nV1lJ7xmV(CQh1RD>4OPP{;9jt&P{Pva9{JHa;LTizi?zdw0ecebd%*88$qe-`$ga#qJH`Lwm8d zY+z@e){FI>d{-wl!k$|*y!Jcb!X|`AC(&~?nf7U7OVg1sdU|a6p~zwcE}}q>{Vbj% z+Mr;(AA*YMv8bXo;;`r92WXL z?kA-ru?X_8gX5D43!7PK2$w*g_ec)Q^xF4yEHX5hFA zb7O$Q3DGXaOZ%~j=1!P%48x2oD&1tPZROyhbLn|uC7>z#QT?N^a(=cy>(DF}vz@3x z37A$(rBAt}^U!D(Vov1)qS3@9KR^Z-$W`mxorK^ld^@vVM@d+c7wHgfye;Ef#$6X+z^)e6|hG z;VeuVroz^&hGOCD+$5h(`aX*2Ql>iT8L4An09nW%8^Eyh6IwB=%CJ}fwUh^RswD>$ z-vD;t8^ADK=8^aou-{4}eU3|BC6C!Y{a0U!lqGJ#O9Jg9ZnlJ!c>TLX-AJbEN`A+o zAglt3lftx?ka)Ckh8q*f6D!IRE8Fbch#kj?2L4y$|K*r?b5=LZZ(@qMB&z{&aBf!5 zC&P%-RNy?c43SvgAKI2XvFzZ)OlgS`h66}w3=6PV8>O?{<&0ULj%VAGhye?-kj4$k zp0PB+?9%&BMA@MQT`wn?tFa8!1|N@irs!p~&Fr9Y8N%ZAJyml_7)A_e9{8|x=I-v@ zZE5kbTz^!QpPc=56=nzGe{i8o&Hg$IvOCzeCuYPK#CMD@OmfB-K7{$-mkW@JltNlM zflav2Bz{kPC`&3FoKpBuU}C{N?UT^Ox^%Sd-kcwE;tP^G#1~F+Bo&T}NGXiR%41?C zO+Kb#<;D08cL62qe`3w+gm^V+DCZxXF~eM9f_}!1nwiw}pD@(BRo`|oqahXS;@}i| zeWO#4)s)B-9ruNI1hCxoVhX)-w7l{(Xh$v-54*WBk~M`0+mlltg+eRIT70n-pYChI2M=KpZkIj+Gvtp@(IJ%~uGn44dShHJ<+6=D zN{>p3umr2UaMMGMdJ`oEx6}kVjB#&Z^5Q?J5&FFtyRHeN^3=aB%M%V%elIT9rm;yO zPN36kIh&x{L?||pV!QguA*`pn2rkp2#)*077g;bWIj65Te*R=HOS@ z#$SqMQU5>L)Lht?FR)<<67iwA#D4k!RZOf{<^e;Jg zbpQHEtn;1XCZyku7C17Jhh#e`+1n?F=AJdn$*kOeQ0y37py75qT%J|*pp5YZT<(7? 
zrh|M==**69$zk3Pcu>aZZ(gSd1&=1a+P;@3#<7u@`WQTTLF9woQ#_g8l-Pp{Tc$*o z-s-e&I1hkN(WliEjCs7Z5G@nQtMQ{xd&HH0I~^ zqd5p9Z#A05vl(CW zDWln8_SBK$UM}{mnaw-J_a`CZosD?F7&eOi`Xf&s!(Lzy2J#DIST+_-GVlR57WEB( z?iThYdjh++lG%LLc{~3o89R9rihoRI&uGkGd_^jImYM&<+mB;|SlSnS(l|DEcnqq0 z>@{o@)K?Wzhs4wJvMcg}tMm*;*JJSE=+BMk&(IU`D1|2J4ePuVKH0Z^JnMlwL?Xwt zCAQ(%M2rSk^zCD@MR?nIHrf{N$2@~grj2i7Cwg7OTLwsRfaD5o&o$2%k76slX}tD9 z*m3NWa}0YtTTWn&%|UyB^Kn1>m-n2=+6VpV$K(I<$rIU_plAGe!~f;DPH=C~JnZxJ z#m)GKubRZ7LRR_ldSH)k5mo}R1V;~wga0;(EeTo%t}j;h6~1^fVy*Y%4Z(w7C)UZy zYz0f(&1a^usFB@nn9h9@Z-x46LD|^KM6GjF*Bju=d-E^TSSxoJg&Z=TyHVgRHxbxJ z2|N@|V>`^0*^DEdZ;;)rUQA)b&>J;W^)m4}Q&@L%G%uRM5?kD8+z9AeR}QVeG*MdLK-tFA%g1JH>3D+y=X>xwreijt(sF_%RlA6O~;Nyk(N43NYOoN3(mLV6oq+qs+v1G-B7_S_22>* zb)(o5-T4DESXXltRh>?)4;f$HSXoze=M^(p=awCA(BkUO8_Z-KlCz(~6tA}|CHCrq zM`STK5UPgzuZC#dDFt#h#IGK`*~kB#J?RskITJ6*ghTxGnJl8+kS~!$Iwh1JjBf`e z>wjR_vL#vXHT9I4pP$Ka8kawQ2b%A{y78&Eqq*(PaYa{uvqNwltGfAAYMpU9*+nI{ zx;G?Zro&+bc9d~%>&E}RowYZ21=?+<+qH$sk9cdNIk)yJ%D{?0mPqwDW2>AAbky79#2y21z8L zbvqP_KwaxVsFoeW>l#6#KEh=j64XPAYO|;wN(DbBWVB#O;oNd3OpWBd?_>kblIu7U zcBANOOj=9PlR|}+<`=l*ZWh9_m8gYbd@T9fMDoAy#QeB4pktSt$QoNC>t&IlI5%+c zZcN8t?98+8Vo_$PpjT0s8_md~`ErpB&5A- z&TpH=x+$*Xlg>BFJ&Y+kDc9Lnhzil`p;YiTVKKYCGruqkk5dG1HJkOr(a~meSPMsW zCz3phHs#k;!=^Qo7qk&5_A#pRY}U@)Rz_NLlSm~IK68R(qz+T+p-dNd1!UbWOFi4 znjKO8Nm9U&gjxYud!?^QWAPP;2;(g3e*cTuT$&jBKE0)E52yWG8~>`TGNG3`)EH1N z{ z7GXb<<(Ma56P6<^KlUR(HlGcddFWKFwO;^4YvmDQf1_{0JW))Tmmvi%9T!erJc?{f zloRGDNWkJVbrSHclqV9f^hdt%ei+m0ExzG?mdcL2$S4;aUHF!wf6{sXeS)lPKONlU-diN zoT|QD!8Y1ENRXU+(1XOP-{C4g;{mpO;5jkeM5^xmrf#ZEAsK$l03)SVk*Z%#^56xm zb<30AV_ORP;PTQ#s*rEK&SMv_@a8p>kJvk4%fsZmiEmlTdJO!-rN_N>5SHbN#S83W zp*fobxuW6H-;paC{ZA0@GCh1)T{wS&JD0OTEl!`TO?M>_n08v5zQI1?I5xtFa%u8) z-4y63OurQTD&vb36d^tyB=o`#8MF5C9xGT^p7S8<8=ZTAjQg1EfN{Hoaqr7{-?yZ3 zjgBkheD$b=pLvkQTDL)@uhMVfy|S5OKpB+*)tGO-s%zOKWTu~G#WJd}Y#M(!o4q|e zU_aUMiL&EkVaF%Z4#D)=SAS1-h(|sL!Crg(F{%sFD&A=s>(gQ&O!b-6k3tLQ@n-ii z`@jYw%dqw{<1h^KTl+bIJn}Pam=qukJM}%k`VfY}d%nT>XV$ax<_};Y^|LTRorJ>D 
z&${#hU+kk4BU=v`*-iN3CCp(Pg&A~Jm`<-9XC;@gK|z;cyw9?~zT;JB!u|d{6MD>}N!)GjG?FNJj z3y^JZ9IBgbk;o^%Y+ECv3Jaq6Tg%uTtZO+BUXI*&^BaFvbNKM(EXFqDXsxO#-ApLaBmT`ryM*PTfHh~4)OBH%J`(h8Dy8;_DdNW?V0wcAO z>zKH66{cb0I|GX|nGRd`%7e-(%l*v;LFw) zfW+$!5MNC7`$2gTOY=})J4Yg9!8J!Ab;aw_B5u7#^$ol zY{E{yFqieS1!JkGFfk8%nKob}N(JARi(W>_m$jCCRKfqvW$i)_X|T$Z5@9rGMo)8M zL_F3(#H$Cl>-EcHITJMGbFw}g`^J;Q0*%Bn>K7!0;kj-qIJgX#GKOC8^1<#RwFvvm z4k1XNQILNuJ0n?dAoiIbGD?r|2KlUOv!}lBdNJ=jPO+zc$VcSk?Hlq8e=?u-!0O0u zu^z@V=dlp(aI?l^t3~^T#goH}8sY9FTi$h4w8Z}BlLx;>ZLlq`1pWm51FHJT`n&Q~ z@azHNRo1?uCmfV@UUY_{gGR$hUxAH0DEmR*T2w!1{^#}jLFbV?ztr9;Ba77jae%+R znziQ_R$)aF%k<*C^&jkKNcLd<3(J2|N%~wTmH=J02poUJ`d`gD&o1Z0+r&=)$u7^l zuphDZhXw_Q9jguC)#qCgrZx81Bc5W#2kIHexKW=xBm!R`EmM)+pwV#7o0C~ zAQS$E>z3RlVQ`)k%m}!C6t13d;qE9b8PWS7NJz;)pA`gV3c7As3;VK6RES7&uPcVz zn^ZX58H3w3^j5Ix64?eTy=x<&2Ztu&aUWB%{!&;lQbdbdlpZuQHhPQ9;c&0qP1z_G z#AZ&Lu`6e3u<27UOy}Mat7Yd7^Lg`G)WjsTi1cENyPvKjxv_GI4v(*LeJU$gv8-HY z(ATZ=e&4uHR<0cCX-5Y=_C;;w()WqVHE=kVl0+waduhGOH2^i!uX3%IkwxVi!e3j> zx;FJyt8?#TXT-1fvoPLBXT!|l+@&+!{5U_NqZL^74uAYn41iO(V-0)6{5jvehCOBO z!bfJDw`;@*+EiQ`hUazswpj^R(i| zud?=?S+8Zh`(D}73R z>|9FQF?v2OA}+z5nBAB2xAwAl_Z$S^NKXc;FJ2Sqp?U2i9}}pdK$S0}Xm?6|x1ui7 zJDOCsu2DJTVWDzyf=^}jY!hy*^Vkz=AVVbNwi?I*1-a)yJD=sPB_^r{^ZIOFu#ZJ0 zPq;_5KKb&|!hz>2^s(Ni7!2ZLz4;}uXpcB~h$P#uQ`J}B1X$w1z9Bjks{H@xJm3qK z)O`3nSS;Sfcew(RJp(R0!EgV9jSJ2~W3k^@HPIwK2HfFiUh)NV1&0c5Z^hknhKKBD zy*s>uXAW|aF(#w!ni@UA|MY4;Yd>?c9S`wW_OnhwA7fN@uBl5a?|;~9&*We4XEDL8 zb<)*+g49)dnzui|5+grbP23~5k?e`7RD$R(z5EEgI{IQ-NES*ClXEz>u>>~=uwL!& zMHpTBfMsO~zJtJUrWy^i`L_pHC-WM9vAL++*bFDbJGKgdrVVo%tOcg3oF!IO;CCvJdNrm zW)?jSmTrqdu)81v+r%Ol^M5N?ShF}4tEq@(ZI#8NzGQ1dpTg2Fz3Fshy)D>sC$>!D zHG*%1TOY~dKYYnLyGMd$YnBv@P?vDV;|OT5H5qPfj!KHsElF?|#>YT$G3nQ>!3#-0 zb`X>$nnob?Mr);|C$t#*a2ODFm&9XyMwwA%f=|W{utB z3q@Kl*TmUNDn#Dv*0*FFC*p`*3=Uj9566rcue~ke8J|O8G9J7PUxayxVA!@wZYFU_ z^(5k6!LB-FfQ#OvCsIi1#;|lVZ}b%la!=et<%K=n#@vTQnkuO`aT{_yeQH^0WLp?&HUxf2UDj>;?zwddvg`S0Ig 
z_2>G09`Y@glt1E+e~Xdd{rB--zGXd{ja*4>y{+K1tc}t4@y&eVZe0$(%agy!q*55aqk(W*!{+gR-?8P5X6K0bC+G1U-?7v_$#ZEN zFL-#oirlCO3olTSC+3L!F&1Ix8*~H#1%tcI3Avz$kq{RE_WZldbSiL<>%@rH4utV3P!V-~&V} zz|jvCz)gVO9{UC#qJ=_~`9#c=A|CVs{+Pg>$5=ww0&Ev|iIpV>Y5{iXK998F8l_-C ztgqpfhRX^0>&LJw>-4=8WL++J_ak>GvBNPELfsi^Y>yQ={s=>0=QZNy9nTcRhVMA#h^1&5b{#Rfuasldc8qp*o+#q1`8Q}*I26p_8y4{F^gK({WHg8D)bN-V~B zf0)SE=MYK-0i^x2yG0Jor3h{8?Ny9!LUsE+;35&lbNGdmtV6p#LTV>qz+6pX`NtNW za1~ls&EXM0;Gj_Rp?uj7tWEneyrl7U#tK)V=O`ZDviwT?BFV_YC+G0@eqe*!e*j3b zr)dQx%CoLSKH@{c^xAtb<$;xKB{>6B7=w!{Y;TX~ncmQE#`6YPbf}pPEV-)3J7+Y^8Q;xtXO?d&>!u;d#>z1cx z@fUt%i#oKTS1Y|z@U)-3i%i7!P}oQD+rG@BPqC40)`5>Qi~z4JpT2@F#f`a(7oB2} z38e^@sR~9I3L_J*K6F&GWnsL~M3m}C5`t1~ElZW&lwSMx2T?wS;oa`y-c!uMPTa*i z{lwy@e1wf}Fc9|>!V(H!-V^}E$QzKe2cC}A zE&84O!=IQV;&*h0=b*Q`1tGfS0-_e?(?{dW^7rGH5=VX7$b(!v&Gz?uAK}6nygp#f z%yhD5$0BLXiAJPxwiEwd_>XR)Jq{a<&ajaUFW*i;#&0=;jR9jn<#(T90|TEI%-0t2 zm(So_(v(k&zdM5iPVC}L?)-(dVy9*nr~SgLW>&nH-)-R3QR--J7;FSM<^p>zeJh=yQ@CzMF9&HuxymFhhB*E{_Y-45jKWJHQ>V= znLD`O{Q!JVN|CYocJbaW?F6IAKXjygZ%4gL08cuy!^o4EX9Y&G>bi~V4~ zjgZm~`@seR=EhSr)(=;vaFOrTYPCsm5q_{u3P$&-_>41ViUgSbU=JzSc|TaGgnjRz zbb8z(UE>w5>(*LvPfOfZi4(9$1-su5wot;>_`w1ctlSTlEMc?#U?<{{C6xYV+iJCj zOWZI&+;+kNw!{y1eTGPDYd_dy3bxA+_N|0fcNA$AX~4{#CyT-)Wbf96AUmxvKQ z#K%fRUmqY)0y_Bs8wsHPXuyYsliDve*s+xey8>{&Q9Pk|cpjQ4o+ht7YA$Rl9Pr~L zvD?bt96%ptk`H5*n2YGhpz$jM8*RakiM&BI3wJm3p&|&STUJsq+CMaJ_5l>5(6U&t z@D_yPQxlZpFYghG@mT`IvOl9BEiedt2X3S7Q)pHS2jmi8w>KpeH7<~_$tR&BNpKeq zsFs*7l?cKt_hFVvE$>Unt$vWHK1ivA^b?TwTfs&uULq2C)K&T~8s%eI*e^ffD89`6 z1!Q;fMGU5hF}md;!ip3ZS{A|W%=2b-fze5R+Ropml08dm>ot+z@jL4s?85yonCC7E zgs8IoRBi1$t6I57K%VHk=U~F1zZYx#L+L9x$zemi_Wl{NQFhAS-cUTbTNoOoKRctA z;u0Y83`T^-xDV!Q6~}wGjMInud!Llru9nchoHz}N%!^i_P27Q4Ns$L!Sk1ar#ryuj zI=1acr{HZJ^Gx(7=CG^>{{uzsllaU(Sg89Acrf2E6qVlSHsTsSQIsEVCUb3*OE4=& zV{90n?ebU#({n++ZR(ARFMB43Wf)o(j&^!1J!=B>SwLgLCo?(rC!B!Cb&a{ZP>aDh z7b;>3BPeS5!s}jlat&T8!~vGVk8@yaPTVyor7H!a9*#OGJ|L87Jc&rq_cb*N!kGW1 z9@ zU*VT7CzIVTA=?EHq@+rcE=eX4Pksp^AS?~OWZj(=mXQ>Bk}7E7vjj*kd_S9Tbht@R 
z$#g}Q(kr?!U&`=g5X5827*9t?QE5o4*WO6gjHXtyt{l9G^H0YLODaeh5=lh2Tt+L*DvX80Q5d%TN6C1c`^7XH0H?+gV9jiJ z3N7d1LC|wDs7wSsKtUe+i;}lh@McJO2M8}qbZ@%+*V8!LwR|&4F?V@Q`z(wq`n z-|?v!=wgc3T6HU#atjH!z7G)Q`&;Ukt#Cm}l-q1e*i~qG8D8(MSbQmGf#*-m3!dLe zbK*E2nOv4n{c#v9ADW}^Pz932kmRx?ghE)LB*7qsp3?}z^MYUeL#{z0Ak~6P(cC0i zw{#`z^JyltET1C!gG1n_GttL|0NF`1Va}l}|49@IAiyTLEq00#I2t%7_`3D;6w0PccgSipsWGfV5Ow6YK|ii=L%yaOx3?7~ zh|@+X`az#i-C7_b?^Kb8BC-n$#mHYYy{q=a!emdcWw0gL6Soe8nq=I2l7|4yx}YN5 zQyqYGBxx6^vB!q8MI*_KMSbY%6dWy@QIus(#{IIQA)QfhZfb2inv!FznNNr7!5&Hb z)DU}+j2%svpwz^Ut%+^-kDXxLs-o|)j6_W_d&^>Bw(a4&F&;?PMO9U=quN&vU9)%_ zZE;CYiadyy4PShP#kwPIN7scuLp&$~1%}9I+RE~)kw_GdtUZ_EK=D(dfdot904e-f zdOi*z9<4(x%5g>Rkxd@~a37m1_D`2B5g$s-j{rkIq+7NR!L*QN2YzkqO2>Ii3q6#L zXkw|#0v9NK)J=ph3O+O^Pv7Ee<;XOY{8M1yfbku&>5LD zu4G*pBjDd-0kuani42*6q0@Q1N4~2xlT$u^F{azWkPz0_+U|D_#UnvtT1hcz3@H+ngbO(jLJl>)$^5r}v9SIF8CjT^ zUbsA?EODNUYWatlv^J$%e@G(BzMCl{b%Xh3`8j~0B%~1q@E}`@Kwutzk@<)mHy`Tz zAET!!SwAFN-*prZBG(9!lZ40^Tfqfn~Z)XJKe@Iy()kL(5!Ip98?x1~448#4&RLrKr84+;ygzb2#HB~TcKM6~T zMqW)qf`yF}t6^Y?i_+Y0hD`GqOnV#FZ@pe4qUsdm66iY$V%0F@A=EI%5*6b}yt-oeC=0SD%oeCDj9$grr7u;_}i=zHW?S)#K6 zHBe=V2|{%u5agq}579Xwo!}Bm3*ez5l4Ju&Sw8(jHgx0cbz%M{!F&?G_(vvOggjMh z;J}2pMpLF-nkF;lAu2T8ZAqg7D$FksN;d=9*{5_1MJvoN5wR_sAr8JB@L}Q%zxYS# zrikE|!;9iRODNAek^_xHg~owO<6g=MWIG6SBiu67h5Se4O%$SA)=L!|;42do8eM#* zAb@b`l4MGfEtFWYWvo>4mF1^_;2#9cs&P>?;=x*y4M7?3isQuH#cN6^^V~)=V)`h` zr1LP@D8gM;*kN%oE5W5F7V%Oq>(@473Jp@J74D2z742J_iGcw%!E4mfWt2}muzGbQrpq|&R}7*ESHYjZr^ zS{sv9gMY~(Am_8DLsAJ(HEXwYUL^w$80!!SHjRO3x~40<;?tOcYb#4TxL3RdwkKti z@$xNV0^k!X6%YJ_`%pSO_Fiu&K%N9V>jQ)UKx6IWFB5=X&UZ@0Qh^}G-@PTI%_~00 za0yBCLEOe+h55M#6DlzSd>G>$K**FX8+~G~j1k5*uYs&lkm?#K-$=*>v<1?-dz7MH zull5H9dpf%v5b9x4Q7VISlH33)uK9Q&k5>-G%ne&%ZB`R_vS?rFRM}eO9?+Y^EIS_dxpG&{( zvMcwTxQVX?GVCK?L`|e4))jkl(J|NHxNXpv)fb6g3I`hXmp}mb03N>sZ5eX;9>k!R zgr$JO3N3|12FE=N$av}s*(qg+x&RDBy-h@==Fn^JBQq^cgxK3qM3sqk`?8+k~Qn0%HACKrlWNnqHJ)Z#a2uL#=yTqZh=B zc_Dg#>6VQI_SoNTe~qtbsCBp{OUTIEm)w3BV>nT#jii#aNKosb2d` 
z2&R5koOn}OUquM@?NfKk%)5O8|D~ZeBy8b$35x&>i|)HA#eiYhYcG-+Gk6&864V@_ zj{q6hvfu38xaB)QM!E`Wi?Dd!;41^Q;lTkELA~d6OYdR4B2a6|DqrJ22Wp+zeZzR8 zM%qC4eaLoH$@MUZ23KPDQ8UPfpoh^Skdc|X#YQRdtP6&sR>-3-(5!R=ef zg1vePX)XWLHpRLK8f+1zMpAd8U^PoDvQ4&r5An$!F}$%wRDg?XvY=?8XiY^=*|w)b zoCJe@%e>e46HwA<{fD9rdUYuOw6Qj}Q3I;Ex@Bw}Z{0)-yR(h(+V31n)e##hkfoyb z6JNI<13SSp65RvA--~>Rz<-C={=rxhBMRWrT4!#m5Izgh&@nH8p2vssXPao#hKGWM zWz6wUi80+|0A*2Fa9?J(c)m)w1LzAdRi*QGU z^#aBY_^u)Z^xtcYIE_y3`9u7LrkdS7#|~jDcbA}LDaVe-`#UIY0`LVjgF$Q%VZ`=U?E775u~0~x6x?m_?YVLTG^N)5)LFtchP z_ew}^4dhxEY0OhKkkJy7UIY1_Ao2~A`zxL|%gfzhCq4U^L3L&5RvN*IPY8PLEykcE zlfxn~?a(t4J1!#=!ou?Rt{9wOvSL82wGa^(1;$#4W`!48A0k1zbyW<;WtJ>>y!PfP zB8R^^g&baiQxsI};wyysoLP|Ou%x=moJV-vlu9^s*tI%44(lnZ&6zTr&!uch9*GaUS#B+wtDdtLBQ;QX&3BB*vJN0SlU~z|Zl;_!*RUjjq)DC|lUFR}d72 z*B%5gl>YJnJJl8)ZVhhVzdt^Y|CwfxZX?~?jC`tJ83A0rl&CR2khJg11=nkY`-F( z47OYtNHsa77~Df>I@9r$1UNs%xcQRTem0R_bOl`mWKdz^a2Tvxa^Q#Am})!}T$nf# zGcJR?_E!OgxJ?j;9Q=TU7xJX$+Hf{2k*{v9h4xwj1g1{f<}SB2&%%M66kN!6C&d!3 z3~~*Q`w=^-vOYCl=U+9~USu6d@dsOIs~At@-WJ+4_E|Ka-cnn_K1tw5TWSR?I)UG5 z*KTLi`tx0O?O`@{Bp=-h__7iFP%CX(A!ba<7)ZGv^mI#-B+0wYT<)UMgl8Sr}Y-8yD|U&A&f}dW#O`3 ziR6jxkpG>9m-0UtF3SId4q+y~xL9MRB~qB#qfZ=}8Q(yd`2+HQdk6ia?>j+K)C>u?}Wi|KU;q75#kM0yeHip1OHvZN@*w}~y zR90HMOB=_=l8tu+)Y$k*1VDwBPeJ@`ERNv!cYuu#Mv#qNV+bWpOziPrP5fTGOsEz%ksL;Ye{7rnU zDlN`;OgTE>$P>Za}ThR+rOc*zYZP zb|>wPetY{P2`+E=2a4A5i|1>9&3(JL5y{%U1)tbio5$wH@?D*^MJ##*a5SYwKDr zXzR-#X<<&>-EH}#u3BW!zd=+2-|72Y{q2L=^2fSr9a!tO{GF~y@!d;#!){tat4>St zW_}J$nbCR|T+Qjnx|HA1O$%XvF5yeNX@l6#C45^qZA7>2=v(5(Gg}_s1IU3zW6CVg zgGCtudrAfnvA!^Xw~x?5TDqk-2E_QF5vI>9cZBey2)q|gNAuMYT4m5bP0V7tk#^Rf zZNjs=YxlCkCj4}FZAkxbT46YvTaJ4@EnmQSu3-Xpc)^i>eo>p1znE;*wv}E}vvkvf zCO`!M^>Qmdu7}nqBnu$h&YxQ@oL^=+(pV}evm9*9H}ue=dT#+@S@)#T%>@Ku; z;EKPv%aPS0x7<-?Sq!g51Y|Yl7kg;k!k_O!PY})^JO>AAvGA7fY!qOk>fxA%>Nl?k z9}%f_nL7x|jn^qV&?%%gvO{Z>00vL^z+eg0<+lOAYp5Zy3oY4p^3i~?(6S66WtQ>a zkl->)GF+6gWtK_GwFAwv$u+lxGWk|JuZYxIby?qt1SJPJe1SCK_>hkYV6T3>QBN&2 
zw75GNwe>PY^cQz!Vkt8nZv(G=U3VVe6E$&Ccb?r->%!jc%b)Fuit<`t{&7#OwRKZp z#4N<>hsToNm;cyPYs<#<<<|)EfP}QqsD-2mC#)#N)scvDEsBrprG>H|qWJy2w9)1t zd2uhTBgnVCv`$T*Y)C6XCA)IN?+M`ny|r6IS9c>jpEW3{o^?m5`P9D<1E-8`e131# zo{ru4JH55>y`Sld(;SyED#Q3$j=Hg#)b=E8qO_1<8xhy`WC>P|Emu%Yh*k-TjzXa9BY4^$dXdr=R}OLxO(|_) z%xd7ZyL$4jeYGy`|H$Bt^@5vXeod^Z;5&F#vhOdNaoJGr-|r1Si#+p&9~Zv|#czrD zeNp_r>Bp}S?wu$uZ+L+Cy(l;?xZU3HF@m*U{LT@-PVqZJ{3eLsQQ|jH{7QVXaNjC^ zQ^oIi@f#gMY9GHPk;rof zXdT&%QT&AgTAL~Tq)v#FR!l!M2l%VN&#_IqHpezSu3ESwEJp>MQU>ToAs4^_TP5=n zfR(oH8^vwR;J4Qkd5eK~(!vw@Jp;8qT~8;F!09LF*wS`U_-^SZVJG0Pnq!;N%bCbO z8K{NEHWxCF_4-F(BH~U7O2%}Tx2uv>2V|h-eL=~phyhO}@Mi~WtyyLQ?>$KC!}{R- z{UEJJ^FK${RsPjT{>&h)bE7`sW7;QhB;NyXnDuNN*9HXT) zep^V181TwSo))7yW@QQg^#St)J=SZ2qyv%#?Y=Fn5_u1^;vxk@W@bkjm={(p11;B~ z6p0u1pOS8onUqPNjgaYBIYOo*QH0lKV)zK2G+65%IaToMWsn3%SbF&>Xf{G-kP$Bh zzZx$E&&9N@f9c#3FEXghtC;9S2IUCyi*1S*h9XDO$uC@sF_}9=i*I@taYPkQa*8S* z--$SWRopCO3^*Q zPqd?E;Ysm3zy10+ZGthKjEuRU#am^IO0D+@ja_EDRjL}b<-(6KDtB^+R)6v>Kwekt0J1w zl$aDe6ntM>OB8$;;iKD1KKxGdzoc;R(}Ydc?r^CB{vz^kgCFh;a%bR*e4k90$Gtk0 zm=nMRKp2zO+#j&IfQ*X9)P*uYC>F8~IJ2<0+DAPL`(2;f%}W{czt1F~AyHE4IOKaJvTtM#M9i5#l+3%RqT ztZdA9n5xpG$3eiMs_bgBNM>mO1i+BXgoyy&FFb>IEv1Yck{2l5PI51k2`eFYg-nct z+|ezBJhziP5%Bl~Iw#6u{aISVM^%a65e&#q`oS)S^l zk^AZ;;igIds%q&@CHHCR7P5{?cQws8RqcnzmwKWk$WMVNugx+-Iq^42_hxbzNw-1n zJn1eVcQ)KUnUI%5yq3rSxFg8zvI^s4nv-X`^ti|!JcJ*N*V=XKj~a|~hYpv%ZUgwn zRy<<;4`Y5is2WT1KGFMmMZDI={U-|9AH2{9K8VwcwO}Y0PuNSbGPmJRBb`sy`b7@z zh96RRK}ze&o6!@`P~z01qeX&7*r7T)Wp;kkV+B!@FF*6-QpFwg*Z+MkM_ya zWs@~wC=8NrG>i65vMJ*D?yo5KZI|hxG^eBW%(AWya9iUN}WmW%YO;Cldz%WMk7{40hKZ!mE1?*RsymyD5*N# zR4Bk(4DF4?MUXpL;e1Jk5nC|MtlEwHN8rvNB^&T`!Y!1qk?uf}nIqlR?I}W7g@}+t z5$JJ71d;MLd1->y(>D4(RN~ivzl!Z79?PBkdEZ2>JL4z$^h9mAc{|^js710KC;8q) zExOlgES49^2F7QWpfv8laB-p;;}XnXjl;CW6tkEb>hMzwni~2W+f}fH=PI+b28RBy z!QA3C@lQu-os?$U~drds<7tgKY zwfBqHNyh8A7SH>B z0!23}8ltF31%AG_kUqD`^pS9wOdA!=Q*^PSX^Qq#^qT-_=n_Sv6uqcP{@aSqQS`cs 
zcbchiMW+&lep3U*c-kxh4n-?X(*LBQ6BG?pbiY>?(F#SUD;ljRQ}p0}3vprWG`9k1w2MdvA+t>~kQKBwsGitbQ!kD?Bh6O&ZJNGJU*xvmNQrdi6O z35t$Tw6~%=RoU%V^thsD6}_lvV})<4s8nMz&1)zVn62o;icW3lmH{IaBSO*UiaxFA z7DdCf!HEMSc7d%z)9~HbqmLNPniHFS=F06N>sG4p9J~KfAGv==1MW z_|u9GQ{l;qPEs^O(awslQS@y^-8+?|T+s?e=P1EHEB{}LGG$m(MX#z9H&Ed|16Qc{ zofWNCxFaej(`9_O$xs0&eE>!GDO#fFD~fJZbe5vi6?G~arf6G5gA@%=^bcjgQDHFZ z-+smLS-wD798<51or+(e=y*ka@n+WZ2dD<>uYXlbs^}g?-zG}+e}iJ=C^|>c@rw3P zG+5EA|0scq?osq@MIB0UKSf6=xF`N2a2_+8q2j z&1;xUC9O3e-uH8@C$Ai@1>Sk&U6~$&Q}oNfXeI}F@JDMA zsn5%3Y1fz#yQy7(pw3W9jZHGVsF_LdGZigY@l1hy%xzkS#x8|}f5B~7EjM1||JkFPWfqaOb=z1tvsDff27rf6gSCzM-+w?^M*K=mbU6X36k$MKcu5 zR5VA?B1NMWH7yYFrkPyQVcMi(O=PE8o@Z#X+}g|8(V>3p7{pWnJ6MxT{EO#ovBZ8^WEBX}ycrKc>CXfC`5XU$0$r zJHj(fA)BS>N?nOwBk3kZql%=zI!)3{VhS#L!74ONM$l0p0Xq%)CB``|SqW2}=sr)5A|8~jWqG(C6^q1_A)R)7lGM?L1 zp#svCpd6n9U)HJ=MV(U$f0eXC;Zm!mzf$?D6|GS5s(+W^rm2z!E1LNS`9n-5hhju2 z+Pe=mQhXTK)@qpL;YYZ3YeP5$S3F|FHfW2PrEx9e;M3anCYJl|xM#k42%mdJyR%_& z`rFzL&E46uwhc9b^_#>>G?5OHfj?>z2}jRmf?v}~{B%DY$srsAeThcl>W?~wJK(38 z0{Zje;N*R_YR$%{BhW!1`12_v(2;j)g^CcBF{_&4%ahU2dIe>G? z-f1n6B0T8_qy%n)zZ{hQd;*CMt%q~LPt#xY=fhF_BOlc&CYJ$!Ee^d?UrIv#fY^Ux zN`*gyB;n6rVETu31!lruAz?K*Dv8RSwYWeE2QH_+Jn9|y^aE1XIN`6X2P9D&{fdMH zmXN<5j=c4Y&Iw#4`1D8NlwhL!cc|Vu1yETY1%oE==+7TG3w+-n;WFV*r@Y0VKh9JS z7x-V~`L$PW6N%nYjK{Q|V#>me+iMk5&XmAkSr12uFh3j_=!Cx-l>U4MlD%)fUn`Gr zCGWd$EMtn0P!E_2e@;EcB(QQ@tw3^B!(YF>AzYXrjugkhUkNHkM?MRQ9^G13U?uz; z2#7x)PUL(&Trm8#wYs(`-}qjwJj&56@Rti|Ql5B+5M*dQU>f|idPILd#YFweP841F z4>z_SWT5kKtzvQ&_~Cr=fNOCBF!cw~Fj-?x`U&Q5y{ENvA3fp6f=u80$~ao9ixPPm z{+pFT=)tT;r$0ZNO!aXY5+_EoH9!&-;HQ`xPsdFZ-!#*{y}9CS@O^)jkb*im z)mD+fXg?rDan=FWKH`<%)C#0hG2nNA(w{HIL|>|h+j27;r9Z?ENTMC^*UxGa=wD5! 
z`QQ9?EzbWn(yh~R;8k1J4hP-{DCIF3QrM({c!$@%fBSE9_uUj&2S*0Ql>6aiTUL3K4&u!+A@lhEwppa@yZ;+*GjQoQ>vz@9 z{Vm`BZ-G=QE(qKKN`Jo0zga@Ipz*HFe80A=zGCB;Kd`nf^T$!FN_QnlFsH?=A6v z`Wu>Q`r5i-R`3JB%0yUZZ5VI&xi()54iHm~Et>|$$VrP}P?~(i7UBxc$`5|7-QT!H zfGPWVB=mXh@lI`c)E+HhU=jSjzfFE_;%x%n_lJ{8UUEC}Kl^$9^d7ApiK|8w%s-O2 zYI}esZd0u^2i)Z^)cPql{gogZlsuvlaZd69oornR1lqq7w+Y9_-^n6$!V%CQHSe3ts#R8bF`2HeeT zcm0R7xv5pz@{&&wN~&dmT92%o)bx7UQVAUW1&UvkGN=>(EB$0qP~iV-?`-^|sILEi zcW06;Nq|jAAPG;K@Dvnq!GI|W1c(^LhY0yXic}W}5EW&`)S{vrANu=Zi#0`bsA7#3 zX$TZkwGu08L{th|S4E8#Ws8VWP*+4nMg6|d+*vY9>0j{kzFsGvbIv{Y+;i{TdE7G- zdHdHXNntW9N|{r|;hi`W#cf!6oI^i>v^>ah6@B9aGU2^TQJrYBp<{^l_P8MMQ0EwB zI$9v;4yv+)HHhv8hxDI=%${OMDWDyKKitV!?@C#$5^3{wdL6O15j3n;63p ziG}@*SbD@=?v}$d9=nd=NBpk3fQy%I+|wtrbXx{7ef;e#?(p36L3o`;a7&rTVaqlF>7r zrC#}GPFD}vb&W3>Dd)MG2B|*LYLX`I(wcwb9ierd9Ks6D58#`Q5$nL*VUJd z`b3*bX2M3i2lgxs1bijuH-7a+?b0izd?P2e$zx|vUR*PO_KaUH zx_r)+=ULyqW{gW2LB$Ab-0OT3q%zG=BkJZats8j7mC|QRFMaEg&li)kDsOWF3|h+uHuf`!}wB1=poCQswP%1oXJ z8&QGdl%g$8)}4z=1<@6QPUHU(9q$IX9nKP6C`os=xKp*q=|mZ}bzE0#hi6ZmjB;tR zq3y6fNmpq*T%N46?HZDFRkq#gWSwo-h^}P;{VrjqB6I9tpw-3mX!}ebX4jf15J8SXaGi3`q@Sav;9l`s15M!C>Lb zZa3_U7g^p?egGM{^5<#nmuJ2ArZFSC;$cG-qOZ(V>i1B^!`76yjA_~7u1pI)Zl+Fl z-(a5A{FX8J;*CscpF29edx|Zasu7^^?hv{GDw$SHQzM356=Vj+G zl-HvBa?APMy()E@Hz+a3S6Cz3Il&yxw5GHhgL{f@`wBgdGUApvMrB$zw;R*Wk2$_# z1K6Vma$Rf79CG$C)itTgtOF4rLXL=6c1<9o=jO*$RG5i+T*eA*lmoC$Rw^_WvHSoJ&9qb z!)4A!SnkMg4b}BidCiWgRgL^MDjREdQ0wzYr>j1mQ&rb^MyyLTtt};NhK!)MQv8SU ze>281IgR+!lVk_jTYbyYwH5YqRq8BI94{-eX1!~SI7voTfdjE=ae|T2jXVt_%5Zib zv7bEFdgNW>r11^omD&WAKFUm`cvVVWnk~PAu3U83O1)d|Y-1I?XG|I_K?msb@7PMe zZS?!bBm~OXa;-~0)4KURWAHg*dxZN^uQR}?6XR|NFP@^~`}tYA{gxKvmPhktDk(n?Y58kVO20W;zH^S19&^T1kDqUy@dO+{!yJ5-l_viGI8V2)p0CIA z>H;kvUa0FothKI-86&1iT-6enT%U#iSvdx5Q~L+#r6l*`Xw!u;XW$<99hI3w4#FZ@o(Uw>YASzu|TI!StbD>VDq5M%VBC zmE(`YgUsNa_90lQ#~s--vx}Np(%-uO-^TDMv1LkKdxKJe&3a)a%XGi5ze?80?%$uo zcR!cj%HC@XE|Q7zF`%syHRaTL9mu?!Y*F2KlQnBE3ta4CJV4pYC}iM`46y3|X?_hT 
z<*VgdzU($FZv@5mmD{YX^kpxWR(xE>y}RgJoJlI>ZBCIdTA|yW@<%NK^|E?~hZuZqV`t8?2QHKehOIg!7}=MIAr? zyHU%(*reMX{HvBPdPK`Nf?~h*5$jw0*o&eJKiSNo=wptbgP?5mBaWP7MF7A>!OO4|jV)^?LY@s~fTs~TCiRSmCsRt+s_v+mz#WZ6OF5yTE> za{UE*PweC1v^MRe?FH@R*6ms@*`YhR9+cAL$X_|~gD+|O&p|2qBQjK=ZkP(xoU8&R zC#isEu*!*NTc>DZg|b@Wzh_2>L1rZA1aSr^Bd7pn!Bv6Jw`-dp-WK;t zW&BHbSPY8IhoB6_d{4`@d$fGV9%b$P!03CjeKw>s=_urMvN zvEP^|QO?+JoMTT}BMtXD+pXSd{1;H7d;*jRo_FL|9Za0&uJ}?%{xT?K+*i881)#M1 z4JeWC0@(vqkFRz8M3Al2N8Jx2CG&`Ovoh<56Y(_CsUz0Ga$s8qH-4VxW5Kh9Cc+4S(Q`jrTFM_x)lF6Z?_hF}%?uqgQIb z*Baa5tZyar)uAJ;O>qt53mM4@N&A>A!|$PiDj zNmJ>gQp5JOMG04!d~4|(k|9kdOKVVN_hZedsvGP8o6oGS!sh|U6O4Uo}-@a{LR5O?bG=?>20ODmk&WH zFqN8JbIZPnYu zC0o~vLA=bPMo;vpD#ySjTbnV6#3p&v(8(UvU>ivH=^Tu^(7jJ3*Kq^kwk=InrP$4eJ9k^jGHW5(#=7%UqL1>ahN%@ERr{PX zdHa~WZA@NWx=OC&SmL%VPgUjEq+_3gePAj(2XrDo$#u$Y8&QSgs3cu=W2>dG)mqN; zsN(Zayfu^SI0m_GN119A)0uL(i;7<0QGF{rH>z9qWTdE!Nolc^sKK3@^~h&N&qzK0 ztz#3kbemoTZaFJ2Rpr%WtDKT7<$KKx%c}0U!%*|4>y_CGO8Ed3E}eh%Bm3P2psf8{ zKq-F(V_b3+}^ z>sfQl`8;q-FX^h%m!yW|PMv#9=Uy&T`Dc67B~ai6-Oeo^->%fvu7x%~Le;umrgAB) zXFex0{;J(Zj`hapM*rUG682iC)#<|}Tip+Fl4uNh)Zd}Qj)7bD@E%hJdt}C_v{)gp zGIp(FS#c7moa0e9Ld7o!)Q)-?^g=;)&t<#}Pw!S-L47b|eHu7_iTSI;}+bK(| zNnaRqdy^lpu7&Ei>&{)W^~@K>O_AVokGcZN`G**=mg~B!B%N~(l_`nc#v0*lT_z*x zlB&8eRf=g*$Ja46Nw&^?eVn#gzhw&LiH|*{sS#UIjB;>&X>j&^LT+m-3krc>5TlaY0G~` zSDMi6OHm@f0#%ESfgUf5k7I49clot8d}Z{F_&0jgAD|Y;fm=Ryg_5`&Vs-N3hB|c< zyR~i4Y#*O`+RCAZ9jneykrL)Nm6(+Wp$^k%u#xE7xF@0H;&lSu(HLY zo`bZ0gq@9g&(Uy?cJ*UI|5R_7|&HDC;Q4o>#VW)kA(>`&hw04_r^AAwU zK2W&)qI6YMld1Yvv8tJ3t~%pp%sKYsEAqTT;tMx3k^)dK;w=n%l8cF#iYF+m&Cm)%isACg#%}`=y z#rEXabVpZ$Qho;tx8BjeZKYm*aC&4dR4k*EABP(`b#+VPB^Ns-h$@4+U?VOJy!3dTmgzv zwPWPR$Q|R>_nl3(PdB;+6mAnJc7+4d)c_7sy?14(Y_0~ovWup$(?qF%6FU4?LX+bH z2RD%~`GBu0ysE=>aNJ7_*YcC)?t_1KMyKDCMvtd%CBp?ltMEJ68hTr)4;| zi#}C$kdrl3`Yhoq>Uo^zBhp4w(s%JGj)kBh>A-PFLPh1Q-QjQrlb zGo@aGLa*y#xMZuxj|{_qlrK+0k2wY|*_!#IF{rot(W5e-$_!BataIdcNA|2wkEr$gdZ-7gvQ>6XuF5XSQQ3zw!dZjU)!>o>)$>qh zJU!-%vaQ41(%6UX_}zM)w=mKndD`(D`%N=a4MFM1Bd6X+(&MxY7Z&(b0Vld#ZovFi 
zJ(RD+ulxmGH4Z9$vrsl4*L4`!_thAvBm;6b3^RX*40XHHbDJ~Aul3c#Q~h+WS%vAU zP#RDB&!`)nwo%$XHCWqs8)E&&YmT*lZd~fr#~j}`@;|9$B$tlVeWZ-CKK7dZekO5L zI<|3a#YXn@XGUrJ{h*ZGQ@!?j7zv6^mUYwjMy~>CUQF}9I_eDhrJgdl#9CsQMN_0+ zIqp@zPWR%%HssGIv=Mp2Xx(-mC`<2mj{M|k>tBZ1uULGw-^CN3G1~qQpp-43*uUlI zt(alv6^V5V)|IDe>)Svn8$q#t@ic3eY4#f>cFVDo2>ecjeZV!P+P;0PZtoeV+x>F9 z)ohxBN6G%E?&VRl-*&4ukJonlDs;QBA3}s&&#J-rnGB@5MmSrCl+MPHGrfd7lKqfZ-zCEI{TQGNBbqz-8no5@J9D>qfexz;l(NT>-A`>p z>?>z3v_9@)_KSoTdezNP%npM#-0CFjRFdrWQ;MI5YIT2wC!6YIZjt06L7abpk$qXR z&tx5QuWcf_V^`x!{>9-E$Q@9!j$aAfHmBj0XBwpJ4C^WX5|qQ!DOc&K$r_ffhSg=LAzWP*adp+V#3xr+G1+V# z{AfRPiEeiXDDB#oSj`#Ea&DtuN?x7zRSHT8)ma~BnBz|gUguTj^gAF1>Y1SM!EQ>`t%x2)gy(L@Aezbt$&|ExHELF%44hmfdYU zCQl#<+HPCoRl&Qw>OyF@w57u2_jHkGLHitkF1bSA^0*p#diYe2QQhNRV;RvdVP7b% zGBs#BXO5$Hd)52Uu)WTdcFNY}-OR%A4S(>e$02v`B5#M5IgO5&JKI=0x|s{?A04aj zGnCxN$)P96IZnzMpe&pUM;4TNK_*yLf%2nR$f@`I*;$yNl-Z!PpVP#wfLDQH|0u}g z9QBcwPglQQuMKjWwS)XX3fE`DRJP|NmFir1^Kf0|4^C0JC6{qrtabF(q3-5My;~mk zsxMnO7QU}%$t7E-`OQLmgh57FM7J`6urrcnphURb8NrvEbpT~s^aw^irR84F>XF<4 zO3knrwcgyR^@BjsF9gNE{yDtBGrT6>GqfZx;wkWCdyZYHF(Nr#_z37`7P2&GbOPAu z1k?(O!)>5!j3_8mxEquy>;R=4d0BVZ{!bmq$X9eA%U|`{?abF?X6Gw))9ZTfw}LYF z&+gLltN+qd|NL(4DD_>f9}Y@jm7tWXLE(0T()Ev^1XA>#_BZ)Guk~aWQ!VExHibSP zmCRBBe#-9izE`zcpI=|{$!$n9*H`R5e ze#{|9x|nm3fvIfDsrL4)+HWX+IQ!5y$M9WEa>#q^$u@GNQ!&Kkn9nJ3Uz%$9!mH8_b8=~MR=!(yoV9)FRlUCIbe1GL zHjQ6<)z?tsQF@YW+stNk-+0wqP{PP9yK@62vGnHe^bE_aX62=*ytz3sopr}ODw|F8 zo4@y}51`Tgt>5LC1-%mboFBaEbZ8D+1+<>YF>@n{%G;@Eh5kgPlrC~OF6wl8D$Da} z=%3UJdblh0vd7uUeoMT;F81>Xo<-=hWmx=5sJDCh(cUoBe%cAX0JV3@*3?|{wv2;m zy3>Y_46FYRBX?>OS*oi))=E1Z{`etz%SnptLV|hFFsHG|iIQ*_$Kiy3kf`s+nd6jf z{;BeQlN7g~703CJ=)*KSJ`QQ;&z8UBLwroadak!R*Lt(3ImFY``k|*e!tfmsZA9Lt zkmBwnUTmw5{gF;$$91y$^Ui^{Om~x^z#YYe84*3NVH)K!D3# zI>iSx%SK;F!Ck= zJ@luqhbGP;KYcy)r>}?pfAjUw4A<8~t=9wQaO=7OX2@zUGK)Od%6BIFZT@C}*~N2r zp_ylS2l4v;s6sQxnl#9)wq_KXqpW)dnKho7R#uUjV^%I+yvX`~kU7!XU1*kA;|H7L zEq{?2u0zTMpTm5m z*c_U@KwWcr`TVQvmMp$@`kX7mi{>w}zA82wx&F2KwHSF;aDbW9;{uE(FPT55ZvOQ7 
zSIi05E?zR_QD&AnLT~H#GV^2+{$8dn_m(mIgypb_<}eXvP1Kb%?`_>TQMW)CFO}(& zbmb_J$5OgPOE*6<$-L5o!RDfJv!5{5q%+MI>=skZeu5`W(S6Nw@HbO*l;8*}KE?El z5yEiml(Wn(B8)vtx47&q^KL#ZUwLC-Q~aj!6l7mnoRw;;Fxk{lksbgEv59uqy6 z@C%?e_%QeeBsMXyl%J4s@yA2{z^n(2$-(L+I2f^pT3x*xN;3b2( z$3gFY5lvo5YZ*eaH+sRxp~v9cKzVmmHbF#~LC`iTg5V@b0;&Y>f;!MQf(IZ;0FXPf z;85K-4Aw(p-vF+2_%`rfo|+t$w&2bZHs2#`8gTj;9wWhrz$eDhe+;2*96vveC#Xkp z1SVb>bH5lSFNmd2-~}aY1aqK8@PZL&8GJoBY7)~6Uk3i}Oag^(0Pi@5=TyRjd(LIk zJjSe_8^P%XO5KX!2fu{Yz{|%JKE06qc=$N@%jv9q_rZX_{a+2VOH!_nEDunS7NCmv$W<1C)*vbIF`5W1d;ltonkK*?Uf(C1zCLs7Q_|D4=;_n0mMm~9i-3cKM zj*Kx~@MYlAgRJ%^X$;=^DK_wJ;OAd3EAZ+tPfm~U6-4+rxaoTq+ZGlYc)C1Sc?v&Z zp+OE9d=UH{B#W*A{8aN16^9AskkN*bVAepgF`hP501Tasqi4{ATOeuN1`ZrXCIfsB z+yIH*{bGQ;Ft8K~(^haLbSr!#*n5Pw4}i-?M6^(ku-mDKfs=V_Ksu-dHwsAyVB*aJ z!ONjWoC-#u$Kc&>6380{r$gJ&3tj-V!&ibkAn_TEz+5;=3nB0whmV2XPbF`U2ClaX z?6(af=rh~ccftFh0DL1@S;B~9*9EVKWVfsb-y7{wL2P1R(HN}{f-mv%K%|0-C`{#8 zvb5m^H$bxAw}6skU-2wc4JPvP1t(7Ms3>~D3g{rb;Q5emD}KO4D!yO`XWL=u1wVz# z;mN*_981TSwEUfuJgSBY!B?Srcz2e)Wafv=!@INVB?G@ZN%S4) z1$#o~bB2w5-~-P^SZFYbOnbpFiSa><1b+>c!war}s^A5mcl7QgdP%&`n##h%MlcUr z1uxhSYJqnr&r5>+1CjHXb}AkKo1oqBf}5Zt@b08|Nu>YV1xzoA?SfmNf$)M`p)z=P za=awX*Ime_MK5?AR15D;h>u97{NZW5XF!GEw@?#&V7f<*f@BeufqSaRzeXPe&n9Wz zN1k{k_yJT5@1H>;GE@oAr`y#a^3B8Wd=*_i2d##Wg2SpAu;{^ytJ(fh1isd-c0#gC zsGoZj{~O5exDnh5iQb)%E{W=Yf{xNw&^z0s{N#SSlg}k7{dTAXz2HMo1-vWIoCNfU zM-BglM=hd4@Jy&4z7l)~YJiV{S6r<5Fn9!7i{6!mZfB?eHbgQZdck|4eejK7A~9WX z${d15FL)l57kQD+U=o?=?u>KEME?{z7aPGNPzYYooXhmWlYM?H*<6y&4?^|m1tpn$ z6}&5{+)g&no6p)oFBrLI0sApRJ-A~LX>9OOaL{7b1$+=J40FIC!8{1=ze@9b`CZj5 zVF9A&>+fpH)hs}Gz5uVDsMBrvy1VLfjoy5G;az!!yWYUUgb#o@Y_?Y6!KKhHc)kR$+8v&+!K-mMvp1n{1SR49D7-uI zU6SHw+=k&W64Pgp`Tj0c4)4xvmu&b6P&InN51=r--~ng_ygON4vfmTQ?t)997HkC9 zLfha4-+*?(zY&3X2kJoJEC1@26>MjCzVNU3y1xqS;6MZ>GS>xPhAPktj{Chw&4w3z z(BTC=E47~g0H990o1n3AXR}L|d!N;8|3)f=v7lCX!Tr#7c){{NXg&WrKt=9neW2&x z2Ppn^fI0#%xVV|!j|6j9wt7qbTm@6K?S z%=aaKVH(j3u7KL%-C6FE?S30{7`>omu=`%(oB}2?*#)OO%(3Amj{m|;Y+*_PjZ9Il6Zz=An((-D^aPtA`yK(@ 
zyqOuLEg#@lt&vt1f;a^e+3bQDPqHS^3tj{HU#2tBLE7!d2xM8b49<+d?pL6u!q;2rnSAvQ}boEbm zMR3{+%p!aUJnt1cfUg9nzsgz>Jy^7hbHpnI4U&Exk^E~r5BuCVmJy;_$9yskFIWpTz=y$)J6MeHaj^IRvjWfmJ+RY<1Fx}b9qddamUQBS&{Fh*N1+w)uFPRO zqxkeV>movhFlRwgcz0H?J8M|7hkyS$hu+t@QwAS_0`P(#K|y%_p@Ld?h+P)ml>lrf z2FJf6Ah7|v9c8xQB?mZ?ND3DGmt_|+EPmG&F4^MT8{c-2c(2-foQX$(FL-rmit;_waNpZz#} zgT`Rr9$vK^J^(i6@+0;pIskv&i?-++z|;crhTuChUpY#md#8_AmA`4&xTjCVt7;Hp zC|~YJHV%9gylW__0`QGsx8XQ~_k($-cyA~L1+Y@IGax1=HLJtQ*jLM2N#lrc`JMvyykp@ zgRcksS9*DgkeL7%PV=fV_%L|ybUJ`<1oQs~|6&84GlTrRcL)yLKGUlrWe8E2Lu4%0 z!>gZr)#bD306q-nk&UQ!(-=Jc7c_=11Lt4NXyC)(;W=~$ujYE~{Jrh)?mRws@}DFD zUNfIW;pxN+(4?&CI1vfhS7BFuiGl4z;UIcB2cc=78x?feTR~x2u=|>;>DPEfEA^hTg>y5Z;}}=StVJ)A)Mb&4CCT!Hb~Gy@rjFo!1B6ouwz) zdbe0CUi5-vR}(aR8F&jxc{T8ofEP(*?YXn_Bx`T-z2qTcB>0OpWK_Znegi!QukQ1z z9)I+zZSVo`3x`+t6HKF?tpHf?fZjU-;NXW??AQmvJ0bIZuKych?p?=d-sflqE^YRz z9Qb;$b%Q>(w}FyP*NWbiRcB}09eR|_hhA{{X7=xWh6;h}cd~!Ow}7|2%&yqNh{36Q z@Pob*?7Nrk-|_(?*xQ*NCTU`Ken_O~1@}N5@PZW`UUd{+@N!7>Vel}Nxu042h-1b9 ze8I=Tdq452)$omA!KZp}a%XI9iJ;sMZNo_LN2pyKedblyL$dqTgNbx5!E>PwYy=lT zN8!WZ$B@{&Q@JF?>v1UaL-rc*6(|6oNbQQqV|B^#n)n3=2PyCFxxwzhoLcnrav#wF=ppMT4&pckC| z9kT*2_#D&$zYXM{$>|Xv#p(AP$U1l)3|2xi6Z{i9b&JE_3VsHe*u=pLe;}W=BZ5vybuycRp2nirvR}Ld>*3oUr7!#9D4x-gwMItreju7e1Hs~ujjJi|}} zv8e@bfh5pIa5p3!2k7uDt4MDkrC;Z+}|7b<-g8T=L00x$21$$Mh9O@Gd$&~o;g(f!%} zlHVs4f|AZByr87+2`?z=cxyW}ngOks&wgG5+1nM|@9=SOMxirX;0q2P1;-R=JrCe@ zx$|`6;l~%4XZy^Mscw)@rT5qzy3%~W6A3cU*!Y_KO@XH3reITPQ(03*Q)N?aQ@Ck+ zQ?#kQDc01{6mL4xq}Kb^`_~t*53Vm=-@3kS{r2_I_3i6p>pRxR*B@E0ntjdw=Dg-W zb8&O9xwN^gxuUtcIn-R+9By9PT;IIBxuJP=b7ON;b4zn;b6fNF=4f+!b3v@RqdDGu zq*-n7ZSZf%+Ys1Lydk)Oh(gw;usPKew!RIU|9e&gCq2HhHWX(_+ZPjji=RRQ*u>L)#zntqZOTt&9FS-qcEfK7t9;X-9M323`w3_x}LuoC>f2 diff --git a/docs/html/general_considerations.html b/docs/html/general_considerations.html index 08593f7..8efb747 100644 --- a/docs/html/general_considerations.html +++ b/docs/html/general_considerations.html @@ -111,8 +111,7 @@ Allocation algorithm Features not supported

Features deliberately excluded from the scope of this library:

    -
  • Support for sparse binding and sparse residency. You can still use these features (when supported by the device) with VMA. You just need to do it yourself. Allocate memory pages with vmaAllocateMemory(). Any explicit support for sparse binding/residency would rather require another, higher-level library on top of VMA.
  • -
  • Data transfer - issuing commands that transfer data between buffers or images, any usage of VkCommandBuffer or VkQueue and related synchronization is responsibility of the user.
  • +
  • Data transfer. Uploading (streaming) and downloading data of buffers and images between CPU and GPU memory and related synchronization are the responsibility of the user.
  • 
  • Allocations for imported/exported external memory. They tend to require explicit memory type index and dedicated allocation anyway, so they don't interact with main features of this library. Such special purpose allocations should be made manually, using vkCreateBuffer() and vkAllocateMemory().
  • Recreation of buffers and images. Although the library has functions for buffer and image creation (vmaCreateBuffer(), vmaCreateImage()), you need to recreate these objects yourself after defragmentation. That's because the big structures VkBufferCreateInfo, VkImageCreateInfo are not stored in VmaAllocation object.
  • Handling CPU memory allocation failures. When dynamically creating small C++ objects in CPU memory (not Vulkan memory), allocation failures are not checked and handled gracefully, because that would complicate code significantly and is usually not needed in desktop PC applications anyway.
  • diff --git a/docs/html/globals.html b/docs/html/globals.html index fdc32cf..48a4fa4 100644 --- a/docs/html/globals.html +++ b/docs/html/globals.html @@ -337,10 +337,10 @@ $(function() { : vk_mem_alloc.h
  • VmaMemoryUsage -: vk_mem_alloc.h +: vk_mem_alloc.h
  • VmaPoolCreateFlagBits -: vk_mem_alloc.h +: vk_mem_alloc.h
  • VmaPoolCreateFlags : vk_mem_alloc.h @@ -352,7 +352,7 @@ $(function() { : vk_mem_alloc.h
  • VmaRecordFlagBits -: vk_mem_alloc.h +: vk_mem_alloc.h
  • VmaRecordFlags : vk_mem_alloc.h diff --git a/docs/html/index.html b/docs/html/index.html index f8fc3b3..9a05bb2 100644 --- a/docs/html/index.html +++ b/docs/html/index.html @@ -65,7 +65,7 @@ $(function() {
    Vulkan Memory Allocator
    -

    Version 2.1.1-development (2018-09-24)

    +

    Version 2.2.0 (2018-12-13)

    Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved.
    License: MIT

    Documentation of all members: vk_mem_alloc.h

    diff --git a/docs/html/vk__mem__alloc_8h.html b/docs/html/vk__mem__alloc_8h.html index 06a31b0..b775504 100644 --- a/docs/html/vk__mem__alloc_8h.html +++ b/docs/html/vk__mem__alloc_8h.html @@ -331,6 +331,9 @@ Functions void vmaFreeMemoryPages (VmaAllocator allocator, size_t allocationCount, VmaAllocation *pAllocations)  Frees memory and destroys multiple allocations. More...
      +VkResult vmaResizeAllocation (VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize newSize) + Tries to resize an allocation in place, if there is enough free memory after it. More...
    +  void vmaGetAllocationInfo (VmaAllocator allocator, VmaAllocation allocation, VmaAllocationInfo *pAllocationInfo)  Returns current information about specified allocation and atomically marks it as used in current frame. More...
      @@ -897,7 +900,8 @@ Functions VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT 

    Allocation strategy that chooses biggest possible free range for the allocation.

    -VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT 

    Allocation strategy that chooses first suitable free range for the allocation.

    +VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT 

    Allocation strategy that chooses first suitable free range for the allocation.

    +

    "First" doesn't necessarily means the one with smallest offset in memory, but rather the one that is easiest and fastest to find.

    VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT 

    Allocation strategy that tries to minimize memory usage.

    @@ -1287,9 +1291,7 @@ Functions

    You should free the memory using vmaFreeMemory() or vmaFreeMemoryPages().

    Word "pages" is just a suggestion to use this function to allocate pieces of memory needed for sparse binding. It is just a general purpose allocation function able to make multiple allocations at once. It may be internally optimized to be more efficient than calling vmaAllocateMemory() allocationCount times.

    -

    All allocations are made using same parameters. All of them are created out of the same memory pool and type. If any allocation fails, all allocations already made within this function call are also freed, so that when returned result is not VK_SUCCESS, pAllocation array is always entirely filled with VK_NULL_HANDLE.

    -

    TODO Also write tests for it.

    -

    TODO also write test for allocation that will partially fail.

    +

    All allocations are made using same parameters. All of them are created out of the same memory pool and type. If any allocation fails, all allocations already made within this function call are also freed, so that when returned result is not VK_SUCCESS, pAllocation array is always entirely filled with VK_NULL_HANDLE.

    @@ -2310,8 +2312,7 @@ Functions

    Frees memory and destroys multiple allocations.

    Word "pages" is just a suggestion to use this function to free pieces of memory used for sparse binding. It is just a general purpose function to free memory and destroy allocations made using e.g. vmaAllocateMemory(), vmaAllocateMemoryPages() and other functions. It may be internally optimized to be more efficient than calling vmaFreeMemory() allocationCount times.

    -

    Allocations in pAllocations array can come from any memory pools and types. Passing VK_NULL_HANDLE as elements of pAllocations array is valid. Such entries are just skipped.

    -

    TODO Also write tests for it.

    +

    Allocations in pAllocations array can come from any memory pools and types. Passing VK_NULL_HANDLE as elements of pAllocations array is valid. Such entries are just skipped.

    @@ -2660,6 +2661,50 @@ Functions

    This function fails when used on allocation made in memory type that is not HOST_VISIBLE.

    This function always fails when called for allocation that was created with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT flag. Such allocations cannot be mapped.

    + + + +

    ◆ vmaResizeAllocation()

    + +
    +
    + + + + + + + + + + + + + + + + + + + + + + + + +
    VkResult vmaResizeAllocation (VmaAllocator allocator,
    VmaAllocation allocation,
    VkDeviceSize newSize 
    )
    +
    + +

    Tries to resize an allocation in place, if there is enough free memory after it.

    +

    Tries to change allocation's size without moving or reallocating it. You can both shrink and grow allocation size. When growing, it succeeds only when the allocation belongs to a memory block with enough free space after it.

    +

    Returns VK_SUCCESS if allocation's size has been successfully changed. Returns VK_ERROR_OUT_OF_POOL_MEMORY if allocation's size could not be changed.

    +

    After successful call to this function, VmaAllocationInfo::size of this allocation changes. All other parameters stay the same: memory pool and type, alignment, offset, mapped pointer.

    +
      +
    • Calling this function on allocation that is in lost state fails with result VK_ERROR_VALIDATION_FAILED_EXT.
    • +
    • Calling this function with newSize same as current allocation size does nothing and returns VK_SUCCESS.
    • +
    • Resizing dedicated allocations, as well as allocations created in pools that use linear or buddy algorithm, is not supported. The function returns VK_ERROR_FEATURE_NOT_PRESENT in such cases. Support may be added in the future.
    • +
    +
    diff --git a/docs/html/vk__mem__alloc_8h_source.html b/docs/html/vk__mem__alloc_8h_source.html index 2ce9ec3..8bf1edd 100644 --- a/docs/html/vk__mem__alloc_8h_source.html +++ b/docs/html/vk__mem__alloc_8h_source.html @@ -65,209 +65,211 @@ $(function() {
    vk_mem_alloc.h
    -Go to the documentation of this file.
    1 //
    2 // Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved.
    3 //
    4 // Permission is hereby granted, free of charge, to any person obtaining a copy
    5 // of this software and associated documentation files (the "Software"), to deal
    6 // in the Software without restriction, including without limitation the rights
    7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    8 // copies of the Software, and to permit persons to whom the Software is
    9 // furnished to do so, subject to the following conditions:
    10 //
    11 // The above copyright notice and this permission notice shall be included in
    12 // all copies or substantial portions of the Software.
    13 //
    14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
    20 // THE SOFTWARE.
    21 //
    22 
    23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
    24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
    25 
    26 #ifdef __cplusplus
    27 extern "C" {
    28 #endif
    29 
    1618 /*
    1619 Define this macro to 0/1 to disable/enable support for recording functionality,
    1620 available through VmaAllocatorCreateInfo::pRecordSettings.
    1621 */
    1622 #ifndef VMA_RECORDING_ENABLED
    1623  #ifdef _WIN32
    1624  #define VMA_RECORDING_ENABLED 1
    1625  #else
    1626  #define VMA_RECORDING_ENABLED 0
    1627  #endif
    1628 #endif
    1629 
    1630 #ifndef NOMINMAX
    1631  #define NOMINMAX // For windows.h
    1632 #endif
    1633 
    1634 #ifndef VULKAN_H_
    1635  #include <vulkan/vulkan.h>
    1636 #endif
    1637 
    1638 #if VMA_RECORDING_ENABLED
    1639  #include <windows.h>
    1640 #endif
    1641 
    1642 #if !defined(VMA_DEDICATED_ALLOCATION)
    1643  #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
    1644  #define VMA_DEDICATED_ALLOCATION 1
    1645  #else
    1646  #define VMA_DEDICATED_ALLOCATION 0
    1647  #endif
    1648 #endif
    1649 
    1659 VK_DEFINE_HANDLE(VmaAllocator)
    1660 
    1661 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
    1663  VmaAllocator allocator,
    1664  uint32_t memoryType,
    1665  VkDeviceMemory memory,
    1666  VkDeviceSize size);
    1668 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
    1669  VmaAllocator allocator,
    1670  uint32_t memoryType,
    1671  VkDeviceMemory memory,
    1672  VkDeviceSize size);
    1673 
    1687 
    1717 
    1720 typedef VkFlags VmaAllocatorCreateFlags;
    1721 
    1726 typedef struct VmaVulkanFunctions {
    1727  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
    1728  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
    1729  PFN_vkAllocateMemory vkAllocateMemory;
    1730  PFN_vkFreeMemory vkFreeMemory;
    1731  PFN_vkMapMemory vkMapMemory;
    1732  PFN_vkUnmapMemory vkUnmapMemory;
    1733  PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
    1734  PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
    1735  PFN_vkBindBufferMemory vkBindBufferMemory;
    1736  PFN_vkBindImageMemory vkBindImageMemory;
    1737  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
    1738  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
    1739  PFN_vkCreateBuffer vkCreateBuffer;
    1740  PFN_vkDestroyBuffer vkDestroyBuffer;
    1741  PFN_vkCreateImage vkCreateImage;
    1742  PFN_vkDestroyImage vkDestroyImage;
    1743  PFN_vkCmdCopyBuffer vkCmdCopyBuffer;
    1744 #if VMA_DEDICATED_ALLOCATION
    1745  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
    1746  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
    1747 #endif
    1749 
    1751 typedef enum VmaRecordFlagBits {
    1758 
    1761 typedef VkFlags VmaRecordFlags;
    1762 
    1764 typedef struct VmaRecordSettings
    1765 {
    1775  const char* pFilePath;
    1777 
    1780 {
    1784 
    1785  VkPhysicalDevice physicalDevice;
    1787 
    1788  VkDevice device;
    1790 
    1793 
    1794  const VkAllocationCallbacks* pAllocationCallbacks;
    1796 
    1836  const VkDeviceSize* pHeapSizeLimit;
    1857 
    1859 VkResult vmaCreateAllocator(
    1860  const VmaAllocatorCreateInfo* pCreateInfo,
    1861  VmaAllocator* pAllocator);
    1862 
    1864 void vmaDestroyAllocator(
    1865  VmaAllocator allocator);
    1866 
    1872  VmaAllocator allocator,
    1873  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
    1874 
    1880  VmaAllocator allocator,
    1881  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
    1882 
    1890  VmaAllocator allocator,
    1891  uint32_t memoryTypeIndex,
    1892  VkMemoryPropertyFlags* pFlags);
    1893 
    1903  VmaAllocator allocator,
    1904  uint32_t frameIndex);
    1905 
    1908 typedef struct VmaStatInfo
    1909 {
    1911  uint32_t blockCount;
    1917  VkDeviceSize usedBytes;
    1919  VkDeviceSize unusedBytes;
    1922 } VmaStatInfo;
    1923 
    1925 typedef struct VmaStats
    1926 {
    1927  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
    1928  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
    1930 } VmaStats;
    1931 
    1933 void vmaCalculateStats(
    1934  VmaAllocator allocator,
    1935  VmaStats* pStats);
    1936 
    1937 #define VMA_STATS_STRING_ENABLED 1
    1938 
    1939 #if VMA_STATS_STRING_ENABLED
    1940 
    1942 
    1944 void vmaBuildStatsString(
    1945  VmaAllocator allocator,
    1946  char** ppStatsString,
    1947  VkBool32 detailedMap);
    1948 
    1949 void vmaFreeStatsString(
    1950  VmaAllocator allocator,
    1951  char* pStatsString);
    1952 
    1953 #endif // #if VMA_STATS_STRING_ENABLED
    1954 
    1963 VK_DEFINE_HANDLE(VmaPool)
    1964 
    1965 typedef enum VmaMemoryUsage
    1966 {
    2015 } VmaMemoryUsage;
    2016 
    2031 
    2086 
    2102 
    2112 
    2119 
    2123 
    2125 {
    2138  VkMemoryPropertyFlags requiredFlags;
    2143  VkMemoryPropertyFlags preferredFlags;
    2151  uint32_t memoryTypeBits;
    2164  void* pUserData;
    2166 
    2183 VkResult vmaFindMemoryTypeIndex(
    2184  VmaAllocator allocator,
    2185  uint32_t memoryTypeBits,
    2186  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2187  uint32_t* pMemoryTypeIndex);
    2188 
    2202  VmaAllocator allocator,
    2203  const VkBufferCreateInfo* pBufferCreateInfo,
    2204  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2205  uint32_t* pMemoryTypeIndex);
    2206 
    2220  VmaAllocator allocator,
    2221  const VkImageCreateInfo* pImageCreateInfo,
    2222  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2223  uint32_t* pMemoryTypeIndex);
    2224 
    2245 
    2262 
    2273 
    2279 
    2282 typedef VkFlags VmaPoolCreateFlags;
    2283 
    2286 typedef struct VmaPoolCreateInfo {
    2301  VkDeviceSize blockSize;
    2330 
    2333 typedef struct VmaPoolStats {
    2336  VkDeviceSize size;
    2339  VkDeviceSize unusedSize;
    2352  VkDeviceSize unusedRangeSizeMax;
    2355  size_t blockCount;
    2356 } VmaPoolStats;
    2357 
    2364 VkResult vmaCreatePool(
    2365  VmaAllocator allocator,
    2366  const VmaPoolCreateInfo* pCreateInfo,
    2367  VmaPool* pPool);
    2368 
    2371 void vmaDestroyPool(
    2372  VmaAllocator allocator,
    2373  VmaPool pool);
    2374 
    2381 void vmaGetPoolStats(
    2382  VmaAllocator allocator,
    2383  VmaPool pool,
    2384  VmaPoolStats* pPoolStats);
    2385 
    2393  VmaAllocator allocator,
    2394  VmaPool pool,
    2395  size_t* pLostAllocationCount);
    2396 
    2411 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool);
    2412 
    2437 VK_DEFINE_HANDLE(VmaAllocation)
    2438 
    2439 
    2441 typedef struct VmaAllocationInfo {
    2446  uint32_t memoryType;
    2455  VkDeviceMemory deviceMemory;
    2460  VkDeviceSize offset;
    2465  VkDeviceSize size;
    2479  void* pUserData;
    2481 
    2492 VkResult vmaAllocateMemory(
    2493  VmaAllocator allocator,
    2494  const VkMemoryRequirements* pVkMemoryRequirements,
    2495  const VmaAllocationCreateInfo* pCreateInfo,
    2496  VmaAllocation* pAllocation,
    2497  VmaAllocationInfo* pAllocationInfo);
    2498 
    2506  VmaAllocator allocator,
    2507  VkBuffer buffer,
    2508  const VmaAllocationCreateInfo* pCreateInfo,
    2509  VmaAllocation* pAllocation,
    2510  VmaAllocationInfo* pAllocationInfo);
    2511 
    2513 VkResult vmaAllocateMemoryForImage(
    2514  VmaAllocator allocator,
    2515  VkImage image,
    2516  const VmaAllocationCreateInfo* pCreateInfo,
    2517  VmaAllocation* pAllocation,
    2518  VmaAllocationInfo* pAllocationInfo);
    2519 
    2521 void vmaFreeMemory(
    2522  VmaAllocator allocator,
    2523  VmaAllocation allocation);
    2524 
    2545 VkResult vmaResizeAllocation(
    2546  VmaAllocator allocator,
    2547  VmaAllocation allocation,
    2548  VkDeviceSize newSize);
    2549 
    2567  VmaAllocator allocator,
    2568  VmaAllocation allocation,
    2569  VmaAllocationInfo* pAllocationInfo);
    2570 
    2585 VkBool32 vmaTouchAllocation(
    2586  VmaAllocator allocator,
    2587  VmaAllocation allocation);
    2588 
    2603  VmaAllocator allocator,
    2604  VmaAllocation allocation,
    2605  void* pUserData);
    2606 
    2618  VmaAllocator allocator,
    2619  VmaAllocation* pAllocation);
    2620 
    2655 VkResult vmaMapMemory(
    2656  VmaAllocator allocator,
    2657  VmaAllocation allocation,
    2658  void** ppData);
    2659 
    2664 void vmaUnmapMemory(
    2665  VmaAllocator allocator,
    2666  VmaAllocation allocation);
    2667 
    2680 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    2681 
    2694 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    2695 
    2712 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits);
    2713 
    2720 VK_DEFINE_HANDLE(VmaDefragmentationContext)
    2721 
    2722 typedef enum VmaDefragmentationFlagBits {
    2726 typedef VkFlags VmaDefragmentationFlags;
    2727 
    2732 typedef struct VmaDefragmentationInfo2 {
    2756  uint32_t poolCount;
    2777  VkDeviceSize maxCpuBytesToMove;
    2787  VkDeviceSize maxGpuBytesToMove;
    2801  VkCommandBuffer commandBuffer;
    2803 
    2808 typedef struct VmaDefragmentationInfo {
    2813  VkDeviceSize maxBytesToMove;
    2820 
    2822 typedef struct VmaDefragmentationStats {
    2824  VkDeviceSize bytesMoved;
    2826  VkDeviceSize bytesFreed;
    2832 
    2859 VkResult vmaDefragmentationBegin(
    2860  VmaAllocator allocator,
    2861  const VmaDefragmentationInfo2* pInfo,
    2862  VmaDefragmentationStats* pStats,
    2863  VmaDefragmentationContext *pContext);
    2864 
    2870 VkResult vmaDefragmentationEnd(
    2871  VmaAllocator allocator,
    2872  VmaDefragmentationContext context);
    2873 
    2914 VkResult vmaDefragment(
    2915  VmaAllocator allocator,
    2916  VmaAllocation* pAllocations,
    2917  size_t allocationCount,
    2918  VkBool32* pAllocationsChanged,
    2919  const VmaDefragmentationInfo *pDefragmentationInfo,
    2920  VmaDefragmentationStats* pDefragmentationStats);
    2921 
    2934 VkResult vmaBindBufferMemory(
    2935  VmaAllocator allocator,
    2936  VmaAllocation allocation,
    2937  VkBuffer buffer);
    2938 
    2951 VkResult vmaBindImageMemory(
    2952  VmaAllocator allocator,
    2953  VmaAllocation allocation,
    2954  VkImage image);
    2955 
    2982 VkResult vmaCreateBuffer(
    2983  VmaAllocator allocator,
    2984  const VkBufferCreateInfo* pBufferCreateInfo,
    2985  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2986  VkBuffer* pBuffer,
    2987  VmaAllocation* pAllocation,
    2988  VmaAllocationInfo* pAllocationInfo);
    2989 
    3001 void vmaDestroyBuffer(
    3002  VmaAllocator allocator,
    3003  VkBuffer buffer,
    3004  VmaAllocation allocation);
    3005 
    3007 VkResult vmaCreateImage(
    3008  VmaAllocator allocator,
    3009  const VkImageCreateInfo* pImageCreateInfo,
    3010  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    3011  VkImage* pImage,
    3012  VmaAllocation* pAllocation,
    3013  VmaAllocationInfo* pAllocationInfo);
    3014 
    3026 void vmaDestroyImage(
    3027  VmaAllocator allocator,
    3028  VkImage image,
    3029  VmaAllocation allocation);
    3030 
    3031 #ifdef __cplusplus
    3032 }
    3033 #endif
    3034 
    3035 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
    3036 
    3037 // For Visual Studio IntelliSense.
    3038 #if defined(__cplusplus) && defined(__INTELLISENSE__)
    3039 #define VMA_IMPLEMENTATION
    3040 #endif
    3041 
    3042 #ifdef VMA_IMPLEMENTATION
    3043 #undef VMA_IMPLEMENTATION
    3044 
    3045 #include <cstdint>
    3046 #include <cstdlib>
    3047 #include <cstring>
    3048 
    3049 /*******************************************************************************
    3050 CONFIGURATION SECTION
    3051 
    3052 Define some of these macros before each #include of this header or change them
    3053 here if you need other then default behavior depending on your environment.
    3054 */
    3055 
    3056 /*
    3057 Define this macro to 1 to make the library fetch pointers to Vulkan functions
    3058 internally, like:
    3059 
    3060  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    3061 
    3062 Define to 0 if you are going to provide you own pointers to Vulkan functions via
    3063 VmaAllocatorCreateInfo::pVulkanFunctions.
    3064 */
    3065 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
    3066 #define VMA_STATIC_VULKAN_FUNCTIONS 1
    3067 #endif
    3068 
    3069 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
    3070 //#define VMA_USE_STL_CONTAINERS 1
    3071 
    3072 /* Set this macro to 1 to make the library including and using STL containers:
    3073 std::pair, std::vector, std::list, std::unordered_map.
    3074 
    3075 Set it to 0 or undefined to make the library using its own implementation of
    3076 the containers.
    3077 */
    3078 #if VMA_USE_STL_CONTAINERS
    3079  #define VMA_USE_STL_VECTOR 1
    3080  #define VMA_USE_STL_UNORDERED_MAP 1
    3081  #define VMA_USE_STL_LIST 1
    3082 #endif
    3083 
    3084 #ifndef VMA_USE_STL_SHARED_MUTEX
    3085  // Minimum Visual Studio 2015 Update 2
    3086  #if defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023918
    3087  #define VMA_USE_STL_SHARED_MUTEX 1
    3088  #endif
    3089 #endif
    3090 
    3091 #if VMA_USE_STL_VECTOR
    3092  #include <vector>
    3093 #endif
    3094 
    3095 #if VMA_USE_STL_UNORDERED_MAP
    3096  #include <unordered_map>
    3097 #endif
    3098 
    3099 #if VMA_USE_STL_LIST
    3100  #include <list>
    3101 #endif
    3102 
    3103 /*
    3104 Following headers are used in this CONFIGURATION section only, so feel free to
    3105 remove them if not needed.
    3106 */
    3107 #include <cassert> // for assert
    3108 #include <algorithm> // for min, max
    3109 #include <mutex>
    3110 #include <atomic> // for std::atomic
    3111 
    3112 #ifndef VMA_NULL
    3113  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
    3114  #define VMA_NULL nullptr
    3115 #endif
    3116 
    3117 #if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
    3118 #include <cstdlib>
    3119 void *aligned_alloc(size_t alignment, size_t size)
    3120 {
    3121  // alignment must be >= sizeof(void*)
    3122  if(alignment < sizeof(void*))
    3123  {
    3124  alignment = sizeof(void*);
    3125  }
    3126 
    3127  return memalign(alignment, size);
    3128 }
    3129 #elif defined(__APPLE__) || defined(__ANDROID__)
    3130 #include <cstdlib>
    3131 void *aligned_alloc(size_t alignment, size_t size)
    3132 {
    3133  // alignment must be >= sizeof(void*)
    3134  if(alignment < sizeof(void*))
    3135  {
    3136  alignment = sizeof(void*);
    3137  }
    3138 
    3139  void *pointer;
    3140  if(posix_memalign(&pointer, alignment, size) == 0)
    3141  return pointer;
    3142  return VMA_NULL;
    3143 }
    3144 #endif
    3145 
    3146 // If your compiler is not compatible with C++11 and definition of
    3147 // aligned_alloc() function is missing, uncommeting following line may help:
    3148 
    3149 //#include <malloc.h>
    3150 
    3151 // Normal assert to check for programmer's errors, especially in Debug configuration.
    3152 #ifndef VMA_ASSERT
    3153  #ifdef _DEBUG
    3154  #define VMA_ASSERT(expr) assert(expr)
    3155  #else
    3156  #define VMA_ASSERT(expr)
    3157  #endif
    3158 #endif
    3159 
    3160 // Assert that will be called very often, like inside data structures e.g. operator[].
    3161 // Making it non-empty can make program slow.
    3162 #ifndef VMA_HEAVY_ASSERT
    3163  #ifdef _DEBUG
    3164  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
    3165  #else
    3166  #define VMA_HEAVY_ASSERT(expr)
    3167  #endif
    3168 #endif
    3169 
    3170 #ifndef VMA_ALIGN_OF
    3171  #define VMA_ALIGN_OF(type) (__alignof(type))
    3172 #endif
    3173 
    3174 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
    3175  #if defined(_WIN32)
    3176  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
    3177  #else
    3178  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
    3179  #endif
    3180 #endif
    3181 
    3182 #ifndef VMA_SYSTEM_FREE
    3183  #if defined(_WIN32)
    3184  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
    3185  #else
    3186  #define VMA_SYSTEM_FREE(ptr) free(ptr)
    3187  #endif
    3188 #endif
    3189 
    3190 #ifndef VMA_MIN
    3191  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
    3192 #endif
    3193 
    3194 #ifndef VMA_MAX
    3195  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
    3196 #endif
    3197 
    3198 #ifndef VMA_SWAP
    3199  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
    3200 #endif
    3201 
    3202 #ifndef VMA_SORT
    3203  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
    3204 #endif
    3205 
    3206 #ifndef VMA_DEBUG_LOG
    3207  #define VMA_DEBUG_LOG(format, ...)
    3208  /*
    3209  #define VMA_DEBUG_LOG(format, ...) do { \
    3210  printf(format, __VA_ARGS__); \
    3211  printf("\n"); \
    3212  } while(false)
    3213  */
    3214 #endif
    3215 
    3216 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
    3217 #if VMA_STATS_STRING_ENABLED
    3218  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
    3219  {
    3220  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
    3221  }
    3222  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
    3223  {
    3224  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
    3225  }
    3226  static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
    3227  {
    3228  snprintf(outStr, strLen, "%p", ptr);
    3229  }
    3230 #endif
    3231 
    3232 #ifndef VMA_MUTEX
    3233  class VmaMutex
    3234  {
    3235  public:
    3236  void Lock() { m_Mutex.lock(); }
    3237  void Unlock() { m_Mutex.unlock(); }
    3238  private:
    3239  std::mutex m_Mutex;
    3240  };
    3241  #define VMA_MUTEX VmaMutex
    3242 #endif
    3243 
    3244 // Read-write mutex, where "read" is shared access, "write" is exclusive access.
    3245 #ifndef VMA_RW_MUTEX
    3246  #if VMA_USE_STL_SHARED_MUTEX
    3247  // Use std::shared_mutex from C++17.
    3248  #include <shared_mutex>
    3249  class VmaRWMutex
    3250  {
    3251  public:
    3252  void LockRead() { m_Mutex.lock_shared(); }
    3253  void UnlockRead() { m_Mutex.unlock_shared(); }
    3254  void LockWrite() { m_Mutex.lock(); }
    3255  void UnlockWrite() { m_Mutex.unlock(); }
    3256  private:
    3257  std::shared_mutex m_Mutex;
    3258  };
    3259  #define VMA_RW_MUTEX VmaRWMutex
    3260  #elif defined(_WIN32)
    3261  // Use SRWLOCK from WinAPI.
    3262  class VmaRWMutex
    3263  {
    3264  public:
    3265  VmaRWMutex() { InitializeSRWLock(&m_Lock); }
    3266  void LockRead() { AcquireSRWLockShared(&m_Lock); }
    3267  void UnlockRead() { ReleaseSRWLockShared(&m_Lock); }
    3268  void LockWrite() { AcquireSRWLockExclusive(&m_Lock); }
    3269  void UnlockWrite() { ReleaseSRWLockExclusive(&m_Lock); }
    3270  private:
    3271  SRWLOCK m_Lock;
    3272  };
    3273  #define VMA_RW_MUTEX VmaRWMutex
    3274  #else
    3275  // Less efficient fallback: Use normal mutex.
    3276  class VmaRWMutex
    3277  {
    3278  public:
    3279  void LockRead() { m_Mutex.Lock(); }
    3280  void UnlockRead() { m_Mutex.Unlock(); }
    3281  void LockWrite() { m_Mutex.Lock(); }
    3282  void UnlockWrite() { m_Mutex.Unlock(); }
    3283  private:
    3284  VMA_MUTEX m_Mutex;
    3285  };
    3286  #define VMA_RW_MUTEX VmaRWMutex
    3287  #endif // #if VMA_USE_STL_SHARED_MUTEX
    3288 #endif // #ifndef VMA_RW_MUTEX
    3289 
    3290 /*
    3291 If providing your own implementation, you need to implement a subset of std::atomic:
    3292 
    3293 - Constructor(uint32_t desired)
    3294 - uint32_t load() const
    3295 - void store(uint32_t desired)
    3296 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
    3297 */
    3298 #ifndef VMA_ATOMIC_UINT32
    3299  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
    3300 #endif
    3301 
    3302 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
    3303 
    3307  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
    3308 #endif
    3309 
    3310 #ifndef VMA_DEBUG_ALIGNMENT
    3311 
    3315  #define VMA_DEBUG_ALIGNMENT (1)
    3316 #endif
    3317 
    3318 #ifndef VMA_DEBUG_MARGIN
    3319 
    3323  #define VMA_DEBUG_MARGIN (0)
    3324 #endif
    3325 
    3326 #ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
    3327 
    3331  #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
    3332 #endif
    3333 
    3334 #ifndef VMA_DEBUG_DETECT_CORRUPTION
    3335 
    3340  #define VMA_DEBUG_DETECT_CORRUPTION (0)
    3341 #endif
    3342 
    3343 #ifndef VMA_DEBUG_GLOBAL_MUTEX
    3344 
    3348  #define VMA_DEBUG_GLOBAL_MUTEX (0)
    3349 #endif
    3350 
    3351 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
    3352 
    3356  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
    3357 #endif
    3358 
    3359 #ifndef VMA_SMALL_HEAP_MAX_SIZE
    3360  #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
    3362 #endif
    3363 
    3364 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
    3365  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
    3367 #endif
    3368 
    3369 #ifndef VMA_CLASS_NO_COPY
    3370  #define VMA_CLASS_NO_COPY(className) \
    3371  private: \
    3372  className(const className&) = delete; \
    3373  className& operator=(const className&) = delete;
    3374 #endif
    3375 
    3376 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
    3377 
    3378 // Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
    3379 static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;
    3380 
    3381 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
    3382 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
    3383 
    3384 /*******************************************************************************
    3385 END OF CONFIGURATION
    3386 */
    3387 
    3388 static const uint32_t VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET = 0x10000000u;
    3389 
    3390 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
    3391  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
    3392 
    3393 // Returns number of bits set to 1 in (v).
    3394 static inline uint32_t VmaCountBitsSet(uint32_t v)
    3395 {
    3396  uint32_t c = v - ((v >> 1) & 0x55555555);
    3397  c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
    3398  c = ((c >> 4) + c) & 0x0F0F0F0F;
    3399  c = ((c >> 8) + c) & 0x00FF00FF;
    3400  c = ((c >> 16) + c) & 0x0000FFFF;
    3401  return c;
    3402 }
    3403 
    3404 // Aligns given value up to nearest multiply of align value. For example: VmaAlignUp(11, 8) = 16.
    3405 // Use types like uint32_t, uint64_t as T.
    3406 template <typename T>
    3407 static inline T VmaAlignUp(T val, T align)
    3408 {
    3409  return (val + align - 1) / align * align;
    3410 }
    3411 // Aligns given value down to nearest multiply of align value. For example: VmaAlignUp(11, 8) = 8.
    3412 // Use types like uint32_t, uint64_t as T.
    3413 template <typename T>
    3414 static inline T VmaAlignDown(T val, T align)
    3415 {
    3416  return val / align * align;
    3417 }
    3418 
    3419 // Division with mathematical rounding to nearest number.
    3420 template <typename T>
    3421 static inline T VmaRoundDiv(T x, T y)
    3422 {
    3423  return (x + (y / (T)2)) / y;
    3424 }
    3425 
    3426 /*
    3427 Returns true if given number is a power of two.
    3428 T must be unsigned integer number or signed integer but always nonnegative.
    3429 For 0 returns true.
    3430 */
    3431 template <typename T>
    3432 inline bool VmaIsPow2(T x)
    3433 {
    3434  return (x & (x-1)) == 0;
    3435 }
    3436 
    3437 // Returns smallest power of 2 greater or equal to v.
    3438 static inline uint32_t VmaNextPow2(uint32_t v)
    3439 {
    3440  v--;
    3441  v |= v >> 1;
    3442  v |= v >> 2;
    3443  v |= v >> 4;
    3444  v |= v >> 8;
    3445  v |= v >> 16;
    3446  v++;
    3447  return v;
    3448 }
    3449 static inline uint64_t VmaNextPow2(uint64_t v)
    3450 {
    3451  v--;
    3452  v |= v >> 1;
    3453  v |= v >> 2;
    3454  v |= v >> 4;
    3455  v |= v >> 8;
    3456  v |= v >> 16;
    3457  v |= v >> 32;
    3458  v++;
    3459  return v;
    3460 }
    3461 
    3462 // Returns largest power of 2 less or equal to v.
    3463 static inline uint32_t VmaPrevPow2(uint32_t v)
    3464 {
    3465  v |= v >> 1;
    3466  v |= v >> 2;
    3467  v |= v >> 4;
    3468  v |= v >> 8;
    3469  v |= v >> 16;
    3470  v = v ^ (v >> 1);
    3471  return v;
    3472 }
    3473 static inline uint64_t VmaPrevPow2(uint64_t v)
    3474 {
    3475  v |= v >> 1;
    3476  v |= v >> 2;
    3477  v |= v >> 4;
    3478  v |= v >> 8;
    3479  v |= v >> 16;
    3480  v |= v >> 32;
    3481  v = v ^ (v >> 1);
    3482  return v;
    3483 }
    3484 
    3485 static inline bool VmaStrIsEmpty(const char* pStr)
    3486 {
    3487  return pStr == VMA_NULL || *pStr == '\0';
    3488 }
    3489 
    3490 static const char* VmaAlgorithmToStr(uint32_t algorithm)
    3491 {
    3492  switch(algorithm)
    3493  {
    3495  return "Linear";
    3497  return "Buddy";
    3498  case 0:
    3499  return "Default";
    3500  default:
    3501  VMA_ASSERT(0);
    3502  return "";
    3503  }
    3504 }
    3505 
    3506 #ifndef VMA_SORT
    3507 
    3508 template<typename Iterator, typename Compare>
    3509 Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
    3510 {
    3511  Iterator centerValue = end; --centerValue;
    3512  Iterator insertIndex = beg;
    3513  for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
    3514  {
    3515  if(cmp(*memTypeIndex, *centerValue))
    3516  {
    3517  if(insertIndex != memTypeIndex)
    3518  {
    3519  VMA_SWAP(*memTypeIndex, *insertIndex);
    3520  }
    3521  ++insertIndex;
    3522  }
    3523  }
    3524  if(insertIndex != centerValue)
    3525  {
    3526  VMA_SWAP(*insertIndex, *centerValue);
    3527  }
    3528  return insertIndex;
    3529 }
    3530 
    3531 template<typename Iterator, typename Compare>
    3532 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
    3533 {
    3534  if(beg < end)
    3535  {
    3536  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
    3537  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
    3538  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
    3539  }
    3540 }
    3541 
    3542 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
    3543 
    3544 #endif // #ifndef VMA_SORT
    3545 
    3546 /*
    3547 Returns true if two memory blocks occupy overlapping pages.
    3548 ResourceA must be in less memory offset than ResourceB.
    3549 
    3550 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
    3551 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
    3552 */
    3553 static inline bool VmaBlocksOnSamePage(
    3554  VkDeviceSize resourceAOffset,
    3555  VkDeviceSize resourceASize,
    3556  VkDeviceSize resourceBOffset,
    3557  VkDeviceSize pageSize)
    3558 {
    3559  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
    3560  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
    3561  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
    3562  VkDeviceSize resourceBStart = resourceBOffset;
    3563  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
    3564  return resourceAEndPage == resourceBStartPage;
    3565 }
    3566 
// Kind of content stored in a suballocation. Used to decide whether two
// neighboring suballocations must respect bufferImageGranularity
// (see VmaIsBufferImageGranularityConflict).
enum VmaSuballocationType
{
    VMA_SUBALLOCATION_TYPE_FREE = 0,            // Free space, no resource bound.
    VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,         // Unknown content - treated conservatively.
    VMA_SUBALLOCATION_TYPE_BUFFER = 2,          // A buffer.
    VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,   // An image with unknown tiling.
    VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,    // An image with linear tiling.
    VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,   // An image with optimal tiling.
    VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
};
    3577 
    3578 /*
    3579 Returns true if given suballocation types could conflict and must respect
    3580 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
    3581 or linear image and another one is optimal image. If type is unknown, behave
    3582 conservatively.
    3583 */
    3584 static inline bool VmaIsBufferImageGranularityConflict(
    3585  VmaSuballocationType suballocType1,
    3586  VmaSuballocationType suballocType2)
    3587 {
    3588  if(suballocType1 > suballocType2)
    3589  {
    3590  VMA_SWAP(suballocType1, suballocType2);
    3591  }
    3592 
    3593  switch(suballocType1)
    3594  {
    3595  case VMA_SUBALLOCATION_TYPE_FREE:
    3596  return false;
    3597  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
    3598  return true;
    3599  case VMA_SUBALLOCATION_TYPE_BUFFER:
    3600  return
    3601  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
    3602  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    3603  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
    3604  return
    3605  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
    3606  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
    3607  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    3608  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
    3609  return
    3610  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    3611  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
    3612  return false;
    3613  default:
    3614  VMA_ASSERT(0);
    3615  return true;
    3616  }
    3617 }
    3618 
// Fills VMA_DEBUG_MARGIN bytes starting at (pData + offset) with the
// corruption-detection magic value, to be checked later by
// VmaValidateMagicValue.
static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
{
    uint32_t* pDst = (uint32_t*)((char*)pData + offset);
    // Writes whole uint32_t words only; assumes VMA_DEBUG_MARGIN is a
    // multiple of 4 - TODO confirm (any remainder bytes are left untouched).
    const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    for(size_t i = 0; i < numberCount; ++i, ++pDst)
    {
        *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
    }
}
    3628 
// Verifies that the margin written by VmaWriteMagicValue at (pData + offset)
// is intact. Returns false if any word differs - i.e. a memory-corruption
// (out-of-bounds write) was detected.
static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
{
    const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
    const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    for(size_t i = 0; i < numberCount; ++i, ++pSrc)
    {
        if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
        {
            return false;
        }
    }
    return true;
}
    3642 
    3643 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
struct VmaMutexLock
{
    VMA_CLASS_NO_COPY(VmaMutexLock)
public:
    // When useMutex is false, locking is skipped entirely (used when the
    // allocator runs in externally-synchronized / single-threaded mode).
    VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    { if(m_pMutex) { m_pMutex->Lock(); } }
    ~VmaMutexLock()
    { if(m_pMutex) { m_pMutex->Unlock(); } }
private:
    VMA_MUTEX* m_pMutex; // Null when locking is disabled.
};
    3656 
    3657 // Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for reading.
struct VmaMutexLockRead
{
    VMA_CLASS_NO_COPY(VmaMutexLockRead)
public:
    // When useMutex is false, locking is skipped entirely.
    VmaMutexLockRead(VMA_RW_MUTEX& mutex, bool useMutex) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    { if(m_pMutex) { m_pMutex->LockRead(); } }
    ~VmaMutexLockRead() { if(m_pMutex) { m_pMutex->UnlockRead(); } }
private:
    VMA_RW_MUTEX* m_pMutex; // Null when locking is disabled.
};
    3669 
    3670 // Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for writing.
struct VmaMutexLockWrite
{
    VMA_CLASS_NO_COPY(VmaMutexLockWrite)
public:
    // When useMutex is false, locking is skipped entirely.
    VmaMutexLockWrite(VMA_RW_MUTEX& mutex, bool useMutex) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    { if(m_pMutex) { m_pMutex->LockWrite(); } }
    ~VmaMutexLockWrite() { if(m_pMutex) { m_pMutex->UnlockWrite(); } }
private:
    VMA_RW_MUTEX* m_pMutex; // Null when locking is disabled.
};
    3682 
    3683 #if VMA_DEBUG_GLOBAL_MUTEX
    3684  static VMA_MUTEX gDebugGlobalMutex;
    3685  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
    3686 #else
    3687  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
    3688 #endif
    3689 
    3690 // Minimum size of a free suballocation to register it in the free suballocation collection.
    3691 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
    3692 
    3693 /*
Performs binary search and returns iterator to the first element that is greater
than or equal to (key), according to comparison (cmp).
    3696 
    3697 Cmp should return true if first argument is less than second argument.
    3698 
    3699 Returned value is the found element, if present in the collection or place where
    3700 new element with value (key) should be inserted.
    3701 */
    3702 template <typename CmpLess, typename IterT, typename KeyT>
    3703 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpLess cmp)
    3704 {
    3705  size_t down = 0, up = (end - beg);
    3706  while(down < up)
    3707  {
    3708  const size_t mid = (down + up) / 2;
    3709  if(cmp(*(beg+mid), key))
    3710  {
    3711  down = mid + 1;
    3712  }
    3713  else
    3714  {
    3715  up = mid;
    3716  }
    3717  }
    3718  return beg + down;
    3719 }
    3720 
    3721 /*
    3722 Returns true if all pointers in the array are not-null and unique.
    3723 Warning! O(n^2) complexity. Use only inside VMA_HEAVY_ASSERT.
    3724 T must be pointer type, e.g. VmaAllocation, VmaPool.
    3725 */
    3726 template<typename T>
    3727 static bool VmaValidatePointerArray(uint32_t count, const T* arr)
    3728 {
    3729  for(uint32_t i = 0; i < count; ++i)
    3730  {
    3731  const T iPtr = arr[i];
    3732  if(iPtr == VMA_NULL)
    3733  {
    3734  return false;
    3735  }
    3736  for(uint32_t j = i + 1; j < count; ++j)
    3737  {
    3738  if(iPtr == arr[j])
    3739  {
    3740  return false;
    3741  }
    3742  }
    3743  }
    3744  return true;
    3745 }
    3746 
    3748 // Memory allocation
    3749 
// Allocates (size) bytes aligned to (alignment). Uses the user-provided
// VkAllocationCallbacks when available, otherwise falls back to the system
// aligned allocator.
static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
{
    if((pAllocationCallbacks != VMA_NULL) &&
        (pAllocationCallbacks->pfnAllocation != VMA_NULL))
    {
        // All internal allocations are reported with object scope.
        return (*pAllocationCallbacks->pfnAllocation)(
            pAllocationCallbacks->pUserData,
            size,
            alignment,
            VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    }
    else
    {
        return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
    }
}
    3766 
// Frees memory previously obtained from VmaMalloc, routing through the same
// callbacks (or the system free) that allocated it.
static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
{
    if((pAllocationCallbacks != VMA_NULL) &&
        (pAllocationCallbacks->pfnFree != VMA_NULL))
    {
        (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
    }
    else
    {
        VMA_SYSTEM_FREE(ptr);
    }
}
    3779 
// Allocates raw, properly aligned storage for one object of type T.
// Does NOT construct the object - see vma_new for that.
template<typename T>
static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
}
    3785 
// Allocates raw, properly aligned storage for (count) objects of type T.
// Does NOT construct the objects - see vma_new_array for that.
template<typename T>
static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
}
    3791 
    3792 #define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
    3793 
    3794 #define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
    3795 
// Destroys the object and frees its storage - the counterpart of vma_new.
// ptr must not be null (the destructor is called unconditionally).
template<typename T>
static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
{
    ptr->~T();
    VmaFree(pAllocationCallbacks, ptr);
}
    3802 
    3803 template<typename T>
    3804 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
    3805 {
    3806  if(ptr != VMA_NULL)
    3807  {
    3808  for(size_t i = count; i--; )
    3809  {
    3810  ptr[i].~T();
    3811  }
    3812  VmaFree(pAllocationCallbacks, ptr);
    3813  }
    3814 }
    3815 
    3816 // STL-compatible allocator.
// STL-compatible allocator that routes all allocations through the
// VkAllocationCallbacks mechanism, so containers honor user callbacks.
template<typename T>
class VmaStlAllocator
{
public:
    const VkAllocationCallbacks* const m_pCallbacks;
    typedef T value_type;

    VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    // Rebinding copy constructor required by the Allocator concept.
    template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }

    T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }

    // Two allocators are interchangeable iff they use the same callbacks.
    template<typename U>
    bool operator==(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks == rhs.m_pCallbacks;
    }
    template<typename U>
    bool operator!=(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks != rhs.m_pCallbacks;
    }

    // m_pCallbacks is const, so assignment cannot be supported.
    VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
};
    3843 
    3844 #if VMA_USE_STL_VECTOR
    3845 
    3846 #define VmaVector std::vector
    3847 
// Inserts (item) at (index) - thin adapter so code can use the same call for
// std::vector and the custom VmaVector below.
template<typename T, typename allocatorT>
static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(vec.begin() + index, item);
}
    3853 
// Removes the element at (index) - adapter matching the custom VmaVector API.
template<typename T, typename allocatorT>
static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
{
    vec.erase(vec.begin() + index);
}
    3859 
    3860 #else // #if VMA_USE_STL_VECTOR
    3861 
    3862 /* Class with interface compatible with subset of std::vector.
    3863 T must be POD because constructors and destructors are not called and memcpy is
    3864 used for these objects. */
    3865 template<typename T, typename AllocatorT>
    3866 class VmaVector
    3867 {
    3868 public:
    3869  typedef T value_type;
    3870 
    3871  VmaVector(const AllocatorT& allocator) :
    3872  m_Allocator(allocator),
    3873  m_pArray(VMA_NULL),
    3874  m_Count(0),
    3875  m_Capacity(0)
    3876  {
    3877  }
    3878 
    3879  VmaVector(size_t count, const AllocatorT& allocator) :
    3880  m_Allocator(allocator),
    3881  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
    3882  m_Count(count),
    3883  m_Capacity(count)
    3884  {
    3885  }
    3886 
    3887  VmaVector(const VmaVector<T, AllocatorT>& src) :
    3888  m_Allocator(src.m_Allocator),
    3889  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
    3890  m_Count(src.m_Count),
    3891  m_Capacity(src.m_Count)
    3892  {
    3893  if(m_Count != 0)
    3894  {
    3895  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
    3896  }
    3897  }
    3898 
    3899  ~VmaVector()
    3900  {
    3901  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    3902  }
    3903 
    3904  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
    3905  {
    3906  if(&rhs != this)
    3907  {
    3908  resize(rhs.m_Count);
    3909  if(m_Count != 0)
    3910  {
    3911  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
    3912  }
    3913  }
    3914  return *this;
    3915  }
    3916 
    3917  bool empty() const { return m_Count == 0; }
    3918  size_t size() const { return m_Count; }
    3919  T* data() { return m_pArray; }
    3920  const T* data() const { return m_pArray; }
    3921 
    3922  T& operator[](size_t index)
    3923  {
    3924  VMA_HEAVY_ASSERT(index < m_Count);
    3925  return m_pArray[index];
    3926  }
    3927  const T& operator[](size_t index) const
    3928  {
    3929  VMA_HEAVY_ASSERT(index < m_Count);
    3930  return m_pArray[index];
    3931  }
    3932 
    3933  T& front()
    3934  {
    3935  VMA_HEAVY_ASSERT(m_Count > 0);
    3936  return m_pArray[0];
    3937  }
    3938  const T& front() const
    3939  {
    3940  VMA_HEAVY_ASSERT(m_Count > 0);
    3941  return m_pArray[0];
    3942  }
    3943  T& back()
    3944  {
    3945  VMA_HEAVY_ASSERT(m_Count > 0);
    3946  return m_pArray[m_Count - 1];
    3947  }
    3948  const T& back() const
    3949  {
    3950  VMA_HEAVY_ASSERT(m_Count > 0);
    3951  return m_pArray[m_Count - 1];
    3952  }
    3953 
    3954  void reserve(size_t newCapacity, bool freeMemory = false)
    3955  {
    3956  newCapacity = VMA_MAX(newCapacity, m_Count);
    3957 
    3958  if((newCapacity < m_Capacity) && !freeMemory)
    3959  {
    3960  newCapacity = m_Capacity;
    3961  }
    3962 
    3963  if(newCapacity != m_Capacity)
    3964  {
    3965  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator, newCapacity) : VMA_NULL;
    3966  if(m_Count != 0)
    3967  {
    3968  memcpy(newArray, m_pArray, m_Count * sizeof(T));
    3969  }
    3970  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    3971  m_Capacity = newCapacity;
    3972  m_pArray = newArray;
    3973  }
    3974  }
    3975 
    3976  void resize(size_t newCount, bool freeMemory = false)
    3977  {
    3978  size_t newCapacity = m_Capacity;
    3979  if(newCount > m_Capacity)
    3980  {
    3981  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
    3982  }
    3983  else if(freeMemory)
    3984  {
    3985  newCapacity = newCount;
    3986  }
    3987 
    3988  if(newCapacity != m_Capacity)
    3989  {
    3990  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
    3991  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
    3992  if(elementsToCopy != 0)
    3993  {
    3994  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
    3995  }
    3996  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    3997  m_Capacity = newCapacity;
    3998  m_pArray = newArray;
    3999  }
    4000 
    4001  m_Count = newCount;
    4002  }
    4003 
    4004  void clear(bool freeMemory = false)
    4005  {
    4006  resize(0, freeMemory);
    4007  }
    4008 
    4009  void insert(size_t index, const T& src)
    4010  {
    4011  VMA_HEAVY_ASSERT(index <= m_Count);
    4012  const size_t oldCount = size();
    4013  resize(oldCount + 1);
    4014  if(index < oldCount)
    4015  {
    4016  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
    4017  }
    4018  m_pArray[index] = src;
    4019  }
    4020 
    4021  void remove(size_t index)
    4022  {
    4023  VMA_HEAVY_ASSERT(index < m_Count);
    4024  const size_t oldCount = size();
    4025  if(index < oldCount - 1)
    4026  {
    4027  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
    4028  }
    4029  resize(oldCount - 1);
    4030  }
    4031 
    4032  void push_back(const T& src)
    4033  {
    4034  const size_t newIndex = size();
    4035  resize(newIndex + 1);
    4036  m_pArray[newIndex] = src;
    4037  }
    4038 
    4039  void pop_back()
    4040  {
    4041  VMA_HEAVY_ASSERT(m_Count > 0);
    4042  resize(size() - 1);
    4043  }
    4044 
    4045  void push_front(const T& src)
    4046  {
    4047  insert(0, src);
    4048  }
    4049 
    4050  void pop_front()
    4051  {
    4052  VMA_HEAVY_ASSERT(m_Count > 0);
    4053  remove(0);
    4054  }
    4055 
    4056  typedef T* iterator;
    4057 
    4058  iterator begin() { return m_pArray; }
    4059  iterator end() { return m_pArray + m_Count; }
    4060 
    4061 private:
    4062  AllocatorT m_Allocator;
    4063  T* m_pArray;
    4064  size_t m_Count;
    4065  size_t m_Capacity;
    4066 };
    4067 
// Inserts (item) at (index) - adapter with the same signature as the
// std::vector overload above.
template<typename T, typename allocatorT>
static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(index, item);
}
    4073 
// Removes the element at (index) - adapter matching the std::vector overload.
template<typename T, typename allocatorT>
static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
{
    vec.remove(index);
}
    4079 
    4080 #endif // #if VMA_USE_STL_VECTOR
    4081 
// Inserts (value) into (vector), which must already be sorted according to
// CmpLess, keeping it sorted. Returns the index where the value was placed.
template<typename CmpLess, typename VectorT>
size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    // Binary-search the first element not less than value: that is exactly
    // the position that keeps the vector sorted after insertion.
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        vector.data(),
        vector.data() + vector.size(),
        value,
        CmpLess()) - vector.data();
    VmaVectorInsert(vector, indexToInsert, value);
    return indexToInsert;
}
    4093 
    4094 template<typename CmpLess, typename VectorT>
    4095 bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
    4096 {
    4097  CmpLess comparator;
    4098  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
    4099  vector.begin(),
    4100  vector.end(),
    4101  value,
    4102  comparator);
    4103  if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
    4104  {
    4105  size_t indexToRemove = it - vector.begin();
    4106  VmaVectorRemove(vector, indexToRemove);
    4107  return true;
    4108  }
    4109  return false;
    4110 }
    4111 
    4112 template<typename CmpLess, typename IterT, typename KeyT>
    4113 IterT VmaVectorFindSorted(const IterT& beg, const IterT& end, const KeyT& value)
    4114 {
    4115  CmpLess comparator;
    4116  IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
    4117  beg, end, value, comparator);
    4118  if(it == end ||
    4119  (!comparator(*it, value) && !comparator(value, *it)))
    4120  {
    4121  return it;
    4122  }
    4123  return end;
    4124 }
    4125 
    4127 // class VmaPoolAllocator
    4128 
    4129 /*
    4130 Allocator for objects of type T using a list of arrays (pools) to speed up
    4131 allocation. Number of elements that can be allocated is not bounded because
    4132 allocator can create multiple blocks.
    4133 */
template<typename T>
class VmaPoolAllocator
{
    VMA_CLASS_NO_COPY(VmaPoolAllocator)
public:
    VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
    ~VmaPoolAllocator();
    // Releases all blocks. Any outstanding pointers become invalid.
    void Clear();
    // Returns storage for one T. See Alloc() definition for construction caveats.
    T* Alloc();
    // Returns (ptr), previously obtained from Alloc(), to the pool.
    void Free(T* ptr);

private:
    // Each slot is either a live value or a link in the per-block free list.
    union Item
    {
        uint32_t NextFreeIndex; // Index of the next free slot, UINT32_MAX = end of list.
        T Value;
    };

    struct ItemBlock
    {
        Item* pItems;           // Array of m_ItemsPerBlock items.
        uint32_t FirstFreeIndex; // Head of this block's free list, UINT32_MAX = full.
    };

    const VkAllocationCallbacks* m_pAllocationCallbacks;
    size_t m_ItemsPerBlock;
    VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;

    ItemBlock& CreateNewBlock();
};
    4164 
// Creates an empty pool allocator; no blocks are allocated until the first Alloc().
template<typename T>
VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemsPerBlock(itemsPerBlock),
    m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
{
    VMA_ASSERT(itemsPerBlock > 0);
}
    4173 
// Destructor frees all blocks; items still "allocated" by callers become invalid.
template<typename T>
VmaPoolAllocator<T>::~VmaPoolAllocator()
{
    Clear();
}
    4179 
// Frees every item block, destroying all items (live and free) in the process.
template<typename T>
void VmaPoolAllocator<T>::Clear()
{
    for(size_t i = m_ItemBlocks.size(); i--; )
        vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
    m_ItemBlocks.clear();
}
    4187 
// Returns a pointer to storage for one T, popping it from the first block
// that has a free slot, or from a newly created block. NOTE: no constructor
// is run here - the slot comes from a union; callers appear expected to
// initialize the value themselves (TODO confirm against call sites).
template<typename T>
T* VmaPoolAllocator<T>::Alloc()
{
    // Iterate blocks newest-first.
    for(size_t i = m_ItemBlocks.size(); i--; )
    {
        ItemBlock& block = m_ItemBlocks[i];
        // This block has some free items: Use first one.
        if(block.FirstFreeIndex != UINT32_MAX)
        {
            Item* const pItem = &block.pItems[block.FirstFreeIndex];
            // Pop the slot off the block's free list.
            block.FirstFreeIndex = pItem->NextFreeIndex;
            return &pItem->Value;
        }
    }

    // No block has free item: Create new one and use it.
    ItemBlock& newBlock = CreateNewBlock();
    Item* const pItem = &newBlock.pItems[0];
    newBlock.FirstFreeIndex = pItem->NextFreeIndex;
    return &pItem->Value;
}
    4209 
// Returns (ptr) to the pool by pushing its slot onto the owning block's free
// list. Asserts if ptr was not allocated from this pool. O(number of blocks).
// NOTE: the item's destructor is not called here.
template<typename T>
void VmaPoolAllocator<T>::Free(T* ptr)
{
    // Search all memory blocks to find ptr.
    for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
    {
        ItemBlock& block = m_ItemBlocks[i];

        // Casting to union - done via memcpy to avoid aliasing issues.
        Item* pItemPtr;
        memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));

        // Check if pItemPtr is in address range of this block.
        if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
        {
            const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
            // Push the slot onto the front of this block's free list.
            pItemPtr->NextFreeIndex = block.FirstFreeIndex;
            block.FirstFreeIndex = index;
            return;
        }
    }
    VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
}
    4233 
// Allocates a new item block (all slots free, free list head at index 0)
// and appends it to m_ItemBlocks. Returns a reference to the stored block.
template<typename T>
typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
{
    ItemBlock newBlock = {
        vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };

    m_ItemBlocks.push_back(newBlock);

    // Setup singly-linked list of all free items in this block.
    // Writing through the local copy is fine: its pItems points to the same
    // array as the copy stored in m_ItemBlocks.
    for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
        newBlock.pItems[i].NextFreeIndex = i + 1;
    newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
    return m_ItemBlocks.back();
}
    4248 
    4250 // class VmaRawList, VmaList
    4251 
    4252 #if VMA_USE_STL_LIST
    4253 
    4254 #define VmaList std::list
    4255 
    4256 #else // #if VMA_USE_STL_LIST
    4257 
// Node of the doubly linked list VmaRawList. Allocated from VmaPoolAllocator.
template<typename T>
struct VmaListItem
{
    VmaListItem* pPrev; // Null for the front item.
    VmaListItem* pNext; // Null for the back item.
    T Value;
};
    4265 
    4266 // Doubly linked list.
template<typename T>
class VmaRawList
{
    VMA_CLASS_NO_COPY(VmaRawList)
public:
    typedef VmaListItem<T> ItemType;

    VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    ~VmaRawList();
    // Removes all items, returning them to the item allocator.
    void Clear();

    size_t GetCount() const { return m_Count; }
    bool IsEmpty() const { return m_Count == 0; }

    // Front()/Back() return null when the list is empty.
    ItemType* Front() { return m_pFront; }
    const ItemType* Front() const { return m_pFront; }
    ItemType* Back() { return m_pBack; }
    const ItemType* Back() const { return m_pBack; }

    // Overloads without a value leave the new item's Value uninitialized.
    ItemType* PushBack();
    ItemType* PushFront();
    ItemType* PushBack(const T& value);
    ItemType* PushFront(const T& value);
    void PopBack();
    void PopFront();

    // Item can be null - it means PushBack.
    ItemType* InsertBefore(ItemType* pItem);
    // Item can be null - it means PushFront.
    ItemType* InsertAfter(ItemType* pItem);

    ItemType* InsertBefore(ItemType* pItem, const T& value);
    ItemType* InsertAfter(ItemType* pItem, const T& value);

    void Remove(ItemType* pItem);

private:
    const VkAllocationCallbacks* const m_pAllocationCallbacks;
    VmaPoolAllocator<ItemType> m_ItemAllocator; // Source of all list nodes.
    ItemType* m_pFront; // Null when empty.
    ItemType* m_pBack;  // Null when empty.
    size_t m_Count;
};
    4310 
// Creates an empty list; nodes are pool-allocated in blocks of 128.
template<typename T>
VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemAllocator(pAllocationCallbacks, 128),
    m_pFront(VMA_NULL),
    m_pBack(VMA_NULL),
    m_Count(0)
{
}
    4320 
template<typename T>
VmaRawList<T>::~VmaRawList()
{
    // Intentionally not calling Clear, because that would be unnecessary
    // computations to return all items to m_ItemAllocator as free.
    // m_ItemAllocator's own destructor releases the node storage wholesale.
}
    4327 
// Returns every node to the item allocator and resets the list to empty.
template<typename T>
void VmaRawList<T>::Clear()
{
    if(IsEmpty() == false)
    {
        // Walk from back to front, freeing each node after saving its pPrev.
        ItemType* pItem = m_pBack;
        while(pItem != VMA_NULL)
        {
            ItemType* const pPrevItem = pItem->pPrev;
            m_ItemAllocator.Free(pItem);
            pItem = pPrevItem;
        }
        m_pFront = VMA_NULL;
        m_pBack = VMA_NULL;
        m_Count = 0;
    }
}
    4345 
// Appends a new node at the back and returns it. The node's Value is left
// uninitialized - use the PushBack(const T&) overload to also set it.
template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack()
{
    ItemType* const pNewItem = m_ItemAllocator.Alloc();
    pNewItem->pNext = VMA_NULL;
    if(IsEmpty())
    {
        // First node: it is both front and back.
        pNewItem->pPrev = VMA_NULL;
        m_pFront = pNewItem;
        m_pBack = pNewItem;
        m_Count = 1;
    }
    else
    {
        pNewItem->pPrev = m_pBack;
        m_pBack->pNext = pNewItem;
        m_pBack = pNewItem;
        ++m_Count;
    }
    return pNewItem;
}
    4367 
// Prepends a new node at the front and returns it. The node's Value is left
// uninitialized - use the PushFront(const T&) overload to also set it.
template<typename T>
VmaListItem<T>* VmaRawList<T>::PushFront()
{
    ItemType* const pNewItem = m_ItemAllocator.Alloc();
    pNewItem->pPrev = VMA_NULL;
    if(IsEmpty())
    {
        // First node: it is both front and back.
        pNewItem->pNext = VMA_NULL;
        m_pFront = pNewItem;
        m_pBack = pNewItem;
        m_Count = 1;
    }
    else
    {
        pNewItem->pNext = m_pFront;
        m_pFront->pPrev = pNewItem;
        m_pFront = pNewItem;
        ++m_Count;
    }
    return pNewItem;
}
    4389 
// Appends a new node holding (value) and returns it.
template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
{
    ItemType* const pNewItem = PushBack();
    pNewItem->Value = value;
    return pNewItem;
}
    4397 
// Prepends a new node holding (value) and returns it.
template<typename T>
VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
{
    ItemType* const pNewItem = PushFront();
    pNewItem->Value = value;
    return pNewItem;
}
    4405 
// Removes the back node. The list must not be empty.
template<typename T>
void VmaRawList<T>::PopBack()
{
    VMA_HEAVY_ASSERT(m_Count > 0);
    ItemType* const pBackItem = m_pBack;
    ItemType* const pPrevItem = pBackItem->pPrev;
    if(pPrevItem != VMA_NULL)
    {
        pPrevItem->pNext = VMA_NULL;
    }
    // When pPrevItem is null the list becomes empty and m_pBack becomes null.
    // Note: m_pFront is intentionally not cleared here; Front() validity after
    // emptying via PopBack relies on IsEmpty() checks - TODO confirm callers.
    m_pBack = pPrevItem;
    m_ItemAllocator.Free(pBackItem);
    --m_Count;
}
    4420 
// Removes the front node. The list must not be empty.
template<typename T>
void VmaRawList<T>::PopFront()
{
    VMA_HEAVY_ASSERT(m_Count > 0);
    ItemType* const pFrontItem = m_pFront;
    ItemType* const pNextItem = pFrontItem->pNext;
    if(pNextItem != VMA_NULL)
    {
        pNextItem->pPrev = VMA_NULL;
    }
    // When pNextItem is null the list becomes empty and m_pFront becomes null.
    m_pFront = pNextItem;
    m_ItemAllocator.Free(pFrontItem);
    --m_Count;
}
    4435 
// Unlinks (pItem) from the list and returns it to the item allocator.
// pItem must be a node of this list.
template<typename T>
void VmaRawList<T>::Remove(ItemType* pItem)
{
    VMA_HEAVY_ASSERT(pItem != VMA_NULL);
    VMA_HEAVY_ASSERT(m_Count > 0);

    // Fix the predecessor's forward link, or the list head if pItem is front.
    if(pItem->pPrev != VMA_NULL)
    {
        pItem->pPrev->pNext = pItem->pNext;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pFront == pItem);
        m_pFront = pItem->pNext;
    }

    // Fix the successor's backward link, or the list tail if pItem is back.
    if(pItem->pNext != VMA_NULL)
    {
        pItem->pNext->pPrev = pItem->pPrev;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pBack == pItem);
        m_pBack = pItem->pPrev;
    }

    m_ItemAllocator.Free(pItem);
    --m_Count;
}
    4465 
// Inserts a new (uninitialized-Value) node before (pItem) and returns it.
// Passing null pItem means insert at the end (PushBack).
template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const prevItem = pItem->pPrev;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pPrev = prevItem;
        newItem->pNext = pItem;
        pItem->pPrev = newItem;
        if(prevItem != VMA_NULL)
        {
            prevItem->pNext = newItem;
        }
        else
        {
            // pItem was the front; the new node becomes the new front.
            VMA_HEAVY_ASSERT(m_pFront == pItem);
            m_pFront = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushBack();
}
    4491 
// Inserts a new (uninitialized-Value) node after (pItem) and returns it.
// Passing null pItem means insert at the beginning (PushFront).
template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const nextItem = pItem->pNext;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pNext = nextItem;
        newItem->pPrev = pItem;
        pItem->pNext = newItem;
        if(nextItem != VMA_NULL)
        {
            nextItem->pPrev = newItem;
        }
        else
        {
            // pItem was the back; the new node becomes the new back.
            VMA_HEAVY_ASSERT(m_pBack == pItem);
            m_pBack = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushFront();
}
    4517 
// Inserts a new node holding (value) before (pItem); null pItem means PushBack.
template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertBefore(pItem);
    newItem->Value = value;
    return newItem;
}
    4525 
// Inserts a new node holding (value) after (pItem); null pItem means PushFront.
template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertAfter(pItem);
    newItem->Value = value;
    return newItem;
}
    4533 
/*
Doubly linked list with an interface that mimics a subset of std::list,
implemented on top of VmaRawList (pool-allocated nodes). Non-copyable.
*/
template<typename T, typename AllocatorT>
class VmaList
{
    VMA_CLASS_NO_COPY(VmaList)
public:
    // Mutable bidirectional iterator. end() is represented by m_pItem == VMA_NULL;
    // decrementing end() moves to the last element (see operator--).
    class iterator
    {
    public:
        iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                // Decrementing end(): step back onto the last element.
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        // Post-increment/decrement return the previous position by value.
        iterator operator++(int)
        {
            iterator result = *this;
            ++*this;
            return result;
        }
        iterator operator--(int)
        {
            iterator result = *this;
            --*this;
            return result;
        }

        // Comparing iterators from different lists is a programming error.
        bool operator==(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        VmaRawList<T>* m_pList;
        VmaListItem<T>* m_pItem;    // VMA_NULL means end().

        // Only VmaList may construct a positioned iterator.
        iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        friend class VmaList<T, AllocatorT>;
    };

    // Read-only bidirectional iterator; implicitly convertible from iterator.
    class const_iterator
    {
    public:
        const_iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        const_iterator(const iterator& src) :
            m_pList(src.m_pList),
            m_pItem(src.m_pItem)
        {
        }

        const T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        const T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        const_iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        const_iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                // Decrementing cend(): step back onto the last element.
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        const_iterator operator++(int)
        {
            const_iterator result = *this;
            ++*this;
            return result;
        }
        const_iterator operator--(int)
        {
            const_iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        // Only VmaList may construct a positioned const_iterator.
        const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        const VmaRawList<T>* m_pList;
        const VmaListItem<T>* m_pItem;  // VMA_NULL means cend().

        friend class VmaList<T, AllocatorT>;
    };

    // AllocatorT is expected to expose m_pCallbacks (e.g. VmaStlAllocator).
    VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }

    bool empty() const { return m_RawList.IsEmpty(); }
    size_t size() const { return m_RawList.GetCount(); }

    iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
    iterator end() { return iterator(&m_RawList, VMA_NULL); }

    const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
    const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }

    void clear() { m_RawList.Clear(); }
    void push_back(const T& value) { m_RawList.PushBack(value); }
    void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
    // Inserts value before it; insert(end(), v) appends.
    iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }

private:
    VmaRawList<T> m_RawList;
};
    4718 
    4719 #endif // #if VMA_USE_STL_LIST
    4720 
    4722 // class VmaMap
    4723 
    4724 // Unused in this version.
    4725 #if 0
    4726 
    4727 #if VMA_USE_STL_UNORDERED_MAP
    4728 
    4729 #define VmaPair std::pair
    4730 
    4731 #define VMA_MAP_TYPE(KeyT, ValueT) \
    4732  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
    4733 
    4734 #else // #if VMA_USE_STL_UNORDERED_MAP
    4735 
// Minimal substitute for std::pair, used as the element type of VmaMap.
// NOTE: this definition lives inside an `#if 0` block - unused in this version.
template<typename T1, typename T2>
struct VmaPair
{
    T1 first;
    T2 second;

    VmaPair() : first(), second() { }
    VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
};
    4745 
/* Class compatible with subset of interface of std::unordered_map.
KeyT, ValueT must be POD because they will be stored in VmaVector.

Backed by a VmaVector of pairs kept sorted by key, so lookup is binary search
(see find/insert definitions below). NOTE: inside an `#if 0` block - unused.
*/
template<typename KeyT, typename ValueT>
class VmaMap
{
public:
    typedef VmaPair<KeyT, ValueT> PairType;
    // Iterator is a raw pointer into the underlying vector; invalidated by
    // insert/erase, like vector iterators.
    typedef PairType* iterator;

    VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }

    iterator begin() { return m_Vector.begin(); }
    iterator end() { return m_Vector.end(); }

    void insert(const PairType& pair);
    iterator find(const KeyT& key);
    void erase(iterator it);

private:
    VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
};
    4768 
    4769 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
    4770 
// Orders VmaPair elements by their first member. The second overload allows
// comparing a pair directly against a bare key (heterogeneous lookup),
// as used by VmaMap::find below.
template<typename FirstT, typename SecondT>
struct VmaPairFirstLess
{
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
    {
        return lhs.first < rhs.first;
    }
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
    {
        return lhs.first < rhsFirst;
    }
};
    4783 
    4784 template<typename KeyT, typename ValueT>
    4785 void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
    4786 {
    4787  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
    4788  m_Vector.data(),
    4789  m_Vector.data() + m_Vector.size(),
    4790  pair,
    4791  VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
    4792  VmaVectorInsert(m_Vector, indexToInsert, pair);
    4793 }
    4794 
    4795 template<typename KeyT, typename ValueT>
    4796 VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
    4797 {
    4798  PairType* it = VmaBinaryFindFirstNotLess(
    4799  m_Vector.data(),
    4800  m_Vector.data() + m_Vector.size(),
    4801  key,
    4802  VmaPairFirstLess<KeyT, ValueT>());
    4803  if((it != m_Vector.end()) && (it->first == key))
    4804  {
    4805  return it;
    4806  }
    4807  else
    4808  {
    4809  return m_Vector.end();
    4810  }
    4811 }
    4812 
// Removes the element that `it` points to. `it` must be a valid iterator
// obtained from this map (a pointer into m_Vector); subsequent elements shift
// down, invalidating iterators at or after `it`.
template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::erase(iterator it)
{
    VmaVectorRemove(m_Vector, it - m_Vector.begin());
}
    4818 
    4819 #endif // #if VMA_USE_STL_UNORDERED_MAP
    4820 
    4821 #endif // #if 0
    4822 
    4824 
    4825 class VmaDeviceMemoryBlock;
    4826 
    4827 enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
    4828 
/*
Internal representation of a single allocation (what the public VmaAllocation
handle points to). An allocation is either a suballocation within a
VmaDeviceMemoryBlock (ALLOCATION_TYPE_BLOCK) or owns its own dedicated
VkDeviceMemory (ALLOCATION_TYPE_DEDICATED); the two variants share storage in
the union below and are discriminated by m_Type.
*/
struct VmaAllocation_T
{
    VMA_CLASS_NO_COPY(VmaAllocation_T)
private:
    // High bit of m_MapCount: set when created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
    static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;

    enum FLAGS
    {
        // When set, m_pUserData points to a null-terminated string owned by
        // this allocation (see FreeUserDataString), not an opaque pointer.
        FLAG_USER_DATA_STRING = 0x01,
    };

public:
    enum ALLOCATION_TYPE
    {
        ALLOCATION_TYPE_NONE,
        ALLOCATION_TYPE_BLOCK,
        ALLOCATION_TYPE_DEDICATED,
    };

    // Constructs an uninitialized allocation (ALLOCATION_TYPE_NONE).
    // One of the Init* methods must be called before real use.
    VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
        m_Alignment(1),
        m_Size(0),
        m_pUserData(VMA_NULL),
        m_LastUseFrameIndex(currentFrameIndex),
        m_Type((uint8_t)ALLOCATION_TYPE_NONE),
        m_SuballocationType((uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN),
        m_MapCount(0),
        m_Flags(userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0)
    {
#if VMA_STATS_STRING_ENABLED
        m_CreationFrameIndex = currentFrameIndex;
        m_BufferImageUsage = 0;
#endif
    }

    ~VmaAllocation_T()
    {
        // Only the persistent-map bit may remain; any other count means the
        // user forgot a matching vmaUnmapMemory().
        VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");

        // Check if owned string was freed.
        VMA_ASSERT(m_pUserData == VMA_NULL);
    }

    // Turns this allocation into a suballocation of `block`.
    void InitBlockAllocation(
        VmaPool hPool,
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset,
        VkDeviceSize alignment,
        VkDeviceSize size,
        VmaSuballocationType suballocationType,
        bool mapped,
        bool canBecomeLost)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(block != VMA_NULL);
        m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
        m_Alignment = alignment;
        m_Size = size;
        m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
        m_SuballocationType = (uint8_t)suballocationType;
        m_BlockAllocation.m_hPool = hPool;
        m_BlockAllocation.m_Block = block;
        m_BlockAllocation.m_Offset = offset;
        m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
    }

    // Initializes this allocation as already lost: block-type, but with a null
    // block. Caller must have set m_LastUseFrameIndex to VMA_FRAME_INDEX_LOST.
    void InitLost()
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
        m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
        m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
        m_BlockAllocation.m_Block = VMA_NULL;
        m_BlockAllocation.m_Offset = 0;
        m_BlockAllocation.m_CanBecomeLost = true;
    }

    void ChangeBlockAllocation(
        VmaAllocator hAllocator,
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset);

    void ChangeSize(VkDeviceSize newSize);
    void ChangeOffset(VkDeviceSize newOffset);

    // pMappedData not null means allocation is created with MAPPED flag.
    // Turns this allocation into one owning its own VkDeviceMemory.
    void InitDedicatedAllocation(
        uint32_t memoryTypeIndex,
        VkDeviceMemory hMemory,
        VmaSuballocationType suballocationType,
        void* pMappedData,
        VkDeviceSize size)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(hMemory != VK_NULL_HANDLE);
        m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
        m_Alignment = 0;
        m_Size = size;
        m_SuballocationType = (uint8_t)suballocationType;
        m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
        m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
        m_DedicatedAllocation.m_hMemory = hMemory;
        m_DedicatedAllocation.m_pMappedData = pMappedData;
    }

    ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
    VkDeviceSize GetAlignment() const { return m_Alignment; }
    VkDeviceSize GetSize() const { return m_Size; }
    bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
    void* GetUserData() const { return m_pUserData; }
    void SetUserData(VmaAllocator hAllocator, void* pUserData);
    VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }

    // Valid only for ALLOCATION_TYPE_BLOCK.
    VmaDeviceMemoryBlock* GetBlock() const
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
        return m_BlockAllocation.m_Block;
    }
    VkDeviceSize GetOffset() const;
    VkDeviceMemory GetMemory() const;
    uint32_t GetMemoryTypeIndex() const;
    bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
    void* GetMappedData() const;
    bool CanBecomeLost() const;
    VmaPool GetPool() const;

    uint32_t GetLastUseFrameIndex() const
    {
        return m_LastUseFrameIndex.load();
    }
    // Atomic CAS on the last-use frame index; weak, so callers retry in a loop.
    bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
    {
        return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
    }
    /*
    - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
      makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
    - Else, returns false.

    If hAllocation is already lost, assert - you should not call it then.
    If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
    */
    bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    // Fills outInfo with statistics describing this single dedicated allocation.
    void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
        outInfo.blockCount = 1;
        outInfo.allocationCount = 1;
        outInfo.unusedRangeCount = 0;
        outInfo.usedBytes = m_Size;
        outInfo.unusedBytes = 0;
        outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
        outInfo.unusedRangeSizeMin = UINT64_MAX;
        outInfo.unusedRangeSizeMax = 0;
    }

    void BlockAllocMap();
    void BlockAllocUnmap();
    VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
    void DedicatedAllocUnmap(VmaAllocator hAllocator);

#if VMA_STATS_STRING_ENABLED
    uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
    uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }

    // May be set at most once (asserts if already non-zero).
    void InitBufferImageUsage(uint32_t bufferImageUsage)
    {
        VMA_ASSERT(m_BufferImageUsage == 0);
        m_BufferImageUsage = bufferImageUsage;
    }

    void PrintParameters(class VmaJsonWriter& json) const;
#endif

private:
    VkDeviceSize m_Alignment;
    VkDeviceSize m_Size;
    void* m_pUserData;          // Opaque pointer, or owned string if FLAG_USER_DATA_STRING.
    VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
    uint8_t m_Type; // ALLOCATION_TYPE
    uint8_t m_SuballocationType; // VmaSuballocationType
    // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
    // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
    uint8_t m_MapCount;
    uint8_t m_Flags; // enum FLAGS

    // Allocation out of VmaDeviceMemoryBlock.
    struct BlockAllocation
    {
        VmaPool m_hPool; // Null if belongs to general memory.
        VmaDeviceMemoryBlock* m_Block;
        VkDeviceSize m_Offset;
        bool m_CanBecomeLost;
    };

    // Allocation for an object that has its own private VkDeviceMemory.
    struct DedicatedAllocation
    {
        uint32_t m_MemoryTypeIndex;
        VkDeviceMemory m_hMemory;
        void* m_pMappedData; // Not null means memory is mapped.
    };

    // Active member selected by m_Type (BLOCK vs DEDICATED).
    union
    {
        // Allocation out of VmaDeviceMemoryBlock.
        BlockAllocation m_BlockAllocation;
        // Allocation for an object that has its own private VkDeviceMemory.
        DedicatedAllocation m_DedicatedAllocation;
    };

#if VMA_STATS_STRING_ENABLED
    uint32_t m_CreationFrameIndex;
    uint32_t m_BufferImageUsage; // 0 if unknown.
#endif

    void FreeUserDataString(VmaAllocator hAllocator);
};
    5048 
/*
Represents a region of VmaDeviceMemoryBlock that is either assigned and returned as
allocated memory block or free.
*/
struct VmaSuballocation
{
    VkDeviceSize offset;          // Byte offset of this region within the block.
    VkDeviceSize size;            // Size of this region in bytes.
    VmaAllocation hAllocation;    // Owning allocation; meaning of null for free regions - see usage elsewhere.
    VmaSuballocationType type;    // VMA_SUBALLOCATION_TYPE_FREE or the kind of resource placed here.
};
    5060 
    5061 // Comparator for offsets.
    5062 struct VmaSuballocationOffsetLess
    5063 {
    5064  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
    5065  {
    5066  return lhs.offset < rhs.offset;
    5067  }
    5068 };
    5069 struct VmaSuballocationOffsetGreater
    5070 {
    5071  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
    5072  {
    5073  return lhs.offset > rhs.offset;
    5074  }
    5075 };
    5076 
// Pool-allocated linked list of suballocations describing one memory block.
typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;

// Cost of one additional allocation lost, as equivalent in bytes.
// Used when comparing candidate allocation requests (see VmaAllocationRequest::CalcCost).
static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
    5081 
/*
Parameters of planned allocation inside a VmaDeviceMemoryBlock.

If canMakeOtherLost was false:
- item points to a FREE suballocation.
- itemsToMakeLostCount is 0.

If canMakeOtherLost was true:
- item points to first of sequence of suballocations, which are either FREE,
  or point to VmaAllocations that can become lost.
- itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
  the requested allocation to succeed.
*/
struct VmaAllocationRequest
{
    VkDeviceSize offset;
    VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
    VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
    VmaSuballocationList::iterator item;
    size_t itemsToMakeLostCount;
    void* customData;         // Metadata-implementation-specific extra data.

    // Heuristic cost of fulfilling this request: bytes of live allocations that
    // would be sacrificed, plus a fixed penalty per allocation made lost.
    VkDeviceSize CalcCost() const
    {
        return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
    }
};
    5109 
/*
Data structure used for bookkeeping of allocations and unused ranges of memory
in a single VkDeviceMemory block.

Abstract base class; concrete algorithms (e.g. VmaBlockMetadata_Generic,
VmaBlockMetadata_Linear) implement the pure virtual interface below.
*/
class VmaBlockMetadata
{
public:
    VmaBlockMetadata(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata() { }
    virtual void Init(VkDeviceSize size) { m_Size = size; }

    // Validates all data structures inside this object. If not valid, returns false.
    virtual bool Validate() const = 0;
    VkDeviceSize GetSize() const { return m_Size; }
    virtual size_t GetAllocationCount() const = 0;
    virtual VkDeviceSize GetSumFreeSize() const = 0;
    virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
    // Returns true if this block is empty - contains only single free suballocation.
    virtual bool IsEmpty() const = 0;

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
    // Shouldn't modify blockCount.
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
#endif

    // Tries to find a place for suballocation with given parameters inside this block.
    // If succeeded, fills pAllocationRequest and returns true.
    // If failed, returns false.
    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* or VMA_ALLOCATION_INTERNAL_STRATEGY_* flags.
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest) = 0;

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest) = 0;

    // Returns the number of allocations that were made lost.
    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;

    virtual VkResult CheckCorruption(const void* pBlockData) = 0;

    // Makes actual allocation based on request. Request must already be checked and valid.
    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation) = 0;

    // Frees suballocation assigned to given memory region.
    virtual void Free(const VmaAllocation allocation) = 0;
    virtual void FreeAtOffset(VkDeviceSize offset) = 0;

    // Tries to resize (grow or shrink) space for given allocation, in place.
    // Default implementation: not supported.
    virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize) { return false; }

protected:
    const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }

#if VMA_STATS_STRING_ENABLED
    // JSON-dump helpers shared by derived classes' PrintDetailedMap implementations.
    void PrintDetailedMap_Begin(class VmaJsonWriter& json,
        VkDeviceSize unusedBytes,
        size_t allocationCount,
        size_t unusedRangeCount) const;
    void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
        VkDeviceSize offset,
        VmaAllocation hAllocation) const;
    void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
        VkDeviceSize offset,
        VkDeviceSize size) const;
    void PrintDetailedMap_End(class VmaJsonWriter& json) const;
#endif

private:
    VkDeviceSize m_Size;
    const VkAllocationCallbacks* m_pAllocationCallbacks;
};
    5199 
// Asserts and returns false from the enclosing function when `cond` is false.
// Used inside Validate() implementations. The do/while(false) wrapper makes the
// macro expand to a single statement, safe after an unbraced if/else.
#define VMA_VALIDATE(cond) do { if(!(cond)) { \
        VMA_ASSERT(0 && "Validation failed: " #cond); \
        return false; \
    } } while(false)
    5204 
// Default block metadata: keeps a list of suballocations ordered within the
// block plus a by-size index of free regions for best-fit style searches.
class VmaBlockMetadata_Generic : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
public:
    VmaBlockMetadata_Generic(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Generic();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    // Live allocations = all suballocations minus the free ones.
    virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    virtual bool IsEmpty() const;

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    virtual VkResult CheckCorruption(const void* pBlockData);

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation);
    virtual void FreeAtOffset(VkDeviceSize offset);

    virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize);

    ////////////////////////////////////////////////////////////////////////////////
    // For defragmentation

    bool IsBufferImageGranularityConflictPossible(
        VkDeviceSize bufferImageGranularity,
        VmaSuballocationType& inOutPrevSuballocType) const;

private:
    friend class VmaDefragmentationAlgorithm_Generic;
    friend class VmaDefragmentationAlgorithm_Fast;

    uint32_t m_FreeCount;
    VkDeviceSize m_SumFreeSize;
    VmaSuballocationList m_Suballocations;
    // Suballocations that are free and have size greater than certain threshold.
    // Sorted by size, ascending.
    VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;

    bool ValidateFreeSuballocationList() const;

    // Checks if requested suballocation with given parameters can be placed in given pFreeSuballocItem.
    // If yes, fills pOffset and returns true. If no, returns false.
    bool CheckAllocation(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        VmaSuballocationType allocType,
        VmaSuballocationList::const_iterator suballocItem,
        bool canMakeOtherLost,
        VkDeviceSize* pOffset,
        size_t* itemsToMakeLostCount,
        VkDeviceSize* pSumFreeSize,
        VkDeviceSize* pSumItemSize) const;
    // Given free suballocation, it merges it with following one, which must also be free.
    void MergeFreeWithNext(VmaSuballocationList::iterator item);
    // Releases given suballocation, making it free.
    // Merges it with adjacent free suballocations if applicable.
    // Returns iterator to new free suballocation at this place.
    VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
    // Given free suballocation, it inserts it into sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
    // Given free suballocation, it removes it from sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
};
    5307 
    5308 /*
    5309 Allocations and their references in internal data structure look like this:
    5310 
    5311 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
    5312 
    5313  0 +-------+
    5314  | |
    5315  | |
    5316  | |
    5317  +-------+
    5318  | Alloc | 1st[m_1stNullItemsBeginCount]
    5319  +-------+
    5320  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    5321  +-------+
    5322  | ... |
    5323  +-------+
    5324  | Alloc | 1st[1st.size() - 1]
    5325  +-------+
    5326  | |
    5327  | |
    5328  | |
    5329 GetSize() +-------+
    5330 
    5331 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
    5332 
    5333  0 +-------+
    5334  | Alloc | 2nd[0]
    5335  +-------+
    5336  | Alloc | 2nd[1]
    5337  +-------+
    5338  | ... |
    5339  +-------+
    5340  | Alloc | 2nd[2nd.size() - 1]
    5341  +-------+
    5342  | |
    5343  | |
    5344  | |
    5345  +-------+
    5346  | Alloc | 1st[m_1stNullItemsBeginCount]
    5347  +-------+
    5348  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    5349  +-------+
    5350  | ... |
    5351  +-------+
    5352  | Alloc | 1st[1st.size() - 1]
    5353  +-------+
    5354  | |
    5355 GetSize() +-------+
    5356 
    5357 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
    5358 
    5359  0 +-------+
    5360  | |
    5361  | |
    5362  | |
    5363  +-------+
    5364  | Alloc | 1st[m_1stNullItemsBeginCount]
    5365  +-------+
    5366  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    5367  +-------+
    5368  | ... |
    5369  +-------+
    5370  | Alloc | 1st[1st.size() - 1]
    5371  +-------+
    5372  | |
    5373  | |
    5374  | |
    5375  +-------+
    5376  | Alloc | 2nd[2nd.size() - 1]
    5377  +-------+
    5378  | ... |
    5379  +-------+
    5380  | Alloc | 2nd[1]
    5381  +-------+
    5382  | Alloc | 2nd[0]
    5383 GetSize() +-------+
    5384 
    5385 */
    5386 class VmaBlockMetadata_Linear : public VmaBlockMetadata
    5387 {
    5388  VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
    5389 public:
    5390  VmaBlockMetadata_Linear(VmaAllocator hAllocator);
    5391  virtual ~VmaBlockMetadata_Linear();
    5392  virtual void Init(VkDeviceSize size);
    5393 
    5394  virtual bool Validate() const;
    5395  virtual size_t GetAllocationCount() const;
    5396  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    5397  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    5398  virtual bool IsEmpty() const { return GetAllocationCount() == 0; }
    5399 
    5400  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    5401  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
    5402 
    5403 #if VMA_STATS_STRING_ENABLED
    5404  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
    5405 #endif
    5406 
    5407  virtual bool CreateAllocationRequest(
    5408  uint32_t currentFrameIndex,
    5409  uint32_t frameInUseCount,
    5410  VkDeviceSize bufferImageGranularity,
    5411  VkDeviceSize allocSize,
    5412  VkDeviceSize allocAlignment,
    5413  bool upperAddress,
    5414  VmaSuballocationType allocType,
    5415  bool canMakeOtherLost,
    5416  uint32_t strategy,
    5417  VmaAllocationRequest* pAllocationRequest);
    5418 
    5419  virtual bool MakeRequestedAllocationsLost(
    5420  uint32_t currentFrameIndex,
    5421  uint32_t frameInUseCount,
    5422  VmaAllocationRequest* pAllocationRequest);
    5423 
    5424  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
    5425 
    5426  virtual VkResult CheckCorruption(const void* pBlockData);
    5427 
    5428  virtual void Alloc(
    5429  const VmaAllocationRequest& request,
    5430  VmaSuballocationType type,
    5431  VkDeviceSize allocSize,
    5432  bool upperAddress,
    5433  VmaAllocation hAllocation);
    5434 
    5435  virtual void Free(const VmaAllocation allocation);
    5436  virtual void FreeAtOffset(VkDeviceSize offset);
    5437 
    5438 private:
    5439  /*
    5440  There are two suballocation vectors, used in ping-pong way.
    5441  The one with index m_1stVectorIndex is called 1st.
    5442  The one with index (m_1stVectorIndex ^ 1) is called 2nd.
    5443  2nd can be non-empty only when 1st is not empty.
    5444  When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
    5445  */
    5446  typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;
    5447 
    5448  enum SECOND_VECTOR_MODE
    5449  {
    5450  SECOND_VECTOR_EMPTY,
    5451  /*
    5452  Suballocations in 2nd vector are created later than the ones in 1st, but they
    5453  all have smaller offset.
    5454  */
    5455  SECOND_VECTOR_RING_BUFFER,
    5456  /*
    5457  Suballocations in 2nd vector are upper side of double stack.
    5458  They all have offsets higher than those in 1st vector.
    5459  Top of this stack means smaller offsets, but higher indices in this vector.
    5460  */
    5461  SECOND_VECTOR_DOUBLE_STACK,
    5462  };
    5463 
    5464  VkDeviceSize m_SumFreeSize;
    5465  SuballocationVectorType m_Suballocations0, m_Suballocations1;
    5466  uint32_t m_1stVectorIndex;
    5467  SECOND_VECTOR_MODE m_2ndVectorMode;
    5468 
    5469  SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    5470  SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
    5471  const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    5472  const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
    5473 
    5474  // Number of items in 1st vector with hAllocation = null at the beginning.
    5475  size_t m_1stNullItemsBeginCount;
    5476  // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
    5477  size_t m_1stNullItemsMiddleCount;
    5478  // Number of items in 2nd vector with hAllocation = null.
    5479  size_t m_2ndNullItemsCount;
    5480 
    5481  bool ShouldCompact1st() const;
    5482  void CleanupAfterFree();
    5483 };
    5484 
    5485 /*
    5486 - GetSize() is the original size of allocated memory block.
    5487 - m_UsableSize is this size aligned down to a power of two.
    5488  All allocations and calculations happen relative to m_UsableSize.
    5489 - GetUnusableSize() is the difference between them.
    5490  It is reported as separate, unused range, not available for allocations.
    5491 
    5492 Node at level 0 has size = m_UsableSize.
    5493 Each next level contains nodes with size 2 times smaller than current level.
    5494 m_LevelCount is the maximum number of levels to use in the current object.
    5495 */
// Block metadata implementing the buddy allocator algorithm:
// the usable size (block size aligned down to a power of two) is recursively
// split in halves, forming a binary tree of nodes that are free, allocated, or split.
class VmaBlockMetadata_Buddy : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
public:
    VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Buddy();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    virtual size_t GetAllocationCount() const { return m_AllocationCount; }
    // Reported free size includes the unusable tail beyond m_UsableSize,
    // so that used + free always sums to GetSize().
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    // Empty when the root node is one free node spanning the whole usable size.
    virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    // Corruption detection is not supported by the buddy algorithm.
    virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
    virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }

private:
    // Nodes are never split below this size, limiting tree depth for tiny allocations.
    static const VkDeviceSize MIN_NODE_SIZE = 32;
    // Upper bound on tree depth and on the size of m_FreeList.
    static const size_t MAX_LEVELS = 30;

    // Accumulators filled while recursively validating the tree,
    // later compared against the cached counters below.
    struct ValidationContext
    {
        size_t calculatedAllocationCount;
        size_t calculatedFreeCount;
        VkDeviceSize calculatedSumFreeSize;

        ValidationContext() :
            calculatedAllocationCount(0),
            calculatedFreeCount(0),
            calculatedSumFreeSize(0) { }
    };

    // A node of the buddy tree. Which union member is active depends on 'type'.
    struct Node
    {
        VkDeviceSize offset;
        enum TYPE
        {
            TYPE_FREE,
            TYPE_ALLOCATION,
            TYPE_SPLIT,
            TYPE_COUNT
        } type;
        Node* parent;
        // The sibling node covering the other half of the parent's range.
        Node* buddy;

        union
        {
            // Active when type == TYPE_FREE: links in the free list of this level.
            struct
            {
                Node* prev;
                Node* next;
            } free;
            // Active when type == TYPE_ALLOCATION.
            struct
            {
                VmaAllocation alloc;
            } allocation;
            // Active when type == TYPE_SPLIT. Right child is leftChild->buddy.
            struct
            {
                Node* leftChild;
            } split;
        };
    };

    // Size of the memory block aligned down to a power of two.
    VkDeviceSize m_UsableSize;
    uint32_t m_LevelCount;

    Node* m_Root;
    // Doubly-linked list of free nodes per level (level 0 = whole usable size).
    struct {
        Node* front;
        Node* back;
    } m_FreeList[MAX_LEVELS];
    // Number of nodes in the tree with type == TYPE_ALLOCATION.
    size_t m_AllocationCount;
    // Number of nodes in the tree with type == TYPE_FREE.
    size_t m_FreeCount;
    // This includes space wasted due to internal fragmentation. Doesn't include unusable size.
    VkDeviceSize m_SumFreeSize;

    VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
    void DeleteNode(Node* node);
    bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
    uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
    // Node size halves with each level down the tree.
    inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
    // Alloc passed just for validation. Can be null.
    void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
    void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
    // Adds node to the front of FreeList at given level.
    // node->type must be FREE.
    // node->free.prev, next can be undefined.
    void AddToFreeListFront(uint32_t level, Node* node);
    // Removes node from FreeList at given level.
    // node->type must be FREE.
    // node->free.prev, next stay untouched.
    void RemoveFromFreeList(uint32_t level, Node* node);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
#endif
};
    5632 
    5633 /*
    5634 Represents a single block of device memory (`VkDeviceMemory`) with all the
    5635 data about its regions (aka suballocations, #VmaAllocation), assigned and free.
    5636 
    5637 Thread-safety: This class must be externally synchronized.
    5638 */
// Wraps one VkDeviceMemory allocation together with the metadata describing
// its suballocations. Must be externally synchronized (see m_Mutex comment below).
class VmaDeviceMemoryBlock
{
    VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
public:
    // Suballocation bookkeeping for this block. Concrete metadata class is
    // chosen in Init() based on 'algorithm'.
    VmaBlockMetadata* m_pMetadata;

    VmaDeviceMemoryBlock(VmaAllocator hAllocator);

    ~VmaDeviceMemoryBlock()
    {
        // Destroy() must have been called first: it unmaps and frees m_hMemory.
        VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
        VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    }

    // Always call after construction.
    void Init(
        VmaAllocator hAllocator,
        uint32_t newMemoryTypeIndex,
        VkDeviceMemory newMemory,
        VkDeviceSize newSize,
        uint32_t id,
        uint32_t algorithm);
    // Always call before destruction.
    void Destroy(VmaAllocator allocator);

    VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    uint32_t GetId() const { return m_Id; }
    void* GetMappedData() const { return m_pMappedData; }

    // Validates all data structures inside this object. If not valid, returns false.
    bool Validate() const;

    VkResult CheckCorruption(VmaAllocator hAllocator);

    // ppData can be null.
    // 'count' increments the reference-counted map; Unmap with the same count.
    VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
    void Unmap(VmaAllocator hAllocator, uint32_t count);

    // Used for margin-based corruption detection around a suballocation.
    VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
    VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);

    VkResult BindBufferMemory(
        const VmaAllocator hAllocator,
        const VmaAllocation hAllocation,
        VkBuffer hBuffer);
    VkResult BindImageMemory(
        const VmaAllocator hAllocator,
        const VmaAllocation hAllocation,
        VkImage hImage);

private:
    uint32_t m_MemoryTypeIndex;
    uint32_t m_Id;
    VkDeviceMemory m_hMemory;

    /*
    Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
    Also protects m_MapCount, m_pMappedData.
    Allocations, deallocations, any change in m_pMetadata is protected by parent's VmaBlockVector::m_Mutex.
    */
    VMA_MUTEX m_Mutex;
    // Number of outstanding Map() references; memory stays mapped while > 0.
    uint32_t m_MapCount;
    void* m_pMappedData;
};
    5704 
    5705 struct VmaPointerLess
    5706 {
    5707  bool operator()(const void* lhs, const void* rhs) const
    5708  {
    5709  return lhs < rhs;
    5710  }
    5711 };
    5712 
// Describes one allocation move planned by a defragmentation algorithm:
// copy 'size' bytes from (srcBlockIndex, srcOffset) to (dstBlockIndex, dstOffset).
struct VmaDefragmentationMove
{
    size_t srcBlockIndex;   // Index of the source block within its block vector.
    size_t dstBlockIndex;   // Index of the destination block within its block vector.
    VkDeviceSize srcOffset; // Byte offset of the data in the source block.
    VkDeviceSize dstOffset; // Byte offset of the new place in the destination block.
    VkDeviceSize size;      // Number of bytes to move.
};
    5721 
    5722 class VmaDefragmentationAlgorithm;
    5723 
    5724 /*
    5725 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
    5726 Vulkan memory type.
    5727 
    5728 Synchronized internally with a mutex.
    5729 */
// Sequence of VmaDeviceMemoryBlock for one Vulkan memory type (or one custom pool).
// Synchronized internally with m_Mutex.
struct VmaBlockVector
{
    VMA_CLASS_NO_COPY(VmaBlockVector)
public:
    VmaBlockVector(
        VmaAllocator hAllocator,
        uint32_t memoryTypeIndex,
        VkDeviceSize preferredBlockSize,
        size_t minBlockCount,
        size_t maxBlockCount,
        VkDeviceSize bufferImageGranularity,
        uint32_t frameInUseCount,
        bool isCustomPool,
        bool explicitBlockSize,
        uint32_t algorithm);
    ~VmaBlockVector();

    // Preallocates m_MinBlockCount blocks. Call after construction.
    VkResult CreateMinBlocks();

    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
    VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
    uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
    uint32_t GetAlgorithm() const { return m_Algorithm; }

    void GetPoolStats(VmaPoolStats* pStats);

    bool IsEmpty() const { return m_Blocks.empty(); }
    bool IsCorruptionDetectionEnabled() const;

    VkResult Allocate(
        VmaPool hCurrentPool,
        uint32_t currentFrameIndex,
        VkDeviceSize size,
        VkDeviceSize alignment,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    void Free(
        VmaAllocation hAllocation);

    // Adds statistics of this BlockVector to pStats.
    void AddStats(VmaStats* pStats);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json);
#endif

    void MakePoolAllocationsLost(
        uint32_t currentFrameIndex,
        size_t* pLostAllocationCount);
    VkResult CheckCorruption();

    // Saves results in pCtx->res.
    void Defragment(
        class VmaBlockVectorDefragmentationContext* pCtx,
        VmaDefragmentationStats* pStats,
        VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
        VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
        VkCommandBuffer commandBuffer);
    void DefragmentationEnd(
        class VmaBlockVectorDefragmentationContext* pCtx,
        VmaDefragmentationStats* pStats);

    // To be used only while the m_Mutex is locked. Used during defragmentation.

    size_t GetBlockCount() const { return m_Blocks.size(); }
    VmaDeviceMemoryBlock* GetBlock(size_t index) const { return m_Blocks[index]; }
    size_t CalcAllocationCount() const;
    bool IsBufferImageGranularityConflictPossible() const;

private:
    friend class VmaDefragmentationAlgorithm_Generic;

    const VmaAllocator m_hAllocator;
    const uint32_t m_MemoryTypeIndex;
    const VkDeviceSize m_PreferredBlockSize;
    const size_t m_MinBlockCount;
    const size_t m_MaxBlockCount;
    const VkDeviceSize m_BufferImageGranularity;
    const uint32_t m_FrameInUseCount;
    const bool m_IsCustomPool;
    const bool m_ExplicitBlockSize;
    const uint32_t m_Algorithm;
    /* There can be at most one allocation that is completely empty - a
    hysteresis to avoid pessimistic case of alternating creation and destruction
    of a VkDeviceMemory. */
    bool m_HasEmptyBlock;
    VMA_RW_MUTEX m_Mutex;
    // Incrementally sorted by sumFreeSize, ascending.
    VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
    // Source of unique ids assigned to newly created blocks.
    uint32_t m_NextBlockId;

    VkDeviceSize CalcMaxBlockSize() const;

    // Finds and removes given block from vector.
    void Remove(VmaDeviceMemoryBlock* pBlock);

    // Performs single step in sorting m_Blocks. They may not be fully sorted
    // after this call.
    void IncrementallySortBlocks();

    // To be used only without CAN_MAKE_OTHER_LOST flag.
    VkResult AllocateFromBlock(
        VmaDeviceMemoryBlock* pBlock,
        VmaPool hCurrentPool,
        uint32_t currentFrameIndex,
        VkDeviceSize size,
        VkDeviceSize alignment,
        VmaAllocationCreateFlags allocFlags,
        void* pUserData,
        VmaSuballocationType suballocType,
        uint32_t strategy,
        VmaAllocation* pAllocation);

    VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);

    // Saves result to pCtx->res.
    void ApplyDefragmentationMovesCpu(
        class VmaBlockVectorDefragmentationContext* pDefragCtx,
        const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves);
    // Saves result to pCtx->res.
    void ApplyDefragmentationMovesGpu(
        class VmaBlockVectorDefragmentationContext* pDefragCtx,
        const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
        VkCommandBuffer commandBuffer);

    /*
    Used during defragmentation. pDefragmentationStats is optional. It's in/out
    - updated with new data.
    */
    void FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats);
};
    5865 
// Implementation of the public VmaPool handle: a custom memory pool,
// which is a VmaBlockVector plus an identifier.
struct VmaPool_T
{
    VMA_CLASS_NO_COPY(VmaPool_T)
public:
    // Blocks of device memory owned by this pool.
    VmaBlockVector m_BlockVector;

    VmaPool_T(
        VmaAllocator hAllocator,
        const VmaPoolCreateInfo& createInfo,
        VkDeviceSize preferredBlockSize);
    ~VmaPool_T();

    uint32_t GetId() const { return m_Id; }
    // Id can be assigned only once, transitioning from 0 to a nonzero value.
    void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }

#if VMA_STATS_STRING_ENABLED
    //void PrintDetailedMap(class VmaStringBuilder& sb);
#endif

private:
    uint32_t m_Id;
};
    5888 
    5889 /*
    5890 Performs defragmentation:
    5891 
    5892 - Updates `pBlockVector->m_pMetadata`.
    5893 - Updates allocations by calling ChangeBlockAllocation() or ChangeOffset().
    5894 - Does not move actual data, only returns requested moves as `moves`.
    5895 */
// Abstract base class for defragmentation algorithms operating on one block vector.
// Usage: register allocations via AddAllocation()/AddAll(), then call Defragment()
// to obtain the planned moves (data is not moved by this class).
class VmaDefragmentationAlgorithm
{
    VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm)
public:
    VmaDefragmentationAlgorithm(
        VmaAllocator hAllocator,
        VmaBlockVector* pBlockVector,
        uint32_t currentFrameIndex) :
        m_hAllocator(hAllocator),
        m_pBlockVector(pBlockVector),
        m_CurrentFrameIndex(currentFrameIndex)
    {
    }
    virtual ~VmaDefragmentationAlgorithm()
    {
    }

    // Registers a single allocation as movable. *pChanged (optional) is set if it moves.
    virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) = 0;
    // Marks all allocations in the block vector as movable.
    virtual void AddAll() = 0;

    // Computes moves (appended to 'moves') within the given budget.
    virtual VkResult Defragment(
        VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove) = 0;

    virtual VkDeviceSize GetBytesMoved() const = 0;
    virtual uint32_t GetAllocationsMoved() const = 0;

protected:
    VmaAllocator const m_hAllocator;
    VmaBlockVector* const m_pBlockVector;
    const uint32_t m_CurrentFrameIndex;

    // Pairs an allocation with an optional output flag reporting whether it was moved.
    struct AllocationInfo
    {
        VmaAllocation m_hAllocation;
        VkBool32* m_pChanged;

        AllocationInfo() :
            m_hAllocation(VK_NULL_HANDLE),
            m_pChanged(VMA_NULL)
        {
        }
        AllocationInfo(VmaAllocation hAlloc, VkBool32* pChanged) :
            m_hAllocation(hAlloc),
            m_pChanged(pChanged)
        {
        }
    };
};
    5946 
// General-purpose defragmentation algorithm: works with allocations registered
// individually and plans moves round by round (see DefragmentRound).
class VmaDefragmentationAlgorithm_Generic : public VmaDefragmentationAlgorithm
{
    VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Generic)
public:
    VmaDefragmentationAlgorithm_Generic(
        VmaAllocator hAllocator,
        VmaBlockVector* pBlockVector,
        uint32_t currentFrameIndex,
        bool overlappingMoveSupported);
    virtual ~VmaDefragmentationAlgorithm_Generic();

    virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
    virtual void AddAll() { m_AllAllocations = true; }

    virtual VkResult Defragment(
        VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove);

    virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
    virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }

private:
    // Number of allocations registered via AddAllocation().
    uint32_t m_AllocationCount;
    // True when AddAll() was called - every allocation is considered movable.
    bool m_AllAllocations;

    VkDeviceSize m_BytesMoved;
    uint32_t m_AllocationsMoved;

    // Orders allocations by size, largest first.
    struct AllocationInfoSizeGreater
    {
        bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
        {
            return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
        }
    };

    // Orders allocations by offset, highest first.
    struct AllocationInfoOffsetGreater
    {
        bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
        {
            return lhs.m_hAllocation->GetOffset() > rhs.m_hAllocation->GetOffset();
        }
    };

    // Per-block working state for the algorithm.
    struct BlockInfo
    {
        // Index of this block in the owning VmaBlockVector before any reordering.
        size_t m_OriginalBlockIndex;
        VmaDeviceMemoryBlock* m_pBlock;
        bool m_HasNonMovableAllocations;
        // Movable allocations that live in this block.
        VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;

        BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
            m_OriginalBlockIndex(SIZE_MAX),
            m_pBlock(VMA_NULL),
            m_HasNonMovableAllocations(true),
            m_Allocations(pAllocationCallbacks)
        {
        }

        // Block has non-movable allocations when it contains more allocations
        // than were registered for defragmentation.
        void CalcHasNonMovableAllocations()
        {
            const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
            const size_t defragmentAllocCount = m_Allocations.size();
            m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
        }

        void SortAllocationsBySizeDescending()
        {
            VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
        }

        void SortAllocationsByOffsetDescending()
        {
            VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoOffsetGreater());
        }
    };

    // Comparator for binary-searching BlockInfo entries by their block pointer.
    struct BlockPointerLess
    {
        bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
        {
            return pLhsBlockInfo->m_pBlock < pRhsBlock;
        }
        bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
        {
            return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
        }
    };

    // 1. Blocks with some non-movable allocations go first.
    // 2. Blocks with smaller sumFreeSize go first.
    struct BlockInfoCompareMoveDestination
    {
        bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
        {
            if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
            {
                return true;
            }
            if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
            {
                return false;
            }
            if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
            {
                return true;
            }
            return false;
        }
    };

    typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
    BlockInfoVector m_Blocks;

    // Performs one pass of planning moves, respecting the remaining budget.
    VkResult DefragmentRound(
        VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove);

    size_t CalcBlocksWithNonMovableCount() const;

    static bool MoveMakesSense(
        size_t dstBlockIndex, VkDeviceSize dstOffset,
        size_t srcBlockIndex, VkDeviceSize srcOffset);
};
    6073 
// Fast defragmentation algorithm: processes whole blocks at once instead of
// tracking individual allocations (AddAllocation only counts them).
class VmaDefragmentationAlgorithm_Fast : public VmaDefragmentationAlgorithm
{
    VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Fast)
public:
    VmaDefragmentationAlgorithm_Fast(
        VmaAllocator hAllocator,
        VmaBlockVector* pBlockVector,
        uint32_t currentFrameIndex,
        bool overlappingMoveSupported);
    virtual ~VmaDefragmentationAlgorithm_Fast();

    // Individual allocations are not tracked - only counted.
    virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) { ++m_AllocationCount; }
    virtual void AddAll() { m_AllAllocations = true; }

    virtual VkResult Defragment(
        VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove);

    virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
    virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }

private:
    struct BlockInfo
    {
        // Index of this block in the owning VmaBlockVector before reordering.
        size_t origBlockIndex;
    };

    // Fixed-capacity registry of the most useful free ranges found so far,
    // used to find destinations for moved allocations.
    class FreeSpaceDatabase
    {
    public:
        FreeSpaceDatabase()
        {
            // Mark all entries invalid (blockInfoIndex == SIZE_MAX).
            FreeSpace s = {};
            s.blockInfoIndex = SIZE_MAX;
            for(size_t i = 0; i < MAX_COUNT; ++i)
            {
                m_FreeSpaces[i] = s;
            }
        }

        // Records a free range. Too-small ranges are ignored; when full, the
        // smallest entry that is smaller than the new range gets replaced.
        void Register(size_t blockInfoIndex, VkDeviceSize offset, VkDeviceSize size)
        {
            if(size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
            {
                return;
            }

            // Find first invalid or the smallest structure.
            size_t bestIndex = SIZE_MAX;
            for(size_t i = 0; i < MAX_COUNT; ++i)
            {
                // Empty structure.
                if(m_FreeSpaces[i].blockInfoIndex == SIZE_MAX)
                {
                    bestIndex = i;
                    break;
                }
                if(m_FreeSpaces[i].size < size &&
                    (bestIndex == SIZE_MAX || m_FreeSpaces[bestIndex].size > m_FreeSpaces[i].size))
                {
                    bestIndex = i;
                }
            }

            if(bestIndex != SIZE_MAX)
            {
                m_FreeSpaces[bestIndex].blockInfoIndex = blockInfoIndex;
                m_FreeSpaces[bestIndex].offset = offset;
                m_FreeSpaces[bestIndex].size = size;
            }
        }

        // Finds a registered range that fits 'size' with 'alignment' and consumes
        // it (shrinking or invalidating the entry). Returns false if none fits.
        bool Fetch(VkDeviceSize alignment, VkDeviceSize size,
            size_t& outBlockInfoIndex, VkDeviceSize& outDstOffset)
        {
            // Among fitting entries, prefer the one leaving the most space after
            // the placed allocation.
            size_t bestIndex = SIZE_MAX;
            VkDeviceSize bestFreeSpaceAfter = 0;
            for(size_t i = 0; i < MAX_COUNT; ++i)
            {
                // Structure is valid.
                if(m_FreeSpaces[i].blockInfoIndex != SIZE_MAX)
                {
                    const VkDeviceSize dstOffset = VmaAlignUp(m_FreeSpaces[i].offset, alignment);
                    // Allocation fits into this structure.
                    if(dstOffset + size <= m_FreeSpaces[i].offset + m_FreeSpaces[i].size)
                    {
                        const VkDeviceSize freeSpaceAfter = (m_FreeSpaces[i].offset + m_FreeSpaces[i].size) -
                            (dstOffset + size);
                        if(bestIndex == SIZE_MAX || freeSpaceAfter > bestFreeSpaceAfter)
                        {
                            bestIndex = i;
                            bestFreeSpaceAfter = freeSpaceAfter;
                        }
                    }
                }
            }

            if(bestIndex != SIZE_MAX)
            {
                outBlockInfoIndex = m_FreeSpaces[bestIndex].blockInfoIndex;
                outDstOffset = VmaAlignUp(m_FreeSpaces[bestIndex].offset, alignment);

                if(bestFreeSpaceAfter >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
                {
                    // Leave this structure for remaining empty space.
                    const VkDeviceSize alignmentPlusSize = (outDstOffset - m_FreeSpaces[bestIndex].offset) + size;
                    m_FreeSpaces[bestIndex].offset += alignmentPlusSize;
                    m_FreeSpaces[bestIndex].size -= alignmentPlusSize;
                }
                else
                {
                    // This structure becomes invalid.
                    m_FreeSpaces[bestIndex].blockInfoIndex = SIZE_MAX;
                }

                return true;
            }

            return false;
        }

    private:
        static const size_t MAX_COUNT = 4;

        struct FreeSpace
        {
            size_t blockInfoIndex; // SIZE_MAX means this structure is invalid.
            VkDeviceSize offset;
            VkDeviceSize size;
        } m_FreeSpaces[MAX_COUNT];
    };

    const bool m_OverlappingMoveSupported;

    uint32_t m_AllocationCount;
    bool m_AllAllocations;

    VkDeviceSize m_BytesMoved;
    uint32_t m_AllocationsMoved;

    VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> > m_BlockInfos;

    void PreprocessMetadata();
    void PostprocessMetadata();
    void InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc);
};
    6221 
// Per-block state kept during a defragmentation pass over a block vector.
struct VmaBlockDefragmentationContext
{
private:
    VMA_CLASS_NO_COPY(VmaBlockDefragmentationContext)
public:
    enum BLOCK_FLAG
    {
        // Block takes part in the current defragmentation pass.
        BLOCK_FLAG_USED = 0x00000001,
    };
    // Combination of BLOCK_FLAG_* values.
    uint32_t flags;
    // NOTE(review): presumably a temporary buffer bound to this block's memory for
    // GPU-side copies; VK_NULL_HANDLE when unused - confirm against usage sites.
    VkBuffer hBuffer;

    VmaBlockDefragmentationContext() :
        flags(0),
        hBuffer(VK_NULL_HANDLE)
    {
    }
};
    6240 
// Defragmentation state for a single block vector: owns the chosen algorithm
// and the allocations queued between construction and Begin().
class VmaBlockVectorDefragmentationContext
{
    VMA_CLASS_NO_COPY(VmaBlockVectorDefragmentationContext)
public:
    // Result of the defragmentation of this block vector.
    VkResult res;
    // True while this context holds the block vector's mutex.
    bool mutexLocked;
    // One entry per block in the block vector.
    VmaVector< VmaBlockDefragmentationContext, VmaStlAllocator<VmaBlockDefragmentationContext> > blockContexts;

    VmaBlockVectorDefragmentationContext(
        VmaAllocator hAllocator,
        VmaPool hCustomPool, // Optional.
        VmaBlockVector* pBlockVector,
        uint32_t currFrameIndex,
        uint32_t flags);
    ~VmaBlockVectorDefragmentationContext();

    VmaPool GetCustomPool() const { return m_hCustomPool; }
    VmaBlockVector* GetBlockVector() const { return m_pBlockVector; }
    VmaDefragmentationAlgorithm* GetAlgorithm() const { return m_pAlgorithm; }

    void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
    void AddAll() { m_AllAllocations = true; }

    // Creates m_pAlgorithm and hands it the queued allocations.
    void Begin(bool overlappingMoveSupported);

private:
    const VmaAllocator m_hAllocator;
    // Null if not from custom pool.
    const VmaPool m_hCustomPool;
    // Redundant, for convenience not to fetch from m_hCustomPool->m_BlockVector or m_hAllocator->m_pBlockVectors.
    VmaBlockVector* const m_pBlockVector;
    const uint32_t m_CurrFrameIndex;
    const uint32_t m_AlgorithmFlags;
    // Owner of this object.
    VmaDefragmentationAlgorithm* m_pAlgorithm;

    struct AllocInfo
    {
        VmaAllocation hAlloc;
        VkBool32* pChanged;
    };
    // Used between constructor and Begin.
    VmaVector< AllocInfo, VmaStlAllocator<AllocInfo> > m_Allocations;
    bool m_AllAllocations;
};
    6286 
// Implementation of the public VmaDefragmentationContext handle: aggregates
// per-block-vector contexts for default pools and custom pools.
struct VmaDefragmentationContext_T
{
private:
    VMA_CLASS_NO_COPY(VmaDefragmentationContext_T)
public:
    VmaDefragmentationContext_T(
        VmaAllocator hAllocator,
        uint32_t currFrameIndex,
        uint32_t flags,
        VmaDefragmentationStats* pStats);
    ~VmaDefragmentationContext_T();

    void AddPools(uint32_t poolCount, VmaPool* pPools);
    void AddAllocations(
        uint32_t allocationCount,
        VmaAllocation* pAllocations,
        VkBool32* pAllocationsChanged);

    /*
    Returns:
    - `VK_SUCCESS` if succeeded and object can be destroyed immediately.
    - `VK_NOT_READY` if succeeded but the object must remain alive until vmaDefragmentationEnd().
    - Negative value if error occurred and object can be destroyed immediately.
    */
    VkResult Defragment(
        VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
        VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
        VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats);

private:
    const VmaAllocator m_hAllocator;
    const uint32_t m_CurrFrameIndex;
    const uint32_t m_Flags;
    VmaDefragmentationStats* const m_pStats;
    // Owner of these objects.
    // Indexed by memory type; null where no defragmentation is needed.
    VmaBlockVectorDefragmentationContext* m_DefaultPoolContexts[VK_MAX_MEMORY_TYPES];
    // Owner of these objects.
    VmaVector< VmaBlockVectorDefragmentationContext*, VmaStlAllocator<VmaBlockVectorDefragmentationContext*> > m_CustomPoolContexts;
};
    6326 
    6327 #if VMA_RECORDING_ENABLED
    6328 
    6329 class VmaRecorder
    6330 {
    6331 public:
    6332  VmaRecorder();
    6333  VkResult Init(const VmaRecordSettings& settings, bool useMutex);
    6334  void WriteConfiguration(
    6335  const VkPhysicalDeviceProperties& devProps,
    6336  const VkPhysicalDeviceMemoryProperties& memProps,
    6337  bool dedicatedAllocationExtensionEnabled);
    6338  ~VmaRecorder();
    6339 
    6340  void RecordCreateAllocator(uint32_t frameIndex);
    6341  void RecordDestroyAllocator(uint32_t frameIndex);
    6342  void RecordCreatePool(uint32_t frameIndex,
    6343  const VmaPoolCreateInfo& createInfo,
    6344  VmaPool pool);
    6345  void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
    6346  void RecordAllocateMemory(uint32_t frameIndex,
    6347  const VkMemoryRequirements& vkMemReq,
    6348  const VmaAllocationCreateInfo& createInfo,
    6349  VmaAllocation allocation);
    6350  void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
    6351  const VkMemoryRequirements& vkMemReq,
    6352  bool requiresDedicatedAllocation,
    6353  bool prefersDedicatedAllocation,
    6354  const VmaAllocationCreateInfo& createInfo,
    6355  VmaAllocation allocation);
    6356  void RecordAllocateMemoryForImage(uint32_t frameIndex,
    6357  const VkMemoryRequirements& vkMemReq,
    6358  bool requiresDedicatedAllocation,
    6359  bool prefersDedicatedAllocation,
    6360  const VmaAllocationCreateInfo& createInfo,
    6361  VmaAllocation allocation);
    6362  void RecordFreeMemory(uint32_t frameIndex,
    6363  VmaAllocation allocation);
    6364  void RecordResizeAllocation(
    6365  uint32_t frameIndex,
    6366  VmaAllocation allocation,
    6367  VkDeviceSize newSize);
    6368  void RecordSetAllocationUserData(uint32_t frameIndex,
    6369  VmaAllocation allocation,
    6370  const void* pUserData);
    6371  void RecordCreateLostAllocation(uint32_t frameIndex,
    6372  VmaAllocation allocation);
    6373  void RecordMapMemory(uint32_t frameIndex,
    6374  VmaAllocation allocation);
    6375  void RecordUnmapMemory(uint32_t frameIndex,
    6376  VmaAllocation allocation);
    6377  void RecordFlushAllocation(uint32_t frameIndex,
    6378  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    6379  void RecordInvalidateAllocation(uint32_t frameIndex,
    6380  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    6381  void RecordCreateBuffer(uint32_t frameIndex,
    6382  const VkBufferCreateInfo& bufCreateInfo,
    6383  const VmaAllocationCreateInfo& allocCreateInfo,
    6384  VmaAllocation allocation);
    6385  void RecordCreateImage(uint32_t frameIndex,
    6386  const VkImageCreateInfo& imageCreateInfo,
    6387  const VmaAllocationCreateInfo& allocCreateInfo,
    6388  VmaAllocation allocation);
    6389  void RecordDestroyBuffer(uint32_t frameIndex,
    6390  VmaAllocation allocation);
    6391  void RecordDestroyImage(uint32_t frameIndex,
    6392  VmaAllocation allocation);
    6393  void RecordTouchAllocation(uint32_t frameIndex,
    6394  VmaAllocation allocation);
    6395  void RecordGetAllocationInfo(uint32_t frameIndex,
    6396  VmaAllocation allocation);
    6397  void RecordMakePoolAllocationsLost(uint32_t frameIndex,
    6398  VmaPool pool);
    6399  void RecordDefragmentationBegin(uint32_t frameIndex,
    6400  const VmaDefragmentationInfo2& info,
    6402  void RecordDefragmentationEnd(uint32_t frameIndex,
    6404 
    6405 private:
    6406  struct CallParams
    6407  {
    6408  uint32_t threadId;
    6409  double time;
    6410  };
    6411 
    6412  class UserDataString
    6413  {
    6414  public:
    6415  UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
    6416  const char* GetString() const { return m_Str; }
    6417 
    6418  private:
    6419  char m_PtrStr[17];
    6420  const char* m_Str;
    6421  };
    6422 
    6423  bool m_UseMutex;
    6424  VmaRecordFlags m_Flags;
    6425  FILE* m_File;
    6426  VMA_MUTEX m_FileMutex;
    6427  int64_t m_Freq;
    6428  int64_t m_StartCounter;
    6429 
    6430  void GetBasicParams(CallParams& outParams);
    6431 
    6432  // T must be a pointer type, e.g. VmaAllocation, VmaPool.
    6433  template<typename T>
    6434  void PrintPointerList(uint64_t count, const T* pItems)
    6435  {
    6436  if(count)
    6437  {
    6438  fprintf(m_File, "%p", pItems[0]);
    6439  for(uint64_t i = 1; i < count; ++i)
    6440  {
    6441  fprintf(m_File, " %p", pItems[i]);
    6442  }
    6443  }
    6444  }
    6445 
    6446  void Flush();
    6447 };
    6448 
    6449 #endif // #if VMA_RECORDING_ENABLED
    6450 
    6451 // Main allocator object.
struct VmaAllocator_T
{
    VMA_CLASS_NO_COPY(VmaAllocator_T)
public:
    // True if internal mutexes are used (not VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT).
    bool m_UseMutex;
    // True if VK_KHR_dedicated_allocation extension functions may be used.
    bool m_UseKhrDedicatedAllocation;
    VkDevice m_hDevice;
    // True if the user passed custom VkAllocationCallbacks at creation.
    bool m_AllocationCallbacksSpecified;
    VkAllocationCallbacks m_AllocationCallbacks;
    VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;

    // Number of bytes free out of limit, or VK_WHOLE_SIZE if no limit for that heap.
    VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
    VMA_MUTEX m_HeapSizeLimitMutex;

    VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
    VkPhysicalDeviceMemoryProperties m_MemProps;

    // Default pools.
    VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];

    // Each vector is sorted by memory (handle value).
    typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
    AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
    VMA_RW_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];

    VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
    VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
    ~VmaAllocator_T();

    // Returns user callbacks if specified, else null (use Vulkan defaults).
    const VkAllocationCallbacks* GetAllocationCallbacks() const
    {
        return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
    }
    const VmaVulkanFunctions& GetVulkanFunctions() const
    {
        return m_VulkanFunctions;
    }

    // Device bufferImageGranularity, clamped up by the debug minimum.
    VkDeviceSize GetBufferImageGranularity() const
    {
        return VMA_MAX(
            static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
            m_PhysicalDeviceProperties.limits.bufferImageGranularity);
    }

    uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
    uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }

    uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
    {
        VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
        return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
    }
    // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
    bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
    {
        return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
            VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    }
    // Minimum alignment for all allocations in specific memory type.
    // Non-coherent types must also respect nonCoherentAtomSize for flush/invalidate.
    VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
    {
        return IsMemoryTypeNonCoherent(memTypeIndex) ?
            VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
            (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
    }

    bool IsIntegratedGpu() const
    {
        return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
    }

#if VMA_RECORDING_ENABLED
    VmaRecorder* GetRecorder() const { return m_pRecorder; }
#endif

    void GetBufferMemoryRequirements(
        VkBuffer hBuffer,
        VkMemoryRequirements& memReq,
        bool& requiresDedicatedAllocation,
        bool& prefersDedicatedAllocation) const;
    void GetImageMemoryRequirements(
        VkImage hImage,
        VkMemoryRequirements& memReq,
        bool& requiresDedicatedAllocation,
        bool& prefersDedicatedAllocation) const;

    // Main allocation function.
    VkResult AllocateMemory(
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    // Main deallocation function.
    void FreeMemory(const VmaAllocation allocation);

    VkResult ResizeAllocation(
        const VmaAllocation alloc,
        VkDeviceSize newSize);

    void CalculateStats(VmaStats* pStats);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json);
#endif

    VkResult DefragmentationBegin(
        const VmaDefragmentationInfo2& info,
        VmaDefragmentationStats* pStats,
        VmaDefragmentationContext* pContext);
    VkResult DefragmentationEnd(
        VmaDefragmentationContext context);

    void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
    bool TouchAllocation(VmaAllocation hAllocation);

    VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
    void DestroyPool(VmaPool pool);
    void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);

    void SetCurrentFrameIndex(uint32_t frameIndex);
    uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }

    void MakePoolAllocationsLost(
        VmaPool hPool,
        size_t* pLostAllocationCount);
    VkResult CheckPoolCorruption(VmaPool hPool);
    VkResult CheckCorruption(uint32_t memoryTypeBits);

    void CreateLostAllocation(VmaAllocation* pAllocation);

    // Thin wrappers over vkAllocateMemory/vkFreeMemory that also apply
    // heap size limits and invoke user device-memory callbacks.
    VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
    void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);

    VkResult Map(VmaAllocation hAllocation, void** ppData);
    void Unmap(VmaAllocation hAllocation);

    VkResult BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer);
    VkResult BindImageMemory(VmaAllocation hAllocation, VkImage hImage);

    void FlushOrInvalidateAllocation(
        VmaAllocation hAllocation,
        VkDeviceSize offset, VkDeviceSize size,
        VMA_CACHE_OPERATION op);

    // Fills allocation memory with a byte pattern (debug feature).
    void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);

private:
    VkDeviceSize m_PreferredLargeHeapBlockSize;

    VkPhysicalDevice m_PhysicalDevice;
    VMA_ATOMIC_UINT32 m_CurrentFrameIndex;

    VMA_RW_MUTEX m_PoolsMutex;
    // Protected by m_PoolsMutex. Sorted by pointer value.
    VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
    uint32_t m_NextPoolId;

    VmaVulkanFunctions m_VulkanFunctions;

#if VMA_RECORDING_ENABLED
    VmaRecorder* m_pRecorder;
#endif

    void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);

    VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);

    VkResult AllocateMemoryOfType(
        VkDeviceSize size,
        VkDeviceSize alignment,
        bool dedicatedAllocation,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        const VmaAllocationCreateInfo& createInfo,
        uint32_t memTypeIndex,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    // Allocates and registers new VkDeviceMemory specifically for single allocation.
    VkResult AllocateDedicatedMemory(
        VkDeviceSize size,
        VmaSuballocationType suballocType,
        uint32_t memTypeIndex,
        bool map,
        bool isUserDataString,
        void* pUserData,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        VmaAllocation* pAllocation);

    // Tries to free pMemory as Dedicated Memory. Returns true if found and freed.
    void FreeDedicatedMemory(VmaAllocation allocation);
};
    6652 
    6654 // Memory allocation #2 after VmaAllocator_T definition
    6655 
    6656 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
    6657 {
    6658  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
    6659 }
    6660 
    6661 static void VmaFree(VmaAllocator hAllocator, void* ptr)
    6662 {
    6663  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
    6664 }
    6665 
    6666 template<typename T>
    6667 static T* VmaAllocate(VmaAllocator hAllocator)
    6668 {
    6669  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
    6670 }
    6671 
    6672 template<typename T>
    6673 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
    6674 {
    6675  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
    6676 }
    6677 
    6678 template<typename T>
    6679 static void vma_delete(VmaAllocator hAllocator, T* ptr)
    6680 {
    6681  if(ptr != VMA_NULL)
    6682  {
    6683  ptr->~T();
    6684  VmaFree(hAllocator, ptr);
    6685  }
    6686 }
    6687 
    6688 template<typename T>
    6689 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
    6690 {
    6691  if(ptr != VMA_NULL)
    6692  {
    6693  for(size_t i = count; i--; )
    6694  ptr[i].~T();
    6695  VmaFree(hAllocator, ptr);
    6696  }
    6697 }
    6698 
    6700 // VmaStringBuilder
    6701 
    6702 #if VMA_STATS_STRING_ENABLED
    6703 
// Minimal append-only string buffer built on VmaVector<char>.
// Note: the buffer is NOT null-terminated; use GetLength() with GetData().
class VmaStringBuilder
{
public:
    VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
    size_t GetLength() const { return m_Data.size(); }
    const char* GetData() const { return m_Data.data(); }

    void Add(char ch) { m_Data.push_back(ch); }
    void Add(const char* pStr);
    void AddNewLine() { Add('\n'); }
    // Appends the decimal representation of the number.
    void AddNumber(uint32_t num);
    void AddNumber(uint64_t num);
    // Appends the pointer formatted as hexadecimal.
    void AddPointer(const void* ptr);

private:
    VmaVector< char, VmaStlAllocator<char> > m_Data;
};
    6721 
    6722 void VmaStringBuilder::Add(const char* pStr)
    6723 {
    6724  const size_t strLen = strlen(pStr);
    6725  if(strLen > 0)
    6726  {
    6727  const size_t oldCount = m_Data.size();
    6728  m_Data.resize(oldCount + strLen);
    6729  memcpy(m_Data.data() + oldCount, pStr, strLen);
    6730  }
    6731 }
    6732 
    6733 void VmaStringBuilder::AddNumber(uint32_t num)
    6734 {
    6735  char buf[11];
    6736  VmaUint32ToStr(buf, sizeof(buf), num);
    6737  Add(buf);
    6738 }
    6739 
    6740 void VmaStringBuilder::AddNumber(uint64_t num)
    6741 {
    6742  char buf[21];
    6743  VmaUint64ToStr(buf, sizeof(buf), num);
    6744  Add(buf);
    6745 }
    6746 
    6747 void VmaStringBuilder::AddPointer(const void* ptr)
    6748 {
    6749  char buf[21];
    6750  VmaPtrToStr(buf, sizeof(buf), ptr);
    6751  Add(buf);
    6752 }
    6753 
    6754 #endif // #if VMA_STATS_STRING_ENABLED
    6755 
    6757 // VmaJsonWriter
    6758 
    6759 #if VMA_STATS_STRING_ENABLED
    6760 
/*
Streaming JSON writer used by the statistics-dump code.
Maintains a stack of open objects/arrays so it can emit separators, key/value
colons, and indentation automatically. Object keys must be written as strings
(enforced by an assert in BeginValue).
*/
class VmaJsonWriter
{
    VMA_CLASS_NO_COPY(VmaJsonWriter)
public:
    VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
    ~VmaJsonWriter();

    void BeginObject(bool singleLine = false);
    void EndObject();

    void BeginArray(bool singleLine = false);
    void EndArray();

    // Writes a complete quoted string value (or object key).
    void WriteString(const char* pStr);
    // Begin/Continue/End allow building a string value from pieces.
    void BeginString(const char* pStr = VMA_NULL);
    void ContinueString(const char* pStr);
    void ContinueString(uint32_t n);
    void ContinueString(uint64_t n);
    void ContinueString_Pointer(const void* ptr);
    void EndString(const char* pStr = VMA_NULL);

    void WriteNumber(uint32_t n);
    void WriteNumber(uint64_t n);
    void WriteBool(bool b);
    void WriteNull();

private:
    static const char* const INDENT;

    enum COLLECTION_TYPE
    {
        COLLECTION_TYPE_OBJECT,
        COLLECTION_TYPE_ARRAY,
    };
    struct StackItem
    {
        COLLECTION_TYPE type;
        // Number of values written so far; in objects, even counts mean the
        // next item is a key, odd counts mean it is the value for that key.
        uint32_t valueCount;
        bool singleLineMode;
    };

    VmaStringBuilder& m_SB;
    // Stack of currently open collections, innermost last.
    VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
    // True between BeginString and EndString.
    bool m_InsideString;

    void BeginValue(bool isString);
    void WriteIndent(bool oneLess = false);
};
    6809 
// Indentation unit emitted once per nesting level by WriteIndent().
const char* const VmaJsonWriter::INDENT = " ";
    6811 
// Binds the writer to an output string builder; callbacks are used for the
// internal stack's allocations.
VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
    m_SB(sb),
    m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
    m_InsideString(false)
{
}
    6818 
// Destructor verifies the produced JSON is well-formed: no string left open,
// every object/array closed.
VmaJsonWriter::~VmaJsonWriter()
{
    VMA_ASSERT(!m_InsideString);
    VMA_ASSERT(m_Stack.empty());
}
    6824 
    6825 void VmaJsonWriter::BeginObject(bool singleLine)
    6826 {
    6827  VMA_ASSERT(!m_InsideString);
    6828 
    6829  BeginValue(false);
    6830  m_SB.Add('{');
    6831 
    6832  StackItem item;
    6833  item.type = COLLECTION_TYPE_OBJECT;
    6834  item.valueCount = 0;
    6835  item.singleLineMode = singleLine;
    6836  m_Stack.push_back(item);
    6837 }
    6838 
// Closes the innermost collection, which must be an object.
void VmaJsonWriter::EndObject()
{
    VMA_ASSERT(!m_InsideString);

    // Closing brace goes on its own line, indented one level less.
    WriteIndent(true);
    m_SB.Add('}');

    VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
    m_Stack.pop_back();
}
    6849 
    6850 void VmaJsonWriter::BeginArray(bool singleLine)
    6851 {
    6852  VMA_ASSERT(!m_InsideString);
    6853 
    6854  BeginValue(false);
    6855  m_SB.Add('[');
    6856 
    6857  StackItem item;
    6858  item.type = COLLECTION_TYPE_ARRAY;
    6859  item.valueCount = 0;
    6860  item.singleLineMode = singleLine;
    6861  m_Stack.push_back(item);
    6862 }
    6863 
// Closes the innermost collection, which must be an array.
void VmaJsonWriter::EndArray()
{
    VMA_ASSERT(!m_InsideString);

    // Closing bracket goes on its own line, indented one level less.
    WriteIndent(true);
    m_SB.Add(']');

    VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
    m_Stack.pop_back();
}
    6874 
// Convenience: writes a complete quoted string value (or object key) at once.
void VmaJsonWriter::WriteString(const char* pStr)
{
    BeginString(pStr);
    EndString();
}
    6880 
    6881 void VmaJsonWriter::BeginString(const char* pStr)
    6882 {
    6883  VMA_ASSERT(!m_InsideString);
    6884 
    6885  BeginValue(true);
    6886  m_SB.Add('"');
    6887  m_InsideString = true;
    6888  if(pStr != VMA_NULL && pStr[0] != '\0')
    6889  {
    6890  ContinueString(pStr);
    6891  }
    6892 }
    6893 
    6894 void VmaJsonWriter::ContinueString(const char* pStr)
    6895 {
    6896  VMA_ASSERT(m_InsideString);
    6897 
    6898  const size_t strLen = strlen(pStr);
    6899  for(size_t i = 0; i < strLen; ++i)
    6900  {
    6901  char ch = pStr[i];
    6902  if(ch == '\\')
    6903  {
    6904  m_SB.Add("\\\\");
    6905  }
    6906  else if(ch == '"')
    6907  {
    6908  m_SB.Add("\\\"");
    6909  }
    6910  else if(ch >= 32)
    6911  {
    6912  m_SB.Add(ch);
    6913  }
    6914  else switch(ch)
    6915  {
    6916  case '\b':
    6917  m_SB.Add("\\b");
    6918  break;
    6919  case '\f':
    6920  m_SB.Add("\\f");
    6921  break;
    6922  case '\n':
    6923  m_SB.Add("\\n");
    6924  break;
    6925  case '\r':
    6926  m_SB.Add("\\r");
    6927  break;
    6928  case '\t':
    6929  m_SB.Add("\\t");
    6930  break;
    6931  default:
    6932  VMA_ASSERT(0 && "Character not currently supported.");
    6933  break;
    6934  }
    6935  }
    6936 }
    6937 
// Appends a decimal number to the currently open string.
void VmaJsonWriter::ContinueString(uint32_t n)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddNumber(n);
}
    6943 
// Appends a decimal number to the currently open string.
void VmaJsonWriter::ContinueString(uint64_t n)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddNumber(n);
}
    6949 
// Appends a pointer (hexadecimal text) to the currently open string.
void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddPointer(ptr);
}
    6955 
    6956 void VmaJsonWriter::EndString(const char* pStr)
    6957 {
    6958  VMA_ASSERT(m_InsideString);
    6959  if(pStr != VMA_NULL && pStr[0] != '\0')
    6960  {
    6961  ContinueString(pStr);
    6962  }
    6963  m_SB.Add('"');
    6964  m_InsideString = false;
    6965 }
    6966 
// Writes a complete numeric value (with any needed separator/indent).
void VmaJsonWriter::WriteNumber(uint32_t n)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.AddNumber(n);
}
    6973 
// Writes a complete numeric value (with any needed separator/indent).
void VmaJsonWriter::WriteNumber(uint64_t n)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.AddNumber(n);
}
    6980 
    6981 void VmaJsonWriter::WriteBool(bool b)
    6982 {
    6983  VMA_ASSERT(!m_InsideString);
    6984  BeginValue(false);
    6985  m_SB.Add(b ? "true" : "false");
    6986 }
    6987 
// Writes the JSON literal `null`.
void VmaJsonWriter::WriteNull()
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.Add("null");
}
    6994 
    6995 void VmaJsonWriter::BeginValue(bool isString)
    6996 {
    6997  if(!m_Stack.empty())
    6998  {
    6999  StackItem& currItem = m_Stack.back();
    7000  if(currItem.type == COLLECTION_TYPE_OBJECT &&
    7001  currItem.valueCount % 2 == 0)
    7002  {
    7003  VMA_ASSERT(isString);
    7004  }
    7005 
    7006  if(currItem.type == COLLECTION_TYPE_OBJECT &&
    7007  currItem.valueCount % 2 != 0)
    7008  {
    7009  m_SB.Add(": ");
    7010  }
    7011  else if(currItem.valueCount > 0)
    7012  {
    7013  m_SB.Add(", ");
    7014  WriteIndent();
    7015  }
    7016  else
    7017  {
    7018  WriteIndent();
    7019  }
    7020  ++currItem.valueCount;
    7021  }
    7022 }
    7023 
    7024 void VmaJsonWriter::WriteIndent(bool oneLess)
    7025 {
    7026  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
    7027  {
    7028  m_SB.AddNewLine();
    7029 
    7030  size_t count = m_Stack.size();
    7031  if(count > 0 && oneLess)
    7032  {
    7033  --count;
    7034  }
    7035  for(size_t i = 0; i < count; ++i)
    7036  {
    7037  m_SB.Add(INDENT);
    7038  }
    7039  }
    7040 }
    7041 
    7042 #endif // #if VMA_STATS_STRING_ENABLED
    7043 
    7045 
    7046 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
    7047 {
    7048  if(IsUserDataString())
    7049  {
    7050  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
    7051 
    7052  FreeUserDataString(hAllocator);
    7053 
    7054  if(pUserData != VMA_NULL)
    7055  {
    7056  const char* const newStrSrc = (char*)pUserData;
    7057  const size_t newStrLen = strlen(newStrSrc);
    7058  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
    7059  memcpy(newStrDst, newStrSrc, newStrLen + 1);
    7060  m_pUserData = newStrDst;
    7061  }
    7062  }
    7063  else
    7064  {
    7065  m_pUserData = pUserData;
    7066  }
    7067 }
    7068 
// Rebinds this block allocation to a (possibly different) memory block and
// offset, e.g. after defragmentation moved it.
void VmaAllocation_T::ChangeBlockAllocation(
    VmaAllocator hAllocator,
    VmaDeviceMemoryBlock* block,
    VkDeviceSize offset)
{
    VMA_ASSERT(block != VMA_NULL);
    VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);

    // Move mapping reference counter from old block to new block.
    if(block != m_BlockAllocation.m_Block)
    {
        uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
        if(IsPersistentMap())
            ++mapRefCount;
        // Unmap must happen on the old block before Map on the new one so the
        // per-block map counters stay balanced.
        m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
        block->Map(hAllocator, mapRefCount, VMA_NULL);
    }

    m_BlockAllocation.m_Block = block;
    m_BlockAllocation.m_Offset = offset;
}
    7090 
// Updates the allocation's recorded size (used by vmaResizeAllocation).
void VmaAllocation_T::ChangeSize(VkDeviceSize newSize)
{
    VMA_ASSERT(newSize > 0);
    m_Size = newSize;
}
    7096 
// Updates the in-block offset (only valid for block allocations).
void VmaAllocation_T::ChangeOffset(VkDeviceSize newOffset)
{
    VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    m_BlockAllocation.m_Offset = newOffset;
}
    7102 
    7103 VkDeviceSize VmaAllocation_T::GetOffset() const
    7104 {
    7105  switch(m_Type)
    7106  {
    7107  case ALLOCATION_TYPE_BLOCK:
    7108  return m_BlockAllocation.m_Offset;
    7109  case ALLOCATION_TYPE_DEDICATED:
    7110  return 0;
    7111  default:
    7112  VMA_ASSERT(0);
    7113  return 0;
    7114  }
    7115 }
    7116 
    7117 VkDeviceMemory VmaAllocation_T::GetMemory() const
    7118 {
    7119  switch(m_Type)
    7120  {
    7121  case ALLOCATION_TYPE_BLOCK:
    7122  return m_BlockAllocation.m_Block->GetDeviceMemory();
    7123  case ALLOCATION_TYPE_DEDICATED:
    7124  return m_DedicatedAllocation.m_hMemory;
    7125  default:
    7126  VMA_ASSERT(0);
    7127  return VK_NULL_HANDLE;
    7128  }
    7129 }
    7130 
    7131 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
    7132 {
    7133  switch(m_Type)
    7134  {
    7135  case ALLOCATION_TYPE_BLOCK:
    7136  return m_BlockAllocation.m_Block->GetMemoryTypeIndex();
    7137  case ALLOCATION_TYPE_DEDICATED:
    7138  return m_DedicatedAllocation.m_MemoryTypeIndex;
    7139  default:
    7140  VMA_ASSERT(0);
    7141  return UINT32_MAX;
    7142  }
    7143 }
    7144 
    7145 void* VmaAllocation_T::GetMappedData() const
    7146 {
    7147  switch(m_Type)
    7148  {
    7149  case ALLOCATION_TYPE_BLOCK:
    7150  if(m_MapCount != 0)
    7151  {
    7152  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
    7153  VMA_ASSERT(pBlockData != VMA_NULL);
    7154  return (char*)pBlockData + m_BlockAllocation.m_Offset;
    7155  }
    7156  else
    7157  {
    7158  return VMA_NULL;
    7159  }
    7160  break;
    7161  case ALLOCATION_TYPE_DEDICATED:
    7162  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
    7163  return m_DedicatedAllocation.m_pMappedData;
    7164  default:
    7165  VMA_ASSERT(0);
    7166  return VMA_NULL;
    7167  }
    7168 }
    7169 
    7170 bool VmaAllocation_T::CanBecomeLost() const
    7171 {
    7172  switch(m_Type)
    7173  {
    7174  case ALLOCATION_TYPE_BLOCK:
    7175  return m_BlockAllocation.m_CanBecomeLost;
    7176  case ALLOCATION_TYPE_DEDICATED:
    7177  return false;
    7178  default:
    7179  VMA_ASSERT(0);
    7180  return false;
    7181  }
    7182 }
    7183 
// Custom pool this block allocation came from (only valid for block allocations).
VmaPool VmaAllocation_T::GetPool() const
{
    VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    return m_BlockAllocation.m_hPool;
}
    7189 
// Attempts to mark this allocation as lost so its memory can be reused.
// Returns true on success; false if the allocation was used too recently
// (within frameInUseCount frames) or is already lost.
// Lock-free: retries a compare-exchange on the last-use frame index.
bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
{
    VMA_ASSERT(CanBecomeLost());

    /*
    Warning: This is a carefully designed algorithm.
    Do not modify unless you really know what you're doing :)
    */
    uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
    for(;;)
    {
        if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
        {
            // Already lost — caller should not have asked.
            VMA_ASSERT(0);
            return false;
        }
        else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
        {
            // Still potentially in use by the GPU.
            return false;
        }
        else // Last use time earlier than current time.
        {
            // On CAS failure, localLastUseFrameIndex is refreshed and we retry.
            if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
            {
                // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
                // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
                return true;
            }
        }
    }
}
    7221 
    7222 #if VMA_STATS_STRING_ENABLED
    7223 
// Correspond to values of enum VmaSuballocationType.
// Indexed by m_SuballocationType when dumping statistics as JSON.
static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
    "FREE",
    "UNKNOWN",
    "BUFFER",
    "IMAGE_UNKNOWN",
    "IMAGE_LINEAR",
    "IMAGE_OPTIMAL",
};
    7233 
// Writes this allocation's key/value parameters into an already-open JSON
// object (the caller owns BeginObject/EndObject).
void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
{
    json.WriteString("Type");
    json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);

    json.WriteString("Size");
    json.WriteNumber(m_Size);

    if(m_pUserData != VMA_NULL)
    {
        json.WriteString("UserData");
        if(IsUserDataString())
        {
            // String mode: emit the user string itself.
            json.WriteString((const char*)m_pUserData);
        }
        else
        {
            // Pointer mode: emit the raw pointer as a hex string.
            json.BeginString();
            json.ContinueString_Pointer(m_pUserData);
            json.EndString();
        }
    }

    json.WriteString("CreationFrameIndex");
    json.WriteNumber(m_CreationFrameIndex);

    json.WriteString("LastUseFrameIndex");
    json.WriteNumber(GetLastUseFrameIndex());

    // Only present when the buffer/image usage flags were recorded.
    if(m_BufferImageUsage != 0)
    {
        json.WriteString("Usage");
        json.WriteNumber(m_BufferImageUsage);
    }
}
    7269 
    7270 #endif
    7271 
    7272 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
    7273 {
    7274  VMA_ASSERT(IsUserDataString());
    7275  if(m_pUserData != VMA_NULL)
    7276  {
    7277  char* const oldStr = (char*)m_pUserData;
    7278  const size_t oldStrLen = strlen(oldStr);
    7279  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
    7280  m_pUserData = VMA_NULL;
    7281  }
    7282 }
    7283 
    7284 void VmaAllocation_T::BlockAllocMap()
    7285 {
    7286  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
    7287 
    7288  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
    7289  {
    7290  ++m_MapCount;
    7291  }
    7292  else
    7293  {
    7294  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
    7295  }
    7296 }
    7297 
    7298 void VmaAllocation_T::BlockAllocUnmap()
    7299 {
    7300  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
    7301 
    7302  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
    7303  {
    7304  --m_MapCount;
    7305  }
    7306  else
    7307  {
    7308  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
    7309  }
    7310 }
    7311 
    7312 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
    7313 {
    7314  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
    7315 
    7316  if(m_MapCount != 0)
    7317  {
    7318  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
    7319  {
    7320  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
    7321  *ppData = m_DedicatedAllocation.m_pMappedData;
    7322  ++m_MapCount;
    7323  return VK_SUCCESS;
    7324  }
    7325  else
    7326  {
    7327  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
    7328  return VK_ERROR_MEMORY_MAP_FAILED;
    7329  }
    7330  }
    7331  else
    7332  {
    7333  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
    7334  hAllocator->m_hDevice,
    7335  m_DedicatedAllocation.m_hMemory,
    7336  0, // offset
    7337  VK_WHOLE_SIZE,
    7338  0, // flags
    7339  ppData);
    7340  if(result == VK_SUCCESS)
    7341  {
    7342  m_DedicatedAllocation.m_pMappedData = *ppData;
    7343  m_MapCount = 1;
    7344  }
    7345  return result;
    7346  }
    7347 }
    7348 
    7349 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
    7350 {
    7351  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
    7352 
    7353  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
    7354  {
    7355  --m_MapCount;
    7356  if(m_MapCount == 0)
    7357  {
    7358  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
    7359  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
    7360  hAllocator->m_hDevice,
    7361  m_DedicatedAllocation.m_hMemory);
    7362  }
    7363  }
    7364  else
    7365  {
    7366  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
    7367  }
    7368 }
    7369 
    7370 #if VMA_STATS_STRING_ENABLED
    7371 
    7372 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
    7373 {
    7374  json.BeginObject();
    7375 
    7376  json.WriteString("Blocks");
    7377  json.WriteNumber(stat.blockCount);
    7378 
    7379  json.WriteString("Allocations");
    7380  json.WriteNumber(stat.allocationCount);
    7381 
    7382  json.WriteString("UnusedRanges");
    7383  json.WriteNumber(stat.unusedRangeCount);
    7384 
    7385  json.WriteString("UsedBytes");
    7386  json.WriteNumber(stat.usedBytes);
    7387 
    7388  json.WriteString("UnusedBytes");
    7389  json.WriteNumber(stat.unusedBytes);
    7390 
    7391  if(stat.allocationCount > 1)
    7392  {
    7393  json.WriteString("AllocationSize");
    7394  json.BeginObject(true);
    7395  json.WriteString("Min");
    7396  json.WriteNumber(stat.allocationSizeMin);
    7397  json.WriteString("Avg");
    7398  json.WriteNumber(stat.allocationSizeAvg);
    7399  json.WriteString("Max");
    7400  json.WriteNumber(stat.allocationSizeMax);
    7401  json.EndObject();
    7402  }
    7403 
    7404  if(stat.unusedRangeCount > 1)
    7405  {
    7406  json.WriteString("UnusedRangeSize");
    7407  json.BeginObject(true);
    7408  json.WriteString("Min");
    7409  json.WriteNumber(stat.unusedRangeSizeMin);
    7410  json.WriteString("Avg");
    7411  json.WriteNumber(stat.unusedRangeSizeAvg);
    7412  json.WriteString("Max");
    7413  json.WriteNumber(stat.unusedRangeSizeMax);
    7414  json.EndObject();
    7415  }
    7416 
    7417  json.EndObject();
    7418 }
    7419 
    7420 #endif // #if VMA_STATS_STRING_ENABLED
    7421 
    7422 struct VmaSuballocationItemSizeLess
    7423 {
    7424  bool operator()(
    7425  const VmaSuballocationList::iterator lhs,
    7426  const VmaSuballocationList::iterator rhs) const
    7427  {
    7428  return lhs->size < rhs->size;
    7429  }
    7430  bool operator()(
    7431  const VmaSuballocationList::iterator lhs,
    7432  VkDeviceSize rhsSize) const
    7433  {
    7434  return lhs->size < rhsSize;
    7435  }
    7436 };
    7437 
    7438 
    7440 // class VmaBlockMetadata
    7441 
// Base-class constructor: the block size starts at 0 and is set later by
// Init(); the allocator's CPU allocation callbacks are cached for use by
// derived metadata implementations.
VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
    m_Size(0),
    m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
{
}
    7447 
    7448 #if VMA_STATS_STRING_ENABLED
    7449 
    7450 void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
    7451  VkDeviceSize unusedBytes,
    7452  size_t allocationCount,
    7453  size_t unusedRangeCount) const
    7454 {
    7455  json.BeginObject();
    7456 
    7457  json.WriteString("TotalBytes");
    7458  json.WriteNumber(GetSize());
    7459 
    7460  json.WriteString("UnusedBytes");
    7461  json.WriteNumber(unusedBytes);
    7462 
    7463  json.WriteString("Allocations");
    7464  json.WriteNumber((uint64_t)allocationCount);
    7465 
    7466  json.WriteString("UnusedRanges");
    7467  json.WriteNumber((uint64_t)unusedRangeCount);
    7468 
    7469  json.WriteString("Suballocations");
    7470  json.BeginArray();
    7471 }
    7472 
    7473 void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
    7474  VkDeviceSize offset,
    7475  VmaAllocation hAllocation) const
    7476 {
    7477  json.BeginObject(true);
    7478 
    7479  json.WriteString("Offset");
    7480  json.WriteNumber(offset);
    7481 
    7482  hAllocation->PrintParameters(json);
    7483 
    7484  json.EndObject();
    7485 }
    7486 
    7487 void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
    7488  VkDeviceSize offset,
    7489  VkDeviceSize size) const
    7490 {
    7491  json.BeginObject(true);
    7492 
    7493  json.WriteString("Offset");
    7494  json.WriteNumber(offset);
    7495 
    7496  json.WriteString("Type");
    7497  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);
    7498 
    7499  json.WriteString("Size");
    7500  json.WriteNumber(size);
    7501 
    7502  json.EndObject();
    7503 }
    7504 
// Closes the "Suballocations" array and the block object opened by
// PrintDetailedMap_Begin().
void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
{
    json.EndArray();
    json.EndObject();
}
    7510 
    7511 #endif // #if VMA_STATS_STRING_ENABLED
    7512 
    7514 // class VmaBlockMetadata_Generic
    7515 
// Constructs empty metadata: both containers use the allocator's CPU
// allocation callbacks via VmaStlAllocator. Real state is set up in Init().
VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_FreeCount(0),
    m_SumFreeSize(0),
    m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
{
}
    7524 
// Containers clean themselves up; nothing else to release here.
VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
{
}
    7528 
    7529 void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
    7530 {
    7531  VmaBlockMetadata::Init(size);
    7532 
    7533  m_FreeCount = 1;
    7534  m_SumFreeSize = size;
    7535 
    7536  VmaSuballocation suballoc = {};
    7537  suballoc.offset = 0;
    7538  suballoc.size = size;
    7539  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    7540  suballoc.hAllocation = VK_NULL_HANDLE;
    7541 
    7542  VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
    7543  m_Suballocations.push_back(suballoc);
    7544  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
    7545  --suballocItem;
    7546  m_FreeSuballocationsBySize.push_back(suballocItem);
    7547 }
    7548 
    7549 bool VmaBlockMetadata_Generic::Validate() const
    7550 {
    7551  VMA_VALIDATE(!m_Suballocations.empty());
    7552 
    7553  // Expected offset of new suballocation as calculated from previous ones.
    7554  VkDeviceSize calculatedOffset = 0;
    7555  // Expected number of free suballocations as calculated from traversing their list.
    7556  uint32_t calculatedFreeCount = 0;
    7557  // Expected sum size of free suballocations as calculated from traversing their list.
    7558  VkDeviceSize calculatedSumFreeSize = 0;
    7559  // Expected number of free suballocations that should be registered in
    7560  // m_FreeSuballocationsBySize calculated from traversing their list.
    7561  size_t freeSuballocationsToRegister = 0;
    7562  // True if previous visited suballocation was free.
    7563  bool prevFree = false;
    7564 
    7565  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    7566  suballocItem != m_Suballocations.cend();
    7567  ++suballocItem)
    7568  {
    7569  const VmaSuballocation& subAlloc = *suballocItem;
    7570 
    7571  // Actual offset of this suballocation doesn't match expected one.
    7572  VMA_VALIDATE(subAlloc.offset == calculatedOffset);
    7573 
    7574  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
    7575  // Two adjacent free suballocations are invalid. They should be merged.
    7576  VMA_VALIDATE(!prevFree || !currFree);
    7577 
    7578  VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));
    7579 
    7580  if(currFree)
    7581  {
    7582  calculatedSumFreeSize += subAlloc.size;
    7583  ++calculatedFreeCount;
    7584  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    7585  {
    7586  ++freeSuballocationsToRegister;
    7587  }
    7588 
    7589  // Margin required between allocations - every free space must be at least that large.
    7590  VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
    7591  }
    7592  else
    7593  {
    7594  VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
    7595  VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);
    7596 
    7597  // Margin required between allocations - previous allocation must be free.
    7598  VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
    7599  }
    7600 
    7601  calculatedOffset += subAlloc.size;
    7602  prevFree = currFree;
    7603  }
    7604 
    7605  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
    7606  // match expected one.
    7607  VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);
    7608 
    7609  VkDeviceSize lastSize = 0;
    7610  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
    7611  {
    7612  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
    7613 
    7614  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
    7615  VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
    7616  // They must be sorted by size ascending.
    7617  VMA_VALIDATE(suballocItem->size >= lastSize);
    7618 
    7619  lastSize = suballocItem->size;
    7620  }
    7621 
    7622  // Check if totals match calculacted values.
    7623  VMA_VALIDATE(ValidateFreeSuballocationList());
    7624  VMA_VALIDATE(calculatedOffset == GetSize());
    7625  VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
    7626  VMA_VALIDATE(calculatedFreeCount == m_FreeCount);
    7627 
    7628  return true;
    7629 }
    7630 
    7631 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
    7632 {
    7633  if(!m_FreeSuballocationsBySize.empty())
    7634  {
    7635  return m_FreeSuballocationsBySize.back()->size;
    7636  }
    7637  else
    7638  {
    7639  return 0;
    7640  }
    7641 }
    7642 
    7643 bool VmaBlockMetadata_Generic::IsEmpty() const
    7644 {
    7645  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
    7646 }
    7647 
    7648 void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
    7649 {
    7650  outInfo.blockCount = 1;
    7651 
    7652  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
    7653  outInfo.allocationCount = rangeCount - m_FreeCount;
    7654  outInfo.unusedRangeCount = m_FreeCount;
    7655 
    7656  outInfo.unusedBytes = m_SumFreeSize;
    7657  outInfo.usedBytes = GetSize() - outInfo.unusedBytes;
    7658 
    7659  outInfo.allocationSizeMin = UINT64_MAX;
    7660  outInfo.allocationSizeMax = 0;
    7661  outInfo.unusedRangeSizeMin = UINT64_MAX;
    7662  outInfo.unusedRangeSizeMax = 0;
    7663 
    7664  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    7665  suballocItem != m_Suballocations.cend();
    7666  ++suballocItem)
    7667  {
    7668  const VmaSuballocation& suballoc = *suballocItem;
    7669  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    7670  {
    7671  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    7672  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
    7673  }
    7674  else
    7675  {
    7676  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
    7677  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
    7678  }
    7679  }
    7680 }
    7681 
    7682 void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
    7683 {
    7684  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
    7685 
    7686  inoutStats.size += GetSize();
    7687  inoutStats.unusedSize += m_SumFreeSize;
    7688  inoutStats.allocationCount += rangeCount - m_FreeCount;
    7689  inoutStats.unusedRangeCount += m_FreeCount;
    7690  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
    7691 }
    7692 
    7693 #if VMA_STATS_STRING_ENABLED
    7694 
    7695 void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
    7696 {
    7697  PrintDetailedMap_Begin(json,
    7698  m_SumFreeSize, // unusedBytes
    7699  m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
    7700  m_FreeCount); // unusedRangeCount
    7701 
    7702  size_t i = 0;
    7703  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    7704  suballocItem != m_Suballocations.cend();
    7705  ++suballocItem, ++i)
    7706  {
    7707  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    7708  {
    7709  PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
    7710  }
    7711  else
    7712  {
    7713  PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
    7714  }
    7715  }
    7716 
    7717  PrintDetailedMap_End(json);
    7718 }
    7719 
    7720 #endif // #if VMA_STATS_STRING_ENABLED
    7721 
// Tries to find a place for an allocation of allocSize/allocAlignment inside
// this block. On success fills *pAllocationRequest (offset, target item, and
// bookkeeping about allocations that would have to be made lost) and returns
// true. When canMakeOtherLost is true, existing lost-capable allocations may
// be sacrificed; the cheapest candidate (by CalcCost) is chosen.
bool VmaBlockMetadata_Generic::CreateAllocationRequest(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    bool upperAddress,
    VmaSuballocationType allocType,
    bool canMakeOtherLost,
    uint32_t strategy,
    VmaAllocationRequest* pAllocationRequest)
{
    VMA_ASSERT(allocSize > 0);
    // Upper-address allocation is only supported by the linear algorithm.
    VMA_ASSERT(!upperAddress);
    VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(pAllocationRequest != VMA_NULL);
    VMA_HEAVY_ASSERT(Validate());

    // There is not enough total free space in this block to fullfill the request: Early return.
    if(canMakeOtherLost == false &&
        m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
    {
        return false;
    }

    // New algorithm, efficiently searching freeSuballocationsBySize.
    const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
    if(freeSuballocCount > 0)
    {
        // NOTE(review): a condition line appears to be missing immediately before
        // this brace (the extraction skips one source line here). Upstream has a
        // strategy check (best-fit) selecting this binary-search branch — confirm
        // against the original file.
        {
        // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
        VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
            m_FreeSuballocationsBySize.data(),
            m_FreeSuballocationsBySize.data() + freeSuballocCount,
            allocSize + 2 * VMA_DEBUG_MARGIN,
            VmaSuballocationItemSizeLess());
        size_t index = it - m_FreeSuballocationsBySize.data();
        // Scan upward from the first large-enough candidate until one also
        // satisfies alignment/granularity constraints.
        for(; index < freeSuballocCount; ++index)
        {
            if(CheckAllocation(
                currentFrameIndex,
                frameInUseCount,
                bufferImageGranularity,
                allocSize,
                allocAlignment,
                allocType,
                m_FreeSuballocationsBySize[index],
                false, // canMakeOtherLost
                &pAllocationRequest->offset,
                &pAllocationRequest->itemsToMakeLostCount,
                &pAllocationRequest->sumFreeSize,
                &pAllocationRequest->sumItemSize))
            {
                pAllocationRequest->item = m_FreeSuballocationsBySize[index];
                return true;
            }
        }
        }
        else if(strategy == VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET)
        {
            // Walk suballocations in address order and take the first free
            // range that fits, minimizing the resulting offset.
            for(VmaSuballocationList::iterator it = m_Suballocations.begin();
                it != m_Suballocations.end();
                ++it)
            {
                if(it->type == VMA_SUBALLOCATION_TYPE_FREE && CheckAllocation(
                    currentFrameIndex,
                    frameInUseCount,
                    bufferImageGranularity,
                    allocSize,
                    allocAlignment,
                    allocType,
                    it,
                    false, // canMakeOtherLost
                    &pAllocationRequest->offset,
                    &pAllocationRequest->itemsToMakeLostCount,
                    &pAllocationRequest->sumFreeSize,
                    &pAllocationRequest->sumItemSize))
                {
                    pAllocationRequest->item = it;
                    return true;
                }
            }
        }
        else // WORST_FIT, FIRST_FIT
        {
            // Search staring from biggest suballocations.
            for(size_t index = freeSuballocCount; index--; )
            {
                if(CheckAllocation(
                    currentFrameIndex,
                    frameInUseCount,
                    bufferImageGranularity,
                    allocSize,
                    allocAlignment,
                    allocType,
                    m_FreeSuballocationsBySize[index],
                    false, // canMakeOtherLost
                    &pAllocationRequest->offset,
                    &pAllocationRequest->itemsToMakeLostCount,
                    &pAllocationRequest->sumFreeSize,
                    &pAllocationRequest->sumItemSize))
                {
                    pAllocationRequest->item = m_FreeSuballocationsBySize[index];
                    return true;
                }
            }
        }
    }

    if(canMakeOtherLost)
    {
        // Brute-force algorithm. TODO: Come up with something better.

        // Sentinel values: VK_WHOLE_SIZE marks "no candidate found yet" and
        // makes any real candidate's cost compare as cheaper.
        pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
        pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;

        VmaAllocationRequest tmpAllocRequest = {};
        for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
            suballocIt != m_Suballocations.end();
            ++suballocIt)
        {
            // Candidate positions: free ranges, or allocations that may be lost.
            if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
                suballocIt->hAllocation->CanBecomeLost())
            {
                if(CheckAllocation(
                    currentFrameIndex,
                    frameInUseCount,
                    bufferImageGranularity,
                    allocSize,
                    allocAlignment,
                    allocType,
                    suballocIt,
                    canMakeOtherLost,
                    &tmpAllocRequest.offset,
                    &tmpAllocRequest.itemsToMakeLostCount,
                    &tmpAllocRequest.sumFreeSize,
                    &tmpAllocRequest.sumItemSize))
                {
                    tmpAllocRequest.item = suballocIt;

                    if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost() ||
                        // NOTE(review): the right-hand operand of this `||` appears
                        // to be missing here (one source line skipped by the
                        // extraction) — confirm against the original file.
                    {
                        *pAllocationRequest = tmpAllocRequest;
                    }
                }
            }
        }

        // sumItemSize still at its sentinel means no candidate was found.
        if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
        {
            return true;
        }
    }

    return false;
}
    7880 
// Makes lost the allocations counted in pAllocationRequest->itemsToMakeLostCount,
// starting at pAllocationRequest->item, so the requested allocation can be
// placed there. On success, item points at a free suballocation covering the
// request. Returns false if any allocation refused to become lost.
bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VmaAllocationRequest* pAllocationRequest)
{
    while(pAllocationRequest->itemsToMakeLostCount > 0)
    {
        // Skip over a free item to reach the next used one. A single step is
        // enough: adjacent free ranges are merged (see Validate()), so two
        // free items never sit next to each other.
        if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            ++pAllocationRequest->item;
        }
        VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
        VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
        VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
        if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
        {
            // FreeSuballocation may merge the freed range with neighbors and
            // returns a valid iterator to the resulting free item.
            pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
            --pAllocationRequest->itemsToMakeLostCount;
        }
        else
        {
            // Allocation is still in use within frameInUseCount frames.
            return false;
        }
    }

    VMA_HEAVY_ASSERT(Validate());
    VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
    VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);

    return true;
}
    7912 
    7913 uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    7914 {
    7915  uint32_t lostAllocationCount = 0;
    7916  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
    7917  it != m_Suballocations.end();
    7918  ++it)
    7919  {
    7920  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
    7921  it->hAllocation->CanBecomeLost() &&
    7922  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    7923  {
    7924  it = FreeSuballocation(it);
    7925  ++lostAllocationCount;
    7926  }
    7927  }
    7928  return lostAllocationCount;
    7929 }
    7930 
    7931 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
    7932 {
    7933  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
    7934  it != m_Suballocations.end();
    7935  ++it)
    7936  {
    7937  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
    7938  {
    7939  if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
    7940  {
    7941  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    7942  return VK_ERROR_VALIDATION_FAILED_EXT;
    7943  }
    7944  if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
    7945  {
    7946  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    7947  return VK_ERROR_VALIDATION_FAILED_EXT;
    7948  }
    7949  }
    7950  }
    7951 
    7952  return VK_SUCCESS;
    7953 }
    7954 
// Commits a previously computed allocation request: converts (part of) the
// free suballocation request.item into a used one of the given type/size,
// inserting new free suballocations for any leftover space before and after,
// and updates m_FreeCount / m_SumFreeSize accordingly.
void VmaBlockMetadata_Generic::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    bool upperAddress,
    VmaAllocation hAllocation)
{
    // Upper-address placement is handled by the linear algorithm only.
    VMA_ASSERT(!upperAddress);
    VMA_ASSERT(request.item != m_Suballocations.end());
    VmaSuballocation& suballoc = *request.item;
    // Given suballocation is a free block.
    VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    // Given offset is inside this suballocation.
    VMA_ASSERT(request.offset >= suballoc.offset);
    const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
    VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
    const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;

    // Unregister this free suballocation from m_FreeSuballocationsBySize and update
    // it to become used. Must happen before mutating size, since the by-size
    // list is keyed on the current size.
    UnregisterFreeSuballocation(request.item);

    suballoc.offset = request.offset;
    suballoc.size = allocSize;
    suballoc.type = type;
    suballoc.hAllocation = hAllocation;

    // If there are any free bytes remaining at the end, insert new free suballocation after current one.
    if(paddingEnd)
    {
        VmaSuballocation paddingSuballoc = {};
        paddingSuballoc.offset = request.offset + allocSize;
        paddingSuballoc.size = paddingEnd;
        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
        VmaSuballocationList::iterator next = request.item;
        ++next;
        const VmaSuballocationList::iterator paddingEndItem =
            m_Suballocations.insert(next, paddingSuballoc);
        RegisterFreeSuballocation(paddingEndItem);
    }

    // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
    if(paddingBegin)
    {
        VmaSuballocation paddingSuballoc = {};
        paddingSuballoc.offset = request.offset - paddingBegin;
        paddingSuballoc.size = paddingBegin;
        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
        const VmaSuballocationList::iterator paddingBeginItem =
            m_Suballocations.insert(request.item, paddingSuballoc);
        RegisterFreeSuballocation(paddingBeginItem);
    }

    // Update totals.
    // One free range was consumed; each nonzero padding reintroduces one.
    m_FreeCount = m_FreeCount - 1;
    if(paddingBegin > 0)
    {
        ++m_FreeCount;
    }
    if(paddingEnd > 0)
    {
        ++m_FreeCount;
    }
    // Only the allocated bytes leave the free pool; padding stays free.
    m_SumFreeSize -= allocSize;
}
    8020 
    8021 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
    8022 {
    8023  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
    8024  suballocItem != m_Suballocations.end();
    8025  ++suballocItem)
    8026  {
    8027  VmaSuballocation& suballoc = *suballocItem;
    8028  if(suballoc.hAllocation == allocation)
    8029  {
    8030  FreeSuballocation(suballocItem);
    8031  VMA_HEAVY_ASSERT(Validate());
    8032  return;
    8033  }
    8034  }
    8035  VMA_ASSERT(0 && "Not found!");
    8036 }
    8037 
    8038 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
    8039 {
    8040  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
    8041  suballocItem != m_Suballocations.end();
    8042  ++suballocItem)
    8043  {
    8044  VmaSuballocation& suballoc = *suballocItem;
    8045  if(suballoc.offset == offset)
    8046  {
    8047  FreeSuballocation(suballocItem);
    8048  return;
    8049  }
    8050  }
    8051  VMA_ASSERT(0 && "Not found!");
    8052 }
    8053 
// Tries to change the size of the suballocation holding `alloc` to newSize,
// in place. Shrinking always succeeds (leftover space becomes/extends a free
// range); growing succeeds only if the immediately following suballocation is
// a free range large enough (respecting VMA_DEBUG_MARGIN). Returns false when
// there is no room to grow or (assert) when the allocation is not found.
bool VmaBlockMetadata_Generic::ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize)
{
    typedef VmaSuballocationList::iterator iter_type;
    for(iter_type suballocItem = m_Suballocations.begin();
        suballocItem != m_Suballocations.end();
        ++suballocItem)
    {
        VmaSuballocation& suballoc = *suballocItem;
        if(suballoc.hAllocation == alloc)
        {
            iter_type nextItem = suballocItem;
            ++nextItem;

            // Should have been ensured on higher level.
            VMA_ASSERT(newSize != alloc->GetSize() && newSize > 0);

            // Shrinking.
            if(newSize < alloc->GetSize())
            {
                const VkDeviceSize sizeDiff = suballoc.size - newSize;

                // There is next item.
                if(nextItem != m_Suballocations.end())
                {
                    // Next item is free.
                    if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
                    {
                        // Grow this next item backward.
                        // Unregister first: the by-size list is keyed on size.
                        UnregisterFreeSuballocation(nextItem);
                        nextItem->offset -= sizeDiff;
                        nextItem->size += sizeDiff;
                        RegisterFreeSuballocation(nextItem);
                    }
                    // Next item is not free.
                    else
                    {
                        // Create free item after current one.
                        VmaSuballocation newFreeSuballoc;
                        newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
                        newFreeSuballoc.offset = suballoc.offset + newSize;
                        newFreeSuballoc.size = sizeDiff;
                        newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
                        iter_type newFreeSuballocIt = m_Suballocations.insert(nextItem, newFreeSuballoc);
                        RegisterFreeSuballocation(newFreeSuballocIt);

                        ++m_FreeCount;
                    }
                }
                // This is the last item.
                else
                {
                    // Create free item at the end.
                    VmaSuballocation newFreeSuballoc;
                    newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
                    newFreeSuballoc.offset = suballoc.offset + newSize;
                    newFreeSuballoc.size = sizeDiff;
                    newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
                    m_Suballocations.push_back(newFreeSuballoc);

                    iter_type newFreeSuballocIt = m_Suballocations.end();
                    RegisterFreeSuballocation(--newFreeSuballocIt);

                    ++m_FreeCount;
                }

                suballoc.size = newSize;
                m_SumFreeSize += sizeDiff;
            }
            // Growing.
            else
            {
                const VkDeviceSize sizeDiff = newSize - suballoc.size;

                // There is next item.
                if(nextItem != m_Suballocations.end())
                {
                    // Next item is free.
                    if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
                    {
                        // There is not enough free space, including margin.
                        if(nextItem->size < sizeDiff + VMA_DEBUG_MARGIN)
                        {
                            return false;
                        }

                        // There is more free space than required.
                        if(nextItem->size > sizeDiff)
                        {
                            // Move and shrink this next item.
                            UnregisterFreeSuballocation(nextItem);
                            nextItem->offset += sizeDiff;
                            nextItem->size -= sizeDiff;
                            RegisterFreeSuballocation(nextItem);
                        }
                        // There is exactly the amount of free space required.
                        else
                        {
                            // Remove this next free item.
                            UnregisterFreeSuballocation(nextItem);
                            m_Suballocations.erase(nextItem);
                            --m_FreeCount;
                        }
                    }
                    // Next item is not free - there is no space to grow.
                    else
                    {
                        return false;
                    }
                }
                // This is the last item - there is no space to grow.
                else
                {
                    return false;
                }

                suballoc.size = newSize;
                m_SumFreeSize -= sizeDiff;
            }

            // We cannot call Validate() here because alloc object is updated to new size outside of this call.
            return true;
        }
    }
    VMA_ASSERT(0 && "Not found!");
    return false;
}
    8180 
    8181 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
    8182 {
    8183  VkDeviceSize lastSize = 0;
    8184  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
    8185  {
    8186  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
    8187 
    8188  VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
    8189  VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
    8190  VMA_VALIDATE(it->size >= lastSize);
    8191  lastSize = it->size;
    8192  }
    8193  return true;
    8194 }
    8195 
// Checks whether an allocation of allocSize bytes, aligned to allocAlignment and of
// type allocType, can be placed starting at suballocItem.
// On success, fills *pOffset with the final (margin- and alignment-adjusted) offset.
// When canMakeOtherLost is true, the candidate region may span several consecutive
// suballocations: allocations that CanBecomeLost() and whose last use is older than
// frameInUseCount frames (relative to currentFrameIndex) are counted into
// *itemsToMakeLostCount and their total size into *pSumItemSize, while free space
// encountered is accumulated into *pSumFreeSize.
// Returns false when the request cannot be satisfied starting at this item.
bool VmaBlockMetadata_Generic::CheckAllocation(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    VmaSuballocationType allocType,
    VmaSuballocationList::const_iterator suballocItem,
    bool canMakeOtherLost,
    VkDeviceSize* pOffset,
    size_t* itemsToMakeLostCount,
    VkDeviceSize* pSumFreeSize,
    VkDeviceSize* pSumItemSize) const
{
    VMA_ASSERT(allocSize > 0);
    VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(suballocItem != m_Suballocations.cend());
    VMA_ASSERT(pOffset != VMA_NULL);

    *itemsToMakeLostCount = 0;
    *pSumFreeSize = 0;
    *pSumItemSize = 0;

    if(canMakeOtherLost)
    {
        // The starting item may be free, or an allocation that is allowed to be made lost.
        if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            *pSumFreeSize = suballocItem->size;
        }
        else
        {
            if(suballocItem->hAllocation->CanBecomeLost() &&
                suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
            {
                ++*itemsToMakeLostCount;
                *pSumItemSize = suballocItem->size;
            }
            else
            {
                return false;
            }
        }

        // Remaining size is too small for this request: Early return.
        if(GetSize() - suballocItem->offset < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation.
        *pOffset = suballocItem->offset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if(VMA_DEBUG_MARGIN > 0)
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        *pOffset = VmaAlignUp(*pOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                // Pushing the offset to the next granularity page resolves the conflict.
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        // Now that we have final *pOffset, check if we are past suballocItem.
        // If yes, return false - this function should be called for another suballocItem as starting point.
        if(*pOffset >= suballocItem->offset + suballocItem->size)
        {
            return false;
        }

        // Calculate padding at the beginning based on current offset.
        const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;

        // Calculate required margin at the end.
        const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;

        const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
        // Another early return check.
        if(suballocItem->offset + totalSize > GetSize())
        {
            return false;
        }

        // Advance lastSuballocItem until desired size is reached.
        // Update itemsToMakeLostCount.
        VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
        if(totalSize > suballocItem->size)
        {
            VkDeviceSize remainingSize = totalSize - suballocItem->size;
            while(remainingSize > 0)
            {
                ++lastSuballocItem;
                if(lastSuballocItem == m_Suballocations.cend())
                {
                    return false;
                }
                if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
                {
                    *pSumFreeSize += lastSuballocItem->size;
                }
                else
                {
                    // Occupied item: only usable if its allocation can be made lost.
                    VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
                    if(lastSuballocItem->hAllocation->CanBecomeLost() &&
                        lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                    {
                        ++*itemsToMakeLostCount;
                        *pSumItemSize += lastSuballocItem->size;
                    }
                    else
                    {
                        return false;
                    }
                }
                remainingSize = (lastSuballocItem->size < remainingSize) ?
                    remainingSize - lastSuballocItem->size : 0;
            }
        }

        // Check next suballocations for BufferImageGranularity conflicts.
        // If conflict exists, we must mark more allocations lost or fail.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
                        if(nextSuballoc.hAllocation->CanBecomeLost() &&
                            nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                        {
                            ++*itemsToMakeLostCount;
                        }
                        else
                        {
                            return false;
                        }
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }
    else
    {
        // Simple path: the starting item itself must be a free region large enough.
        const VmaSuballocation& suballoc = *suballocItem;
        VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

        *pSumFreeSize = suballoc.size;

        // Size of this suballocation is too small for this request: Early return.
        if(suballoc.size < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation.
        *pOffset = suballoc.offset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if(VMA_DEBUG_MARGIN > 0)
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        *pOffset = VmaAlignUp(*pOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        // Calculate padding at the beginning based on current offset.
        const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;

        // Calculate required margin at the end.
        const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;

        // Fail if requested size plus margin before and after is bigger than size of this suballocation.
        if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
        {
            return false;
        }

        // Check next suballocations for BufferImageGranularity conflicts.
        // If conflict exists, allocation cannot be made here.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        return false;
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }

    // All tests passed: Success. pOffset is already filled.
    return true;
}
    8469 
    8470 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
    8471 {
    8472  VMA_ASSERT(item != m_Suballocations.end());
    8473  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    8474 
    8475  VmaSuballocationList::iterator nextItem = item;
    8476  ++nextItem;
    8477  VMA_ASSERT(nextItem != m_Suballocations.end());
    8478  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
    8479 
    8480  item->size += nextItem->size;
    8481  --m_FreeCount;
    8482  m_Suballocations.erase(nextItem);
    8483 }
    8484 
    8485 VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
    8486 {
    8487  // Change this suballocation to be marked as free.
    8488  VmaSuballocation& suballoc = *suballocItem;
    8489  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    8490  suballoc.hAllocation = VK_NULL_HANDLE;
    8491 
    8492  // Update totals.
    8493  ++m_FreeCount;
    8494  m_SumFreeSize += suballoc.size;
    8495 
    8496  // Merge with previous and/or next suballocation if it's also free.
    8497  bool mergeWithNext = false;
    8498  bool mergeWithPrev = false;
    8499 
    8500  VmaSuballocationList::iterator nextItem = suballocItem;
    8501  ++nextItem;
    8502  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
    8503  {
    8504  mergeWithNext = true;
    8505  }
    8506 
    8507  VmaSuballocationList::iterator prevItem = suballocItem;
    8508  if(suballocItem != m_Suballocations.begin())
    8509  {
    8510  --prevItem;
    8511  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    8512  {
    8513  mergeWithPrev = true;
    8514  }
    8515  }
    8516 
    8517  if(mergeWithNext)
    8518  {
    8519  UnregisterFreeSuballocation(nextItem);
    8520  MergeFreeWithNext(suballocItem);
    8521  }
    8522 
    8523  if(mergeWithPrev)
    8524  {
    8525  UnregisterFreeSuballocation(prevItem);
    8526  MergeFreeWithNext(prevItem);
    8527  RegisterFreeSuballocation(prevItem);
    8528  return prevItem;
    8529  }
    8530  else
    8531  {
    8532  RegisterFreeSuballocation(suballocItem);
    8533  return suballocItem;
    8534  }
    8535 }
    8536 
    8537 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
    8538 {
    8539  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    8540  VMA_ASSERT(item->size > 0);
    8541 
    8542  // You may want to enable this validation at the beginning or at the end of
    8543  // this function, depending on what do you want to check.
    8544  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    8545 
    8546  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    8547  {
    8548  if(m_FreeSuballocationsBySize.empty())
    8549  {
    8550  m_FreeSuballocationsBySize.push_back(item);
    8551  }
    8552  else
    8553  {
    8554  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
    8555  }
    8556  }
    8557 
    8558  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    8559 }
    8560 
    8561 
    8562 void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
    8563 {
    8564  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    8565  VMA_ASSERT(item->size > 0);
    8566 
    8567  // You may want to enable this validation at the beginning or at the end of
    8568  // this function, depending on what do you want to check.
    8569  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    8570 
    8571  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    8572  {
    8573  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
    8574  m_FreeSuballocationsBySize.data(),
    8575  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
    8576  item,
    8577  VmaSuballocationItemSizeLess());
    8578  for(size_t index = it - m_FreeSuballocationsBySize.data();
    8579  index < m_FreeSuballocationsBySize.size();
    8580  ++index)
    8581  {
    8582  if(m_FreeSuballocationsBySize[index] == item)
    8583  {
    8584  VmaVectorRemove(m_FreeSuballocationsBySize, index);
    8585  return;
    8586  }
    8587  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
    8588  }
    8589  VMA_ASSERT(0 && "Not found.");
    8590  }
    8591 
    8592  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    8593 }
    8594 
    8595 bool VmaBlockMetadata_Generic::IsBufferImageGranularityConflictPossible(
    8596  VkDeviceSize bufferImageGranularity,
    8597  VmaSuballocationType& inOutPrevSuballocType) const
    8598 {
    8599  if(bufferImageGranularity == 1 || IsEmpty())
    8600  {
    8601  return false;
    8602  }
    8603 
    8604  VkDeviceSize minAlignment = VK_WHOLE_SIZE;
    8605  bool typeConflictFound = false;
    8606  for(VmaSuballocationList::const_iterator it = m_Suballocations.cbegin();
    8607  it != m_Suballocations.cend();
    8608  ++it)
    8609  {
    8610  const VmaSuballocationType suballocType = it->type;
    8611  if(suballocType != VMA_SUBALLOCATION_TYPE_FREE)
    8612  {
    8613  minAlignment = VMA_MIN(minAlignment, it->hAllocation->GetAlignment());
    8614  if(VmaIsBufferImageGranularityConflict(inOutPrevSuballocType, suballocType))
    8615  {
    8616  typeConflictFound = true;
    8617  }
    8618  inOutPrevSuballocType = suballocType;
    8619  }
    8620  }
    8621 
    8622  return typeConflictFound || minAlignment >= bufferImageGranularity;
    8623 }
    8624 
    8626 // class VmaBlockMetadata_Linear
    8627 
// Constructs empty linear metadata: both suballocation vectors empty,
// vector 0 acting as "1st", and the second vector unused.
VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_SumFreeSize(0),
    m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_1stVectorIndex(0),
    m_2ndVectorMode(SECOND_VECTOR_EMPTY),
    m_1stNullItemsBeginCount(0),
    m_1stNullItemsMiddleCount(0),
    m_2ndNullItemsCount(0)
{
}
    8640 
// Nothing to release explicitly - the suballocation vectors clean up themselves.
VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
{
}
    8644 
// Initializes metadata for a block of the given size; with no allocations yet,
// the whole block is free.
void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
{
    VmaBlockMetadata::Init(size);
    m_SumFreeSize = size;
}
    8650 
// Checks all internal invariants of the linear metadata:
// - the 2nd vector is non-empty exactly when a second-vector mode is active,
// - null (freed) items appear only where the counters say they do,
// - offsets are monotonically increasing with VMA_DEBUG_MARGIN between items,
// - each live suballocation agrees with its VmaAllocation's offset/size,
// - m_SumFreeSize matches block size minus the sum of used sizes.
// Returns false (via VMA_VALIDATE) on the first violated invariant.
bool VmaBlockMetadata_Linear::Validate() const
{
    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
    VMA_VALIDATE(!suballocations1st.empty() ||
        suballocations2nd.empty() ||
        m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);

    if(!suballocations1st.empty())
    {
        // Null item at the beginning should be accounted into m_1stNullItemsBeginCount.
        VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
        // Null item at the end should be just pop_back().
        VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
    }
    if(!suballocations2nd.empty())
    {
        // Null item at the end should be just pop_back().
        VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
    }

    VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
    VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());

    VkDeviceSize sumUsedSize = 0;
    const size_t suballoc1stCount = suballocations1st.size();
    VkDeviceSize offset = VMA_DEBUG_MARGIN;

    // In ring-buffer mode the 2nd vector occupies the lower offsets - walk it first.
    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        const size_t suballoc2ndCount = suballocations2nd.size();
        size_t nullItem2ndCount = 0;
        for(size_t i = 0; i < suballoc2ndCount; ++i)
        {
            const VmaSuballocation& suballoc = suballocations2nd[i];
            const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

            VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
            VMA_VALIDATE(suballoc.offset >= offset);

            if(!currFree)
            {
                VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
                VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
                sumUsedSize += suballoc.size;
            }
            else
            {
                ++nullItem2ndCount;
            }

            offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
        }

        VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
    }

    // The leading m_1stNullItemsBeginCount items of the 1st vector must all be null.
    for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
    {
        const VmaSuballocation& suballoc = suballocations1st[i];
        VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
            suballoc.hAllocation == VK_NULL_HANDLE);
    }

    size_t nullItem1stCount = m_1stNullItemsBeginCount;

    for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
    {
        const VmaSuballocation& suballoc = suballocations1st[i];
        const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

        VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
        VMA_VALIDATE(suballoc.offset >= offset);
        // NOTE(review): i starts at m_1stNullItemsBeginCount, so this condition is
        // always true - it looks like a leftover from an earlier loop shape.
        VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);

        if(!currFree)
        {
            VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
            VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
            sumUsedSize += suballoc.size;
        }
        else
        {
            ++nullItem1stCount;
        }

        offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
    }
    VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);

    // In double-stack mode the 2nd vector grows downward from the end of the block,
    // so iterate it in reverse to keep offsets increasing.
    if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        const size_t suballoc2ndCount = suballocations2nd.size();
        size_t nullItem2ndCount = 0;
        for(size_t i = suballoc2ndCount; i--; )
        {
            const VmaSuballocation& suballoc = suballocations2nd[i];
            const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

            VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
            VMA_VALIDATE(suballoc.offset >= offset);

            if(!currFree)
            {
                VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
                VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
                sumUsedSize += suballoc.size;
            }
            else
            {
                ++nullItem2ndCount;
            }

            offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
        }

        VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
    }

    VMA_VALIDATE(offset <= GetSize());
    VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);

    return true;
}
    8777 
    8778 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
    8779 {
    8780  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
    8781  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
    8782 }
    8783 
    8784 VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
    8785 {
    8786  const VkDeviceSize size = GetSize();
    8787 
    8788  /*
    8789  We don't consider gaps inside allocation vectors with freed allocations because
    8790  they are not suitable for reuse in linear allocator. We consider only space that
    8791  is available for new allocations.
    8792  */
    8793  if(IsEmpty())
    8794  {
    8795  return size;
    8796  }
    8797 
    8798  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8799 
    8800  switch(m_2ndVectorMode)
    8801  {
    8802  case SECOND_VECTOR_EMPTY:
    8803  /*
    8804  Available space is after end of 1st, as well as before beginning of 1st (which
    8805  whould make it a ring buffer).
    8806  */
    8807  {
    8808  const size_t suballocations1stCount = suballocations1st.size();
    8809  VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
    8810  const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
    8811  const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
    8812  return VMA_MAX(
    8813  firstSuballoc.offset,
    8814  size - (lastSuballoc.offset + lastSuballoc.size));
    8815  }
    8816  break;
    8817 
    8818  case SECOND_VECTOR_RING_BUFFER:
    8819  /*
    8820  Available space is only between end of 2nd and beginning of 1st.
    8821  */
    8822  {
    8823  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8824  const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
    8825  const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
    8826  return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
    8827  }
    8828  break;
    8829 
    8830  case SECOND_VECTOR_DOUBLE_STACK:
    8831  /*
    8832  Available space is only between end of 1st and top of 2nd.
    8833  */
    8834  {
    8835  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8836  const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
    8837  const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
    8838  return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
    8839  }
    8840  break;
    8841 
    8842  default:
    8843  VMA_ASSERT(0);
    8844  return 0;
    8845  }
    8846 }
    8847 
    8848 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
    8849 {
    8850  const VkDeviceSize size = GetSize();
    8851  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8852  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8853  const size_t suballoc1stCount = suballocations1st.size();
    8854  const size_t suballoc2ndCount = suballocations2nd.size();
    8855 
    8856  outInfo.blockCount = 1;
    8857  outInfo.allocationCount = (uint32_t)GetAllocationCount();
    8858  outInfo.unusedRangeCount = 0;
    8859  outInfo.usedBytes = 0;
    8860  outInfo.allocationSizeMin = UINT64_MAX;
    8861  outInfo.allocationSizeMax = 0;
    8862  outInfo.unusedRangeSizeMin = UINT64_MAX;
    8863  outInfo.unusedRangeSizeMax = 0;
    8864 
    8865  VkDeviceSize lastOffset = 0;
    8866 
    8867  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8868  {
    8869  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    8870  size_t nextAlloc2ndIndex = 0;
    8871  while(lastOffset < freeSpace2ndTo1stEnd)
    8872  {
    8873  // Find next non-null allocation or move nextAllocIndex to the end.
    8874  while(nextAlloc2ndIndex < suballoc2ndCount &&
    8875  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8876  {
    8877  ++nextAlloc2ndIndex;
    8878  }
    8879 
    8880  // Found non-null allocation.
    8881  if(nextAlloc2ndIndex < suballoc2ndCount)
    8882  {
    8883  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8884 
    8885  // 1. Process free space before this allocation.
    8886  if(lastOffset < suballoc.offset)
    8887  {
    8888  // There is free space from lastOffset to suballoc.offset.
    8889  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8890  ++outInfo.unusedRangeCount;
    8891  outInfo.unusedBytes += unusedRangeSize;
    8892  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8893  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8894  }
    8895 
    8896  // 2. Process this allocation.
    8897  // There is allocation with suballoc.offset, suballoc.size.
    8898  outInfo.usedBytes += suballoc.size;
    8899  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    8900  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
    8901 
    8902  // 3. Prepare for next iteration.
    8903  lastOffset = suballoc.offset + suballoc.size;
    8904  ++nextAlloc2ndIndex;
    8905  }
    8906  // We are at the end.
    8907  else
    8908  {
    8909  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    8910  if(lastOffset < freeSpace2ndTo1stEnd)
    8911  {
    8912  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    8913  ++outInfo.unusedRangeCount;
    8914  outInfo.unusedBytes += unusedRangeSize;
    8915  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8916  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8917  }
    8918 
    8919  // End of loop.
    8920  lastOffset = freeSpace2ndTo1stEnd;
    8921  }
    8922  }
    8923  }
    8924 
    8925  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    8926  const VkDeviceSize freeSpace1stTo2ndEnd =
    8927  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    8928  while(lastOffset < freeSpace1stTo2ndEnd)
    8929  {
    8930  // Find next non-null allocation or move nextAllocIndex to the end.
    8931  while(nextAlloc1stIndex < suballoc1stCount &&
    8932  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    8933  {
    8934  ++nextAlloc1stIndex;
    8935  }
    8936 
    8937  // Found non-null allocation.
    8938  if(nextAlloc1stIndex < suballoc1stCount)
    8939  {
    8940  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    8941 
    8942  // 1. Process free space before this allocation.
    8943  if(lastOffset < suballoc.offset)
    8944  {
    8945  // There is free space from lastOffset to suballoc.offset.
    8946  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8947  ++outInfo.unusedRangeCount;
    8948  outInfo.unusedBytes += unusedRangeSize;
    8949  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8950  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8951  }
    8952 
    8953  // 2. Process this allocation.
    8954  // There is allocation with suballoc.offset, suballoc.size.
    8955  outInfo.usedBytes += suballoc.size;
    8956  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    8957  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
    8958 
    8959  // 3. Prepare for next iteration.
    8960  lastOffset = suballoc.offset + suballoc.size;
    8961  ++nextAlloc1stIndex;
    8962  }
    8963  // We are at the end.
    8964  else
    8965  {
    8966  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    8967  if(lastOffset < freeSpace1stTo2ndEnd)
    8968  {
    8969  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    8970  ++outInfo.unusedRangeCount;
    8971  outInfo.unusedBytes += unusedRangeSize;
    8972  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8973  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8974  }
    8975 
    8976  // End of loop.
    8977  lastOffset = freeSpace1stTo2ndEnd;
    8978  }
    8979  }
    8980 
    8981  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8982  {
    8983  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    8984  while(lastOffset < size)
    8985  {
    8986  // Find next non-null allocation or move nextAllocIndex to the end.
    8987  while(nextAlloc2ndIndex != SIZE_MAX &&
    8988  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8989  {
    8990  --nextAlloc2ndIndex;
    8991  }
    8992 
    8993  // Found non-null allocation.
    8994  if(nextAlloc2ndIndex != SIZE_MAX)
    8995  {
    8996  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8997 
    8998  // 1. Process free space before this allocation.
    8999  if(lastOffset < suballoc.offset)
    9000  {
    9001  // There is free space from lastOffset to suballoc.offset.
    9002  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9003  ++outInfo.unusedRangeCount;
    9004  outInfo.unusedBytes += unusedRangeSize;
    9005  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    9006  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    9007  }
    9008 
    9009  // 2. Process this allocation.
    9010  // There is allocation with suballoc.offset, suballoc.size.
    9011  outInfo.usedBytes += suballoc.size;
    9012  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    9013  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
    9014 
    9015  // 3. Prepare for next iteration.
    9016  lastOffset = suballoc.offset + suballoc.size;
    9017  --nextAlloc2ndIndex;
    9018  }
    9019  // We are at the end.
    9020  else
    9021  {
    9022  // There is free space from lastOffset to size.
    9023  if(lastOffset < size)
    9024  {
    9025  const VkDeviceSize unusedRangeSize = size - lastOffset;
    9026  ++outInfo.unusedRangeCount;
    9027  outInfo.unusedBytes += unusedRangeSize;
    9028  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    9029  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    9030  }
    9031 
    9032  // End of loop.
    9033  lastOffset = size;
    9034  }
    9035  }
    9036  }
    9037 
    9038  outInfo.unusedBytes = size - outInfo.usedBytes;
    9039 }
    9040 
    9041 void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
    9042 {
    9043  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    9044  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9045  const VkDeviceSize size = GetSize();
    9046  const size_t suballoc1stCount = suballocations1st.size();
    9047  const size_t suballoc2ndCount = suballocations2nd.size();
    9048 
    9049  inoutStats.size += size;
    9050 
    9051  VkDeviceSize lastOffset = 0;
    9052 
    9053  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    9054  {
    9055  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    9056  size_t nextAlloc2ndIndex = m_1stNullItemsBeginCount;
    9057  while(lastOffset < freeSpace2ndTo1stEnd)
    9058  {
    9059  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    9060  while(nextAlloc2ndIndex < suballoc2ndCount &&
    9061  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    9062  {
    9063  ++nextAlloc2ndIndex;
    9064  }
    9065 
    9066  // Found non-null allocation.
    9067  if(nextAlloc2ndIndex < suballoc2ndCount)
    9068  {
    9069  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    9070 
    9071  // 1. Process free space before this allocation.
    9072  if(lastOffset < suballoc.offset)
    9073  {
    9074  // There is free space from lastOffset to suballoc.offset.
    9075  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9076  inoutStats.unusedSize += unusedRangeSize;
    9077  ++inoutStats.unusedRangeCount;
    9078  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    9079  }
    9080 
    9081  // 2. Process this allocation.
    9082  // There is allocation with suballoc.offset, suballoc.size.
    9083  ++inoutStats.allocationCount;
    9084 
    9085  // 3. Prepare for next iteration.
    9086  lastOffset = suballoc.offset + suballoc.size;
    9087  ++nextAlloc2ndIndex;
    9088  }
    9089  // We are at the end.
    9090  else
    9091  {
    9092  if(lastOffset < freeSpace2ndTo1stEnd)
    9093  {
    9094  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    9095  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    9096  inoutStats.unusedSize += unusedRangeSize;
    9097  ++inoutStats.unusedRangeCount;
    9098  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    9099  }
    9100 
    9101  // End of loop.
    9102  lastOffset = freeSpace2ndTo1stEnd;
    9103  }
    9104  }
    9105  }
    9106 
    9107  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    9108  const VkDeviceSize freeSpace1stTo2ndEnd =
    9109  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    9110  while(lastOffset < freeSpace1stTo2ndEnd)
    9111  {
    9112  // Find next non-null allocation or move nextAllocIndex to the end.
    9113  while(nextAlloc1stIndex < suballoc1stCount &&
    9114  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    9115  {
    9116  ++nextAlloc1stIndex;
    9117  }
    9118 
    9119  // Found non-null allocation.
    9120  if(nextAlloc1stIndex < suballoc1stCount)
    9121  {
    9122  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    9123 
    9124  // 1. Process free space before this allocation.
    9125  if(lastOffset < suballoc.offset)
    9126  {
    9127  // There is free space from lastOffset to suballoc.offset.
    9128  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9129  inoutStats.unusedSize += unusedRangeSize;
    9130  ++inoutStats.unusedRangeCount;
    9131  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    9132  }
    9133 
    9134  // 2. Process this allocation.
    9135  // There is allocation with suballoc.offset, suballoc.size.
    9136  ++inoutStats.allocationCount;
    9137 
    9138  // 3. Prepare for next iteration.
    9139  lastOffset = suballoc.offset + suballoc.size;
    9140  ++nextAlloc1stIndex;
    9141  }
    9142  // We are at the end.
    9143  else
    9144  {
    9145  if(lastOffset < freeSpace1stTo2ndEnd)
    9146  {
    9147  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    9148  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    9149  inoutStats.unusedSize += unusedRangeSize;
    9150  ++inoutStats.unusedRangeCount;
    9151  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    9152  }
    9153 
    9154  // End of loop.
    9155  lastOffset = freeSpace1stTo2ndEnd;
    9156  }
    9157  }
    9158 
    9159  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    9160  {
    9161  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    9162  while(lastOffset < size)
    9163  {
    9164  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    9165  while(nextAlloc2ndIndex != SIZE_MAX &&
    9166  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    9167  {
    9168  --nextAlloc2ndIndex;
    9169  }
    9170 
    9171  // Found non-null allocation.
    9172  if(nextAlloc2ndIndex != SIZE_MAX)
    9173  {
    9174  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    9175 
    9176  // 1. Process free space before this allocation.
    9177  if(lastOffset < suballoc.offset)
    9178  {
    9179  // There is free space from lastOffset to suballoc.offset.
    9180  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9181  inoutStats.unusedSize += unusedRangeSize;
    9182  ++inoutStats.unusedRangeCount;
    9183  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    9184  }
    9185 
    9186  // 2. Process this allocation.
    9187  // There is allocation with suballoc.offset, suballoc.size.
    9188  ++inoutStats.allocationCount;
    9189 
    9190  // 3. Prepare for next iteration.
    9191  lastOffset = suballoc.offset + suballoc.size;
    9192  --nextAlloc2ndIndex;
    9193  }
    9194  // We are at the end.
    9195  else
    9196  {
    9197  if(lastOffset < size)
    9198  {
    9199  // There is free space from lastOffset to size.
    9200  const VkDeviceSize unusedRangeSize = size - lastOffset;
    9201  inoutStats.unusedSize += unusedRangeSize;
    9202  ++inoutStats.unusedRangeCount;
    9203  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    9204  }
    9205 
    9206  // End of loop.
    9207  lastOffset = size;
    9208  }
    9209  }
    9210  }
    9211 }
    9212 
    9213 #if VMA_STATS_STRING_ENABLED
    9214 void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
    9215 {
    9216  const VkDeviceSize size = GetSize();
    9217  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    9218  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9219  const size_t suballoc1stCount = suballocations1st.size();
    9220  const size_t suballoc2ndCount = suballocations2nd.size();
    9221 
    9222  // FIRST PASS
    9223 
    9224  size_t unusedRangeCount = 0;
    9225  VkDeviceSize usedBytes = 0;
    9226 
    9227  VkDeviceSize lastOffset = 0;
    9228 
    9229  size_t alloc2ndCount = 0;
    9230  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    9231  {
    9232  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    9233  size_t nextAlloc2ndIndex = 0;
    9234  while(lastOffset < freeSpace2ndTo1stEnd)
    9235  {
    9236  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    9237  while(nextAlloc2ndIndex < suballoc2ndCount &&
    9238  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    9239  {
    9240  ++nextAlloc2ndIndex;
    9241  }
    9242 
    9243  // Found non-null allocation.
    9244  if(nextAlloc2ndIndex < suballoc2ndCount)
    9245  {
    9246  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    9247 
    9248  // 1. Process free space before this allocation.
    9249  if(lastOffset < suballoc.offset)
    9250  {
    9251  // There is free space from lastOffset to suballoc.offset.
    9252  ++unusedRangeCount;
    9253  }
    9254 
    9255  // 2. Process this allocation.
    9256  // There is allocation with suballoc.offset, suballoc.size.
    9257  ++alloc2ndCount;
    9258  usedBytes += suballoc.size;
    9259 
    9260  // 3. Prepare for next iteration.
    9261  lastOffset = suballoc.offset + suballoc.size;
    9262  ++nextAlloc2ndIndex;
    9263  }
    9264  // We are at the end.
    9265  else
    9266  {
    9267  if(lastOffset < freeSpace2ndTo1stEnd)
    9268  {
    9269  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    9270  ++unusedRangeCount;
    9271  }
    9272 
    9273  // End of loop.
    9274  lastOffset = freeSpace2ndTo1stEnd;
    9275  }
    9276  }
    9277  }
    9278 
    9279  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    9280  size_t alloc1stCount = 0;
    9281  const VkDeviceSize freeSpace1stTo2ndEnd =
    9282  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    9283  while(lastOffset < freeSpace1stTo2ndEnd)
    9284  {
    9285  // Find next non-null allocation or move nextAllocIndex to the end.
    9286  while(nextAlloc1stIndex < suballoc1stCount &&
    9287  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    9288  {
    9289  ++nextAlloc1stIndex;
    9290  }
    9291 
    9292  // Found non-null allocation.
    9293  if(nextAlloc1stIndex < suballoc1stCount)
    9294  {
    9295  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    9296 
    9297  // 1. Process free space before this allocation.
    9298  if(lastOffset < suballoc.offset)
    9299  {
    9300  // There is free space from lastOffset to suballoc.offset.
    9301  ++unusedRangeCount;
    9302  }
    9303 
    9304  // 2. Process this allocation.
    9305  // There is allocation with suballoc.offset, suballoc.size.
    9306  ++alloc1stCount;
    9307  usedBytes += suballoc.size;
    9308 
    9309  // 3. Prepare for next iteration.
    9310  lastOffset = suballoc.offset + suballoc.size;
    9311  ++nextAlloc1stIndex;
    9312  }
    9313  // We are at the end.
    9314  else
    9315  {
    9316  if(lastOffset < size)
    9317  {
    9318  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    9319  ++unusedRangeCount;
    9320  }
    9321 
    9322  // End of loop.
    9323  lastOffset = freeSpace1stTo2ndEnd;
    9324  }
    9325  }
    9326 
    9327  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    9328  {
    9329  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    9330  while(lastOffset < size)
    9331  {
    9332  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    9333  while(nextAlloc2ndIndex != SIZE_MAX &&
    9334  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    9335  {
    9336  --nextAlloc2ndIndex;
    9337  }
    9338 
    9339  // Found non-null allocation.
    9340  if(nextAlloc2ndIndex != SIZE_MAX)
    9341  {
    9342  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    9343 
    9344  // 1. Process free space before this allocation.
    9345  if(lastOffset < suballoc.offset)
    9346  {
    9347  // There is free space from lastOffset to suballoc.offset.
    9348  ++unusedRangeCount;
    9349  }
    9350 
    9351  // 2. Process this allocation.
    9352  // There is allocation with suballoc.offset, suballoc.size.
    9353  ++alloc2ndCount;
    9354  usedBytes += suballoc.size;
    9355 
    9356  // 3. Prepare for next iteration.
    9357  lastOffset = suballoc.offset + suballoc.size;
    9358  --nextAlloc2ndIndex;
    9359  }
    9360  // We are at the end.
    9361  else
    9362  {
    9363  if(lastOffset < size)
    9364  {
    9365  // There is free space from lastOffset to size.
    9366  ++unusedRangeCount;
    9367  }
    9368 
    9369  // End of loop.
    9370  lastOffset = size;
    9371  }
    9372  }
    9373  }
    9374 
    9375  const VkDeviceSize unusedBytes = size - usedBytes;
    9376  PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);
    9377 
    9378  // SECOND PASS
    9379  lastOffset = 0;
    9380 
    9381  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    9382  {
    9383  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    9384  size_t nextAlloc2ndIndex = 0;
    9385  while(lastOffset < freeSpace2ndTo1stEnd)
    9386  {
    9387  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    9388  while(nextAlloc2ndIndex < suballoc2ndCount &&
    9389  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    9390  {
    9391  ++nextAlloc2ndIndex;
    9392  }
    9393 
    9394  // Found non-null allocation.
    9395  if(nextAlloc2ndIndex < suballoc2ndCount)
    9396  {
    9397  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    9398 
    9399  // 1. Process free space before this allocation.
    9400  if(lastOffset < suballoc.offset)
    9401  {
    9402  // There is free space from lastOffset to suballoc.offset.
    9403  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9404  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    9405  }
    9406 
    9407  // 2. Process this allocation.
    9408  // There is allocation with suballoc.offset, suballoc.size.
    9409  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
    9410 
    9411  // 3. Prepare for next iteration.
    9412  lastOffset = suballoc.offset + suballoc.size;
    9413  ++nextAlloc2ndIndex;
    9414  }
    9415  // We are at the end.
    9416  else
    9417  {
    9418  if(lastOffset < freeSpace2ndTo1stEnd)
    9419  {
    9420  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    9421  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    9422  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    9423  }
    9424 
    9425  // End of loop.
    9426  lastOffset = freeSpace2ndTo1stEnd;
    9427  }
    9428  }
    9429  }
    9430 
    9431  nextAlloc1stIndex = m_1stNullItemsBeginCount;
    9432  while(lastOffset < freeSpace1stTo2ndEnd)
    9433  {
    9434  // Find next non-null allocation or move nextAllocIndex to the end.
    9435  while(nextAlloc1stIndex < suballoc1stCount &&
    9436  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    9437  {
    9438  ++nextAlloc1stIndex;
    9439  }
    9440 
    9441  // Found non-null allocation.
    9442  if(nextAlloc1stIndex < suballoc1stCount)
    9443  {
    9444  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    9445 
    9446  // 1. Process free space before this allocation.
    9447  if(lastOffset < suballoc.offset)
    9448  {
    9449  // There is free space from lastOffset to suballoc.offset.
    9450  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9451  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    9452  }
    9453 
    9454  // 2. Process this allocation.
    9455  // There is allocation with suballoc.offset, suballoc.size.
    9456  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
    9457 
    9458  // 3. Prepare for next iteration.
    9459  lastOffset = suballoc.offset + suballoc.size;
    9460  ++nextAlloc1stIndex;
    9461  }
    9462  // We are at the end.
    9463  else
    9464  {
    9465  if(lastOffset < freeSpace1stTo2ndEnd)
    9466  {
    9467  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    9468  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    9469  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    9470  }
    9471 
    9472  // End of loop.
    9473  lastOffset = freeSpace1stTo2ndEnd;
    9474  }
    9475  }
    9476 
    9477  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    9478  {
    9479  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    9480  while(lastOffset < size)
    9481  {
    9482  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    9483  while(nextAlloc2ndIndex != SIZE_MAX &&
    9484  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    9485  {
    9486  --nextAlloc2ndIndex;
    9487  }
    9488 
    9489  // Found non-null allocation.
    9490  if(nextAlloc2ndIndex != SIZE_MAX)
    9491  {
    9492  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    9493 
    9494  // 1. Process free space before this allocation.
    9495  if(lastOffset < suballoc.offset)
    9496  {
    9497  // There is free space from lastOffset to suballoc.offset.
    9498  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9499  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    9500  }
    9501 
    9502  // 2. Process this allocation.
    9503  // There is allocation with suballoc.offset, suballoc.size.
    9504  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
    9505 
    9506  // 3. Prepare for next iteration.
    9507  lastOffset = suballoc.offset + suballoc.size;
    9508  --nextAlloc2ndIndex;
    9509  }
    9510  // We are at the end.
    9511  else
    9512  {
    9513  if(lastOffset < size)
    9514  {
    9515  // There is free space from lastOffset to size.
    9516  const VkDeviceSize unusedRangeSize = size - lastOffset;
    9517  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    9518  }
    9519 
    9520  // End of loop.
    9521  lastOffset = size;
    9522  }
    9523  }
    9524  }
    9525 
    9526  PrintDetailedMap_End(json);
    9527 }
    9528 #endif // #if VMA_STATS_STRING_ENABLED
    9529 
// Tries to find a place for an allocation of allocSize/allocAlignment inside
// this linear block, without modifying the metadata. On success fills
// *pAllocationRequest (offset, free/lost-item accounting) and returns true;
// returns false if the allocation cannot be made.
// upperAddress == true allocates from the top of the block (double stack);
// canMakeOtherLost permits counting lost-able allocations as reclaimable.
// NOTE(review): the 'strategy' parameter is not referenced in this body —
// presumably the linear algorithm ignores allocation strategy flags; confirm.
bool VmaBlockMetadata_Linear::CreateAllocationRequest(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    bool upperAddress,
    VmaSuballocationType allocType,
    bool canMakeOtherLost,
    uint32_t strategy,
    VmaAllocationRequest* pAllocationRequest)
{
    VMA_ASSERT(allocSize > 0);
    VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(pAllocationRequest != VMA_NULL);
    VMA_HEAVY_ASSERT(Validate());

    const VkDeviceSize size = GetSize();
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(upperAddress)
    {
        // Upper-address allocation is only valid in double-stack usage; a block
        // already used as a ring buffer cannot also serve as a double stack.
        if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
        {
            VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
            return false;
        }

        // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
        if(allocSize > size)
        {
            return false;
        }
        VkDeviceSize resultBaseOffset = size - allocSize;
        if(!suballocations2nd.empty())
        {
            const VmaSuballocation& lastSuballoc = suballocations2nd.back();
            resultBaseOffset = lastSuballoc.offset - allocSize;
            if(allocSize > lastSuballoc.offset)
            {
                // Does not fit below the current top of the upper stack.
                return false;
            }
        }

        // Start from offset equal to end of free space.
        VkDeviceSize resultOffset = resultBaseOffset;

        // Apply VMA_DEBUG_MARGIN at the end.
        if(VMA_DEBUG_MARGIN > 0)
        {
            if(resultOffset < VMA_DEBUG_MARGIN)
            {
                return false;
            }
            resultOffset -= VMA_DEBUG_MARGIN;
        }

        // Apply alignment. Growing downwards, so align down.
        resultOffset = VmaAlignDown(resultOffset, allocAlignment);

        // Check next suballocations from 2nd for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1 && !suballocations2nd.empty())
        {
            bool bufferImageGranularityConflict = false;
            for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
            {
                const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
                if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
            }
        }

        // There is enough free space.
        const VkDeviceSize endOf1st = !suballocations1st.empty() ?
            suballocations1st.back().offset + suballocations1st.back().size :
            0;
        if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
        {
            // Check previous suballocations for BufferImageGranularity conflicts.
            // If conflict exists, allocation cannot be made here.
            if(bufferImageGranularity > 1)
            {
                for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
                {
                    const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
                    if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
                    {
                        if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
                        {
                            return false;
                        }
                    }
                    else
                    {
                        // Already on next page.
                        break;
                    }
                }
            }

            // All tests passed: Success.
            pAllocationRequest->offset = resultOffset;
            // Free space spans from end of 1st vector to the previous top of the
            // upper stack (resultBaseOffset + allocSize).
            pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
            pAllocationRequest->sumItemSize = 0;
            // pAllocationRequest->item unused.
            pAllocationRequest->itemsToMakeLostCount = 0;
            return true;
        }
    }
    else // !upperAddress
    {
        if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
        {
            // Try to allocate at the end of 1st vector.

            VkDeviceSize resultBaseOffset = 0;
            if(!suballocations1st.empty())
            {
                const VmaSuballocation& lastSuballoc = suballocations1st.back();
                resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
            }

            // Start from offset equal to beginning of free space.
            VkDeviceSize resultOffset = resultBaseOffset;

            // Apply VMA_DEBUG_MARGIN at the beginning.
            if(VMA_DEBUG_MARGIN > 0)
            {
                resultOffset += VMA_DEBUG_MARGIN;
            }

            // Apply alignment.
            resultOffset = VmaAlignUp(resultOffset, allocAlignment);

            // Check previous suballocations for BufferImageGranularity conflicts.
            // Make bigger alignment if necessary.
            if(bufferImageGranularity > 1 && !suballocations1st.empty())
            {
                bool bufferImageGranularityConflict = false;
                for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
                {
                    const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
                    if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
                    {
                        if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                        {
                            bufferImageGranularityConflict = true;
                            break;
                        }
                    }
                    else
                        // Already on previous page.
                        break;
                }
                if(bufferImageGranularityConflict)
                {
                    resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
                }
            }

            // Free space ends at the bottom of the upper stack in double-stack
            // mode, otherwise at the end of the block.
            const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
                suballocations2nd.back().offset : size;

            // There is enough free space at the end after alignment.
            if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
            {
                // Check next suballocations for BufferImageGranularity conflicts.
                // If conflict exists, allocation cannot be made here.
                if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
                {
                    for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
                    {
                        const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
                        if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                        {
                            if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                            {
                                return false;
                            }
                        }
                        else
                        {
                            // Already on previous page.
                            break;
                        }
                    }
                }

                // All tests passed: Success.
                pAllocationRequest->offset = resultOffset;
                pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
                pAllocationRequest->sumItemSize = 0;
                // pAllocationRequest->item unused.
                pAllocationRequest->itemsToMakeLostCount = 0;
                return true;
            }
        }

        // Wrap-around to end of 2nd vector. Try to allocate there, watching for the
        // beginning of 1st vector as the end of free space.
        if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
        {
            VMA_ASSERT(!suballocations1st.empty());

            VkDeviceSize resultBaseOffset = 0;
            if(!suballocations2nd.empty())
            {
                const VmaSuballocation& lastSuballoc = suballocations2nd.back();
                resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
            }

            // Start from offset equal to beginning of free space.
            VkDeviceSize resultOffset = resultBaseOffset;

            // Apply VMA_DEBUG_MARGIN at the beginning.
            if(VMA_DEBUG_MARGIN > 0)
            {
                resultOffset += VMA_DEBUG_MARGIN;
            }

            // Apply alignment.
            resultOffset = VmaAlignUp(resultOffset, allocAlignment);

            // Check previous suballocations for BufferImageGranularity conflicts.
            // Make bigger alignment if necessary.
            if(bufferImageGranularity > 1 && !suballocations2nd.empty())
            {
                bool bufferImageGranularityConflict = false;
                for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
                {
                    const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
                    if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
                    {
                        if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                        {
                            bufferImageGranularityConflict = true;
                            break;
                        }
                    }
                    else
                        // Already on previous page.
                        break;
                }
                if(bufferImageGranularityConflict)
                {
                    resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
                }
            }

            pAllocationRequest->itemsToMakeLostCount = 0;
            pAllocationRequest->sumItemSize = 0;
            size_t index1st = m_1stNullItemsBeginCount;

            if(canMakeOtherLost)
            {
                // Walk allocations at the beginning of 1st vector that collide with
                // the proposed range and count those that can be made lost.
                while(index1st < suballocations1st.size() &&
                    resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
                {
                    // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
                    const VmaSuballocation& suballoc = suballocations1st[index1st];
                    if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
                    {
                        // No problem.
                    }
                    else
                    {
                        VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
                        if(suballoc.hAllocation->CanBecomeLost() &&
                            suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                        {
                            ++pAllocationRequest->itemsToMakeLostCount;
                            pAllocationRequest->sumItemSize += suballoc.size;
                        }
                        else
                        {
                            return false;
                        }
                    }
                    ++index1st;
                }

                // Check next suballocations for BufferImageGranularity conflicts.
                // If conflict exists, we must mark more allocations lost or fail.
                if(bufferImageGranularity > 1)
                {
                    while(index1st < suballocations1st.size())
                    {
                        const VmaSuballocation& suballoc = suballocations1st[index1st];
                        if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
                        {
                            if(suballoc.hAllocation != VK_NULL_HANDLE)
                            {
                                // Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
                                if(suballoc.hAllocation->CanBecomeLost() &&
                                    suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                                {
                                    ++pAllocationRequest->itemsToMakeLostCount;
                                    pAllocationRequest->sumItemSize += suballoc.size;
                                }
                                else
                                {
                                    return false;
                                }
                            }
                        }
                        else
                        {
                            // Already on next page.
                            break;
                        }
                        ++index1st;
                    }
                }
            }

            // There is enough free space at the end after alignment.
            if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN < size) ||
                (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
            {
                // Check next suballocations for BufferImageGranularity conflicts.
                // If conflict exists, allocation cannot be made here.
                if(bufferImageGranularity > 1)
                {
                    for(size_t nextSuballocIndex = index1st;
                        nextSuballocIndex < suballocations1st.size();
                        nextSuballocIndex++)
                    {
                        const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
                        if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                        {
                            if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                            {
                                return false;
                            }
                        }
                        else
                        {
                            // Already on next page.
                            break;
                        }
                    }
                }

                // All tests passed: Success.
                pAllocationRequest->offset = resultOffset;
                // Free size: from end of 2nd vector to the first surviving item of
                // 1st (or end of block), minus items that will be made lost.
                pAllocationRequest->sumFreeSize =
                    (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
                    - resultBaseOffset
                    - pAllocationRequest->sumItemSize;
                // pAllocationRequest->item unused.
                return true;
            }
        }
    }

    return false;
}
    9902 
// Makes lost the allocations that CreateAllocationRequest scheduled for losing
// (pAllocationRequest->itemsToMakeLostCount of them). Returns true on success,
// false if any of them could not be made lost.
bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VmaAllocationRequest* pAllocationRequest)
{
    // Nothing was scheduled to be made lost - trivially done.
    if(pAllocationRequest->itemsToMakeLostCount == 0)
    {
        return true;
    }

    // Losing allocations is only done while the 2nd vector is empty or used as
    // a ring buffer - never while the block is used as a double stack.
    VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);

    // Walk the 1st vector starting right after the leading null items and turn
    // non-free suballocations into lost (null) items until the requested count
    // is reached.
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    size_t index1st = m_1stNullItemsBeginCount;
    size_t madeLostCount = 0;
    while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
    {
        VMA_ASSERT(index1st < suballocations1st.size());
        VmaSuballocation& suballoc = suballocations1st[index1st];
        if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
        {
            VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
            VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
            if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
            {
                // Turn the item into a null item in the middle of the 1st vector.
                suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
                suballoc.hAllocation = VK_NULL_HANDLE;
                m_SumFreeSize += suballoc.size;
                ++m_1stNullItemsMiddleCount;
                ++madeLostCount;
            }
            else
            {
                // One allocation refused to become lost - the whole request fails.
                return false;
            }
        }
        ++index1st;
    }

    CleanupAfterFree();
    //VMA_HEAVY_ASSERT(Validate()); // Already called by CleanupAfterFree().

    return true;
}
    9947 
    9948 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    9949 {
    9950  uint32_t lostAllocationCount = 0;
    9951 
    9952  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    9953  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
    9954  {
    9955  VmaSuballocation& suballoc = suballocations1st[i];
    9956  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
    9957  suballoc.hAllocation->CanBecomeLost() &&
    9958  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    9959  {
    9960  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    9961  suballoc.hAllocation = VK_NULL_HANDLE;
    9962  ++m_1stNullItemsMiddleCount;
    9963  m_SumFreeSize += suballoc.size;
    9964  ++lostAllocationCount;
    9965  }
    9966  }
    9967 
    9968  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9969  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
    9970  {
    9971  VmaSuballocation& suballoc = suballocations2nd[i];
    9972  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
    9973  suballoc.hAllocation->CanBecomeLost() &&
    9974  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    9975  {
    9976  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    9977  suballoc.hAllocation = VK_NULL_HANDLE;
    9978  ++m_2ndNullItemsCount;
    9979  ++lostAllocationCount;
    9980  }
    9981  }
    9982 
    9983  if(lostAllocationCount)
    9984  {
    9985  CleanupAfterFree();
    9986  }
    9987 
    9988  return lostAllocationCount;
    9989 }
    9990 
    9991 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
    9992 {
    9993  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    9994  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
    9995  {
    9996  const VmaSuballocation& suballoc = suballocations1st[i];
    9997  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    9998  {
    9999  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
    10000  {
    10001  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    10002  return VK_ERROR_VALIDATION_FAILED_EXT;
    10003  }
    10004  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
    10005  {
    10006  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    10007  return VK_ERROR_VALIDATION_FAILED_EXT;
    10008  }
    10009  }
    10010  }
    10011 
    10012  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    10013  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
    10014  {
    10015  const VmaSuballocation& suballoc = suballocations2nd[i];
    10016  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    10017  {
    10018  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
    10019  {
    10020  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    10021  return VK_ERROR_VALIDATION_FAILED_EXT;
    10022  }
    10023  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
    10024  {
    10025  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    10026  return VK_ERROR_VALIDATION_FAILED_EXT;
    10027  }
    10028  }
    10029  }
    10030 
    10031  return VK_SUCCESS;
    10032 }
    10033 
// Commits an allocation previously found by CreateAllocationRequest at
// request.offset. Depending on upperAddress and where the offset falls, the
// new suballocation goes to the end of the 1st vector, or to the 2nd vector
// (as upper stack or as the second part of the ring buffer).
void VmaBlockMetadata_Linear::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    bool upperAddress,
    VmaAllocation hAllocation)
{
    const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };

    if(upperAddress)
    {
        // Upper-address allocation: 2nd vector becomes/remains the upper stack.
        VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
            "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
        SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
        suballocations2nd.push_back(newSuballoc);
        m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
    }
    else
    {
        SuballocationVectorType& suballocations1st = AccessSuballocations1st();

        // First allocation.
        if(suballocations1st.empty())
        {
            suballocations1st.push_back(newSuballoc);
        }
        else
        {
            // New allocation at the end of 1st vector.
            if(request.offset >= suballocations1st.back().offset + suballocations1st.back().size)
            {
                // Check if it fits before the end of the block.
                VMA_ASSERT(request.offset + allocSize <= GetSize());
                suballocations1st.push_back(newSuballoc);
            }
            // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
            else if(request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset)
            {
                SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

                // Validate/advance the 2nd vector mode before pushing.
                switch(m_2ndVectorMode)
                {
                case SECOND_VECTOR_EMPTY:
                    // First allocation from second part ring buffer.
                    VMA_ASSERT(suballocations2nd.empty());
                    m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
                    break;
                case SECOND_VECTOR_RING_BUFFER:
                    // 2-part ring buffer is already started.
                    VMA_ASSERT(!suballocations2nd.empty());
                    break;
                case SECOND_VECTOR_DOUBLE_STACK:
                    VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
                    break;
                default:
                    VMA_ASSERT(0);
                }

                suballocations2nd.push_back(newSuballoc);
            }
            else
            {
                // Offset matches neither the end of the 1st vector nor the ring
                // buffer's second part - the request must be stale or corrupted.
                VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
            }
        }
    }

    m_SumFreeSize -= newSuballoc.size;
}
    10103 
    10104 void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
    10105 {
    10106  FreeAtOffset(allocation->GetOffset());
    10107 }
    10108 
// Frees the suballocation that starts exactly at `offset`.
// Fast paths handle the first item of the 1st vector and the last item of
// either vector; otherwise a binary search locates the item in the middle of
// the 1st, then the 2nd vector. Asserts if the offset matches no allocation.
void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
{
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(!suballocations1st.empty())
    {
        // First allocation: Mark it as next empty at the beginning.
        VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
        if(firstSuballoc.offset == offset)
        {
            firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
            firstSuballoc.hAllocation = VK_NULL_HANDLE;
            m_SumFreeSize += firstSuballoc.size;
            ++m_1stNullItemsBeginCount;
            CleanupAfterFree();
            return;
        }
    }

    // Last allocation in 2-part ring buffer or top of upper stack (same logic).
    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
        m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        VmaSuballocation& lastSuballoc = suballocations2nd.back();
        if(lastSuballoc.offset == offset)
        {
            m_SumFreeSize += lastSuballoc.size;
            suballocations2nd.pop_back();
            CleanupAfterFree();
            return;
        }
    }
    // Last allocation in 1st vector.
    else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
    {
        VmaSuballocation& lastSuballoc = suballocations1st.back();
        if(lastSuballoc.offset == offset)
        {
            m_SumFreeSize += lastSuballoc.size;
            suballocations1st.pop_back();
            CleanupAfterFree();
            return;
        }
    }

    // Item from the middle of 1st vector.
    {
        VmaSuballocation refSuballoc;
        refSuballoc.offset = offset;
        // Rest of members stays uninitialized intentionally for better performance.
        SuballocationVectorType::iterator it = VmaVectorFindSorted<VmaSuballocationOffsetLess>(
            suballocations1st.begin() + m_1stNullItemsBeginCount,
            suballocations1st.end(),
            refSuballoc);
        if(it != suballocations1st.end())
        {
            // Turn it into a null item; actual removal is deferred to CleanupAfterFree().
            it->type = VMA_SUBALLOCATION_TYPE_FREE;
            it->hAllocation = VK_NULL_HANDLE;
            ++m_1stNullItemsMiddleCount;
            m_SumFreeSize += it->size;
            CleanupAfterFree();
            return;
        }
    }

    if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
    {
        // Item from the middle of 2nd vector.
        VmaSuballocation refSuballoc;
        refSuballoc.offset = offset;
        // Rest of members stays uninitialized intentionally for better performance.
        // Comparator depends on mode: ascending offsets for ring buffer,
        // descending for the upper (double) stack.
        SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
            VmaVectorFindSorted<VmaSuballocationOffsetLess>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc) :
            VmaVectorFindSorted<VmaSuballocationOffsetGreater>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc);
        if(it != suballocations2nd.end())
        {
            it->type = VMA_SUBALLOCATION_TYPE_FREE;
            it->hAllocation = VK_NULL_HANDLE;
            ++m_2ndNullItemsCount;
            m_SumFreeSize += it->size;
            CleanupAfterFree();
            return;
        }
    }

    VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
}
    10197 
    10198 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
    10199 {
    10200  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
    10201  const size_t suballocCount = AccessSuballocations1st().size();
    10202  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
    10203 }
    10204 
// Housekeeping performed after every free or make-lost operation:
// extends the runs of null items at the edges of both vectors, compacts the
// 1st vector when null items dominate (ShouldCompact1st), and when the 1st
// vector drains completely, promotes the 2nd (ring buffer) vector to become
// the new 1st by flipping m_1stVectorIndex.
void VmaBlockMetadata_Linear::CleanupAfterFree()
{
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(IsEmpty())
    {
        // No live allocations at all - reset everything to the initial state.
        suballocations1st.clear();
        suballocations2nd.clear();
        m_1stNullItemsBeginCount = 0;
        m_1stNullItemsMiddleCount = 0;
        m_2ndNullItemsCount = 0;
        m_2ndVectorMode = SECOND_VECTOR_EMPTY;
    }
    else
    {
        const size_t suballoc1stCount = suballocations1st.size();
        const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
        VMA_ASSERT(nullItem1stCount <= suballoc1stCount);

        // Find more null items at the beginning of 1st vector.
        while(m_1stNullItemsBeginCount < suballoc1stCount &&
            suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
        {
            // A middle null item that now touches the leading run is reclassified.
            ++m_1stNullItemsBeginCount;
            --m_1stNullItemsMiddleCount;
        }

        // Find more null items at the end of 1st vector.
        while(m_1stNullItemsMiddleCount > 0 &&
            suballocations1st.back().hAllocation == VK_NULL_HANDLE)
        {
            --m_1stNullItemsMiddleCount;
            suballocations1st.pop_back();
        }

        // Find more null items at the end of 2nd vector.
        while(m_2ndNullItemsCount > 0 &&
            suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
        {
            --m_2ndNullItemsCount;
            suballocations2nd.pop_back();
        }

        if(ShouldCompact1st())
        {
            // Slide the non-null items to the front, preserving their order,
            // then shrink the vector so no null items remain.
            const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
            size_t srcIndex = m_1stNullItemsBeginCount;
            for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
            {
                while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
                {
                    ++srcIndex;
                }
                if(dstIndex != srcIndex)
                {
                    suballocations1st[dstIndex] = suballocations1st[srcIndex];
                }
                ++srcIndex;
            }
            suballocations1st.resize(nonNullItemCount);
            m_1stNullItemsBeginCount = 0;
            m_1stNullItemsMiddleCount = 0;
        }

        // 2nd vector became empty.
        if(suballocations2nd.empty())
        {
            m_2ndVectorMode = SECOND_VECTOR_EMPTY;
        }

        // 1st vector became empty.
        if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
        {
            suballocations1st.clear();
            m_1stNullItemsBeginCount = 0;

            if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
            {
                // Swap 1st with 2nd. Now 2nd is empty.
                m_2ndVectorMode = SECOND_VECTOR_EMPTY;
                m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
                // Leading null items of the promoted vector become the new
                // begin-count instead of middle-count.
                while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
                    suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
                {
                    ++m_1stNullItemsBeginCount;
                    --m_1stNullItemsMiddleCount;
                }
                m_2ndNullItemsCount = 0;
                m_1stVectorIndex ^= 1;
            }
        }
    }

    VMA_HEAVY_ASSERT(Validate());
}
    10301 
    10302 
    10304 // class VmaBlockMetadata_Buddy
    10305 
// Constructs an empty buddy metadata object. The tree itself (root node,
// usable size, level count) is created later in Init().
VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_Root(VMA_NULL),
    m_AllocationCount(0),
    m_FreeCount(1), // The whole (future) block counts as one free range.
    m_SumFreeSize(0)
{
    // Zero out all free-list head/tail pointers.
    memset(m_FreeList, 0, sizeof(m_FreeList));
}
    10315 
// Recursively destroys the whole node tree.
// NOTE(review): assumes Init() ran, so m_Root is non-null - DeleteNode
// dereferences its argument unconditionally; confirm callers never destroy
// an uninitialized instance.
VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
{
    DeleteNode(m_Root);
}
    10320 
    10321 void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
    10322 {
    10323  VmaBlockMetadata::Init(size);
    10324 
    10325  m_UsableSize = VmaPrevPow2(size);
    10326  m_SumFreeSize = m_UsableSize;
    10327 
    10328  // Calculate m_LevelCount.
    10329  m_LevelCount = 1;
    10330  while(m_LevelCount < MAX_LEVELS &&
    10331  LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
    10332  {
    10333  ++m_LevelCount;
    10334  }
    10335 
    10336  Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
    10337  rootNode->offset = 0;
    10338  rootNode->type = Node::TYPE_FREE;
    10339  rootNode->parent = VMA_NULL;
    10340  rootNode->buddy = VMA_NULL;
    10341 
    10342  m_Root = rootNode;
    10343  AddToFreeListFront(0, rootNode);
    10344 }
    10345 
// Checks internal consistency of the buddy tree and its per-level free lists.
// Returns false (via VMA_VALIDATE) on the first inconsistency found.
bool VmaBlockMetadata_Buddy::Validate() const
{
    // Validate tree.
    ValidationContext ctx;
    if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
    {
        VMA_VALIDATE(false && "ValidateNode failed.");
    }
    // Counters accumulated over the tree must match the cached members.
    VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
    VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);

    // Validate free node lists.
    for(uint32_t level = 0; level < m_LevelCount; ++level)
    {
        // The head of a non-empty list must have no predecessor.
        VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
            m_FreeList[level].front->free.prev == VMA_NULL);

        for(Node* node = m_FreeList[level].front;
            node != VMA_NULL;
            node = node->free.next)
        {
            VMA_VALIDATE(node->type == Node::TYPE_FREE);

            // Tail must match the list's back pointer; interior links must be
            // properly doubly linked.
            if(node->free.next == VMA_NULL)
            {
                VMA_VALIDATE(m_FreeList[level].back == node);
            }
            else
            {
                VMA_VALIDATE(node->free.next->free.prev == node);
            }
        }
    }

    // Validate that free lists at higher levels are empty.
    for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
    {
        VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
    }

    return true;
}
    10388 
    10389 VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
    10390 {
    10391  for(uint32_t level = 0; level < m_LevelCount; ++level)
    10392  {
    10393  if(m_FreeList[level].front != VMA_NULL)
    10394  {
    10395  return LevelToNodeSize(level);
    10396  }
    10397  }
    10398  return 0;
    10399 }
    10400 
    10401 void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
    10402 {
    10403  const VkDeviceSize unusableSize = GetUnusableSize();
    10404 
    10405  outInfo.blockCount = 1;
    10406 
    10407  outInfo.allocationCount = outInfo.unusedRangeCount = 0;
    10408  outInfo.usedBytes = outInfo.unusedBytes = 0;
    10409 
    10410  outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
    10411  outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
    10412  outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.
    10413 
    10414  CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));
    10415 
    10416  if(unusableSize > 0)
    10417  {
    10418  ++outInfo.unusedRangeCount;
    10419  outInfo.unusedBytes += unusableSize;
    10420  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
    10421  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
    10422  }
    10423 }
    10424 
    10425 void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
    10426 {
    10427  const VkDeviceSize unusableSize = GetUnusableSize();
    10428 
    10429  inoutStats.size += GetSize();
    10430  inoutStats.unusedSize += m_SumFreeSize + unusableSize;
    10431  inoutStats.allocationCount += m_AllocationCount;
    10432  inoutStats.unusedRangeCount += m_FreeCount;
    10433  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
    10434 
    10435  if(unusableSize > 0)
    10436  {
    10437  ++inoutStats.unusedRangeCount;
    10438  // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations.
    10439  }
    10440 }
    10441 
    10442 #if VMA_STATS_STRING_ENABLED
    10443 
    10444 void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
    10445 {
    10446  // TODO optimize
    10447  VmaStatInfo stat;
    10448  CalcAllocationStatInfo(stat);
    10449 
    10450  PrintDetailedMap_Begin(
    10451  json,
    10452  stat.unusedBytes,
    10453  stat.allocationCount,
    10454  stat.unusedRangeCount);
    10455 
    10456  PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));
    10457 
    10458  const VkDeviceSize unusableSize = GetUnusableSize();
    10459  if(unusableSize > 0)
    10460  {
    10461  PrintDetailedMap_UnusedRange(json,
    10462  m_UsableSize, // offset
    10463  unusableSize); // size
    10464  }
    10465 
    10466  PrintDetailedMap_End(json);
    10467 }
    10468 
    10469 #endif // #if VMA_STATS_STRING_ENABLED
    10470 
    10471 bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
    10472  uint32_t currentFrameIndex,
    10473  uint32_t frameInUseCount,
    10474  VkDeviceSize bufferImageGranularity,
    10475  VkDeviceSize allocSize,
    10476  VkDeviceSize allocAlignment,
    10477  bool upperAddress,
    10478  VmaSuballocationType allocType,
    10479  bool canMakeOtherLost,
    10480  uint32_t strategy,
    10481  VmaAllocationRequest* pAllocationRequest)
    10482 {
    10483  VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
    10484 
    10485  // Simple way to respect bufferImageGranularity. May be optimized some day.
    10486  // Whenever it might be an OPTIMAL image...
    10487  if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
    10488  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
    10489  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
    10490  {
    10491  allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
    10492  allocSize = VMA_MAX(allocSize, bufferImageGranularity);
    10493  }
    10494 
    10495  if(allocSize > m_UsableSize)
    10496  {
    10497  return false;
    10498  }
    10499 
    10500  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
    10501  for(uint32_t level = targetLevel + 1; level--; )
    10502  {
    10503  for(Node* freeNode = m_FreeList[level].front;
    10504  freeNode != VMA_NULL;
    10505  freeNode = freeNode->free.next)
    10506  {
    10507  if(freeNode->offset % allocAlignment == 0)
    10508  {
    10509  pAllocationRequest->offset = freeNode->offset;
    10510  pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
    10511  pAllocationRequest->sumItemSize = 0;
    10512  pAllocationRequest->itemsToMakeLostCount = 0;
    10513  pAllocationRequest->customData = (void*)(uintptr_t)level;
    10514  return true;
    10515  }
    10516  }
    10517  }
    10518 
    10519  return false;
    10520 }
    10521 
    10522 bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
    10523  uint32_t currentFrameIndex,
    10524  uint32_t frameInUseCount,
    10525  VmaAllocationRequest* pAllocationRequest)
    10526 {
    10527  /*
    10528  Lost allocations are not supported in buddy allocator at the moment.
    10529  Support might be added in the future.
    10530  */
    10531  return pAllocationRequest->itemsToMakeLostCount == 0;
    10532 }
    10533 
    10534 uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    10535 {
    10536  /*
    10537  Lost allocations are not supported in buddy allocator at the moment.
    10538  Support might be added in the future.
    10539  */
    10540  return 0;
    10541 }
    10542 
// Commits an allocation previously found by CreateAllocationRequest.
// request.customData carries the level of the free node that was chosen; that
// node is split repeatedly (always descending into the left child) until a
// node of exactly the target level is obtained, which is then converted into
// an allocation node.
void VmaBlockMetadata_Buddy::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    bool upperAddress,
    VmaAllocation hAllocation)
{
    const uint32_t targetLevel = AllocSizeToLevel(allocSize);
    uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;

    // Find the free node at currLevel that starts at the requested offset.
    Node* currNode = m_FreeList[currLevel].front;
    VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
    while(currNode->offset != request.offset)
    {
        currNode = currNode->free.next;
        VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
    }

    // Go down, splitting free nodes.
    while(currLevel < targetLevel)
    {
        // currNode is already first free node at currLevel.
        // Remove it from list of free nodes at this currLevel.
        RemoveFromFreeList(currLevel, currNode);

        const uint32_t childrenLevel = currLevel + 1;

        // Create two free sub-nodes.
        Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
        Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();

        leftChild->offset = currNode->offset;
        leftChild->type = Node::TYPE_FREE;
        leftChild->parent = currNode;
        leftChild->buddy = rightChild;

        rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
        rightChild->type = Node::TYPE_FREE;
        rightChild->parent = currNode;
        rightChild->buddy = leftChild;

        // Convert current currNode to split type.
        currNode->type = Node::TYPE_SPLIT;
        currNode->split.leftChild = leftChild;

        // Add child nodes to free list. Order is important!
        // Left child ends up at the front, so the next iteration picks it up.
        AddToFreeListFront(childrenLevel, rightChild);
        AddToFreeListFront(childrenLevel, leftChild);

        ++m_FreeCount;
        //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2.
        ++currLevel;
        currNode = m_FreeList[currLevel].front;

        /*
        We can be sure that currNode, as left child of node previously split,
        also fulfills the alignment requirement.
        */
    }

    // Remove from free list.
    VMA_ASSERT(currLevel == targetLevel &&
        currNode != VMA_NULL &&
        currNode->type == Node::TYPE_FREE);
    RemoveFromFreeList(currLevel, currNode);

    // Convert to allocation node.
    currNode->type = Node::TYPE_ALLOCATION;
    currNode->allocation.alloc = hAllocation;

    ++m_AllocationCount;
    --m_FreeCount;
    m_SumFreeSize -= allocSize;
}
    10617 
    10618 void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
    10619 {
    10620  if(node->type == Node::TYPE_SPLIT)
    10621  {
    10622  DeleteNode(node->split.leftChild->buddy);
    10623  DeleteNode(node->split.leftChild);
    10624  }
    10625 
    10626  vma_delete(GetAllocationCallbacks(), node);
    10627 }
    10628 
// Recursively validates one node of the buddy tree (parent/buddy links,
// offsets of children) and accumulates allocation/free statistics into `ctx`.
// `levelNodeSize` is the size of any node at `level`.
bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
{
    VMA_VALIDATE(level < m_LevelCount);
    VMA_VALIDATE(curr->parent == parent);
    // Exactly the root has no buddy; buddy links must be mutual.
    VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
    VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
    switch(curr->type)
    {
    case Node::TYPE_FREE:
        // curr->free.prev, next are validated separately.
        ctx.calculatedSumFreeSize += levelNodeSize;
        ++ctx.calculatedFreeCount;
        break;
    case Node::TYPE_ALLOCATION:
        ++ctx.calculatedAllocationCount;
        // Padding inside the node (node size minus allocation size) counts as free.
        ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
        VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE);
        break;
    case Node::TYPE_SPLIT:
        {
            const uint32_t childrenLevel = level + 1;
            const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
            // Left child starts where the parent starts...
            const Node* const leftChild = curr->split.leftChild;
            VMA_VALIDATE(leftChild != VMA_NULL);
            VMA_VALIDATE(leftChild->offset == curr->offset);
            if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
            {
                VMA_VALIDATE(false && "ValidateNode for left child failed.");
            }
            // ...and the right child immediately follows it.
            const Node* const rightChild = leftChild->buddy;
            VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
            if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
            {
                VMA_VALIDATE(false && "ValidateNode for right child failed.");
            }
        }
        break;
    default:
        return false;
    }

    return true;
}
    10672 
    10673 uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
    10674 {
    10675  // I know this could be optimized somehow e.g. by using std::log2p1 from C++20.
    10676  uint32_t level = 0;
    10677  VkDeviceSize currLevelNodeSize = m_UsableSize;
    10678  VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
    10679  while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
    10680  {
    10681  ++level;
    10682  currLevelNodeSize = nextLevelNodeSize;
    10683  nextLevelNodeSize = currLevelNodeSize >> 1;
    10684  }
    10685  return level;
    10686 }
    10687 
// Frees the allocation that starts at `offset`: walks the buddy tree from the
// root down to the allocation leaf covering that offset, marks it free, then
// merges freed buddy pairs back into their parent as far up as possible.
void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
{
    // Find node and level.
    Node* node = m_Root;
    VkDeviceSize nodeOffset = 0;
    uint32_t level = 0;
    VkDeviceSize levelNodeSize = LevelToNodeSize(0);
    while(node->type == Node::TYPE_SPLIT)
    {
        const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
        // Descend into whichever child's half-range contains `offset`.
        if(offset < nodeOffset + nextLevelSize)
        {
            node = node->split.leftChild;
        }
        else
        {
            node = node->split.leftChild->buddy;
            nodeOffset += nextLevelSize;
        }
        ++level;
        levelNodeSize = nextLevelSize;
    }

    VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
    // NOTE(review): this assert permits alloc == VK_NULL_HANDLE, yet
    // alloc->GetSize() below would dereference it - confirm callers never pass
    // a null handle here.
    VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);

    ++m_FreeCount;
    --m_AllocationCount;
    m_SumFreeSize += alloc->GetSize();

    node->type = Node::TYPE_FREE;

    // Join free nodes if possible.
    while(level > 0 && node->buddy->type == Node::TYPE_FREE)
    {
        RemoveFromFreeList(level, node->buddy);
        Node* const parent = node->parent;

        vma_delete(GetAllocationCallbacks(), node->buddy);
        vma_delete(GetAllocationCallbacks(), node);
        parent->type = Node::TYPE_FREE;

        node = parent;
        --level;
        //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
        --m_FreeCount; // Two free children became one free parent.
    }

    AddToFreeListFront(level, node);
}
    10738 
    10739 void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
    10740 {
    10741  switch(node->type)
    10742  {
    10743  case Node::TYPE_FREE:
    10744  ++outInfo.unusedRangeCount;
    10745  outInfo.unusedBytes += levelNodeSize;
    10746  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
    10747  outInfo.unusedRangeSizeMin = VMA_MAX(outInfo.unusedRangeSizeMin, levelNodeSize);
    10748  break;
    10749  case Node::TYPE_ALLOCATION:
    10750  {
    10751  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
    10752  ++outInfo.allocationCount;
    10753  outInfo.usedBytes += allocSize;
    10754  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
    10755  outInfo.allocationSizeMin = VMA_MAX(outInfo.allocationSizeMin, allocSize);
    10756 
    10757  const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
    10758  if(unusedRangeSize > 0)
    10759  {
    10760  ++outInfo.unusedRangeCount;
    10761  outInfo.unusedBytes += unusedRangeSize;
    10762  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
    10763  outInfo.unusedRangeSizeMin = VMA_MAX(outInfo.unusedRangeSizeMin, unusedRangeSize);
    10764  }
    10765  }
    10766  break;
    10767  case Node::TYPE_SPLIT:
    10768  {
    10769  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
    10770  const Node* const leftChild = node->split.leftChild;
    10771  CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
    10772  const Node* const rightChild = leftChild->buddy;
    10773  CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
    10774  }
    10775  break;
    10776  default:
    10777  VMA_ASSERT(0);
    10778  }
    10779 }
    10780 
    10781 void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
    10782 {
    10783  VMA_ASSERT(node->type == Node::TYPE_FREE);
    10784 
    10785  // List is empty.
    10786  Node* const frontNode = m_FreeList[level].front;
    10787  if(frontNode == VMA_NULL)
    10788  {
    10789  VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
    10790  node->free.prev = node->free.next = VMA_NULL;
    10791  m_FreeList[level].front = m_FreeList[level].back = node;
    10792  }
    10793  else
    10794  {
    10795  VMA_ASSERT(frontNode->free.prev == VMA_NULL);
    10796  node->free.prev = VMA_NULL;
    10797  node->free.next = frontNode;
    10798  frontNode->free.prev = node;
    10799  m_FreeList[level].front = node;
    10800  }
    10801 }
    10802 
    10803 void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
    10804 {
    10805  VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
    10806 
    10807  // It is at the front.
    10808  if(node->free.prev == VMA_NULL)
    10809  {
    10810  VMA_ASSERT(m_FreeList[level].front == node);
    10811  m_FreeList[level].front = node->free.next;
    10812  }
    10813  else
    10814  {
    10815  Node* const prevFreeNode = node->free.prev;
    10816  VMA_ASSERT(prevFreeNode->free.next == node);
    10817  prevFreeNode->free.next = node->free.next;
    10818  }
    10819 
    10820  // It is at the back.
    10821  if(node->free.next == VMA_NULL)
    10822  {
    10823  VMA_ASSERT(m_FreeList[level].back == node);
    10824  m_FreeList[level].back = node->free.prev;
    10825  }
    10826  else
    10827  {
    10828  Node* const nextFreeNode = node->free.next;
    10829  VMA_ASSERT(nextFreeNode->free.prev == node);
    10830  nextFreeNode->free.prev = node->free.prev;
    10831  }
    10832 }
    10833 
    10834 #if VMA_STATS_STRING_ENABLED
// Recursively emits the JSON detailed map for this subtree: free nodes as
// unused ranges, allocation nodes as allocations (plus any tail padding inside
// the node), split nodes by recursing into both half-size children.
void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
{
    switch(node->type)
    {
    case Node::TYPE_FREE:
        PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
        break;
    case Node::TYPE_ALLOCATION:
        {
            PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
            const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
            // The node may be larger than the allocation it holds; report the
            // remainder as an unused range.
            if(allocSize < levelNodeSize)
            {
                PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize);
            }
        }
        break;
    case Node::TYPE_SPLIT:
        {
            const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
            const Node* const leftChild = node->split.leftChild;
            PrintDetailedMapNode(json, leftChild, childrenNodeSize);
            const Node* const rightChild = leftChild->buddy;
            PrintDetailedMapNode(json, rightChild, childrenNodeSize);
        }
        break;
    default:
        VMA_ASSERT(0);
    }
}
    10865 #endif // #if VMA_STATS_STRING_ENABLED
    10866 
    10867 
    10869 // class VmaDeviceMemoryBlock
    10870 
// Creates the block object in an empty, unattached state; the VkDeviceMemory
// and the metadata object are attached later in Init().
// Note: the hAllocator parameter is not used in this constructor.
VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
    m_pMetadata(VMA_NULL),
    m_MemoryTypeIndex(UINT32_MAX),
    m_Id(0),
    m_hMemory(VK_NULL_HANDLE),
    m_MapCount(0),          // Reference count of Map() calls.
    m_pMappedData(VMA_NULL) // Non-null only while m_MapCount > 0.
{
}
    10880 
    10881 void VmaDeviceMemoryBlock::Init(
    10882  VmaAllocator hAllocator,
    10883  uint32_t newMemoryTypeIndex,
    10884  VkDeviceMemory newMemory,
    10885  VkDeviceSize newSize,
    10886  uint32_t id,
    10887  uint32_t algorithm)
    10888 {
    10889  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    10890 
    10891  m_MemoryTypeIndex = newMemoryTypeIndex;
    10892  m_Id = id;
    10893  m_hMemory = newMemory;
    10894 
    10895  switch(algorithm)
    10896  {
    10898  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
    10899  break;
    10901  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
    10902  break;
    10903  default:
    10904  VMA_ASSERT(0);
    10905  // Fall-through.
    10906  case 0:
    10907  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
    10908  }
    10909  m_pMetadata->Init(newSize);
    10910 }
    10911 
// Releases the underlying VkDeviceMemory back to the allocator and destroys
// the metadata object. The block must be empty (all allocations freed).
void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
{
    // This is the most important assert in the entire library.
    // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
    VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");

    VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
    allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
    m_hMemory = VK_NULL_HANDLE;

    vma_delete(allocator, m_pMetadata);
    m_pMetadata = VMA_NULL;
}
    10925 
    10926 bool VmaDeviceMemoryBlock::Validate() const
    10927 {
    10928  VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
    10929  (m_pMetadata->GetSize() != 0));
    10930 
    10931  return m_pMetadata->Validate();
    10932 }
    10933 
    10934 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
    10935 {
    10936  void* pData = nullptr;
    10937  VkResult res = Map(hAllocator, 1, &pData);
    10938  if(res != VK_SUCCESS)
    10939  {
    10940  return res;
    10941  }
    10942 
    10943  res = m_pMetadata->CheckCorruption(pData);
    10944 
    10945  Unmap(hAllocator, 1);
    10946 
    10947  return res;
    10948 }
    10949 
// Maps the block's memory and (optionally) returns the pointer in *ppData.
// Mapping is reference-counted: `count` is added to m_MapCount and vkMapMemory
// is invoked only on the transition from unmapped to mapped. Guarded by the
// block's mutex so Map/Unmap/Bind* never race on the same VkDeviceMemory.
VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
{
    if(count == 0)
    {
        return VK_SUCCESS;
    }

    VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    if(m_MapCount != 0)
    {
        // Already mapped: just bump the reference count and reuse the pointer.
        m_MapCount += count;
        VMA_ASSERT(m_pMappedData != VMA_NULL);
        if(ppData != VMA_NULL)
        {
            *ppData = m_pMappedData;
        }
        return VK_SUCCESS;
    }
    else
    {
        // First mapping: map the whole memory object once.
        VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
            hAllocator->m_hDevice,
            m_hMemory,
            0, // offset
            VK_WHOLE_SIZE,
            0, // flags
            &m_pMappedData);
        if(result == VK_SUCCESS)
        {
            if(ppData != VMA_NULL)
            {
                *ppData = m_pMappedData;
            }
            m_MapCount = count;
        }
        return result;
    }
}
    10988 
// Decrements the map reference count by `count` and calls vkUnmapMemory only
// when the count drops to zero. Unmapping more times than mapped asserts.
void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
{
    if(count == 0)
    {
        return;
    }

    VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    if(m_MapCount >= count)
    {
        m_MapCount -= count;
        if(m_MapCount == 0)
        {
            // Last reference released: actually unmap the memory.
            m_pMappedData = VMA_NULL;
            (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
        }
    }
    else
    {
        VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
    }
}
    11011 
// Writes the corruption-detection magic value into the margin directly before
// the allocation and directly after it. Only valid when VMA_DEBUG_MARGIN > 0
// (a multiple of 4) and VMA_DEBUG_DETECT_CORRUPTION is enabled (first assert).
// Returns the Map() error if the block's memory cannot be mapped.
VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
{
    VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
    VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);

    // Temporarily map the block (ref-counted; see Map/Unmap).
    void* pData;
    VkResult res = Map(hAllocator, 1, &pData);
    if(res != VK_SUCCESS)
    {
        return res;
    }

    VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN); // margin before
    VmaWriteMagicValue(pData, allocOffset + allocSize);        // margin after

    Unmap(hAllocator, 1);

    return VK_SUCCESS;
}
    11031 
// Verifies that the magic values written by WriteMagicValueAroundAllocation
// are still intact around a freed allocation. Corruption triggers VMA_ASSERT
// rather than an error return; VK_SUCCESS is returned unless Map() fails.
VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
{
    VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
    VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);

    // Temporarily map the block (ref-counted; see Map/Unmap).
    void* pData;
    VkResult res = Map(hAllocator, 1, &pData);
    if(res != VK_SUCCESS)
    {
        return res;
    }

    if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
    {
        VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
    }
    else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
    {
        VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
    }

    Unmap(hAllocator, 1);

    return VK_SUCCESS;
}
    11057 
    11058 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
    11059  const VmaAllocator hAllocator,
    11060  const VmaAllocation hAllocation,
    11061  VkBuffer hBuffer)
    11062 {
    11063  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
    11064  hAllocation->GetBlock() == this);
    11065  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
    11066  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    11067  return hAllocator->GetVulkanFunctions().vkBindBufferMemory(
    11068  hAllocator->m_hDevice,
    11069  hBuffer,
    11070  m_hMemory,
    11071  hAllocation->GetOffset());
    11072 }
    11073 
    11074 VkResult VmaDeviceMemoryBlock::BindImageMemory(
    11075  const VmaAllocator hAllocator,
    11076  const VmaAllocation hAllocation,
    11077  VkImage hImage)
    11078 {
    11079  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
    11080  hAllocation->GetBlock() == this);
    11081  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
    11082  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    11083  return hAllocator->GetVulkanFunctions().vkBindImageMemory(
    11084  hAllocator->m_hDevice,
    11085  hImage,
    11086  m_hMemory,
    11087  hAllocation->GetOffset());
    11088 }
    11089 
// Resets a VmaStatInfo for accumulation: zeroes everything, then seeds the
// minimum trackers with UINT64_MAX so the first sample always wins.
static void InitStatInfo(VmaStatInfo& outInfo)
{
    memset(&outInfo, 0, sizeof(outInfo));
    outInfo.allocationSizeMin = UINT64_MAX;
    outInfo.unusedRangeSizeMin = UINT64_MAX;
}
    11096 
    11097 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
    11098 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
    11099 {
    11100  inoutInfo.blockCount += srcInfo.blockCount;
    11101  inoutInfo.allocationCount += srcInfo.allocationCount;
    11102  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
    11103  inoutInfo.usedBytes += srcInfo.usedBytes;
    11104  inoutInfo.unusedBytes += srcInfo.unusedBytes;
    11105  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
    11106  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
    11107  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
    11108  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
    11109 }
    11110 
    11111 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
    11112 {
    11113  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
    11114  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
    11115  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
    11116  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
    11117 }
    11118 
// Constructs a custom pool: all creation parameters are forwarded to the
// internal VmaBlockVector. createInfo.blockSize == 0 selects the
// allocator-computed preferredBlockSize and marks the size as non-explicit.
VmaPool_T::VmaPool_T(
    VmaAllocator hAllocator,
    const VmaPoolCreateInfo& createInfo,
    VkDeviceSize preferredBlockSize) :
    m_BlockVector(
        hAllocator,
        createInfo.memoryTypeIndex,
        createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
        createInfo.minBlockCount,
        createInfo.maxBlockCount,
        (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
        createInfo.frameInUseCount,
        true, // isCustomPool
        createInfo.blockSize != 0, // explicitBlockSize
        createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm
    m_Id(0)
{
}
    11137 
// Nothing to do explicitly: m_BlockVector's destructor releases all blocks.
VmaPool_T::~VmaPool_T()
{
}
    11141 
    11142 #if VMA_STATS_STRING_ENABLED
    11143 
    11144 #endif // #if VMA_STATS_STRING_ENABLED
    11145 
// Stores the configuration for this vector of memory blocks. No blocks are
// created here - see CreateMinBlocks() and Allocate().
VmaBlockVector::VmaBlockVector(
    VmaAllocator hAllocator,
    uint32_t memoryTypeIndex,
    VkDeviceSize preferredBlockSize,
    size_t minBlockCount,
    size_t maxBlockCount,
    VkDeviceSize bufferImageGranularity,
    uint32_t frameInUseCount,
    bool isCustomPool,
    bool explicitBlockSize,
    uint32_t algorithm) :
    m_hAllocator(hAllocator),
    m_MemoryTypeIndex(memoryTypeIndex),
    m_PreferredBlockSize(preferredBlockSize),
    m_MinBlockCount(minBlockCount),
    m_MaxBlockCount(maxBlockCount),
    m_BufferImageGranularity(bufferImageGranularity),
    m_FrameInUseCount(frameInUseCount),
    m_IsCustomPool(isCustomPool),
    m_ExplicitBlockSize(explicitBlockSize),
    m_Algorithm(algorithm), // 0 or a VMA_POOL_CREATE_*_ALGORITHM_BIT value.
    m_HasEmptyBlock(false),
    m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
    m_NextBlockId(0)
{
}
    11172 
    11173 VmaBlockVector::~VmaBlockVector()
    11174 {
    11175  for(size_t i = m_Blocks.size(); i--; )
    11176  {
    11177  m_Blocks[i]->Destroy(m_hAllocator);
    11178  vma_delete(m_hAllocator, m_Blocks[i]);
    11179  }
    11180 }
    11181 
    11182 VkResult VmaBlockVector::CreateMinBlocks()
    11183 {
    11184  for(size_t i = 0; i < m_MinBlockCount; ++i)
    11185  {
    11186  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
    11187  if(res != VK_SUCCESS)
    11188  {
    11189  return res;
    11190  }
    11191  }
    11192  return VK_SUCCESS;
    11193 }
    11194 
    11195 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
    11196 {
    11197  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
    11198 
    11199  const size_t blockCount = m_Blocks.size();
    11200 
    11201  pStats->size = 0;
    11202  pStats->unusedSize = 0;
    11203  pStats->allocationCount = 0;
    11204  pStats->unusedRangeCount = 0;
    11205  pStats->unusedRangeSizeMax = 0;
    11206  pStats->blockCount = blockCount;
    11207 
    11208  for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    11209  {
    11210  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    11211  VMA_ASSERT(pBlock);
    11212  VMA_HEAVY_ASSERT(pBlock->Validate());
    11213  pBlock->m_pMetadata->AddPoolStats(*pStats);
    11214  }
    11215 }
    11216 
    11217 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
    11218 {
    11219  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    11220  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
    11221  (VMA_DEBUG_MARGIN > 0) &&
    11222  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
    11223 }
    11224 
// Maximum number of retries of the make-other-allocations-lost loop in
// VmaBlockVector::Allocate (see its use below), after which the allocation
// fails with VK_ERROR_TOO_MANY_OBJECTS.
static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
    11226 
/*
Central allocation routine for this vector of memory blocks. Tries, in order:
1. allocating from an existing block (iteration order depends on strategy),
2. creating a new block (possibly smaller than preferred) and allocating there,
3. making other lost-able allocations lost to free space, retrying up to
   VMA_ALLOCATION_TRY_COUNT times.
Returns VK_ERROR_OUT_OF_DEVICE_MEMORY when nothing succeeds.

NOTE(review): several source lines appear to be missing from this view of the
function (marked inline below); the code as shown is not syntactically
complete. Verify against the upstream file before relying on this text.
*/
VkResult VmaBlockVector::Allocate(
    VmaPool hCurrentPool,
    uint32_t currentFrameIndex,
    VkDeviceSize size,
    VkDeviceSize alignment,
    const VmaAllocationCreateInfo& createInfo,
    VmaSuballocationType suballocType,
    VmaAllocation* pAllocation)
{
    // Decode the creation flags once up front.
    const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
    bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
    const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
    const bool canCreateNewBlock =
        ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
        (m_Blocks.size() < m_MaxBlockCount);
    uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;

    // If linearAlgorithm is used, canMakeOtherLost is available only when used as ring buffer.
    // Which in turn is available only when maxBlockCount = 1.
    if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
    {
        canMakeOtherLost = false;
    }

    // Upper address can only be used with linear allocator and within single memory block.
    if(isUpperAddress &&
        (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
    {
        return VK_ERROR_FEATURE_NOT_PRESENT;
    }

    // Validate strategy.
    switch(strategy)
    {
    case 0:
    // NOTE(review): the case labels for the explicit
    // VMA_ALLOCATION_CREATE_STRATEGY_*_BIT values appear to be missing here in
    // this view, which is why two consecutive `break` statements follow.
        break;
        break;
    default:
        return VK_ERROR_FEATURE_NOT_PRESENT;
    }

    // Early reject: requested allocation size is larger that maximum block size for this block vector.
    if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
    {
        return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    }

    VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);

    /*
    Under certain condition, this whole section can be skipped for optimization, so
    we move on directly to trying to allocate with canMakeOtherLost. That's the case
    e.g. for custom pools with linear algorithm.
    */
    if(!canMakeOtherLost || canCreateNewBlock)
    {
        // 1. Search existing allocations. Try to allocate without making other allocations lost.
        VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
        // NOTE(review): a line appears to be missing here in this view
        // (presumably masking a flag out of allocFlagsCopy).

        if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
        {
            // Use only last block.
            if(!m_Blocks.empty())
            {
                VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
                VMA_ASSERT(pCurrBlock);
                VkResult res = AllocateFromBlock(
                    pCurrBlock,
                    hCurrentPool,
                    currentFrameIndex,
                    size,
                    alignment,
                    allocFlagsCopy,
                    createInfo.pUserData,
                    suballocType,
                    strategy,
                    pAllocation);
                if(res == VK_SUCCESS)
                {
                    VMA_DEBUG_LOG(" Returned from last block #%u", (uint32_t)(m_Blocks.size() - 1));
                    return VK_SUCCESS;
                }
            }
        }
        else
        {
            // NOTE(review): the `if` line selecting the best-fit strategy
            // appears to be missing here in this view; the dangling `else`
            // below pairs with it.
            {
                // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
                for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
                {
                    VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
                    VMA_ASSERT(pCurrBlock);
                    VkResult res = AllocateFromBlock(
                        pCurrBlock,
                        hCurrentPool,
                        currentFrameIndex,
                        size,
                        alignment,
                        allocFlagsCopy,
                        createInfo.pUserData,
                        suballocType,
                        strategy,
                        pAllocation);
                    if(res == VK_SUCCESS)
                    {
                        VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
                        return VK_SUCCESS;
                    }
                }
            }
            else // WORST_FIT, FIRST_FIT
            {
                // Backward order in m_Blocks - prefer blocks with largest amount of free space.
                for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
                {
                    VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
                    VMA_ASSERT(pCurrBlock);
                    VkResult res = AllocateFromBlock(
                        pCurrBlock,
                        hCurrentPool,
                        currentFrameIndex,
                        size,
                        alignment,
                        allocFlagsCopy,
                        createInfo.pUserData,
                        suballocType,
                        strategy,
                        pAllocation);
                    if(res == VK_SUCCESS)
                    {
                        VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
                        return VK_SUCCESS;
                    }
                }
            }
        }

        // 2. Try to create new block.
        if(canCreateNewBlock)
        {
            // Calculate optimal size for new block.
            VkDeviceSize newBlockSize = m_PreferredBlockSize;
            uint32_t newBlockSizeShift = 0;
            const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;

            if(!m_ExplicitBlockSize)
            {
                // Allocate 1/8, 1/4, 1/2 as first blocks.
                const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
                for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
                {
                    const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
                    if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
                    {
                        newBlockSize = smallerNewBlockSize;
                        ++newBlockSizeShift;
                    }
                    else
                    {
                        break;
                    }
                }
            }

            size_t newBlockIndex = 0;
            VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
            // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
            if(!m_ExplicitBlockSize)
            {
                while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
                {
                    const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
                    if(smallerNewBlockSize >= size)
                    {
                        newBlockSize = smallerNewBlockSize;
                        ++newBlockSizeShift;
                        res = CreateBlock(newBlockSize, &newBlockIndex);
                    }
                    else
                    {
                        break;
                    }
                }
            }

            if(res == VK_SUCCESS)
            {
                VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
                VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);

                res = AllocateFromBlock(
                    pBlock,
                    hCurrentPool,
                    currentFrameIndex,
                    size,
                    alignment,
                    allocFlagsCopy,
                    createInfo.pUserData,
                    suballocType,
                    strategy,
                    pAllocation);
                if(res == VK_SUCCESS)
                {
                    VMA_DEBUG_LOG(" Created new block Size=%llu", newBlockSize);
                    return VK_SUCCESS;
                }
                else
                {
                    // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
                    return VK_ERROR_OUT_OF_DEVICE_MEMORY;
                }
            }
        }
    }

    // 3. Try to allocate from existing blocks with making other allocations lost.
    if(canMakeOtherLost)
    {
        uint32_t tryIndex = 0;
        for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
        {
            VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
            VmaAllocationRequest bestRequest = {};
            VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;

            // 1. Search existing allocations.
            // NOTE(review): the `if` line selecting the best-fit strategy
            // appears to be missing here in this view; the dangling `else`
            // below pairs with it.
            {
                // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
                for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
                {
                    VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
                    VMA_ASSERT(pCurrBlock);
                    VmaAllocationRequest currRequest = {};
                    if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
                        currentFrameIndex,
                        m_FrameInUseCount,
                        m_BufferImageGranularity,
                        size,
                        alignment,
                        (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
                        suballocType,
                        canMakeOtherLost,
                        strategy,
                        &currRequest))
                    {
                        // Keep the cheapest request found so far.
                        const VkDeviceSize currRequestCost = currRequest.CalcCost();
                        if(pBestRequestBlock == VMA_NULL ||
                            currRequestCost < bestRequestCost)
                        {
                            pBestRequestBlock = pCurrBlock;
                            bestRequest = currRequest;
                            bestRequestCost = currRequestCost;

                            if(bestRequestCost == 0)
                            {
                                break;
                            }
                        }
                    }
                }
            }
            else // WORST_FIT, FIRST_FIT
            {
                // Backward order in m_Blocks - prefer blocks with largest amount of free space.
                for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
                {
                    VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
                    VMA_ASSERT(pCurrBlock);
                    VmaAllocationRequest currRequest = {};
                    if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
                        currentFrameIndex,
                        m_FrameInUseCount,
                        m_BufferImageGranularity,
                        size,
                        alignment,
                        (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
                        suballocType,
                        canMakeOtherLost,
                        strategy,
                        &currRequest))
                    {
                        const VkDeviceSize currRequestCost = currRequest.CalcCost();
                        if(pBestRequestBlock == VMA_NULL ||
                            currRequestCost < bestRequestCost ||
                            // NOTE(review): the continuation of this condition
                            // appears to be missing here in this view.
                        {
                            pBestRequestBlock = pCurrBlock;
                            bestRequest = currRequest;
                            bestRequestCost = currRequestCost;

                            if(bestRequestCost == 0 ||
                                // NOTE(review): the continuation of this condition
                                // appears to be missing here in this view.
                            {
                                break;
                            }
                        }
                    }
                }
            }

            if(pBestRequestBlock != VMA_NULL)
            {
                if(mapped)
                {
                    VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
                    if(res != VK_SUCCESS)
                    {
                        return res;
                    }
                }

                if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
                    currentFrameIndex,
                    m_FrameInUseCount,
                    &bestRequest))
                {
                    // We no longer have an empty Allocation.
                    if(pBestRequestBlock->m_pMetadata->IsEmpty())
                    {
                        m_HasEmptyBlock = false;
                    }
                    // Allocate from this pBlock.
                    *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
                    pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, isUpperAddress, *pAllocation);
                    (*pAllocation)->InitBlockAllocation(
                        hCurrentPool,
                        pBestRequestBlock,
                        bestRequest.offset,
                        alignment,
                        size,
                        suballocType,
                        mapped,
                        (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
                    VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
                    // NOTE(review): `blockIndex` is not in scope here (it is local
                    // to the search loops above); harmless only while
                    // VMA_DEBUG_LOG compiles to nothing.
                    VMA_DEBUG_LOG(" Returned from existing allocation #%u", (uint32_t)blockIndex);
                    (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
                    if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
                    {
                        m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
                    }
                    if(IsCorruptionDetectionEnabled())
                    {
                        VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
                        VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
                    }
                    return VK_SUCCESS;
                }
                // else: Some allocations must have been touched while we are here. Next try.
            }
            else
            {
                // Could not find place in any of the blocks - break outer loop.
                break;
            }
        }
        /* Maximum number of tries exceeded - a very unlikely event when many other
        threads are simultaneously touching allocations making it impossible to make
        lost at the same time as we try to allocate. */
        if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
        {
            return VK_ERROR_TOO_MANY_OBJECTS;
        }
    }

    return VK_ERROR_OUT_OF_DEVICE_MEMORY;
}
    11601 
    11602 void VmaBlockVector::Free(
    11603  VmaAllocation hAllocation)
    11604 {
    11605  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
    11606 
    11607  // Scope for lock.
    11608  {
    11609  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
    11610 
    11611  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    11612 
    11613  if(IsCorruptionDetectionEnabled())
    11614  {
    11615  VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
    11616  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
    11617  }
    11618 
    11619  if(hAllocation->IsPersistentMap())
    11620  {
    11621  pBlock->Unmap(m_hAllocator, 1);
    11622  }
    11623 
    11624  pBlock->m_pMetadata->Free(hAllocation);
    11625  VMA_HEAVY_ASSERT(pBlock->Validate());
    11626 
    11627  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", memTypeIndex);
    11628 
    11629  // pBlock became empty after this deallocation.
    11630  if(pBlock->m_pMetadata->IsEmpty())
    11631  {
    11632  // Already has empty Allocation. We don't want to have two, so delete this one.
    11633  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
    11634  {
    11635  pBlockToDelete = pBlock;
    11636  Remove(pBlock);
    11637  }
    11638  // We now have first empty block.
    11639  else
    11640  {
    11641  m_HasEmptyBlock = true;
    11642  }
    11643  }
    11644  // pBlock didn't become empty, but we have another empty block - find and free that one.
    11645  // (This is optional, heuristics.)
    11646  else if(m_HasEmptyBlock)
    11647  {
    11648  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
    11649  if(pLastBlock->m_pMetadata->IsEmpty() && m_Blocks.size() > m_MinBlockCount)
    11650  {
    11651  pBlockToDelete = pLastBlock;
    11652  m_Blocks.pop_back();
    11653  m_HasEmptyBlock = false;
    11654  }
    11655  }
    11656 
    11657  IncrementallySortBlocks();
    11658  }
    11659 
    11660  // Destruction of a free Allocation. Deferred until this point, outside of mutex
    11661  // lock, for performance reason.
    11662  if(pBlockToDelete != VMA_NULL)
    11663  {
    11664  VMA_DEBUG_LOG(" Deleted empty allocation");
    11665  pBlockToDelete->Destroy(m_hAllocator);
    11666  vma_delete(m_hAllocator, pBlockToDelete);
    11667  }
    11668 }
    11669 
    11670 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
    11671 {
    11672  VkDeviceSize result = 0;
    11673  for(size_t i = m_Blocks.size(); i--; )
    11674  {
    11675  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
    11676  if(result >= m_PreferredBlockSize)
    11677  {
    11678  break;
    11679  }
    11680  }
    11681  return result;
    11682 }
    11683 
    11684 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
    11685 {
    11686  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    11687  {
    11688  if(m_Blocks[blockIndex] == pBlock)
    11689  {
    11690  VmaVectorRemove(m_Blocks, blockIndex);
    11691  return;
    11692  }
    11693  }
    11694  VMA_ASSERT(0);
    11695 }
    11696 
    11697 void VmaBlockVector::IncrementallySortBlocks()
    11698 {
    11699  if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
    11700  {
    11701  // Bubble sort only until first swap.
    11702  for(size_t i = 1; i < m_Blocks.size(); ++i)
    11703  {
    11704  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
    11705  {
    11706  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
    11707  return;
    11708  }
    11709  }
    11710  }
    11711 }
    11712 
// Tries to suballocate `size` bytes with given `alignment` from the single
// block pBlock. Never creates new blocks and never makes other allocations
// lost (asserts CAN_MAKE_OTHER_LOST is not requested).
// On success fills *pAllocation and returns VK_SUCCESS; returns
// VK_ERROR_OUT_OF_DEVICE_MEMORY when the block has no suitable free region,
// or the result of Map() if persistent mapping was requested and failed.
VkResult VmaBlockVector::AllocateFromBlock(
    VmaDeviceMemoryBlock* pBlock,
    VmaPool hCurrentPool,
    uint32_t currentFrameIndex,
    VkDeviceSize size,
    VkDeviceSize alignment,
    VmaAllocationCreateFlags allocFlags,
    void* pUserData,
    VmaSuballocationType suballocType,
    uint32_t strategy,
    VmaAllocation* pAllocation)
{
    VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
    const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
    const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;

    // Ask the block's metadata whether a region satisfying size/alignment/
    // granularity exists; on success currRequest describes where.
    VmaAllocationRequest currRequest = {};
    if(pBlock->m_pMetadata->CreateAllocationRequest(
        currentFrameIndex,
        m_FrameInUseCount,
        m_BufferImageGranularity,
        size,
        alignment,
        isUpperAddress,
        suballocType,
        false, // canMakeOtherLost
        strategy,
        &currRequest))
    {
        // Allocate from pCurrBlock.
        VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);

        // Add a persistent-map reference on the block if requested.
        if(mapped)
        {
            VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
            if(res != VK_SUCCESS)
            {
                return res;
            }
        }

        // We no longer have an empty Allocation.
        if(pBlock->m_pMetadata->IsEmpty())
        {
            m_HasEmptyBlock = false;
        }

        *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
        pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, isUpperAddress, *pAllocation);
        (*pAllocation)->InitBlockAllocation(
            hCurrentPool,
            pBlock,
            currRequest.offset,
            alignment,
            size,
            suballocType,
            mapped,
            (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
        VMA_HEAVY_ASSERT(pBlock->Validate());
        (*pAllocation)->SetUserData(m_hAllocator, pUserData);
        // Optionally fill fresh memory with a recognizable debug pattern.
        if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
        {
            m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
        }
        // Write magic values into the margins for later corruption checks.
        if(IsCorruptionDetectionEnabled())
        {
            VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
            VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
        }
        return VK_SUCCESS;
    }
    return VK_ERROR_OUT_OF_DEVICE_MEMORY;
}
    11787 
    11788 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
    11789 {
    11790  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    11791  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
    11792  allocInfo.allocationSize = blockSize;
    11793  VkDeviceMemory mem = VK_NULL_HANDLE;
    11794  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
    11795  if(res < 0)
    11796  {
    11797  return res;
    11798  }
    11799 
    11800  // New VkDeviceMemory successfully created.
    11801 
    11802  // Create new Allocation for it.
    11803  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
    11804  pBlock->Init(
    11805  m_hAllocator,
    11806  m_MemoryTypeIndex,
    11807  mem,
    11808  allocInfo.allocationSize,
    11809  m_NextBlockId++,
    11810  m_Algorithm);
    11811 
    11812  m_Blocks.push_back(pBlock);
    11813  if(pNewBlockIndex != VMA_NULL)
    11814  {
    11815  *pNewBlockIndex = m_Blocks.size() - 1;
    11816  }
    11817 
    11818  return VK_SUCCESS;
    11819 }
    11820 
// Executes a list of defragmentation moves on the CPU side:
// maps every block involved in any move, memmove()s the data, and unmaps
// blocks that were mapped only for this purpose. For non-coherent memory,
// invalidates the source range before reading and flushes the destination
// range after writing, with offsets/sizes aligned to nonCoherentAtomSize.
// On any map failure, pDefragCtx->res records the error and remaining
// work is skipped (already-mapped blocks are still unmapped at the end).
void VmaBlockVector::ApplyDefragmentationMovesCpu(
    class VmaBlockVectorDefragmentationContext* pDefragCtx,
    const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves)
{
    const size_t blockCount = m_Blocks.size();
    const bool isNonCoherent = m_hAllocator->IsMemoryTypeNonCoherent(m_MemoryTypeIndex);

    enum BLOCK_FLAG
    {
        BLOCK_FLAG_USED = 0x00000001,
        BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION = 0x00000002,
    };

    // Per-block scratch state: participation flags and the mapped pointer.
    struct BlockInfo
    {
        uint32_t flags;
        void* pMappedData;
    };
    VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> >
        blockInfo(blockCount, VmaStlAllocator<BlockInfo>(m_hAllocator->GetAllocationCallbacks()));
    memset(blockInfo.data(), 0, blockCount * sizeof(BlockInfo));

    // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
    const size_t moveCount = moves.size();
    for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
    {
        const VmaDefragmentationMove& move = moves[moveIndex];
        blockInfo[move.srcBlockIndex].flags |= BLOCK_FLAG_USED;
        blockInfo[move.dstBlockIndex].flags |= BLOCK_FLAG_USED;
    }

    VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);

    // Go over all blocks. Get mapped pointer or map if necessary.
    for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo& currBlockInfo = blockInfo[blockIndex];
        VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
        if((currBlockInfo.flags & BLOCK_FLAG_USED) != 0)
        {
            currBlockInfo.pMappedData = pBlock->GetMappedData();
            // It is not originally mapped - map it.
            if(currBlockInfo.pMappedData == VMA_NULL)
            {
                pDefragCtx->res = pBlock->Map(m_hAllocator, 1, &currBlockInfo.pMappedData);
                if(pDefragCtx->res == VK_SUCCESS)
                {
                    // Remember to unmap this block when done.
                    currBlockInfo.flags |= BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION;
                }
            }
        }
    }

    // Go over all moves. Do actual data transfer.
    if(pDefragCtx->res == VK_SUCCESS)
    {
        const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
        VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };

        for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
        {
            const VmaDefragmentationMove& move = moves[moveIndex];

            const BlockInfo& srcBlockInfo = blockInfo[move.srcBlockIndex];
            const BlockInfo& dstBlockInfo = blockInfo[move.dstBlockIndex];

            VMA_ASSERT(srcBlockInfo.pMappedData && dstBlockInfo.pMappedData);

            // Invalidate source.
            if(isNonCoherent)
            {
                VmaDeviceMemoryBlock* const pSrcBlock = m_Blocks[move.srcBlockIndex];
                memRange.memory = pSrcBlock->GetDeviceMemory();
                // Range must be aligned to nonCoherentAtomSize and clamped to block size.
                memRange.offset = VmaAlignDown(move.srcOffset, nonCoherentAtomSize);
                memRange.size = VMA_MIN(
                    VmaAlignUp(move.size + (move.srcOffset - memRange.offset), nonCoherentAtomSize),
                    pSrcBlock->m_pMetadata->GetSize() - memRange.offset);
                (*m_hAllocator->GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
            }

            // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
            // memmove, not memcpy: src and dst ranges may overlap within one block.
            memmove(
                reinterpret_cast<char*>(dstBlockInfo.pMappedData) + move.dstOffset,
                reinterpret_cast<char*>(srcBlockInfo.pMappedData) + move.srcOffset,
                static_cast<size_t>(move.size));

            // Re-write magic values around the allocation at its new location.
            if(IsCorruptionDetectionEnabled())
            {
                VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset - VMA_DEBUG_MARGIN);
                VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset + move.size);
            }

            // Flush destination.
            if(isNonCoherent)
            {
                VmaDeviceMemoryBlock* const pDstBlock = m_Blocks[move.dstBlockIndex];
                memRange.memory = pDstBlock->GetDeviceMemory();
                memRange.offset = VmaAlignDown(move.dstOffset, nonCoherentAtomSize);
                memRange.size = VMA_MIN(
                    VmaAlignUp(move.size + (move.dstOffset - memRange.offset), nonCoherentAtomSize),
                    pDstBlock->m_pMetadata->GetSize() - memRange.offset);
                (*m_hAllocator->GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
            }
        }
    }

    // Go over all blocks in reverse order. Unmap those that were mapped just for defragmentation.
    // Regardless of pCtx->res == VK_SUCCESS.
    for(size_t blockIndex = blockCount; blockIndex--; )
    {
        const BlockInfo& currBlockInfo = blockInfo[blockIndex];
        if((currBlockInfo.flags & BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION) != 0)
        {
            VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
            pBlock->Unmap(m_hAllocator, 1);
        }
    }
}
    11939 
// Records a list of defragmentation moves into commandBuffer for GPU-side
// execution: creates a whole-block transfer buffer bound to each involved
// block's memory, then records vkCmdCopyBuffer for every move.
// Buffers are kept in pDefragCtx for destruction in DefragmentationEnd().
// If any commands were recorded, pDefragCtx->res is set to VK_NOT_READY so
// the caller knows the command buffer must be submitted and completed first.
void VmaBlockVector::ApplyDefragmentationMovesGpu(
    class VmaBlockVectorDefragmentationContext* pDefragCtx,
    const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    VkCommandBuffer commandBuffer)
{
    const size_t blockCount = m_Blocks.size();

    pDefragCtx->blockContexts.resize(blockCount);
    memset(pDefragCtx->blockContexts.data(), 0, blockCount * sizeof(VmaBlockDefragmentationContext));

    // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
    const size_t moveCount = moves.size();
    for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
    {
        const VmaDefragmentationMove& move = moves[moveIndex];
        pDefragCtx->blockContexts[move.srcBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
        pDefragCtx->blockContexts[move.dstBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
    }

    VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);

    // Go over all blocks. Create and bind buffer for whole block if necessary.
    {
        // Buffer spans the entire block so any offset within it is addressable
        // as both copy source and destination.
        VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
        bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT |
            VK_BUFFER_USAGE_TRANSFER_DST_BIT;

        for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
        {
            VmaBlockDefragmentationContext& currBlockCtx = pDefragCtx->blockContexts[blockIndex];
            VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
            if((currBlockCtx.flags & VmaBlockDefragmentationContext::BLOCK_FLAG_USED) != 0)
            {
                bufCreateInfo.size = pBlock->m_pMetadata->GetSize();
                pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkCreateBuffer)(
                    m_hAllocator->m_hDevice, &bufCreateInfo, m_hAllocator->GetAllocationCallbacks(), &currBlockCtx.hBuffer);
                if(pDefragCtx->res == VK_SUCCESS)
                {
                    pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkBindBufferMemory)(
                        m_hAllocator->m_hDevice, currBlockCtx.hBuffer, pBlock->GetDeviceMemory(), 0);
                }
            }
        }
    }

    // Go over all moves. Post data transfer commands to command buffer.
    if(pDefragCtx->res == VK_SUCCESS)
    {
        const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
        VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };

        for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
        {
            const VmaDefragmentationMove& move = moves[moveIndex];

            const VmaBlockDefragmentationContext& srcBlockCtx = pDefragCtx->blockContexts[move.srcBlockIndex];
            const VmaBlockDefragmentationContext& dstBlockCtx = pDefragCtx->blockContexts[move.dstBlockIndex];

            VMA_ASSERT(srcBlockCtx.hBuffer && dstBlockCtx.hBuffer);

            VkBufferCopy region = {
                move.srcOffset,
                move.dstOffset,
                move.size };
            (*m_hAllocator->GetVulkanFunctions().vkCmdCopyBuffer)(
                commandBuffer, srcBlockCtx.hBuffer, dstBlockCtx.hBuffer, 1, &region);
        }
    }

    // Save buffers to defrag context for later destruction.
    if(pDefragCtx->res == VK_SUCCESS && moveCount > 0)
    {
        // VK_NOT_READY signals that recorded commands still need GPU execution.
        pDefragCtx->res = VK_NOT_READY;
    }
}
    12015 
    12016 void VmaBlockVector::FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats)
    12017 {
    12018  m_HasEmptyBlock = false;
    12019  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    12020  {
    12021  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
    12022  if(pBlock->m_pMetadata->IsEmpty())
    12023  {
    12024  if(m_Blocks.size() > m_MinBlockCount)
    12025  {
    12026  if(pDefragmentationStats != VMA_NULL)
    12027  {
    12028  ++pDefragmentationStats->deviceMemoryBlocksFreed;
    12029  pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
    12030  }
    12031 
    12032  VmaVectorRemove(m_Blocks, blockIndex);
    12033  pBlock->Destroy(m_hAllocator);
    12034  vma_delete(m_hAllocator, pBlock);
    12035  }
    12036  else
    12037  {
    12038  m_HasEmptyBlock = true;
    12039  }
    12040  }
    12041  }
    12042 }
    12043 
    12044 #if VMA_STATS_STRING_ENABLED
    12045 
// Writes this block vector's state as a JSON object into `json`:
// pool parameters (for custom pools) or preferred block size (for default
// vectors), followed by a "Blocks" object keyed by block id with each
// block's detailed metadata map.
// Thread-safe: takes m_Mutex for reading.
void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
{
    VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);

    json.BeginObject();

    if(m_IsCustomPool)
    {
        // Custom pools report their full configuration.
        json.WriteString("MemoryTypeIndex");
        json.WriteNumber(m_MemoryTypeIndex);

        json.WriteString("BlockSize");
        json.WriteNumber(m_PreferredBlockSize);

        json.WriteString("BlockCount");
        json.BeginObject(true);
        if(m_MinBlockCount > 0)
        {
            json.WriteString("Min");
            json.WriteNumber((uint64_t)m_MinBlockCount);
        }
        if(m_MaxBlockCount < SIZE_MAX)
        {
            json.WriteString("Max");
            json.WriteNumber((uint64_t)m_MaxBlockCount);
        }
        json.WriteString("Cur");
        json.WriteNumber((uint64_t)m_Blocks.size());
        json.EndObject();

        if(m_FrameInUseCount > 0)
        {
            json.WriteString("FrameInUseCount");
            json.WriteNumber(m_FrameInUseCount);
        }

        if(m_Algorithm != 0)
        {
            json.WriteString("Algorithm");
            json.WriteString(VmaAlgorithmToStr(m_Algorithm));
        }
    }
    else
    {
        // Default (non-pool) vectors only report the preferred block size.
        json.WriteString("PreferredBlockSize");
        json.WriteNumber(m_PreferredBlockSize);
    }

    json.WriteString("Blocks");
    json.BeginObject();
    for(size_t i = 0; i < m_Blocks.size(); ++i)
    {
        // Key each block by its numeric id.
        json.BeginString();
        json.ContinueString(m_Blocks[i]->GetId());
        json.EndString();

        m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
    }
    json.EndObject();

    json.EndObject();
}
    12108 
    12109 #endif // #if VMA_STATS_STRING_ENABLED
    12110 
// Runs defragmentation for this block vector within the given CPU/GPU
// byte and allocation-count budgets (passed by reference and decremented
// by the amount actually consumed).
// Chooses CPU or GPU path based on memory properties: CPU requires
// HOST_VISIBLE memory; GPU is preferred for DEVICE_LOCAL or integrated GPUs
// and is disabled under corruption detection for host-visible+coherent
// memory (magic values would need CPU rewriting).
// If defragmentation proceeds, m_Mutex is locked for writing and remains
// locked until DefragmentationEnd(); pCtx->mutexLocked records this.
// Results are reported through pCtx->res and accumulated into pStats.
void VmaBlockVector::Defragment(
    class VmaBlockVectorDefragmentationContext* pCtx,
    VmaDefragmentationStats* pStats,
    VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
    VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
    VkCommandBuffer commandBuffer)
{
    pCtx->res = VK_SUCCESS;

    const VkMemoryPropertyFlags memPropFlags =
        m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags;
    const bool isHostVisible = (memPropFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0;
    const bool isHostCoherent = (memPropFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0;

    const bool canDefragmentOnCpu = maxCpuBytesToMove > 0 && maxCpuAllocationsToMove > 0 &&
        isHostVisible;
    const bool canDefragmentOnGpu = maxGpuBytesToMove > 0 && maxGpuAllocationsToMove > 0 &&
        (VMA_DEBUG_DETECT_CORRUPTION == 0 || !(isHostVisible && isHostCoherent));

    // There are options to defragment this memory type.
    if(canDefragmentOnCpu || canDefragmentOnGpu)
    {
        bool defragmentOnGpu;
        // There is only one option to defragment this memory type.
        if(canDefragmentOnGpu != canDefragmentOnCpu)
        {
            defragmentOnGpu = canDefragmentOnGpu;
        }
        // Both options are available: Heuristics to choose the best one.
        else
        {
            defragmentOnGpu = (memPropFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0 ||
                m_hAllocator->IsIntegratedGpu();
        }

        // CPU memmove handles overlapping ranges; GPU vkCmdCopyBuffer does not.
        bool overlappingMoveSupported = !defragmentOnGpu;

        if(m_hAllocator->m_UseMutex)
        {
            // Lock is intentionally held past this function; released in
            // DefragmentationEnd().
            m_Mutex.LockWrite();
            pCtx->mutexLocked = true;
        }

        pCtx->Begin(overlappingMoveSupported);

        // Defragment.

        const VkDeviceSize maxBytesToMove = defragmentOnGpu ? maxGpuBytesToMove : maxCpuBytesToMove;
        const uint32_t maxAllocationsToMove = defragmentOnGpu ? maxGpuAllocationsToMove : maxCpuAllocationsToMove;
        VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> > moves =
            VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >(VmaStlAllocator<VmaDefragmentationMove>(m_hAllocator->GetAllocationCallbacks()));
        pCtx->res = pCtx->GetAlgorithm()->Defragment(moves, maxBytesToMove, maxAllocationsToMove);

        // Accumulate statistics.
        if(pStats != VMA_NULL)
        {
            const VkDeviceSize bytesMoved = pCtx->GetAlgorithm()->GetBytesMoved();
            const uint32_t allocationsMoved = pCtx->GetAlgorithm()->GetAllocationsMoved();
            pStats->bytesMoved += bytesMoved;
            pStats->allocationsMoved += allocationsMoved;
            VMA_ASSERT(bytesMoved <= maxBytesToMove);
            VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
            // Charge the consumed work against the caller's remaining budget.
            if(defragmentOnGpu)
            {
                maxGpuBytesToMove -= bytesMoved;
                maxGpuAllocationsToMove -= allocationsMoved;
            }
            else
            {
                maxCpuBytesToMove -= bytesMoved;
                maxCpuAllocationsToMove -= allocationsMoved;
            }
        }

        if(pCtx->res >= VK_SUCCESS)
        {
            if(defragmentOnGpu)
            {
                ApplyDefragmentationMovesGpu(pCtx, moves, commandBuffer);
            }
            else
            {
                ApplyDefragmentationMovesCpu(pCtx, moves);
            }
        }
    }
}
    12198 
// Finalizes a defragmentation pass started by Defragment():
// destroys the temporary transfer buffers created for the GPU path, frees
// blocks that became empty (updating pStats), and releases m_Mutex if
// Defragment() left it locked.
void VmaBlockVector::DefragmentationEnd(
    class VmaBlockVectorDefragmentationContext* pCtx,
    VmaDefragmentationStats* pStats)
{
    // Destroy buffers.
    for(size_t blockIndex = pCtx->blockContexts.size(); blockIndex--; )
    {
        VmaBlockDefragmentationContext& blockCtx = pCtx->blockContexts[blockIndex];
        if(blockCtx.hBuffer)
        {
            (*m_hAllocator->GetVulkanFunctions().vkDestroyBuffer)(
                m_hAllocator->m_hDevice, blockCtx.hBuffer, m_hAllocator->GetAllocationCallbacks());
        }
    }

    // Only reclaim empty blocks if the pass actually succeeded.
    if(pCtx->res >= VK_SUCCESS)
    {
        FreeEmptyBlocks(pStats);
    }

    // Release the write lock taken in Defragment().
    if(pCtx->mutexLocked)
    {
        VMA_ASSERT(m_hAllocator->m_UseMutex);
        m_Mutex.UnlockWrite();
    }
}
    12225 
    12226 size_t VmaBlockVector::CalcAllocationCount() const
    12227 {
    12228  size_t result = 0;
    12229  for(size_t i = 0; i < m_Blocks.size(); ++i)
    12230  {
    12231  result += m_Blocks[i]->m_pMetadata->GetAllocationCount();
    12232  }
    12233  return result;
    12234 }
    12235 
// Returns true if any block may contain adjacent buffer/image suballocations
// that could conflict under the device's bufferImageGranularity.
// Trivially false when granularity is 1 (no conflicts possible).
// NOTE(review): lastSuballocType carries over between blocks - presumably the
// metadata call updates it in place; only valid for the generic algorithm
// (asserted below).
bool VmaBlockVector::IsBufferImageGranularityConflictPossible() const
{
    if(m_BufferImageGranularity == 1)
    {
        return false;
    }
    VmaSuballocationType lastSuballocType = VMA_SUBALLOCATION_TYPE_FREE;
    for(size_t i = 0, count = m_Blocks.size(); i < count; ++i)
    {
        VmaDeviceMemoryBlock* const pBlock = m_Blocks[i];
        VMA_ASSERT(m_Algorithm == 0);
        VmaBlockMetadata_Generic* const pMetadata = (VmaBlockMetadata_Generic*)pBlock->m_pMetadata;
        if(pMetadata->IsBufferImageGranularityConflictPossible(m_BufferImageGranularity, lastSuballocType))
        {
            return true;
        }
    }
    return false;
}
    12255 
    12256 void VmaBlockVector::MakePoolAllocationsLost(
    12257  uint32_t currentFrameIndex,
    12258  size_t* pLostAllocationCount)
    12259 {
    12260  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
    12261  size_t lostAllocationCount = 0;
    12262  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    12263  {
    12264  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    12265  VMA_ASSERT(pBlock);
    12266  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
    12267  }
    12268  if(pLostAllocationCount != VMA_NULL)
    12269  {
    12270  *pLostAllocationCount = lostAllocationCount;
    12271  }
    12272 }
    12273 
    12274 VkResult VmaBlockVector::CheckCorruption()
    12275 {
    12276  if(!IsCorruptionDetectionEnabled())
    12277  {
    12278  return VK_ERROR_FEATURE_NOT_PRESENT;
    12279  }
    12280 
    12281  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
    12282  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    12283  {
    12284  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    12285  VMA_ASSERT(pBlock);
    12286  VkResult res = pBlock->CheckCorruption(m_hAllocator);
    12287  if(res != VK_SUCCESS)
    12288  {
    12289  return res;
    12290  }
    12291  }
    12292  return VK_SUCCESS;
    12293 }
    12294 
// Accumulates this vector's per-block allocation statistics into pStats:
// the totals, the entry for this memory type, and the entry for the heap
// the memory type belongs to.
// Thread-safe: takes m_Mutex for reading.
void VmaBlockVector::AddStats(VmaStats* pStats)
{
    const uint32_t memTypeIndex = m_MemoryTypeIndex;
    const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);

    VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);

    for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    {
        const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
        VMA_ASSERT(pBlock);
        VMA_HEAVY_ASSERT(pBlock->Validate());
        VmaStatInfo allocationStatInfo;
        pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
        // Each block contributes to total, memory-type, and heap buckets.
        VmaAddStatInfo(pStats->total, allocationStatInfo);
        VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
        VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    }
}
    12314 
    12316 // VmaDefragmentationAlgorithm_Generic members definition
    12317 
// Builds the generic defragmentation algorithm's working state: one BlockInfo
// per block of the target vector, sorted by block pointer so AddAllocation()
// can locate a block with binary search.
// NOTE(review): overlappingMoveSupported is accepted but not used here -
// presumably consumed elsewhere; confirm against the class definition.
VmaDefragmentationAlgorithm_Generic::VmaDefragmentationAlgorithm_Generic(
    VmaAllocator hAllocator,
    VmaBlockVector* pBlockVector,
    uint32_t currentFrameIndex,
    bool overlappingMoveSupported) :
    VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
    m_AllAllocations(false),
    m_AllocationCount(0),
    m_BytesMoved(0),
    m_AllocationsMoved(0),
    m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
{
    // Create block info for each block.
    const size_t blockCount = m_pBlockVector->m_Blocks.size();
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
        // Remember the block's original position before sorting.
        pBlockInfo->m_OriginalBlockIndex = blockIndex;
        pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
        m_Blocks.push_back(pBlockInfo);
    }

    // Sort them by m_pBlock pointer value.
    VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
}
    12343 
    12344 VmaDefragmentationAlgorithm_Generic::~VmaDefragmentationAlgorithm_Generic()
    12345 {
    12346  for(size_t i = m_Blocks.size(); i--; )
    12347  {
    12348  vma_delete(m_hAllocator, m_Blocks[i]);
    12349  }
    12350 }
    12351 
// Registers an allocation as a candidate to be moved during defragmentation.
// pChanged, if non-null, will be set by the algorithm when the allocation is
// actually moved. Allocations already lost are silently skipped.
void VmaDefragmentationAlgorithm_Generic::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
{
    // Now as we are inside VmaBlockVector::m_Mutex, we can make final check if this allocation was not lost.
    if(hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
    {
        VmaDeviceMemoryBlock* pBlock = hAlloc->GetBlock();
        // m_Blocks is sorted by block pointer (see constructor), so binary search applies.
        BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
        if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
        {
            AllocationInfo allocInfo = AllocationInfo(hAlloc, pChanged);
            (*it)->m_Allocations.push_back(allocInfo);
        }
        else
        {
            // The allocation's block must belong to the vector this algorithm was built for.
            VMA_ASSERT(0);
        }

        ++m_AllocationCount;
    }
}
    12372 
/*
Performs a single round of generic defragmentation over m_Blocks.

Walks allocations starting from the most "source" block (the back of
m_Blocks - Defragment() sorted them from most "destination" to most
"source") and, within each block, from the last allocation backwards. For
every allocation it scans the preceding blocks (and the current block at a
lower offset) for a place to move it to, as judged by MoveMakesSense().
Each accepted relocation is appended to `moves` and committed to the
metadata of both blocks immediately.

moves - out: one record per relocation performed in this round.
maxBytesToMove / maxAllocationsToMove - cumulative limits (they count
m_BytesMoved / m_AllocationsMoved across rounds); hitting either ends the
round early, still returning VK_SUCCESS.
*/
VkResult VmaDefragmentationAlgorithm_Generic::DefragmentRound(
    VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    VkDeviceSize maxBytesToMove,
    uint32_t maxAllocationsToMove)
{
    if(m_Blocks.empty())
    {
        return VK_SUCCESS;
    }

    // This is a choice based on research.
    // Option 1:
    uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT;
    // Option 2:
    //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT;
    // Option 3:
    //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT;

    size_t srcBlockMinIndex = 0;
    // When FAST_ALGORITHM, move allocations from only last out of blocks that contain non-movable allocations.
    /*
    if(m_AlgorithmFlags & VMA_DEFRAGMENTATION_FAST_ALGORITHM_BIT)
    {
        const size_t blocksWithNonMovableCount = CalcBlocksWithNonMovableCount();
        if(blocksWithNonMovableCount > 0)
        {
            srcBlockMinIndex = blocksWithNonMovableCount - 1;
        }
    }
    */

    size_t srcBlockIndex = m_Blocks.size() - 1;
    // SIZE_MAX is a sentinel meaning "allocation index not chosen yet"; the
    // while loop below then snaps it to the last allocation of the block.
    size_t srcAllocIndex = SIZE_MAX;
    for(;;)
    {
        // 1. Find next allocation to move.
        // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
        // 1.2. Then start from last to first m_Allocations.
        while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
        {
            if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
            {
                // Finished: no more allocations to process.
                if(srcBlockIndex == srcBlockMinIndex)
                {
                    return VK_SUCCESS;
                }
                else
                {
                    --srcBlockIndex;
                    srcAllocIndex = SIZE_MAX;
                }
            }
            else
            {
                srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
            }
        }

        BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
        AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];

        const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
        const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
        const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
        const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();

        // 2. Try to find new place for this allocation in preceding or current block.
        for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
        {
            BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
            VmaAllocationRequest dstAllocRequest;
            if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
                m_CurrentFrameIndex,
                m_pBlockVector->GetFrameInUseCount(),
                m_pBlockVector->GetBufferImageGranularity(),
                size,
                alignment,
                false, // upperAddress
                suballocType,
                false, // canMakeOtherLost
                strategy,
                &dstAllocRequest) &&
            MoveMakesSense(
                dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
            {
                VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);

                // Reached limit on number of allocations or bytes to move.
                if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
                    (m_BytesMoved + size > maxBytesToMove))
                {
                    return VK_SUCCESS;
                }

                VmaDefragmentationMove move;
                move.srcBlockIndex = pSrcBlockInfo->m_OriginalBlockIndex;
                move.dstBlockIndex = pDstBlockInfo->m_OriginalBlockIndex;
                move.srcOffset = srcOffset;
                move.dstOffset = dstAllocRequest.offset;
                move.size = size;
                moves.push_back(move);

                // Commit the move in metadata: allocate at the destination,
                // free at the source, and repoint the allocation object.
                pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
                    dstAllocRequest,
                    suballocType,
                    size,
                    false, // upperAddress
                    allocInfo.m_hAllocation);
                pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);

                allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);

                if(allocInfo.m_pChanged != VMA_NULL)
                {
                    *allocInfo.m_pChanged = VK_TRUE;
                }

                ++m_AllocationsMoved;
                m_BytesMoved += size;

                VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);

                break;
            }
        }

        // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round.

        if(srcAllocIndex > 0)
        {
            --srcAllocIndex;
        }
        else
        {
            if(srcBlockIndex > 0)
            {
                --srcBlockIndex;
                srcAllocIndex = SIZE_MAX;
            }
            else
            {
                return VK_SUCCESS;
            }
        }
    }
}
    12520 
    12521 size_t VmaDefragmentationAlgorithm_Generic::CalcBlocksWithNonMovableCount() const
    12522 {
    12523  size_t result = 0;
    12524  for(size_t i = 0; i < m_Blocks.size(); ++i)
    12525  {
    12526  if(m_Blocks[i]->m_HasNonMovableAllocations)
    12527  {
    12528  ++result;
    12529  }
    12530  }
    12531  return result;
    12532 }
    12533 
    12534 VkResult VmaDefragmentationAlgorithm_Generic::Defragment(
    12535  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    12536  VkDeviceSize maxBytesToMove,
    12537  uint32_t maxAllocationsToMove)
    12538 {
    12539  if(!m_AllAllocations && m_AllocationCount == 0)
    12540  {
    12541  return VK_SUCCESS;
    12542  }
    12543 
    12544  const size_t blockCount = m_Blocks.size();
    12545  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    12546  {
    12547  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
    12548 
    12549  if(m_AllAllocations)
    12550  {
    12551  VmaBlockMetadata_Generic* pMetadata = (VmaBlockMetadata_Generic*)pBlockInfo->m_pBlock->m_pMetadata;
    12552  for(VmaSuballocationList::const_iterator it = pMetadata->m_Suballocations.begin();
    12553  it != pMetadata->m_Suballocations.end();
    12554  ++it)
    12555  {
    12556  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
    12557  {
    12558  AllocationInfo allocInfo = AllocationInfo(it->hAllocation, VMA_NULL);
    12559  pBlockInfo->m_Allocations.push_back(allocInfo);
    12560  }
    12561  }
    12562  }
    12563 
    12564  pBlockInfo->CalcHasNonMovableAllocations();
    12565 
    12566  // This is a choice based on research.
    12567  // Option 1:
    12568  pBlockInfo->SortAllocationsByOffsetDescending();
    12569  // Option 2:
    12570  //pBlockInfo->SortAllocationsBySizeDescending();
    12571  }
    12572 
    12573  // Sort m_Blocks this time by the main criterium, from most "destination" to most "source" blocks.
    12574  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
    12575 
    12576  // This is a choice based on research.
    12577  const uint32_t roundCount = 2;
    12578 
    12579  // Execute defragmentation rounds (the main part).
    12580  VkResult result = VK_SUCCESS;
    12581  for(uint32_t round = 0; (round < roundCount) && (result == VK_SUCCESS); ++round)
    12582  {
    12583  result = DefragmentRound(moves, maxBytesToMove, maxAllocationsToMove);
    12584  }
    12585 
    12586  return result;
    12587 }
    12588 
    12589 bool VmaDefragmentationAlgorithm_Generic::MoveMakesSense(
    12590  size_t dstBlockIndex, VkDeviceSize dstOffset,
    12591  size_t srcBlockIndex, VkDeviceSize srcOffset)
    12592 {
    12593  if(dstBlockIndex < srcBlockIndex)
    12594  {
    12595  return true;
    12596  }
    12597  if(dstBlockIndex > srcBlockIndex)
    12598  {
    12599  return false;
    12600  }
    12601  if(dstOffset < srcOffset)
    12602  {
    12603  return true;
    12604  }
    12605  return false;
    12606 }
    12607 
    12609 // VmaDefragmentationAlgorithm_Fast
    12610 
// Constructs the "fast" defragmentation algorithm.
// overlappingMoveSupported - whether an in-block move whose destination
//   range overlaps the source range is permitted (see its use in
//   Defragment()'s same-block path).
// Asserts VMA_DEBUG_MARGIN == 0: this algorithm is only valid when no debug
// margins are configured (Begin() selects it only under that condition).
VmaDefragmentationAlgorithm_Fast::VmaDefragmentationAlgorithm_Fast(
    VmaAllocator hAllocator,
    VmaBlockVector* pBlockVector,
    uint32_t currentFrameIndex,
    bool overlappingMoveSupported) :
    VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
    m_OverlappingMoveSupported(overlappingMoveSupported),
    m_AllocationCount(0),
    m_AllAllocations(false),
    m_BytesMoved(0),
    m_AllocationsMoved(0),
    m_BlockInfos(VmaStlAllocator<BlockInfo>(hAllocator->GetAllocationCallbacks()))
{
    VMA_ASSERT(VMA_DEBUG_MARGIN == 0);

}
    12627 
    12628 VmaDefragmentationAlgorithm_Fast::~VmaDefragmentationAlgorithm_Fast()
    12629 {
    12630 }
    12631 
/*
"Fast" defragmentation: compacts allocations towards the front of the block
vector in one linear pass.

Precondition (asserted): all allocations of the block vector participate, so
the suballocation lists can be rewritten freely. PreprocessMetadata() first
strips FREE entries and suspends the free-space bookkeeping;
PostprocessMetadata() rebuilds both at the end.

A write cursor (dstBlockInfoIndex/dstOffset) packs allocations one after
another; gaps that must be skipped (non-profitable overlapping moves, block
ends) are remembered in freeSpaceDb and may be reused for later, smaller
allocations. `moves` receives one record per relocated allocation, bounded
by maxBytesToMove / maxAllocationsToMove.
*/
VkResult VmaDefragmentationAlgorithm_Fast::Defragment(
    VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    VkDeviceSize maxBytesToMove,
    uint32_t maxAllocationsToMove)
{
    VMA_ASSERT(m_AllAllocations || m_pBlockVector->CalcAllocationCount() == m_AllocationCount);

    const size_t blockCount = m_pBlockVector->GetBlockCount();
    if(blockCount == 0 || maxBytesToMove == 0 || maxAllocationsToMove == 0)
    {
        return VK_SUCCESS;
    }

    PreprocessMetadata();

    // Sort blocks in order from most destination.

    m_BlockInfos.resize(blockCount);
    for(size_t i = 0; i < blockCount; ++i)
    {
        m_BlockInfos[i].origBlockIndex = i;
    }

    // Blocks with the least total free space come first: they are the
    // fullest and therefore the best "destination" candidates.
    VMA_SORT(m_BlockInfos.begin(), m_BlockInfos.end(), [this](const BlockInfo& lhs, const BlockInfo& rhs) -> bool {
        return m_pBlockVector->GetBlock(lhs.origBlockIndex)->m_pMetadata->GetSumFreeSize() <
            m_pBlockVector->GetBlock(rhs.origBlockIndex)->m_pMetadata->GetSumFreeSize();
    });

    // THE MAIN ALGORITHM

    FreeSpaceDatabase freeSpaceDb;

    // Write cursor: next allocation lands in block dstBlockInfoIndex at
    // offset >= dstOffset; it advances to the next block when one fills up.
    size_t dstBlockInfoIndex = 0;
    size_t dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
    VmaDeviceMemoryBlock* pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
    VmaBlockMetadata_Generic* pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
    VkDeviceSize dstBlockSize = pDstMetadata->GetSize();
    VkDeviceSize dstOffset = 0;

    bool end = false;
    for(size_t srcBlockInfoIndex = 0; !end && srcBlockInfoIndex < blockCount; ++srcBlockInfoIndex)
    {
        const size_t srcOrigBlockIndex = m_BlockInfos[srcBlockInfoIndex].origBlockIndex;
        VmaDeviceMemoryBlock* const pSrcBlock = m_pBlockVector->GetBlock(srcOrigBlockIndex);
        VmaBlockMetadata_Generic* const pSrcMetadata = (VmaBlockMetadata_Generic*)pSrcBlock->m_pMetadata;
        // After PreprocessMetadata() the list holds only real allocations,
        // sorted by offset.
        for(VmaSuballocationList::iterator srcSuballocIt = pSrcMetadata->m_Suballocations.begin();
            !end && srcSuballocIt != pSrcMetadata->m_Suballocations.end(); )
        {
            VmaAllocation_T* const pAlloc = srcSuballocIt->hAllocation;
            const VkDeviceSize srcAllocAlignment = pAlloc->GetAlignment();
            const VkDeviceSize srcAllocSize = srcSuballocIt->size;
            if(m_AllocationsMoved == maxAllocationsToMove ||
                m_BytesMoved + srcAllocSize > maxBytesToMove)
            {
                end = true;
                break;
            }
            const VkDeviceSize srcAllocOffset = srcSuballocIt->offset;

            // Try to place it in one of free spaces from the database.
            size_t freeSpaceInfoIndex;
            VkDeviceSize dstAllocOffset;
            if(freeSpaceDb.Fetch(srcAllocAlignment, srcAllocSize,
                freeSpaceInfoIndex, dstAllocOffset))
            {
                size_t freeSpaceOrigBlockIndex = m_BlockInfos[freeSpaceInfoIndex].origBlockIndex;
                VmaDeviceMemoryBlock* pFreeSpaceBlock = m_pBlockVector->GetBlock(freeSpaceOrigBlockIndex);
                VmaBlockMetadata_Generic* pFreeSpaceMetadata = (VmaBlockMetadata_Generic*)pFreeSpaceBlock->m_pMetadata;
                // NOTE(review): freeSpaceBlockSize is computed but not used below.
                VkDeviceSize freeSpaceBlockSize = pFreeSpaceMetadata->GetSize();

                // Same block
                if(freeSpaceInfoIndex == srcBlockInfoIndex)
                {
                    VMA_ASSERT(dstAllocOffset <= srcAllocOffset);

                    // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.

                    VmaSuballocation suballoc = *srcSuballocIt;
                    suballoc.offset = dstAllocOffset;
                    suballoc.hAllocation->ChangeOffset(dstAllocOffset);
                    m_BytesMoved += srcAllocSize;
                    ++m_AllocationsMoved;

                    // Detach from the current position and re-insert at the
                    // new (lower) offset to keep the list sorted.
                    VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
                    ++nextSuballocIt;
                    pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
                    srcSuballocIt = nextSuballocIt;

                    InsertSuballoc(pFreeSpaceMetadata, suballoc);

                    VmaDefragmentationMove move = {
                        srcOrigBlockIndex, freeSpaceOrigBlockIndex,
                        srcAllocOffset, dstAllocOffset,
                        srcAllocSize };
                    moves.push_back(move);
                }
                // Different block
                else
                {
                    // MOVE OPTION 2: Move the allocation to a different block.

                    VMA_ASSERT(freeSpaceInfoIndex < srcBlockInfoIndex);

                    VmaSuballocation suballoc = *srcSuballocIt;
                    suballoc.offset = dstAllocOffset;
                    suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pFreeSpaceBlock, dstAllocOffset);
                    m_BytesMoved += srcAllocSize;
                    ++m_AllocationsMoved;

                    VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
                    ++nextSuballocIt;
                    pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
                    srcSuballocIt = nextSuballocIt;

                    InsertSuballoc(pFreeSpaceMetadata, suballoc);

                    VmaDefragmentationMove move = {
                        srcOrigBlockIndex, freeSpaceOrigBlockIndex,
                        srcAllocOffset, dstAllocOffset,
                        srcAllocSize };
                    moves.push_back(move);
                }
            }
            else
            {
                dstAllocOffset = VmaAlignUp(dstOffset, srcAllocAlignment);

                // If the allocation doesn't fit before the end of dstBlock, forward to next block.
                while(dstBlockInfoIndex < srcBlockInfoIndex &&
                    dstAllocOffset + srcAllocSize > dstBlockSize)
                {
                    // But before that, register remaining free space at the end of dst block.
                    freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, dstBlockSize - dstOffset);

                    ++dstBlockInfoIndex;
                    dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
                    pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
                    pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
                    dstBlockSize = pDstMetadata->GetSize();
                    dstOffset = 0;
                    dstAllocOffset = 0;
                }

                // Same block
                if(dstBlockInfoIndex == srcBlockInfoIndex)
                {
                    VMA_ASSERT(dstAllocOffset <= srcAllocOffset);

                    const bool overlap = dstAllocOffset + srcAllocSize > srcAllocOffset;

                    bool skipOver = overlap;
                    if(overlap && m_OverlappingMoveSupported && dstAllocOffset < srcAllocOffset)
                    {
                        // If destination and source place overlap, skip if it would move it
                        // by only < 1/64 of its size.
                        skipOver = (srcAllocOffset - dstAllocOffset) * 64 < srcAllocSize;
                    }

                    if(skipOver)
                    {
                        // Not moved: remember the gap before it for reuse and
                        // continue packing after the allocation's current end.
                        freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, srcAllocOffset - dstOffset);

                        dstOffset = srcAllocOffset + srcAllocSize;
                        ++srcSuballocIt;
                    }
                    // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
                    else
                    {
                        // List stays sorted (offset only decreases), so the
                        // entry can be updated in place - no erase/insert.
                        srcSuballocIt->offset = dstAllocOffset;
                        srcSuballocIt->hAllocation->ChangeOffset(dstAllocOffset);
                        dstOffset = dstAllocOffset + srcAllocSize;
                        m_BytesMoved += srcAllocSize;
                        ++m_AllocationsMoved;
                        ++srcSuballocIt;
                        VmaDefragmentationMove move = {
                            srcOrigBlockIndex, dstOrigBlockIndex,
                            srcAllocOffset, dstAllocOffset,
                            srcAllocSize };
                        moves.push_back(move);
                    }
                }
                // Different block
                else
                {
                    // MOVE OPTION 2: Move the allocation to a different block.

                    VMA_ASSERT(dstBlockInfoIndex < srcBlockInfoIndex);
                    VMA_ASSERT(dstAllocOffset + srcAllocSize <= dstBlockSize);

                    VmaSuballocation suballoc = *srcSuballocIt;
                    suballoc.offset = dstAllocOffset;
                    suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlock, dstAllocOffset);
                    dstOffset = dstAllocOffset + srcAllocSize;
                    m_BytesMoved += srcAllocSize;
                    ++m_AllocationsMoved;

                    VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
                    ++nextSuballocIt;
                    pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
                    srcSuballocIt = nextSuballocIt;

                    // Destination list is filled front-to-back, so appending
                    // keeps it sorted by offset.
                    pDstMetadata->m_Suballocations.push_back(suballoc);

                    VmaDefragmentationMove move = {
                        srcOrigBlockIndex, dstOrigBlockIndex,
                        srcAllocOffset, dstAllocOffset,
                        srcAllocSize };
                    moves.push_back(move);
                }
            }
        }
    }

    m_BlockInfos.clear();

    PostprocessMetadata();

    return VK_SUCCESS;
}
    12851 
    12852 void VmaDefragmentationAlgorithm_Fast::PreprocessMetadata()
    12853 {
    12854  const size_t blockCount = m_pBlockVector->GetBlockCount();
    12855  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    12856  {
    12857  VmaBlockMetadata_Generic* const pMetadata =
    12858  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
    12859  pMetadata->m_FreeCount = 0;
    12860  pMetadata->m_SumFreeSize = pMetadata->GetSize();
    12861  pMetadata->m_FreeSuballocationsBySize.clear();
    12862  for(VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
    12863  it != pMetadata->m_Suballocations.end(); )
    12864  {
    12865  if(it->type == VMA_SUBALLOCATION_TYPE_FREE)
    12866  {
    12867  VmaSuballocationList::iterator nextIt = it;
    12868  ++nextIt;
    12869  pMetadata->m_Suballocations.erase(it);
    12870  it = nextIt;
    12871  }
    12872  else
    12873  {
    12874  ++it;
    12875  }
    12876  }
    12877  }
    12878 }
    12879 
    12880 void VmaDefragmentationAlgorithm_Fast::PostprocessMetadata()
    12881 {
    12882  const size_t blockCount = m_pBlockVector->GetBlockCount();
    12883  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    12884  {
    12885  VmaBlockMetadata_Generic* const pMetadata =
    12886  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
    12887  const VkDeviceSize blockSize = pMetadata->GetSize();
    12888 
    12889  // No allocations in this block - entire area is free.
    12890  if(pMetadata->m_Suballocations.empty())
    12891  {
    12892  pMetadata->m_FreeCount = 1;
    12893  //pMetadata->m_SumFreeSize is already set to blockSize.
    12894  VmaSuballocation suballoc = {
    12895  0, // offset
    12896  blockSize, // size
    12897  VMA_NULL, // hAllocation
    12898  VMA_SUBALLOCATION_TYPE_FREE };
    12899  pMetadata->m_Suballocations.push_back(suballoc);
    12900  pMetadata->RegisterFreeSuballocation(pMetadata->m_Suballocations.begin());
    12901  }
    12902  // There are some allocations in this block.
    12903  else
    12904  {
    12905  VkDeviceSize offset = 0;
    12906  VmaSuballocationList::iterator it;
    12907  for(it = pMetadata->m_Suballocations.begin();
    12908  it != pMetadata->m_Suballocations.end();
    12909  ++it)
    12910  {
    12911  VMA_ASSERT(it->type != VMA_SUBALLOCATION_TYPE_FREE);
    12912  VMA_ASSERT(it->offset >= offset);
    12913 
    12914  // Need to insert preceding free space.
    12915  if(it->offset > offset)
    12916  {
    12917  ++pMetadata->m_FreeCount;
    12918  const VkDeviceSize freeSize = it->offset - offset;
    12919  VmaSuballocation suballoc = {
    12920  offset, // offset
    12921  freeSize, // size
    12922  VMA_NULL, // hAllocation
    12923  VMA_SUBALLOCATION_TYPE_FREE };
    12924  VmaSuballocationList::iterator precedingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
    12925  if(freeSize >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    12926  {
    12927  pMetadata->m_FreeSuballocationsBySize.push_back(precedingFreeIt);
    12928  }
    12929  }
    12930 
    12931  pMetadata->m_SumFreeSize -= it->size;
    12932  offset = it->offset + it->size;
    12933  }
    12934 
    12935  // Need to insert trailing free space.
    12936  if(offset < blockSize)
    12937  {
    12938  ++pMetadata->m_FreeCount;
    12939  const VkDeviceSize freeSize = blockSize - offset;
    12940  VmaSuballocation suballoc = {
    12941  offset, // offset
    12942  freeSize, // size
    12943  VMA_NULL, // hAllocation
    12944  VMA_SUBALLOCATION_TYPE_FREE };
    12945  VMA_ASSERT(it == pMetadata->m_Suballocations.end());
    12946  VmaSuballocationList::iterator trailingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
    12947  if(freeSize > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    12948  {
    12949  pMetadata->m_FreeSuballocationsBySize.push_back(trailingFreeIt);
    12950  }
    12951  }
    12952 
    12953  VMA_SORT(
    12954  pMetadata->m_FreeSuballocationsBySize.begin(),
    12955  pMetadata->m_FreeSuballocationsBySize.end(),
    12956  VmaSuballocationItemSizeLess());
    12957  }
    12958 
    12959  VMA_HEAVY_ASSERT(pMetadata->Validate());
    12960  }
    12961 }
    12962 
    12963 void VmaDefragmentationAlgorithm_Fast::InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc)
    12964 {
    12965  // TODO: Optimize somehow. Remember iterator instead of searching for it linearly.
    12966  VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
    12967  while(it != pMetadata->m_Suballocations.end())
    12968  {
    12969  if(it->offset < suballoc.offset)
    12970  {
    12971  ++it;
    12972  }
    12973  }
    12974  pMetadata->m_Suballocations.insert(it, suballoc);
    12975 }
    12976 
    12978 // VmaBlockVectorDefragmentationContext
    12979 
// Per-block-vector defragmentation state.
// hCustomPool - the custom pool owning pBlockVector, or VMA_NULL for a
//   default (per-memory-type) block vector.
// The concrete algorithm object (m_pAlgorithm) is not created here - it is
// allocated later in Begin(), once the set of allocations is known.
VmaBlockVectorDefragmentationContext::VmaBlockVectorDefragmentationContext(
    VmaAllocator hAllocator,
    VmaPool hCustomPool,
    VmaBlockVector* pBlockVector,
    uint32_t currFrameIndex,
    uint32_t algorithmFlags) :
    res(VK_SUCCESS),
    mutexLocked(false),
    blockContexts(VmaStlAllocator<VmaBlockDefragmentationContext>(hAllocator->GetAllocationCallbacks())),
    m_hAllocator(hAllocator),
    m_hCustomPool(hCustomPool),
    m_pBlockVector(pBlockVector),
    m_CurrFrameIndex(currFrameIndex),
    m_AlgorithmFlags(algorithmFlags),
    m_pAlgorithm(VMA_NULL),
    m_Allocations(VmaStlAllocator<AllocInfo>(hAllocator->GetAllocationCallbacks())),
    m_AllAllocations(false)
{
}
    12999 
VmaBlockVectorDefragmentationContext::~VmaBlockVectorDefragmentationContext()
{
    // Frees the algorithm created in Begin().
    // NOTE(review): m_pAlgorithm is VMA_NULL until Begin() runs - confirm
    // vma_delete tolerates a null pointer if Begin() was never called.
    vma_delete(m_hAllocator, m_pAlgorithm);
}
    13004 
    13005 void VmaBlockVectorDefragmentationContext::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
    13006 {
    13007  AllocInfo info = { hAlloc, pChanged };
    13008  m_Allocations.push_back(info);
    13009 }
    13010 
// Creates the defragmentation algorithm object and feeds it the allocations
// to process. Called once, right before defragmentation runs.
// overlappingMoveSupported is forwarded to the chosen algorithm.
void VmaBlockVectorDefragmentationContext::Begin(bool overlappingMoveSupported)
{
    // Treat "every allocation was added individually" the same as AddAll().
    const bool allAllocations = m_AllAllocations ||
        m_Allocations.size() == m_pBlockVector->CalcAllocationCount();

    /********************************
    HERE IS THE CHOICE OF DEFRAGMENTATION ALGORITHM.
    ********************************/

    /*
    Fast algorithm is supported only when certain criteria are met:
    - VMA_DEBUG_MARGIN is 0.
    - All allocations in this block vector are moveable.
    - There is no possibility of image/buffer granularity conflict.
    */
    if(VMA_DEBUG_MARGIN == 0 &&
        allAllocations &&
        !m_pBlockVector->IsBufferImageGranularityConflictPossible())
    {
        m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Fast)(
            m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
    }
    else
    {
        m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Generic)(
            m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
    }

    if(allAllocations)
    {
        m_pAlgorithm->AddAll();
    }
    else
    {
        for(size_t i = 0, count = m_Allocations.size(); i < count; ++i)
        {
            m_pAlgorithm->AddAllocation(m_Allocations[i].hAlloc, m_Allocations[i].pChanged);
        }
    }
}
    13051 
    13053 // VmaDefragmentationContext
    13054 
// Top-level state for one defragmentation operation.
// pStats - optional statistics output; may be null.
// m_DefaultPoolContexts is indexed by memory type; zero-filling marks every
// entry as "context not created yet" (they are created lazily in
// AddAllocations()).
VmaDefragmentationContext_T::VmaDefragmentationContext_T(
    VmaAllocator hAllocator,
    uint32_t currFrameIndex,
    uint32_t flags,
    VmaDefragmentationStats* pStats) :
    m_hAllocator(hAllocator),
    m_CurrFrameIndex(currFrameIndex),
    m_Flags(flags),
    m_pStats(pStats),
    m_CustomPoolContexts(VmaStlAllocator<VmaBlockVectorDefragmentationContext*>(hAllocator->GetAllocationCallbacks()))
{
    memset(m_DefaultPoolContexts, 0, sizeof(m_DefaultPoolContexts));
}
    13068 
VmaDefragmentationContext_T::~VmaDefragmentationContext_T()
{
    // Finish defragmentation on every block vector that was touched, then
    // free its context: custom-pool contexts first, then default-pool ones.
    for(size_t i = m_CustomPoolContexts.size(); i--; )
    {
        VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[i];
        pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
        vma_delete(m_hAllocator, pBlockVectorCtx);
    }
    // Default-pool contexts are indexed by memory type; entries for memory
    // types that had no defragmented allocations are null.
    for(size_t i = m_hAllocator->m_MemProps.memoryTypeCount; i--; )
    {
        VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[i];
        if(pBlockVectorCtx)
        {
            pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
            vma_delete(m_hAllocator, pBlockVectorCtx);
        }
    }
}
    13087 
    13088 void VmaDefragmentationContext_T::AddPools(uint32_t poolCount, VmaPool* pPools)
    13089 {
    13090  for(uint32_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
    13091  {
    13092  VmaPool pool = pPools[poolIndex];
    13093  VMA_ASSERT(pool);
    13094  // Pools with algorithm other than default are not defragmented.
    13095  if(pool->m_BlockVector.GetAlgorithm() == 0)
    13096  {
    13097  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
    13098 
    13099  for(size_t i = m_CustomPoolContexts.size(); i--; )
    13100  {
    13101  if(m_CustomPoolContexts[i]->GetCustomPool() == pool)
    13102  {
    13103  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
    13104  break;
    13105  }
    13106  }
    13107 
    13108  if(!pBlockVectorDefragCtx)
    13109  {
    13110  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
    13111  m_hAllocator,
    13112  pool,
    13113  &pool->m_BlockVector,
    13114  m_CurrFrameIndex,
    13115  m_Flags);
    13116  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
    13117  }
    13118 
    13119  pBlockVectorDefragCtx->AddAll();
    13120  }
    13121  }
    13122 }
    13123 
// Registers individual allocations for defragmentation, routing each one to
// the context of the block vector it lives in (contexts are created lazily).
// pAllocationsChanged - optional array parallel to pAllocations; the slot of
//   a moved allocation is later set to VK_TRUE.
void VmaDefragmentationContext_T::AddAllocations(
    uint32_t allocationCount,
    VmaAllocation* pAllocations,
    VkBool32* pAllocationsChanged)
{
    // Dispatch pAllocations among defragmentators. Create them when necessary.
    for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
    {
        const VmaAllocation hAlloc = pAllocations[allocIndex];
        VMA_ASSERT(hAlloc);
        // DedicatedAlloc cannot be defragmented.
        if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
            // Lost allocation cannot be defragmented.
            (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
        {
            VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;

            const VmaPool hAllocPool = hAlloc->GetPool();
            // This allocation belongs to custom pool.
            if(hAllocPool != VK_NULL_HANDLE)
            {
                // Pools with algorithm other than default are not defragmented.
                if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
                {
                    // Linear search for an existing context of this pool.
                    for(size_t i = m_CustomPoolContexts.size(); i--; )
                    {
                        if(m_CustomPoolContexts[i]->GetCustomPool() == hAllocPool)
                        {
                            pBlockVectorDefragCtx = m_CustomPoolContexts[i];
                            break;
                        }
                    }
                    if(!pBlockVectorDefragCtx)
                    {
                        pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
                            m_hAllocator,
                            hAllocPool,
                            &hAllocPool->m_BlockVector,
                            m_CurrFrameIndex,
                            m_Flags);
                        m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
                    }
                }
            }
            // This allocation belongs to default pool.
            else
            {
                const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
                pBlockVectorDefragCtx = m_DefaultPoolContexts[memTypeIndex];
                if(!pBlockVectorDefragCtx)
                {
                    pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
                        m_hAllocator,
                        VMA_NULL, // hCustomPool
                        m_hAllocator->m_pBlockVectors[memTypeIndex],
                        m_CurrFrameIndex,
                        m_Flags);
                    m_DefaultPoolContexts[memTypeIndex] = pBlockVectorDefragCtx;
                }
            }

            // Stays VMA_NULL for custom pools with a non-default algorithm -
            // such allocations are silently skipped.
            if(pBlockVectorDefragCtx)
            {
                VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
                    &pAllocationsChanged[allocIndex] : VMA_NULL;
                pBlockVectorDefragCtx->AddAllocation(hAlloc, pChanged);
            }
        }
    }
}
    13194 
// Runs defragmentation over every registered block vector (default pools
// first, then custom pools), forwarding the CPU/GPU byte and allocation-count
// limits. With no command buffer, GPU limits are zeroed so only CPU-side
// moves happen. Stops early on the first per-vector failure and returns
// that result; otherwise VK_SUCCESS.
VkResult VmaDefragmentationContext_T::Defragment(
    VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
    VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
    VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats)
{
    if(pStats)
    {
        memset(pStats, 0, sizeof(VmaDefragmentationStats));
    }

    // No command buffer means GPU moves cannot be recorded - disable them.
    if(commandBuffer == VK_NULL_HANDLE)
    {
        maxGpuBytesToMove = 0;
        maxGpuAllocationsToMove = 0;
    }

    VkResult res = VK_SUCCESS;

    // Process default pools.
    for(uint32_t memTypeIndex = 0;
        memTypeIndex < m_hAllocator->GetMemoryTypeCount() && res >= VK_SUCCESS;
        ++memTypeIndex)
    {
        VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
        if(pBlockVectorCtx)
        {
            VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
            pBlockVectorCtx->GetBlockVector()->Defragment(
                pBlockVectorCtx,
                pStats,
                maxCpuBytesToMove, maxCpuAllocationsToMove,
                maxGpuBytesToMove, maxGpuAllocationsToMove,
                commandBuffer);
            if(pBlockVectorCtx->res != VK_SUCCESS)
            {
                res = pBlockVectorCtx->res;
            }
        }
    }

    // Process custom pools.
    for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
        customCtxIndex < customCtxCount && res >= VK_SUCCESS;
        ++customCtxIndex)
    {
        VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
        VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
        pBlockVectorCtx->GetBlockVector()->Defragment(
            pBlockVectorCtx,
            pStats,
            maxCpuBytesToMove, maxCpuAllocationsToMove,
            maxGpuBytesToMove, maxGpuAllocationsToMove,
            commandBuffer);
        if(pBlockVectorCtx->res != VK_SUCCESS)
        {
            res = pBlockVectorCtx->res;
        }
    }

    return res;
}
    13256 
    13258 // VmaRecorder
    13259 
    13260 #if VMA_RECORDING_ENABLED
    13261 
// Constructs an uninitialized recorder. No file is opened and the timer
// fields are set to sentinel values; Init() must be called before use.
VmaRecorder::VmaRecorder() :
    m_UseMutex(true),
    m_Flags(0),
    m_File(VMA_NULL),
    m_Freq(INT64_MAX),
    m_StartCounter(INT64_MAX)
{
}
    13270 
    13271 VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
    13272 {
    13273  m_UseMutex = useMutex;
    13274  m_Flags = settings.flags;
    13275 
    13276  QueryPerformanceFrequency((LARGE_INTEGER*)&m_Freq);
    13277  QueryPerformanceCounter((LARGE_INTEGER*)&m_StartCounter);
    13278 
    13279  // Open file for writing.
    13280  errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
    13281  if(err != 0)
    13282  {
    13283  return VK_ERROR_INITIALIZATION_FAILED;
    13284  }
    13285 
    13286  // Write header.
    13287  fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
    13288  fprintf(m_File, "%s\n", "1,5");
    13289 
    13290  return VK_SUCCESS;
    13291 }
    13292 
    13293 VmaRecorder::~VmaRecorder()
    13294 {
    13295  if(m_File != VMA_NULL)
    13296  {
    13297  fclose(m_File);
    13298  }
    13299 }
    13300 
    13301 void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
    13302 {
    13303  CallParams callParams;
    13304  GetBasicParams(callParams);
    13305 
    13306  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13307  fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
    13308  Flush();
    13309 }
    13310 
    13311 void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
    13312 {
    13313  CallParams callParams;
    13314  GetBasicParams(callParams);
    13315 
    13316  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13317  fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
    13318  Flush();
    13319 }
    13320 
// Appends a vmaCreatePool entry with all pool creation parameters and the
// resulting pool handle.
void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
{
    // Thread id and timestamp identifying this call.
    CallParams callParams;
    GetBasicParams(callParams);

    // Serialize writes from multiple threads.
    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
        createInfo.memoryTypeIndex,
        createInfo.flags,
        createInfo.blockSize,
        // Block counts are size_t; widened to a fixed 64-bit type to match %llu.
        (uint64_t)createInfo.minBlockCount,
        (uint64_t)createInfo.maxBlockCount,
        createInfo.frameInUseCount,
        pool);
    Flush();
}
    13337 
    13338 void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
    13339 {
    13340  CallParams callParams;
    13341  GetBasicParams(callParams);
    13342 
    13343  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13344  fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
    13345  pool);
    13346  Flush();
    13347 }
    13348 
// Appends a vmaAllocateMemory entry: memory requirements, allocation
// creation parameters, the resulting allocation handle and user data.
void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
    const VkMemoryRequirements& vkMemReq,
    const VmaAllocationCreateInfo& createInfo,
    VmaAllocation allocation)
{
    // Thread id and timestamp identifying this call.
    CallParams callParams;
    GetBasicParams(callParams);

    // Serialize writes from multiple threads.
    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    // Formats pUserData either as a string or as a pointer, per createInfo.flags.
    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        vkMemReq.size,
        vkMemReq.alignment,
        vkMemReq.memoryTypeBits,
        createInfo.flags,
        createInfo.usage,
        createInfo.requiredFlags,
        createInfo.preferredFlags,
        createInfo.memoryTypeBits,
        createInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
    13373 
// Appends a vmaAllocateMemoryForBuffer entry: memory requirements, the
// dedicated-allocation hints, allocation creation parameters and result.
void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
    const VkMemoryRequirements& vkMemReq,
    bool requiresDedicatedAllocation,
    bool prefersDedicatedAllocation,
    const VmaAllocationCreateInfo& createInfo,
    VmaAllocation allocation)
{
    // Thread id and timestamp identifying this call.
    CallParams callParams;
    GetBasicParams(callParams);

    // Serialize writes from multiple threads.
    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    // Formats pUserData either as a string or as a pointer, per createInfo.flags.
    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        vkMemReq.size,
        vkMemReq.alignment,
        vkMemReq.memoryTypeBits,
        // Booleans written as 0/1 for the CSV format.
        requiresDedicatedAllocation ? 1 : 0,
        prefersDedicatedAllocation ? 1 : 0,
        createInfo.flags,
        createInfo.usage,
        createInfo.requiredFlags,
        createInfo.preferredFlags,
        createInfo.memoryTypeBits,
        createInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
    13402 
// Appends a vmaAllocateMemoryForImage entry: memory requirements, the
// dedicated-allocation hints, allocation creation parameters and result.
void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
    const VkMemoryRequirements& vkMemReq,
    bool requiresDedicatedAllocation,
    bool prefersDedicatedAllocation,
    const VmaAllocationCreateInfo& createInfo,
    VmaAllocation allocation)
{
    // Thread id and timestamp identifying this call.
    CallParams callParams;
    GetBasicParams(callParams);

    // Serialize writes from multiple threads.
    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    // Formats pUserData either as a string or as a pointer, per createInfo.flags.
    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        vkMemReq.size,
        vkMemReq.alignment,
        vkMemReq.memoryTypeBits,
        // Booleans written as 0/1 for the CSV format.
        requiresDedicatedAllocation ? 1 : 0,
        prefersDedicatedAllocation ? 1 : 0,
        createInfo.flags,
        createInfo.usage,
        createInfo.requiredFlags,
        createInfo.preferredFlags,
        createInfo.memoryTypeBits,
        createInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
    13431 
    13432 void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
    13433  VmaAllocation allocation)
    13434 {
    13435  CallParams callParams;
    13436  GetBasicParams(callParams);
    13437 
    13438  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13439  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
    13440  allocation);
    13441  Flush();
    13442 }
    13443 
    13444 void VmaRecorder::RecordResizeAllocation(
    13445  uint32_t frameIndex,
    13446  VmaAllocation allocation,
    13447  VkDeviceSize newSize)
    13448 {
    13449  CallParams callParams;
    13450  GetBasicParams(callParams);
    13451 
    13452  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13453  fprintf(m_File, "%u,%.3f,%u,vmaResizeAllocation,%p,%llu\n", callParams.threadId, callParams.time, frameIndex,
    13454  allocation, newSize);
    13455  Flush();
    13456 }
    13457 
// Appends a vmaSetAllocationUserData entry. The user data is rendered as a
// string when the allocation was created with the COPY_STRING flag,
// otherwise as a raw pointer value.
void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
    VmaAllocation allocation,
    const void* pUserData)
{
    // Thread id and timestamp identifying this call.
    CallParams callParams;
    GetBasicParams(callParams);

    // Serialize writes from multiple threads.
    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    // Reconstruct the string-vs-pointer flag from the allocation itself,
    // since this call does not receive the original create flags.
    UserDataString userDataStr(
        allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
        pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        allocation,
        userDataStr.GetString());
    Flush();
}
    13474 
    13475 void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
    13476  VmaAllocation allocation)
    13477 {
    13478  CallParams callParams;
    13479  GetBasicParams(callParams);
    13480 
    13481  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13482  fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
    13483  allocation);
    13484  Flush();
    13485 }
    13486 
    13487 void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
    13488  VmaAllocation allocation)
    13489 {
    13490  CallParams callParams;
    13491  GetBasicParams(callParams);
    13492 
    13493  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13494  fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
    13495  allocation);
    13496  Flush();
    13497 }
    13498 
    13499 void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
    13500  VmaAllocation allocation)
    13501 {
    13502  CallParams callParams;
    13503  GetBasicParams(callParams);
    13504 
    13505  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13506  fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
    13507  allocation);
    13508  Flush();
    13509 }
    13510 
    13511 void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
    13512  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    13513 {
    13514  CallParams callParams;
    13515  GetBasicParams(callParams);
    13516 
    13517  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13518  fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
    13519  allocation,
    13520  offset,
    13521  size);
    13522  Flush();
    13523 }
    13524 
    13525 void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
    13526  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    13527 {
    13528  CallParams callParams;
    13529  GetBasicParams(callParams);
    13530 
    13531  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13532  fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
    13533  allocation,
    13534  offset,
    13535  size);
    13536  Flush();
    13537 }
    13538 
// Appends a vmaCreateBuffer entry: buffer creation parameters, allocation
// creation parameters, the resulting allocation handle and user data.
void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
    const VkBufferCreateInfo& bufCreateInfo,
    const VmaAllocationCreateInfo& allocCreateInfo,
    VmaAllocation allocation)
{
    // Thread id and timestamp identifying this call.
    CallParams callParams;
    GetBasicParams(callParams);

    // Serialize writes from multiple threads.
    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    // Formats pUserData either as a string or as a pointer, per allocCreateInfo.flags.
    UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        bufCreateInfo.flags,
        bufCreateInfo.size,
        bufCreateInfo.usage,
        bufCreateInfo.sharingMode,
        allocCreateInfo.flags,
        allocCreateInfo.usage,
        allocCreateInfo.requiredFlags,
        allocCreateInfo.preferredFlags,
        allocCreateInfo.memoryTypeBits,
        allocCreateInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
    13564 
// Appends a vmaCreateImage entry: full image creation parameters, allocation
// creation parameters, the resulting allocation handle and user data.
void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
    const VkImageCreateInfo& imageCreateInfo,
    const VmaAllocationCreateInfo& allocCreateInfo,
    VmaAllocation allocation)
{
    // Thread id and timestamp identifying this call.
    CallParams callParams;
    GetBasicParams(callParams);

    // Serialize writes from multiple threads.
    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    // Formats pUserData either as a string or as a pointer, per allocCreateInfo.flags.
    UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        imageCreateInfo.flags,
        imageCreateInfo.imageType,
        imageCreateInfo.format,
        imageCreateInfo.extent.width,
        imageCreateInfo.extent.height,
        imageCreateInfo.extent.depth,
        imageCreateInfo.mipLevels,
        imageCreateInfo.arrayLayers,
        imageCreateInfo.samples,
        imageCreateInfo.tiling,
        imageCreateInfo.usage,
        imageCreateInfo.sharingMode,
        imageCreateInfo.initialLayout,
        allocCreateInfo.flags,
        allocCreateInfo.usage,
        allocCreateInfo.requiredFlags,
        allocCreateInfo.preferredFlags,
        allocCreateInfo.memoryTypeBits,
        allocCreateInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
    13599 
    13600 void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
    13601  VmaAllocation allocation)
    13602 {
    13603  CallParams callParams;
    13604  GetBasicParams(callParams);
    13605 
    13606  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13607  fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
    13608  allocation);
    13609  Flush();
    13610 }
    13611 
    13612 void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
    13613  VmaAllocation allocation)
    13614 {
    13615  CallParams callParams;
    13616  GetBasicParams(callParams);
    13617 
    13618  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13619  fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
    13620  allocation);
    13621  Flush();
    13622 }
    13623 
    13624 void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
    13625  VmaAllocation allocation)
    13626 {
    13627  CallParams callParams;
    13628  GetBasicParams(callParams);
    13629 
    13630  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13631  fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
    13632  allocation);
    13633  Flush();
    13634 }
    13635 
    13636 void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
    13637  VmaAllocation allocation)
    13638 {
    13639  CallParams callParams;
    13640  GetBasicParams(callParams);
    13641 
    13642  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13643  fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
    13644  allocation);
    13645  Flush();
    13646 }
    13647 
    13648 void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
    13649  VmaPool pool)
    13650 {
    13651  CallParams callParams;
    13652  GetBasicParams(callParams);
    13653 
    13654  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13655  fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
    13656  pool);
    13657  Flush();
    13658 }
    13659 
    13660 void VmaRecorder::RecordDefragmentationBegin(uint32_t frameIndex,
    13661  const VmaDefragmentationInfo2& info,
    13663 {
    13664  CallParams callParams;
    13665  GetBasicParams(callParams);
    13666 
    13667  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13668  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationBegin,%u,", callParams.threadId, callParams.time, frameIndex,
    13669  info.flags);
    13670  PrintPointerList(info.allocationCount, info.pAllocations);
    13671  fprintf(m_File, ",");
    13672  PrintPointerList(info.poolCount, info.pPools);
    13673  fprintf(m_File, ",%llu,%u,%llu,%u,%p,%p\n",
    13674  info.maxCpuBytesToMove,
    13676  info.maxGpuBytesToMove,
    13678  info.commandBuffer,
    13679  ctx);
    13680  Flush();
    13681 }
    13682 
    13683 void VmaRecorder::RecordDefragmentationEnd(uint32_t frameIndex,
    13685 {
    13686  CallParams callParams;
    13687  GetBasicParams(callParams);
    13688 
    13689  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13690  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationEnd,%p\n", callParams.threadId, callParams.time, frameIndex,
    13691  ctx);
    13692  Flush();
    13693 }
    13694 
    13695 VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
    13696 {
    13697  if(pUserData != VMA_NULL)
    13698  {
    13699  if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
    13700  {
    13701  m_Str = (const char*)pUserData;
    13702  }
    13703  else
    13704  {
    13705  sprintf_s(m_PtrStr, "%p", pUserData);
    13706  m_Str = m_PtrStr;
    13707  }
    13708  }
    13709  else
    13710  {
    13711  m_Str = "";
    13712  }
    13713 }
    13714 
// Writes the "Config" section of the recording: physical device identity and
// limits, memory heap/type layout, enabled extensions, and the values of the
// VMA_DEBUG_* compile-time macros. Recorded so a replay tool can detect
// environment differences. Not mutex-protected: called once during allocator
// initialization, before any concurrent recording can happen.
void VmaRecorder::WriteConfiguration(
    const VkPhysicalDeviceProperties& devProps,
    const VkPhysicalDeviceMemoryProperties& memProps,
    bool dedicatedAllocationExtensionEnabled)
{
    fprintf(m_File, "Config,Begin\n");

    // Device identity.
    fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
    fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
    fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
    fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
    fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
    fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);

    // Device limits relevant to allocation behavior.
    fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
    fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
    fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);

    // Memory heap and type layout.
    fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
    for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
    {
        fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
        fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
    }
    fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
    for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
    {
        fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
        fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
    }

    fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);

    // Compile-time configuration macros of this build.
    fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
    fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
    fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
    fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
    fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);

    fprintf(m_File, "Config,End\n");
}
    13760 
// Fills outParams with the calling thread's id and the elapsed time in
// seconds since Init() captured m_StartCounter, using the Win32
// high-resolution performance counter.
void VmaRecorder::GetBasicParams(CallParams& outParams)
{
    outParams.threadId = GetCurrentThreadId();

    LARGE_INTEGER counter;
    QueryPerformanceCounter(&counter);
    // Ticks-since-start divided by ticks-per-second gives seconds as double.
    outParams.time = (double)(counter.QuadPart - m_StartCounter) / (double)m_Freq;
}
    13769 
    13770 void VmaRecorder::Flush()
    13771 {
    13772  if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
    13773  {
    13774  fflush(m_File);
    13775  }
    13776 }
    13777 
    13778 #endif // #if VMA_RECORDING_ENABLED
    13779 
    13781 // VmaAllocator_T
    13782 
    13783 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
    13784  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
    13785  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
    13786  m_hDevice(pCreateInfo->device),
    13787  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
    13788  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
    13789  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
    13790  m_PreferredLargeHeapBlockSize(0),
    13791  m_PhysicalDevice(pCreateInfo->physicalDevice),
    13792  m_CurrentFrameIndex(0),
    13793  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
    13794  m_NextPoolId(0)
    13796  ,m_pRecorder(VMA_NULL)
    13797 #endif
    13798 {
    13799  if(VMA_DEBUG_DETECT_CORRUPTION)
    13800  {
    13801  // Needs to be multiply of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
    13802  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
    13803  }
    13804 
    13805  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
    13806 
    13807 #if !(VMA_DEDICATED_ALLOCATION)
    13809  {
    13810  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
    13811  }
    13812 #endif
    13813 
    13814  memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks));
    13815  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
    13816  memset(&m_MemProps, 0, sizeof(m_MemProps));
    13817 
    13818  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
    13819  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
    13820 
    13821  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    13822  {
    13823  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
    13824  }
    13825 
    13826  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
    13827  {
    13828  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
    13829  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
    13830  }
    13831 
    13832  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
    13833 
    13834  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
    13835  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
    13836 
    13837  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
    13838  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
    13839  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
    13840  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
    13841 
    13842  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
    13843  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
    13844 
    13845  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
    13846  {
    13847  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
    13848  {
    13849  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
    13850  if(limit != VK_WHOLE_SIZE)
    13851  {
    13852  m_HeapSizeLimit[heapIndex] = limit;
    13853  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
    13854  {
    13855  m_MemProps.memoryHeaps[heapIndex].size = limit;
    13856  }
    13857  }
    13858  }
    13859  }
    13860 
    13861  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    13862  {
    13863  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
    13864 
    13865  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
    13866  this,
    13867  memTypeIndex,
    13868  preferredBlockSize,
    13869  0,
    13870  SIZE_MAX,
    13871  GetBufferImageGranularity(),
    13872  pCreateInfo->frameInUseCount,
    13873  false, // isCustomPool
    13874  false, // explicitBlockSize
    13875  false); // linearAlgorithm
    13876  // No need to call m_pBlockVectors[memTypeIndex][blockVectorTypeIndex]->CreateMinBlocks here,
    13877  // becase minBlockCount is 0.
    13878  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
    13879 
    13880  }
    13881 }
    13882 
// Second-stage initialization, separated from the constructor because it can
// fail: sets up call recording when requested via pRecordSettings.
// Returns VK_SUCCESS, the recorder's init error, or
// VK_ERROR_FEATURE_NOT_PRESENT when recording support is compiled out.
VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
{
    VkResult res = VK_SUCCESS;

    // Recording is requested only when a non-empty file path is given.
    if(pCreateInfo->pRecordSettings != VMA_NULL &&
        !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
    {
#if VMA_RECORDING_ENABLED
        m_pRecorder = vma_new(this, VmaRecorder)();
        res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
        if(res != VK_SUCCESS)
        {
            return res;
        }
        // Record the environment snapshot and the creation call itself.
        m_pRecorder->WriteConfiguration(
            m_PhysicalDeviceProperties,
            m_MemProps,
            m_UseKhrDedicatedAllocation);
        m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
#else
        VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
        return VK_ERROR_FEATURE_NOT_PRESENT;
#endif
    }

    return res;
}
    13910 
// Destroys the allocator: finalizes recording, then frees the per-memory-type
// dedicated-allocation lists and default block vectors. All custom pools must
// already have been destroyed by the user.
VmaAllocator_T::~VmaAllocator_T()
{
#if VMA_RECORDING_ENABLED
    if(m_pRecorder != VMA_NULL)
    {
        // Log the destruction before closing the recording file.
        m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
        vma_delete(this, m_pRecorder);
    }
#endif

    VMA_ASSERT(m_Pools.empty());

    // Destroy in reverse memory-type order.
    for(size_t i = GetMemoryTypeCount(); i--; )
    {
        vma_delete(this, m_pDedicatedAllocations[i]);
        vma_delete(this, m_pBlockVectors[i]);
    }
}
    13929 
// Populates m_VulkanFunctions. With VMA_STATIC_VULKAN_FUNCTIONS == 1 the
// statically-linked Vulkan entry points are used first (the KHR
// dedicated-allocation pair is fetched via vkGetDeviceProcAddr); any pointers
// the user supplies in pVulkanFunctions then override them. Finally asserts
// that every required function pointer ended up non-null.
void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
{
#if VMA_STATIC_VULKAN_FUNCTIONS == 1
    m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
    m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
    m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
    m_VulkanFunctions.vkMapMemory = &vkMapMemory;
    m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
    m_VulkanFunctions.vkFlushMappedMemoryRanges = &vkFlushMappedMemoryRanges;
    m_VulkanFunctions.vkInvalidateMappedMemoryRanges = &vkInvalidateMappedMemoryRanges;
    m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
    m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
    m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
    m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
    m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
    m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
    m_VulkanFunctions.vkCreateImage = &vkCreateImage;
    m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
    m_VulkanFunctions.vkCmdCopyBuffer = &vkCmdCopyBuffer;
#if VMA_DEDICATED_ALLOCATION
    if(m_UseKhrDedicatedAllocation)
    {
        // Extension functions are not exported statically; resolve them
        // from the device at runtime.
        m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
            (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
        m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
            (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
    }
#endif // #if VMA_DEDICATED_ALLOCATION
#endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1

// Copies one user-supplied pointer over the default, if provided.
#define VMA_COPY_IF_NOT_NULL(funcName) \
    if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;

    if(pVulkanFunctions != VMA_NULL)
    {
        VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
        VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
        VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
        VMA_COPY_IF_NOT_NULL(vkFreeMemory);
        VMA_COPY_IF_NOT_NULL(vkMapMemory);
        VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
        VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
        VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
        VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
        VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
        VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
        VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
        VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
        VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
        VMA_COPY_IF_NOT_NULL(vkCreateImage);
        VMA_COPY_IF_NOT_NULL(vkDestroyImage);
        VMA_COPY_IF_NOT_NULL(vkCmdCopyBuffer);
#if VMA_DEDICATED_ALLOCATION
        VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
        VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
#endif
    }

#undef VMA_COPY_IF_NOT_NULL

    // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
    // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
    VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCmdCopyBuffer != VMA_NULL);
#if VMA_DEDICATED_ALLOCATION
    if(m_UseKhrDedicatedAllocation)
    {
        VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
        VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
    }
#endif
}
    14018 
    14019 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
    14020 {
    14021  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    14022  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
    14023  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
    14024  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
    14025 }
    14026 
// Allocates memory of one specific memory type. Tries sub-allocation from the
// default block vector first (unless dedicated memory is forced), then falls
// back to a dedicated VkDeviceMemory allocation.
VkResult VmaAllocator_T::AllocateMemoryOfType(
    VkDeviceSize size,
    VkDeviceSize alignment,
    bool dedicatedAllocation,
    VkBuffer dedicatedBuffer,
    VkImage dedicatedImage,
    const VmaAllocationCreateInfo& createInfo,
    uint32_t memTypeIndex,
    VmaSuballocationType suballocType,
    VmaAllocation* pAllocation)
{
    VMA_ASSERT(pAllocation != VMA_NULL);
    // NOTE(review): 'vkMemReq' is not in scope in this function (the parameter
    // is 'size'); this only compiles when VMA_DEBUG_LOG expands to nothing.
    // Should presumably log 'size' -- confirm against upstream.
    VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, vkMemReq.size);

    // Work on a local copy so flags can be adjusted without touching the caller's struct.
    VmaAllocationCreateInfo finalCreateInfo = createInfo;

    // If memory type is not HOST_VISIBLE, disable MAPPED.
    if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
        (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    {
        finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
    }

    VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
    VMA_ASSERT(blockVector);

    const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
    bool preferDedicatedMemory =
        VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
        dedicatedAllocation ||
        // Heuristics: Allocate dedicated memory if requested size if greater than half of preferred block size.
        size > preferredBlockSize / 2;

    if(preferDedicatedMemory &&
        (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
        finalCreateInfo.pool == VK_NULL_HANDLE)
    {
        // NOTE(review): body appears truncated in this view -- expected to set
        // VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT on finalCreateInfo.flags
        // so the branch below takes the dedicated path; verify against upstream.
    }

    if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
    {
        // Dedicated memory requested but also forbidden to allocate: impossible.
        if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
        {
            return VK_ERROR_OUT_OF_DEVICE_MEMORY;
        }
        else
        {
            return AllocateDedicatedMemory(
                size,
                suballocType,
                memTypeIndex,
                (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
                (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
                finalCreateInfo.pUserData,
                dedicatedBuffer,
                dedicatedImage,
                pAllocation);
        }
    }
    else
    {
        // First attempt: sub-allocate from the default block vector of this memory type.
        VkResult res = blockVector->Allocate(
            VK_NULL_HANDLE, // hCurrentPool
            m_CurrentFrameIndex.load(),
            size,
            alignment,
            finalCreateInfo,
            suballocType,
            pAllocation);
        if(res == VK_SUCCESS)
        {
            return res;
        }

        // 5. Try dedicated memory.
        if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
        {
            return VK_ERROR_OUT_OF_DEVICE_MEMORY;
        }
        else
        {
            res = AllocateDedicatedMemory(
                size,
                suballocType,
                memTypeIndex,
                (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
                (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
                finalCreateInfo.pUserData,
                dedicatedBuffer,
                dedicatedImage,
                pAllocation);
            if(res == VK_SUCCESS)
            {
                // Succeeded: AllocateDedicatedMemory already filled *pAllocation, nothing more to do here.
                VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
                return VK_SUCCESS;
            }
            else
            {
                // Everything failed: Return error code.
                VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
                return res;
            }
        }
    }
}
    14134 
// Allocates a whole dedicated VkDeviceMemory object for a single allocation,
// optionally chaining VkMemoryDedicatedAllocateInfoKHR for the given buffer or
// image, optionally persistently mapping it, then registers the result in
// m_pDedicatedAllocations. On mapping failure the memory is freed again.
VkResult VmaAllocator_T::AllocateDedicatedMemory(
    VkDeviceSize size,
    VmaSuballocationType suballocType,
    uint32_t memTypeIndex,
    bool map,
    bool isUserDataString,
    void* pUserData,
    VkBuffer dedicatedBuffer,
    VkImage dedicatedImage,
    VmaAllocation* pAllocation)
{
    VMA_ASSERT(pAllocation);

    VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    allocInfo.memoryTypeIndex = memTypeIndex;
    allocInfo.allocationSize = size;

#if VMA_DEDICATED_ALLOCATION
    // When VK_KHR_dedicated_allocation is in use, tell the driver which single
    // buffer or image this memory is dedicated to (at most one of the two).
    VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
    if(m_UseKhrDedicatedAllocation)
    {
        if(dedicatedBuffer != VK_NULL_HANDLE)
        {
            VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
            dedicatedAllocInfo.buffer = dedicatedBuffer;
            allocInfo.pNext = &dedicatedAllocInfo;
        }
        else if(dedicatedImage != VK_NULL_HANDLE)
        {
            dedicatedAllocInfo.image = dedicatedImage;
            allocInfo.pNext = &dedicatedAllocInfo;
        }
    }
#endif // #if VMA_DEDICATED_ALLOCATION

    // Allocate VkDeviceMemory.
    VkDeviceMemory hMemory = VK_NULL_HANDLE;
    VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
    if(res < 0)
    {
        VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
        return res;
    }

    // Persistently map the whole memory object if requested.
    void* pMappedData = VMA_NULL;
    if(map)
    {
        res = (*m_VulkanFunctions.vkMapMemory)(
            m_hDevice,
            hMemory,
            0,
            VK_WHOLE_SIZE,
            0,
            &pMappedData);
        if(res < 0)
        {
            // Mapping failed: release the memory just allocated before bailing out.
            VMA_DEBUG_LOG(" vkMapMemory FAILED");
            FreeVulkanMemory(memTypeIndex, size, hMemory);
            return res;
        }
    }

    *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load(), isUserDataString);
    (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
    (*pAllocation)->SetUserData(this, pUserData);
    if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    {
        FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
    }

    // Register it in m_pDedicatedAllocations.
    {
        VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocations);
        VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, *pAllocation);
    }

    VMA_DEBUG_LOG(" Allocated DedicatedMemory MemoryTypeIndex=#%u", memTypeIndex);

    return VK_SUCCESS;
}
    14217 
    14218 void VmaAllocator_T::GetBufferMemoryRequirements(
    14219  VkBuffer hBuffer,
    14220  VkMemoryRequirements& memReq,
    14221  bool& requiresDedicatedAllocation,
    14222  bool& prefersDedicatedAllocation) const
    14223 {
    14224 #if VMA_DEDICATED_ALLOCATION
    14225  if(m_UseKhrDedicatedAllocation)
    14226  {
    14227  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
    14228  memReqInfo.buffer = hBuffer;
    14229 
    14230  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
    14231 
    14232  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
    14233  memReq2.pNext = &memDedicatedReq;
    14234 
    14235  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
    14236 
    14237  memReq = memReq2.memoryRequirements;
    14238  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
    14239  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    14240  }
    14241  else
    14242 #endif // #if VMA_DEDICATED_ALLOCATION
    14243  {
    14244  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
    14245  requiresDedicatedAllocation = false;
    14246  prefersDedicatedAllocation = false;
    14247  }
    14248 }
    14249 
    14250 void VmaAllocator_T::GetImageMemoryRequirements(
    14251  VkImage hImage,
    14252  VkMemoryRequirements& memReq,
    14253  bool& requiresDedicatedAllocation,
    14254  bool& prefersDedicatedAllocation) const
    14255 {
    14256 #if VMA_DEDICATED_ALLOCATION
    14257  if(m_UseKhrDedicatedAllocation)
    14258  {
    14259  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
    14260  memReqInfo.image = hImage;
    14261 
    14262  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
    14263 
    14264  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
    14265  memReq2.pNext = &memDedicatedReq;
    14266 
    14267  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
    14268 
    14269  memReq = memReq2.memoryRequirements;
    14270  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
    14271  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    14272  }
    14273  else
    14274 #endif // #if VMA_DEDICATED_ALLOCATION
    14275  {
    14276  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
    14277  requiresDedicatedAllocation = false;
    14278  prefersDedicatedAllocation = false;
    14279  }
    14280 }
    14281 
// Top-level allocation entry point: validates flag combinations, then either
// allocates from the user-specified custom pool, or picks a memory type via
// vmaFindMemoryTypeIndex and allocates via AllocateMemoryOfType, retrying with
// other compatible memory types when the first choice fails.
VkResult VmaAllocator_T::AllocateMemory(
    const VkMemoryRequirements& vkMemReq,
    bool requiresDedicatedAllocation,
    bool prefersDedicatedAllocation,
    VkBuffer dedicatedBuffer,
    VkImage dedicatedImage,
    const VmaAllocationCreateInfo& createInfo,
    VmaSuballocationType suballocType,
    VmaAllocation* pAllocation)
{
    VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));

    // Zero-size allocations are a usage error.
    if(vkMemReq.size == 0)
    {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    // DEDICATED_MEMORY forces a new VkDeviceMemory; NEVER_ALLOCATE forbids one.
    if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
        (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    {
        VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
        return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    }
    if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
        // NOTE(review): the second operand of this condition is missing from
        // this view -- judging by the assertion message below it should test
        // (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0;
        // confirm against upstream.
    {
        VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
        return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    }
    // A driver-required dedicated allocation is incompatible with NEVER_ALLOCATE
    // and with custom pools.
    if(requiresDedicatedAllocation)
    {
        if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
        {
            VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
            return VK_ERROR_OUT_OF_DEVICE_MEMORY;
        }
        if(createInfo.pool != VK_NULL_HANDLE)
        {
            VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
            return VK_ERROR_OUT_OF_DEVICE_MEMORY;
        }
    }
    if((createInfo.pool != VK_NULL_HANDLE) &&
        ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
    {
        VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
        return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    }

    if(createInfo.pool != VK_NULL_HANDLE)
    {
        // Custom-pool path: alignment is the max of the driver requirement and
        // the minimum alignment configured for the pool's memory type.
        const VkDeviceSize alignmentForPool = VMA_MAX(
            vkMemReq.alignment,
            GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
        return createInfo.pool->m_BlockVector.Allocate(
            createInfo.pool,
            m_CurrentFrameIndex.load(),
            vkMemReq.size,
            alignmentForPool,
            createInfo,
            suballocType,
            pAllocation);
    }
    else
    {
        // Bit mask of memory Vulkan types acceptable for this allocation.
        uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
        uint32_t memTypeIndex = UINT32_MAX;
        VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
        if(res == VK_SUCCESS)
        {
            VkDeviceSize alignmentForMemType = VMA_MAX(
                vkMemReq.alignment,
                GetMemoryTypeMinAlignment(memTypeIndex));

            res = AllocateMemoryOfType(
                vkMemReq.size,
                alignmentForMemType,
                requiresDedicatedAllocation || prefersDedicatedAllocation,
                dedicatedBuffer,
                dedicatedImage,
                createInfo,
                memTypeIndex,
                suballocType,
                pAllocation);
            // Succeeded on first try.
            if(res == VK_SUCCESS)
            {
                return res;
            }
            // Allocation from this memory type failed. Try other compatible memory types.
            else
            {
                for(;;)
                {
                    // Remove old memTypeIndex from list of possibilities.
                    memoryTypeBits &= ~(1u << memTypeIndex);
                    // Find alternative memTypeIndex.
                    res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
                    if(res == VK_SUCCESS)
                    {
                        alignmentForMemType = VMA_MAX(
                            vkMemReq.alignment,
                            GetMemoryTypeMinAlignment(memTypeIndex));

                        res = AllocateMemoryOfType(
                            vkMemReq.size,
                            alignmentForMemType,
                            requiresDedicatedAllocation || prefersDedicatedAllocation,
                            dedicatedBuffer,
                            dedicatedImage,
                            createInfo,
                            memTypeIndex,
                            suballocType,
                            pAllocation);
                        // Allocation from this alternative memory type succeeded.
                        if(res == VK_SUCCESS)
                        {
                            return res;
                        }
                        // else: Allocation from this memory type failed. Try next one - next loop iteration.
                    }
                    // No other matching memory type index could be found.
                    else
                    {
                        // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
                        return VK_ERROR_OUT_OF_DEVICE_MEMORY;
                    }
                }
            }
        }
        // Can't find any single memory type maching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
        else
            return res;
    }
}
    14417 
    14418 void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
    14419 {
    14420  VMA_ASSERT(allocation);
    14421 
    14422  if(TouchAllocation(allocation))
    14423  {
    14424  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    14425  {
    14426  FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
    14427  }
    14428 
    14429  switch(allocation->GetType())
    14430  {
    14431  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    14432  {
    14433  VmaBlockVector* pBlockVector = VMA_NULL;
    14434  VmaPool hPool = allocation->GetPool();
    14435  if(hPool != VK_NULL_HANDLE)
    14436  {
    14437  pBlockVector = &hPool->m_BlockVector;
    14438  }
    14439  else
    14440  {
    14441  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    14442  pBlockVector = m_pBlockVectors[memTypeIndex];
    14443  }
    14444  pBlockVector->Free(allocation);
    14445  }
    14446  break;
    14447  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    14448  FreeDedicatedMemory(allocation);
    14449  break;
    14450  default:
    14451  VMA_ASSERT(0);
    14452  }
    14453  }
    14454 
    14455  allocation->SetUserData(this, VMA_NULL);
    14456  vma_delete(this, allocation);
    14457 }
    14458 
    14459 VkResult VmaAllocator_T::ResizeAllocation(
    14460  const VmaAllocation alloc,
    14461  VkDeviceSize newSize)
    14462 {
    14463  if(newSize == 0 || alloc->GetLastUseFrameIndex() == VMA_FRAME_INDEX_LOST)
    14464  {
    14465  return VK_ERROR_VALIDATION_FAILED_EXT;
    14466  }
    14467  if(newSize == alloc->GetSize())
    14468  {
    14469  return VK_SUCCESS;
    14470  }
    14471 
    14472  switch(alloc->GetType())
    14473  {
    14474  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    14475  return VK_ERROR_FEATURE_NOT_PRESENT;
    14476  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    14477  if(alloc->GetBlock()->m_pMetadata->ResizeAllocation(alloc, newSize))
    14478  {
    14479  alloc->ChangeSize(newSize);
    14480  VMA_HEAVY_ASSERT(alloc->GetBlock()->m_pMetadata->Validate());
    14481  return VK_SUCCESS;
    14482  }
    14483  else
    14484  {
    14485  return VK_ERROR_OUT_OF_POOL_MEMORY;
    14486  }
    14487  default:
    14488  VMA_ASSERT(0);
    14489  return VK_ERROR_VALIDATION_FAILED_EXT;
    14490  }
    14491 }
    14492 
    14493 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
    14494 {
    14495  // Initialize.
    14496  InitStatInfo(pStats->total);
    14497  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
    14498  InitStatInfo(pStats->memoryType[i]);
    14499  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    14500  InitStatInfo(pStats->memoryHeap[i]);
    14501 
    14502  // Process default pools.
    14503  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    14504  {
    14505  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
    14506  VMA_ASSERT(pBlockVector);
    14507  pBlockVector->AddStats(pStats);
    14508  }
    14509 
    14510  // Process custom pools.
    14511  {
    14512  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
    14513  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
    14514  {
    14515  m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
    14516  }
    14517  }
    14518 
    14519  // Process dedicated allocations.
    14520  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    14521  {
    14522  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    14523  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    14524  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
    14525  VMA_ASSERT(pDedicatedAllocVector);
    14526  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
    14527  {
    14528  VmaStatInfo allocationStatInfo;
    14529  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
    14530  VmaAddStatInfo(pStats->total, allocationStatInfo);
    14531  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
    14532  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    14533  }
    14534  }
    14535 
    14536  // Postprocess.
    14537  VmaPostprocessCalcStatInfo(pStats->total);
    14538  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
    14539  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
    14540  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
    14541  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
    14542 }
    14543 
// PCI vendor ID of AMD (4098 == 0x1002). NOTE(review): not referenced within
// this part of the file -- presumably used for vendor-specific behavior
// elsewhere; confirm before removing.
static const uint32_t VMA_VENDOR_ID_AMD = 4098;
    14545 
// Starts a defragmentation operation described by 'info': creates a context,
// registers the requested pools/allocations, and runs Defragment(). If the
// result is anything other than VK_NOT_READY the operation is complete and the
// context is destroyed immediately (*pContext reset to null); VK_NOT_READY
// means the context must stay alive for the caller to finish the operation.
VkResult VmaAllocator_T::DefragmentationBegin(
    const VmaDefragmentationInfo2& info,
    VmaDefragmentationStats* pStats,
    VmaDefragmentationContext* pContext)
{
    // Optional out-array: clear all "allocation changed" flags up front.
    if(info.pAllocationsChanged != VMA_NULL)
    {
        memset(info.pAllocationsChanged, 0, info.allocationCount * sizeof(VkBool32));
    }

    *pContext = vma_new(this, VmaDefragmentationContext_T)(
        this, m_CurrentFrameIndex.load(), info.flags, pStats);

    (*pContext)->AddPools(info.poolCount, info.pPools);
    (*pContext)->AddAllocations(
        // NOTE(review): argument list truncated in this view -- presumably the
        // allocation count, allocation array, and changed-flags array from
        // 'info'; confirm against upstream.

    VkResult res = (*pContext)->Defragment(
        // NOTE(review): leading arguments truncated in this view -- presumably
        // the CPU/GPU byte and allocation move limits from 'info'.
        info.commandBuffer, pStats);

    if(res != VK_NOT_READY)
    {
        vma_delete(this, *pContext);
        *pContext = VMA_NULL;
    }

    return res;
}
    14576 
// Finishes a defragmentation operation by destroying the context object
// returned by DefragmentationBegin(). Always succeeds.
VkResult VmaAllocator_T::DefragmentationEnd(
    VmaDefragmentationContext context)
{
    vma_delete(this, context);
    return VK_SUCCESS;
}
    14583 
// Fills *pAllocationInfo for the given allocation. For allocations that can
// become lost, this also "touches" the allocation: it advances the last-use
// frame index to the current frame via a CAS loop, or reports lost-allocation
// placeholder values if the allocation has already been lost.
void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
{
    if(hAllocation->CanBecomeLost())
    {
        /*
        Warning: This is a carefully designed algorithm.
        Do not modify unless you really know what you're doing :)
        */
        const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        // Loop until the last-use frame index is observed as either LOST or
        // equal to the current frame; otherwise try to advance it atomically.
        for(;;)
        {
            if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
            {
                // Allocation is lost: report placeholder values (no memory, no offset).
                pAllocationInfo->memoryType = UINT32_MAX;
                pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
                pAllocationInfo->offset = 0;
                pAllocationInfo->size = hAllocation->GetSize();
                pAllocationInfo->pMappedData = VMA_NULL;
                pAllocationInfo->pUserData = hAllocation->GetUserData();
                return;
            }
            else if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                // Up to date: report the real values. Note pMappedData is not
                // reported for lost-capable allocations.
                pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
                pAllocationInfo->deviceMemory = hAllocation->GetMemory();
                pAllocationInfo->offset = hAllocation->GetOffset();
                pAllocationInfo->size = hAllocation->GetSize();
                pAllocationInfo->pMappedData = VMA_NULL;
                pAllocationInfo->pUserData = hAllocation->GetUserData();
                return;
            }
            else // Last use time earlier than current time.
            {
                // Try to bump the last-use frame index; on CAS failure
                // localLastUseFrameIndex is reloaded and the loop re-evaluates.
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
    }
    else
    {
#if VMA_STATS_STRING_ENABLED
        // Same frame-index bump as above, kept only for statistics purposes;
        // a non-lost-capable allocation can never be VMA_FRAME_INDEX_LOST.
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
            if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                break;
            }
            else // Last use time earlier than current time.
            {
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
#endif

        pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
        pAllocationInfo->deviceMemory = hAllocation->GetMemory();
        pAllocationInfo->offset = hAllocation->GetOffset();
        pAllocationInfo->size = hAllocation->GetSize();
        pAllocationInfo->pMappedData = hAllocation->GetMappedData();
        pAllocationInfo->pUserData = hAllocation->GetUserData();
    }
}
    14655 
// Marks the allocation as used in the current frame. Returns false if the
// allocation has been lost, true otherwise. Uses the same CAS loop as
// GetAllocationInfo to advance the last-use frame index.
bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
{
    // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
    if(hAllocation->CanBecomeLost())
    {
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
            {
                // Already lost -- cannot be touched.
                return false;
            }
            else if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                // Already marked as used this frame.
                return true;
            }
            else // Last use time earlier than current time.
            {
                // Attempt the atomic bump; retry with the reloaded value on failure.
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
    }
    else
    {
#if VMA_STATS_STRING_ENABLED
        // Frame-index bump kept for statistics; such an allocation can never be lost.
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
            if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                break;
            }
            else // Last use time earlier than current time.
            {
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
#endif

        return true;
    }
}
    14707 
    14708 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
    14709 {
    14710  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
    14711 
    14712  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
    14713 
    14714  if(newCreateInfo.maxBlockCount == 0)
    14715  {
    14716  newCreateInfo.maxBlockCount = SIZE_MAX;
    14717  }
    14718  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
    14719  {
    14720  return VK_ERROR_INITIALIZATION_FAILED;
    14721  }
    14722 
    14723  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
    14724 
    14725  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
    14726 
    14727  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
    14728  if(res != VK_SUCCESS)
    14729  {
    14730  vma_delete(this, *pPool);
    14731  *pPool = VMA_NULL;
    14732  return res;
    14733  }
    14734 
    14735  // Add to m_Pools.
    14736  {
    14737  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
    14738  (*pPool)->SetId(m_NextPoolId++);
    14739  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
    14740  }
    14741 
    14742  return VK_SUCCESS;
    14743 }
    14744 
    14745 void VmaAllocator_T::DestroyPool(VmaPool pool)
    14746 {
    14747  // Remove from m_Pools.
    14748  {
    14749  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
    14750  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
    14751  VMA_ASSERT(success && "Pool not found in Allocator.");
    14752  }
    14753 
    14754  vma_delete(this, pool);
    14755 }
    14756 
// Retrieves statistics for a single custom pool by delegating to its block vector.
void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
{
    pool->m_BlockVector.GetPoolStats(pPoolStats);
}
    14761 
// Atomically stores the caller-supplied current frame index, which the
// lost-allocation logic (TouchAllocation etc.) compares against.
void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
{
    m_CurrentFrameIndex.store(frameIndex);
}
    14766 
// Marks eligible allocations in the given pool as lost, relative to the
// current frame index; the count of newly lost allocations is optionally
// returned through pLostAllocationCount.
void VmaAllocator_T::MakePoolAllocationsLost(
    VmaPool hPool,
    size_t* pLostAllocationCount)
{
    hPool->m_BlockVector.MakePoolAllocationsLost(
        m_CurrentFrameIndex.load(),
        pLostAllocationCount);
}
    14775 
// Runs corruption detection on a single custom pool's block vector.
VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
{
    return hPool->m_BlockVector.CheckCorruption();
}
    14780 
    14781 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
    14782 {
    14783  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
    14784 
    14785  // Process default pools.
    14786  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    14787  {
    14788  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
    14789  {
    14790  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
    14791  VMA_ASSERT(pBlockVector);
    14792  VkResult localRes = pBlockVector->CheckCorruption();
    14793  switch(localRes)
    14794  {
    14795  case VK_ERROR_FEATURE_NOT_PRESENT:
    14796  break;
    14797  case VK_SUCCESS:
    14798  finalRes = VK_SUCCESS;
    14799  break;
    14800  default:
    14801  return localRes;
    14802  }
    14803  }
    14804  }
    14805 
    14806  // Process custom pools.
    14807  {
    14808  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
    14809  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
    14810  {
    14811  if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
    14812  {
    14813  VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
    14814  switch(localRes)
    14815  {
    14816  case VK_ERROR_FEATURE_NOT_PRESENT:
    14817  break;
    14818  case VK_SUCCESS:
    14819  finalRes = VK_SUCCESS;
    14820  break;
    14821  default:
    14822  return localRes;
    14823  }
    14824  }
    14825  }
    14826  }
    14827 
    14828  return finalRes;
    14829 }
    14830 
// Creates a dummy allocation that is already in the "lost" state: its frame
// index is VMA_FRAME_INDEX_LOST and it owns no backing memory.
void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
{
    *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST, false);
    (*pAllocation)->InitLost();
}
    14836 
// Calls vkAllocateMemory, enforcing the optional per-heap size limit and
// notifying the user's pfnAllocate device-memory callback on success.
VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
{
    const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);

    VkResult res;
    // VK_WHOLE_SIZE in m_HeapSizeLimit means "no limit configured for this heap".
    if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    {
        // Budget check, driver call, and budget deduction all happen under the
        // mutex so concurrent allocations cannot overshoot the limit.
        VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
        if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
        {
            res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
            if(res == VK_SUCCESS)
            {
                // Deduct the allocation from the remaining budget.
                m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
            }
        }
        else
        {
            // Budget exhausted: fail without calling the driver.
            res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
        }
    }
    else
    {
        res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
    }

    // Notify the user's allocation callback about the new VkDeviceMemory.
    if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
    {
        (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
    }

    return res;
}
    14870 
    14871 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
    14872 {
    14873  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
    14874  {
    14875  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
    14876  }
    14877 
    14878  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
    14879 
    14880  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
    14881  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    14882  {
    14883  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
    14884  m_HeapSizeLimit[heapIndex] += size;
    14885  }
    14886 }
    14887 
    14888 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
    14889 {
    14890  if(hAllocation->CanBecomeLost())
    14891  {
    14892  return VK_ERROR_MEMORY_MAP_FAILED;
    14893  }
    14894 
    14895  switch(hAllocation->GetType())
    14896  {
    14897  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    14898  {
    14899  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
    14900  char *pBytes = VMA_NULL;
    14901  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
    14902  if(res == VK_SUCCESS)
    14903  {
    14904  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
    14905  hAllocation->BlockAllocMap();
    14906  }
    14907  return res;
    14908  }
    14909  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    14910  return hAllocation->DedicatedAllocMap(this, ppData);
    14911  default:
    14912  VMA_ASSERT(0);
    14913  return VK_ERROR_MEMORY_MAP_FAILED;
    14914  }
    14915 }
    14916 
    14917 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
    14918 {
    14919  switch(hAllocation->GetType())
    14920  {
    14921  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    14922  {
    14923  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
    14924  hAllocation->BlockAllocUnmap();
    14925  pBlock->Unmap(this, 1);
    14926  }
    14927  break;
    14928  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    14929  hAllocation->DedicatedAllocUnmap(this);
    14930  break;
    14931  default:
    14932  VMA_ASSERT(0);
    14933  }
    14934 }
    14935 
    14936 VkResult VmaAllocator_T::BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer)
    14937 {
    14938  VkResult res = VK_SUCCESS;
    14939  switch(hAllocation->GetType())
    14940  {
    14941  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    14942  res = GetVulkanFunctions().vkBindBufferMemory(
    14943  m_hDevice,
    14944  hBuffer,
    14945  hAllocation->GetMemory(),
    14946  0); //memoryOffset
    14947  break;
    14948  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    14949  {
    14950  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    14951  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
    14952  res = pBlock->BindBufferMemory(this, hAllocation, hBuffer);
    14953  break;
    14954  }
    14955  default:
    14956  VMA_ASSERT(0);
    14957  }
    14958  return res;
    14959 }
    14960 
    14961 VkResult VmaAllocator_T::BindImageMemory(VmaAllocation hAllocation, VkImage hImage)
    14962 {
    14963  VkResult res = VK_SUCCESS;
    14964  switch(hAllocation->GetType())
    14965  {
    14966  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    14967  res = GetVulkanFunctions().vkBindImageMemory(
    14968  m_hDevice,
    14969  hImage,
    14970  hAllocation->GetMemory(),
    14971  0); //memoryOffset
    14972  break;
    14973  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    14974  {
    14975  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    14976  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
    14977  res = pBlock->BindImageMemory(this, hAllocation, hImage);
    14978  break;
    14979  }
    14980  default:
    14981  VMA_ASSERT(0);
    14982  }
    14983  return res;
    14984 }
    14985 
    14986 void VmaAllocator_T::FlushOrInvalidateAllocation(
    14987  VmaAllocation hAllocation,
    14988  VkDeviceSize offset, VkDeviceSize size,
    14989  VMA_CACHE_OPERATION op)
    14990 {
    14991  const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex();
    14992  if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
    14993  {
    14994  const VkDeviceSize allocationSize = hAllocation->GetSize();
    14995  VMA_ASSERT(offset <= allocationSize);
    14996 
    14997  const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
    14998 
    14999  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
    15000  memRange.memory = hAllocation->GetMemory();
    15001 
    15002  switch(hAllocation->GetType())
    15003  {
    15004  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    15005  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
    15006  if(size == VK_WHOLE_SIZE)
    15007  {
    15008  memRange.size = allocationSize - memRange.offset;
    15009  }
    15010  else
    15011  {
    15012  VMA_ASSERT(offset + size <= allocationSize);
    15013  memRange.size = VMA_MIN(
    15014  VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize),
    15015  allocationSize - memRange.offset);
    15016  }
    15017  break;
    15018 
    15019  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    15020  {
    15021  // 1. Still within this allocation.
    15022  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
    15023  if(size == VK_WHOLE_SIZE)
    15024  {
    15025  size = allocationSize - offset;
    15026  }
    15027  else
    15028  {
    15029  VMA_ASSERT(offset + size <= allocationSize);
    15030  }
    15031  memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize);
    15032 
    15033  // 2. Adjust to whole block.
    15034  const VkDeviceSize allocationOffset = hAllocation->GetOffset();
    15035  VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
    15036  const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();
    15037  memRange.offset += allocationOffset;
    15038  memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);
    15039 
    15040  break;
    15041  }
    15042 
    15043  default:
    15044  VMA_ASSERT(0);
    15045  }
    15046 
    15047  switch(op)
    15048  {
    15049  case VMA_CACHE_FLUSH:
    15050  (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
    15051  break;
    15052  case VMA_CACHE_INVALIDATE:
    15053  (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
    15054  break;
    15055  default:
    15056  VMA_ASSERT(0);
    15057  }
    15058  }
    15059  // else: Just ignore this call.
    15060 }
    15061 
    15062 void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
    15063 {
    15064  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
    15065 
    15066  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    15067  {
    15068  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    15069  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
    15070  VMA_ASSERT(pDedicatedAllocations);
    15071  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
    15072  VMA_ASSERT(success);
    15073  }
    15074 
    15075  VkDeviceMemory hMemory = allocation->GetMemory();
    15076 
    15077  /*
    15078  There is no need to call this, because Vulkan spec allows to skip vkUnmapMemory
    15079  before vkFreeMemory.
    15080 
    15081  if(allocation->GetMappedData() != VMA_NULL)
    15082  {
    15083  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
    15084  }
    15085  */
    15086 
    15087  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
    15088 
    15089  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
    15090 }
    15091 
    15092 void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
    15093 {
    15094  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
    15095  !hAllocation->CanBecomeLost() &&
    15096  (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    15097  {
    15098  void* pData = VMA_NULL;
    15099  VkResult res = Map(hAllocation, &pData);
    15100  if(res == VK_SUCCESS)
    15101  {
    15102  memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
    15103  FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
    15104  Unmap(hAllocation);
    15105  }
    15106  else
    15107  {
    15108  VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
    15109  }
    15110  }
    15111 }
    15112 
    15113 #if VMA_STATS_STRING_ENABLED
    15114 
    15115 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
    15116 {
    15117  bool dedicatedAllocationsStarted = false;
    15118  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    15119  {
    15120  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    15121  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
    15122  VMA_ASSERT(pDedicatedAllocVector);
    15123  if(pDedicatedAllocVector->empty() == false)
    15124  {
    15125  if(dedicatedAllocationsStarted == false)
    15126  {
    15127  dedicatedAllocationsStarted = true;
    15128  json.WriteString("DedicatedAllocations");
    15129  json.BeginObject();
    15130  }
    15131 
    15132  json.BeginString("Type ");
    15133  json.ContinueString(memTypeIndex);
    15134  json.EndString();
    15135 
    15136  json.BeginArray();
    15137 
    15138  for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
    15139  {
    15140  json.BeginObject(true);
    15141  const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
    15142  hAlloc->PrintParameters(json);
    15143  json.EndObject();
    15144  }
    15145 
    15146  json.EndArray();
    15147  }
    15148  }
    15149  if(dedicatedAllocationsStarted)
    15150  {
    15151  json.EndObject();
    15152  }
    15153 
    15154  {
    15155  bool allocationsStarted = false;
    15156  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    15157  {
    15158  if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
    15159  {
    15160  if(allocationsStarted == false)
    15161  {
    15162  allocationsStarted = true;
    15163  json.WriteString("DefaultPools");
    15164  json.BeginObject();
    15165  }
    15166 
    15167  json.BeginString("Type ");
    15168  json.ContinueString(memTypeIndex);
    15169  json.EndString();
    15170 
    15171  m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
    15172  }
    15173  }
    15174  if(allocationsStarted)
    15175  {
    15176  json.EndObject();
    15177  }
    15178  }
    15179 
    15180  // Custom pools
    15181  {
    15182  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
    15183  const size_t poolCount = m_Pools.size();
    15184  if(poolCount > 0)
    15185  {
    15186  json.WriteString("Pools");
    15187  json.BeginObject();
    15188  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
    15189  {
    15190  json.BeginString();
    15191  json.ContinueString(m_Pools[poolIndex]->GetId());
    15192  json.EndString();
    15193 
    15194  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
    15195  }
    15196  json.EndObject();
    15197  }
    15198  }
    15199 }
    15200 
    15201 #endif // #if VMA_STATS_STRING_ENABLED
    15202 
    15204 // Public interface
    15205 
    15206 VkResult vmaCreateAllocator(
    15207  const VmaAllocatorCreateInfo* pCreateInfo,
    15208  VmaAllocator* pAllocator)
    15209 {
    15210  VMA_ASSERT(pCreateInfo && pAllocator);
    15211  VMA_DEBUG_LOG("vmaCreateAllocator");
    15212  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
    15213  return (*pAllocator)->Init(pCreateInfo);
    15214 }
    15215 
    15216 void vmaDestroyAllocator(
    15217  VmaAllocator allocator)
    15218 {
    15219  if(allocator != VK_NULL_HANDLE)
    15220  {
    15221  VMA_DEBUG_LOG("vmaDestroyAllocator");
    15222  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
    15223  vma_delete(&allocationCallbacks, allocator);
    15224  }
    15225 }
    15226 
    15228  VmaAllocator allocator,
    15229  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
    15230 {
    15231  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
    15232  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
    15233 }
    15234 
    15236  VmaAllocator allocator,
    15237  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
    15238 {
    15239  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
    15240  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
    15241 }
    15242 
    15244  VmaAllocator allocator,
    15245  uint32_t memoryTypeIndex,
    15246  VkMemoryPropertyFlags* pFlags)
    15247 {
    15248  VMA_ASSERT(allocator && pFlags);
    15249  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
    15250  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
    15251 }
    15252 
    15254  VmaAllocator allocator,
    15255  uint32_t frameIndex)
    15256 {
    15257  VMA_ASSERT(allocator);
    15258  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
    15259 
    15260  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    15261 
    15262  allocator->SetCurrentFrameIndex(frameIndex);
    15263 }
    15264 
    15265 void vmaCalculateStats(
    15266  VmaAllocator allocator,
    15267  VmaStats* pStats)
    15268 {
    15269  VMA_ASSERT(allocator && pStats);
    15270  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    15271  allocator->CalculateStats(pStats);
    15272 }
    15273 
    15274 #if VMA_STATS_STRING_ENABLED
    15275 
    15276 void vmaBuildStatsString(
    15277  VmaAllocator allocator,
    15278  char** ppStatsString,
    15279  VkBool32 detailedMap)
    15280 {
    15281  VMA_ASSERT(allocator && ppStatsString);
    15282  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    15283 
    15284  VmaStringBuilder sb(allocator);
    15285  {
    15286  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
    15287  json.BeginObject();
    15288 
    15289  VmaStats stats;
    15290  allocator->CalculateStats(&stats);
    15291 
    15292  json.WriteString("Total");
    15293  VmaPrintStatInfo(json, stats.total);
    15294 
    15295  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
    15296  {
    15297  json.BeginString("Heap ");
    15298  json.ContinueString(heapIndex);
    15299  json.EndString();
    15300  json.BeginObject();
    15301 
    15302  json.WriteString("Size");
    15303  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
    15304 
    15305  json.WriteString("Flags");
    15306  json.BeginArray(true);
    15307  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
    15308  {
    15309  json.WriteString("DEVICE_LOCAL");
    15310  }
    15311  json.EndArray();
    15312 
    15313  if(stats.memoryHeap[heapIndex].blockCount > 0)
    15314  {
    15315  json.WriteString("Stats");
    15316  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
    15317  }
    15318 
    15319  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
    15320  {
    15321  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
    15322  {
    15323  json.BeginString("Type ");
    15324  json.ContinueString(typeIndex);
    15325  json.EndString();
    15326 
    15327  json.BeginObject();
    15328 
    15329  json.WriteString("Flags");
    15330  json.BeginArray(true);
    15331  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
    15332  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
    15333  {
    15334  json.WriteString("DEVICE_LOCAL");
    15335  }
    15336  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    15337  {
    15338  json.WriteString("HOST_VISIBLE");
    15339  }
    15340  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
    15341  {
    15342  json.WriteString("HOST_COHERENT");
    15343  }
    15344  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
    15345  {
    15346  json.WriteString("HOST_CACHED");
    15347  }
    15348  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
    15349  {
    15350  json.WriteString("LAZILY_ALLOCATED");
    15351  }
    15352  json.EndArray();
    15353 
    15354  if(stats.memoryType[typeIndex].blockCount > 0)
    15355  {
    15356  json.WriteString("Stats");
    15357  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
    15358  }
    15359 
    15360  json.EndObject();
    15361  }
    15362  }
    15363 
    15364  json.EndObject();
    15365  }
    15366  if(detailedMap == VK_TRUE)
    15367  {
    15368  allocator->PrintDetailedMap(json);
    15369  }
    15370 
    15371  json.EndObject();
    15372  }
    15373 
    15374  const size_t len = sb.GetLength();
    15375  char* const pChars = vma_new_array(allocator, char, len + 1);
    15376  if(len > 0)
    15377  {
    15378  memcpy(pChars, sb.GetData(), len);
    15379  }
    15380  pChars[len] = '\0';
    15381  *ppStatsString = pChars;
    15382 }
    15383 
    15384 void vmaFreeStatsString(
    15385  VmaAllocator allocator,
    15386  char* pStatsString)
    15387 {
    15388  if(pStatsString != VMA_NULL)
    15389  {
    15390  VMA_ASSERT(allocator);
    15391  size_t len = strlen(pStatsString);
    15392  vma_delete_array(allocator, pStatsString, len + 1);
    15393  }
    15394 }
    15395 
    15396 #endif // #if VMA_STATS_STRING_ENABLED
    15397 
    15398 /*
    15399 This function is not protected by any mutex because it just reads immutable data.
    15400 */
    15401 VkResult vmaFindMemoryTypeIndex(
    15402  VmaAllocator allocator,
    15403  uint32_t memoryTypeBits,
    15404  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    15405  uint32_t* pMemoryTypeIndex)
    15406 {
    15407  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    15408  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    15409  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    15410 
    15411  if(pAllocationCreateInfo->memoryTypeBits != 0)
    15412  {
    15413  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
    15414  }
    15415 
    15416  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
    15417  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
    15418 
    15419  const bool mapped = (pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    15420  if(mapped)
    15421  {
    15422  preferredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    15423  }
    15424 
    15425  // Convert usage to requiredFlags and preferredFlags.
    15426  switch(pAllocationCreateInfo->usage)
    15427  {
    15429  break;
    15431  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    15432  {
    15433  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    15434  }
    15435  break;
    15437  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    15438  break;
    15440  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    15441  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    15442  {
    15443  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    15444  }
    15445  break;
    15447  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    15448  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
    15449  break;
    15450  default:
    15451  break;
    15452  }
    15453 
    15454  *pMemoryTypeIndex = UINT32_MAX;
    15455  uint32_t minCost = UINT32_MAX;
    15456  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
    15457  memTypeIndex < allocator->GetMemoryTypeCount();
    15458  ++memTypeIndex, memTypeBit <<= 1)
    15459  {
    15460  // This memory type is acceptable according to memoryTypeBits bitmask.
    15461  if((memTypeBit & memoryTypeBits) != 0)
    15462  {
    15463  const VkMemoryPropertyFlags currFlags =
    15464  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
    15465  // This memory type contains requiredFlags.
    15466  if((requiredFlags & ~currFlags) == 0)
    15467  {
    15468  // Calculate cost as number of bits from preferredFlags not present in this memory type.
    15469  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
    15470  // Remember memory type with lowest cost.
    15471  if(currCost < minCost)
    15472  {
    15473  *pMemoryTypeIndex = memTypeIndex;
    15474  if(currCost == 0)
    15475  {
    15476  return VK_SUCCESS;
    15477  }
    15478  minCost = currCost;
    15479  }
    15480  }
    15481  }
    15482  }
    15483  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
    15484 }
    15485 
    15487  VmaAllocator allocator,
    15488  const VkBufferCreateInfo* pBufferCreateInfo,
    15489  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    15490  uint32_t* pMemoryTypeIndex)
    15491 {
    15492  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    15493  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
    15494  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    15495  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    15496 
    15497  const VkDevice hDev = allocator->m_hDevice;
    15498  VkBuffer hBuffer = VK_NULL_HANDLE;
    15499  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
    15500  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
    15501  if(res == VK_SUCCESS)
    15502  {
    15503  VkMemoryRequirements memReq = {};
    15504  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
    15505  hDev, hBuffer, &memReq);
    15506 
    15507  res = vmaFindMemoryTypeIndex(
    15508  allocator,
    15509  memReq.memoryTypeBits,
    15510  pAllocationCreateInfo,
    15511  pMemoryTypeIndex);
    15512 
    15513  allocator->GetVulkanFunctions().vkDestroyBuffer(
    15514  hDev, hBuffer, allocator->GetAllocationCallbacks());
    15515  }
    15516  return res;
    15517 }
    15518 
    15520  VmaAllocator allocator,
    15521  const VkImageCreateInfo* pImageCreateInfo,
    15522  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    15523  uint32_t* pMemoryTypeIndex)
    15524 {
    15525  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    15526  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
    15527  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    15528  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    15529 
    15530  const VkDevice hDev = allocator->m_hDevice;
    15531  VkImage hImage = VK_NULL_HANDLE;
    15532  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
    15533  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
    15534  if(res == VK_SUCCESS)
    15535  {
    15536  VkMemoryRequirements memReq = {};
    15537  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
    15538  hDev, hImage, &memReq);
    15539 
    15540  res = vmaFindMemoryTypeIndex(
    15541  allocator,
    15542  memReq.memoryTypeBits,
    15543  pAllocationCreateInfo,
    15544  pMemoryTypeIndex);
    15545 
    15546  allocator->GetVulkanFunctions().vkDestroyImage(
    15547  hDev, hImage, allocator->GetAllocationCallbacks());
    15548  }
    15549  return res;
    15550 }
    15551 
    15552 VkResult vmaCreatePool(
    15553  VmaAllocator allocator,
    15554  const VmaPoolCreateInfo* pCreateInfo,
    15555  VmaPool* pPool)
    15556 {
    15557  VMA_ASSERT(allocator && pCreateInfo && pPool);
    15558 
    15559  VMA_DEBUG_LOG("vmaCreatePool");
    15560 
    15561  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    15562 
    15563  VkResult res = allocator->CreatePool(pCreateInfo, pPool);
    15564 
    15565 #if VMA_RECORDING_ENABLED
    15566  if(allocator->GetRecorder() != VMA_NULL)
    15567  {
    15568  allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
    15569  }
    15570 #endif
    15571 
    15572  return res;
    15573 }
    15574 
    15575 void vmaDestroyPool(
    15576  VmaAllocator allocator,
    15577  VmaPool pool)
    15578 {
    15579  VMA_ASSERT(allocator);
    15580 
    15581  if(pool == VK_NULL_HANDLE)
    15582  {
    15583  return;
    15584  }
    15585 
    15586  VMA_DEBUG_LOG("vmaDestroyPool");
    15587 
    15588  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    15589 
    15590 #if VMA_RECORDING_ENABLED
    15591  if(allocator->GetRecorder() != VMA_NULL)
    15592  {
    15593  allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
    15594  }
    15595 #endif
    15596 
    15597  allocator->DestroyPool(pool);
    15598 }
    15599 
    15600 void vmaGetPoolStats(
    15601  VmaAllocator allocator,
    15602  VmaPool pool,
    15603  VmaPoolStats* pPoolStats)
    15604 {
    15605  VMA_ASSERT(allocator && pool && pPoolStats);
    15606 
    15607  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    15608 
    15609  allocator->GetPoolStats(pool, pPoolStats);
    15610 }
    15611 
    15613  VmaAllocator allocator,
    15614  VmaPool pool,
    15615  size_t* pLostAllocationCount)
    15616 {
    15617  VMA_ASSERT(allocator && pool);
    15618 
    15619  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    15620 
    15621 #if VMA_RECORDING_ENABLED
    15622  if(allocator->GetRecorder() != VMA_NULL)
    15623  {
    15624  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
    15625  }
    15626 #endif
    15627 
    15628  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
    15629 }
    15630 
    15631 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
    15632 {
    15633  VMA_ASSERT(allocator && pool);
    15634 
    15635  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    15636 
    15637  VMA_DEBUG_LOG("vmaCheckPoolCorruption");
    15638 
    15639  return allocator->CheckPoolCorruption(pool);
    15640 }
    15641 
    15642 VkResult vmaAllocateMemory(
    15643  VmaAllocator allocator,
    15644  const VkMemoryRequirements* pVkMemoryRequirements,
    15645  const VmaAllocationCreateInfo* pCreateInfo,
    15646  VmaAllocation* pAllocation,
    15647  VmaAllocationInfo* pAllocationInfo)
    15648 {
    15649  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
    15650 
    15651  VMA_DEBUG_LOG("vmaAllocateMemory");
    15652 
    15653  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    15654 
    15655  VkResult result = allocator->AllocateMemory(
    15656  *pVkMemoryRequirements,
    15657  false, // requiresDedicatedAllocation
    15658  false, // prefersDedicatedAllocation
    15659  VK_NULL_HANDLE, // dedicatedBuffer
    15660  VK_NULL_HANDLE, // dedicatedImage
    15661  *pCreateInfo,
    15662  VMA_SUBALLOCATION_TYPE_UNKNOWN,
    15663  pAllocation);
    15664 
    15665 #if VMA_RECORDING_ENABLED
    15666  if(allocator->GetRecorder() != VMA_NULL)
    15667  {
    15668  allocator->GetRecorder()->RecordAllocateMemory(
    15669  allocator->GetCurrentFrameIndex(),
    15670  *pVkMemoryRequirements,
    15671  *pCreateInfo,
    15672  *pAllocation);
    15673  }
    15674 #endif
    15675 
    15676  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
    15677  {
    15678  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    15679  }
    15680 
    15681  return result;
    15682 }
    15683 
    15685  VmaAllocator allocator,
    15686  VkBuffer buffer,
    15687  const VmaAllocationCreateInfo* pCreateInfo,
    15688  VmaAllocation* pAllocation,
    15689  VmaAllocationInfo* pAllocationInfo)
    15690 {
    15691  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
    15692 
    15693  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
    15694 
    15695  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    15696 
    15697  VkMemoryRequirements vkMemReq = {};
    15698  bool requiresDedicatedAllocation = false;
    15699  bool prefersDedicatedAllocation = false;
    15700  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
    15701  requiresDedicatedAllocation,
    15702  prefersDedicatedAllocation);
    15703 
    15704  VkResult result = allocator->AllocateMemory(
    15705  vkMemReq,
    15706  requiresDedicatedAllocation,
    15707  prefersDedicatedAllocation,
    15708  buffer, // dedicatedBuffer
    15709  VK_NULL_HANDLE, // dedicatedImage
    15710  *pCreateInfo,
    15711  VMA_SUBALLOCATION_TYPE_BUFFER,
    15712  pAllocation);
    15713 
    15714 #if VMA_RECORDING_ENABLED
    15715  if(allocator->GetRecorder() != VMA_NULL)
    15716  {
    15717  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
    15718  allocator->GetCurrentFrameIndex(),
    15719  vkMemReq,
    15720  requiresDedicatedAllocation,
    15721  prefersDedicatedAllocation,
    15722  *pCreateInfo,
    15723  *pAllocation);
    15724  }
    15725 #endif
    15726 
    15727  if(pAllocationInfo && result == VK_SUCCESS)
    15728  {
    15729  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    15730  }
    15731 
    15732  return result;
    15733 }
    15734 
    15735 VkResult vmaAllocateMemoryForImage(
    15736  VmaAllocator allocator,
    15737  VkImage image,
    15738  const VmaAllocationCreateInfo* pCreateInfo,
    15739  VmaAllocation* pAllocation,
    15740  VmaAllocationInfo* pAllocationInfo)
    15741 {
    15742  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
    15743 
    15744  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
    15745 
    15746  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    15747 
    15748  VkMemoryRequirements vkMemReq = {};
    15749  bool requiresDedicatedAllocation = false;
    15750  bool prefersDedicatedAllocation = false;
    15751  allocator->GetImageMemoryRequirements(image, vkMemReq,
    15752  requiresDedicatedAllocation, prefersDedicatedAllocation);
    15753 
    15754  VkResult result = allocator->AllocateMemory(
    15755  vkMemReq,
    15756  requiresDedicatedAllocation,
    15757  prefersDedicatedAllocation,
    15758  VK_NULL_HANDLE, // dedicatedBuffer
    15759  image, // dedicatedImage
    15760  *pCreateInfo,
    15761  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
    15762  pAllocation);
    15763 
    15764 #if VMA_RECORDING_ENABLED
    15765  if(allocator->GetRecorder() != VMA_NULL)
    15766  {
    15767  allocator->GetRecorder()->RecordAllocateMemoryForImage(
    15768  allocator->GetCurrentFrameIndex(),
    15769  vkMemReq,
    15770  requiresDedicatedAllocation,
    15771  prefersDedicatedAllocation,
    15772  *pCreateInfo,
    15773  *pAllocation);
    15774  }
    15775 #endif
    15776 
    15777  if(pAllocationInfo && result == VK_SUCCESS)
    15778  {
    15779  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    15780  }
    15781 
    15782  return result;
    15783 }
    15784 
    15785 void vmaFreeMemory(
    15786  VmaAllocator allocator,
    15787  VmaAllocation allocation)
    15788 {
    15789  VMA_ASSERT(allocator);
    15790 
    15791  if(allocation == VK_NULL_HANDLE)
    15792  {
    15793  return;
    15794  }
    15795 
    15796  VMA_DEBUG_LOG("vmaFreeMemory");
    15797 
    15798  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    15799 
    15800 #if VMA_RECORDING_ENABLED
    15801  if(allocator->GetRecorder() != VMA_NULL)
    15802  {
    15803  allocator->GetRecorder()->RecordFreeMemory(
    15804  allocator->GetCurrentFrameIndex(),
    15805  allocation);
    15806  }
    15807 #endif
    15808 
    15809  allocator->FreeMemory(allocation);
    15810 }
    15811 
    15812 VkResult vmaResizeAllocation(
    15813  VmaAllocator allocator,
    15814  VmaAllocation allocation,
    15815  VkDeviceSize newSize)
    15816 {
    15817  VMA_ASSERT(allocator && allocation);
    15818 
    15819  VMA_DEBUG_LOG("vmaResizeAllocation");
    15820 
    15821  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    15822 
    15823 #if VMA_RECORDING_ENABLED
    15824  if(allocator->GetRecorder() != VMA_NULL)
    15825  {
    15826  allocator->GetRecorder()->RecordResizeAllocation(
    15827  allocator->GetCurrentFrameIndex(),
    15828  allocation,
    15829  newSize);
    15830  }
    15831 #endif
    15832 
    15833  return allocator->ResizeAllocation(allocation, newSize);
    15834 }
    15835 
    15837  VmaAllocator allocator,
    15838  VmaAllocation allocation,
    15839  VmaAllocationInfo* pAllocationInfo)
    15840 {
    15841  VMA_ASSERT(allocator && allocation && pAllocationInfo);
    15842 
    15843  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    15844 
    15845 #if VMA_RECORDING_ENABLED
    15846  if(allocator->GetRecorder() != VMA_NULL)
    15847  {
    15848  allocator->GetRecorder()->RecordGetAllocationInfo(
    15849  allocator->GetCurrentFrameIndex(),
    15850  allocation);
    15851  }
    15852 #endif
    15853 
    15854  allocator->GetAllocationInfo(allocation, pAllocationInfo);
    15855 }
    15856 
    15857 VkBool32 vmaTouchAllocation(
    15858  VmaAllocator allocator,
    15859  VmaAllocation allocation)
    15860 {
    15861  VMA_ASSERT(allocator && allocation);
    15862 
    15863  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    15864 
    15865 #if VMA_RECORDING_ENABLED
    15866  if(allocator->GetRecorder() != VMA_NULL)
    15867  {
    15868  allocator->GetRecorder()->RecordTouchAllocation(
    15869  allocator->GetCurrentFrameIndex(),
    15870  allocation);
    15871  }
    15872 #endif
    15873 
    15874  return allocator->TouchAllocation(allocation);
    15875 }
    15876 
    15878  VmaAllocator allocator,
    15879  VmaAllocation allocation,
    15880  void* pUserData)
    15881 {
    15882  VMA_ASSERT(allocator && allocation);
    15883 
    15884  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    15885 
    15886  allocation->SetUserData(allocator, pUserData);
    15887 
    15888 #if VMA_RECORDING_ENABLED
    15889  if(allocator->GetRecorder() != VMA_NULL)
    15890  {
    15891  allocator->GetRecorder()->RecordSetAllocationUserData(
    15892  allocator->GetCurrentFrameIndex(),
    15893  allocation,
    15894  pUserData);
    15895  }
    15896 #endif
    15897 }
    15898 
    15900  VmaAllocator allocator,
    15901  VmaAllocation* pAllocation)
    15902 {
    15903  VMA_ASSERT(allocator && pAllocation);
    15904 
    15905  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
    15906 
    15907  allocator->CreateLostAllocation(pAllocation);
    15908 
    15909 #if VMA_RECORDING_ENABLED
    15910  if(allocator->GetRecorder() != VMA_NULL)
    15911  {
    15912  allocator->GetRecorder()->RecordCreateLostAllocation(
    15913  allocator->GetCurrentFrameIndex(),
    15914  *pAllocation);
    15915  }
    15916 #endif
    15917 }
    15918 
    15919 VkResult vmaMapMemory(
    15920  VmaAllocator allocator,
    15921  VmaAllocation allocation,
    15922  void** ppData)
    15923 {
    15924  VMA_ASSERT(allocator && allocation && ppData);
    15925 
    15926  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    15927 
    15928  VkResult res = allocator->Map(allocation, ppData);
    15929 
    15930 #if VMA_RECORDING_ENABLED
    15931  if(allocator->GetRecorder() != VMA_NULL)
    15932  {
    15933  allocator->GetRecorder()->RecordMapMemory(
    15934  allocator->GetCurrentFrameIndex(),
    15935  allocation);
    15936  }
    15937 #endif
    15938 
    15939  return res;
    15940 }
    15941 
    15942 void vmaUnmapMemory(
    15943  VmaAllocator allocator,
    15944  VmaAllocation allocation)
    15945 {
    15946  VMA_ASSERT(allocator && allocation);
    15947 
    15948  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    15949 
    15950 #if VMA_RECORDING_ENABLED
    15951  if(allocator->GetRecorder() != VMA_NULL)
    15952  {
    15953  allocator->GetRecorder()->RecordUnmapMemory(
    15954  allocator->GetCurrentFrameIndex(),
    15955  allocation);
    15956  }
    15957 #endif
    15958 
    15959  allocator->Unmap(allocation);
    15960 }
    15961 
    15962 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    15963 {
    15964  VMA_ASSERT(allocator && allocation);
    15965 
    15966  VMA_DEBUG_LOG("vmaFlushAllocation");
    15967 
    15968  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    15969 
    15970  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
    15971 
    15972 #if VMA_RECORDING_ENABLED
    15973  if(allocator->GetRecorder() != VMA_NULL)
    15974  {
    15975  allocator->GetRecorder()->RecordFlushAllocation(
    15976  allocator->GetCurrentFrameIndex(),
    15977  allocation, offset, size);
    15978  }
    15979 #endif
    15980 }
    15981 
    15982 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    15983 {
    15984  VMA_ASSERT(allocator && allocation);
    15985 
    15986  VMA_DEBUG_LOG("vmaInvalidateAllocation");
    15987 
    15988  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    15989 
    15990  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
    15991 
    15992 #if VMA_RECORDING_ENABLED
    15993  if(allocator->GetRecorder() != VMA_NULL)
    15994  {
    15995  allocator->GetRecorder()->RecordInvalidateAllocation(
    15996  allocator->GetCurrentFrameIndex(),
    15997  allocation, offset, size);
    15998  }
    15999 #endif
    16000 }
    16001 
    16002 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
    16003 {
    16004  VMA_ASSERT(allocator);
    16005 
    16006  VMA_DEBUG_LOG("vmaCheckCorruption");
    16007 
    16008  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16009 
    16010  return allocator->CheckCorruption(memoryTypeBits);
    16011 }
    16012 
    16013 VkResult vmaDefragment(
    16014  VmaAllocator allocator,
    16015  VmaAllocation* pAllocations,
    16016  size_t allocationCount,
    16017  VkBool32* pAllocationsChanged,
    16018  const VmaDefragmentationInfo *pDefragmentationInfo,
    16019  VmaDefragmentationStats* pDefragmentationStats)
    16020 {
    16021  // Deprecated interface, reimplemented using new one.
    16022 
    16023  VmaDefragmentationInfo2 info2 = {};
    16024  info2.allocationCount = (uint32_t)allocationCount;
    16025  info2.pAllocations = pAllocations;
    16026  info2.pAllocationsChanged = pAllocationsChanged;
    16027  if(pDefragmentationInfo != VMA_NULL)
    16028  {
    16029  info2.maxCpuAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
    16030  info2.maxCpuBytesToMove = pDefragmentationInfo->maxBytesToMove;
    16031  }
    16032  else
    16033  {
    16034  info2.maxCpuAllocationsToMove = UINT32_MAX;
    16035  info2.maxCpuBytesToMove = VK_WHOLE_SIZE;
    16036  }
    16037  // info2.flags, maxGpuAllocationsToMove, maxGpuBytesToMove, commandBuffer deliberately left zero.
    16038 
    16040  VkResult res = vmaDefragmentationBegin(allocator, &info2, pDefragmentationStats, &ctx);
    16041  if(res == VK_NOT_READY)
    16042  {
    16043  res = vmaDefragmentationEnd( allocator, ctx);
    16044  }
    16045  return res;
    16046 }
    16047 
// Begins a defragmentation operation described by *pInfo and stores the
// resulting context in *pContext. Returns the result of
// VmaAllocator_T::DefragmentationBegin. NOTE(review): the deprecated
// vmaDefragment() above treats a VK_NOT_READY result as "finish via
// vmaDefragmentationEnd()" — confirm against the public API docs.
VkResult vmaDefragmentationBegin(
    VmaAllocator allocator,
    const VmaDefragmentationInfo2* pInfo,
    VmaDefragmentationStats* pStats,
    VmaDefragmentationContext *pContext)
{
    VMA_ASSERT(allocator && pInfo && pContext);

    // Degenerate case: Nothing to defragment.
    if(pInfo->allocationCount == 0 && pInfo->poolCount == 0)
    {
        return VK_SUCCESS;
    }

    // Non-zero counts must come with non-null arrays of valid handles.
    VMA_ASSERT(pInfo->allocationCount == 0 || pInfo->pAllocations != VMA_NULL);
    VMA_ASSERT(pInfo->poolCount == 0 || pInfo->pPools != VMA_NULL);
    VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->allocationCount, pInfo->pAllocations));
    VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->poolCount, pInfo->pPools));

    VMA_DEBUG_LOG("vmaDefragmentationBegin");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VkResult res = allocator->DefragmentationBegin(*pInfo, pStats, pContext);

#if VMA_RECORDING_ENABLED
    // Record after the call so *pContext holds the created context.
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordDefragmentationBegin(
            allocator->GetCurrentFrameIndex(), *pInfo, *pContext);
    }
#endif

    return res;
}
    16083 
    16084 VkResult vmaDefragmentationEnd(
    16085  VmaAllocator allocator,
    16086  VmaDefragmentationContext context)
    16087 {
    16088  VMA_ASSERT(allocator);
    16089 
    16090  VMA_DEBUG_LOG("vmaDefragmentationEnd");
    16091 
    16092  if(context != VK_NULL_HANDLE)
    16093  {
    16094  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16095 
    16096 #if VMA_RECORDING_ENABLED
    16097  if(allocator->GetRecorder() != VMA_NULL)
    16098  {
    16099  allocator->GetRecorder()->RecordDefragmentationEnd(
    16100  allocator->GetCurrentFrameIndex(), context);
    16101  }
    16102 #endif
    16103 
    16104  return allocator->DefragmentationEnd(context);
    16105  }
    16106  else
    16107  {
    16108  return VK_SUCCESS;
    16109  }
    16110 }
    16111 
    16112 VkResult vmaBindBufferMemory(
    16113  VmaAllocator allocator,
    16114  VmaAllocation allocation,
    16115  VkBuffer buffer)
    16116 {
    16117  VMA_ASSERT(allocator && allocation && buffer);
    16118 
    16119  VMA_DEBUG_LOG("vmaBindBufferMemory");
    16120 
    16121  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16122 
    16123  return allocator->BindBufferMemory(allocation, buffer);
    16124 }
    16125 
    16126 VkResult vmaBindImageMemory(
    16127  VmaAllocator allocator,
    16128  VmaAllocation allocation,
    16129  VkImage image)
    16130 {
    16131  VMA_ASSERT(allocator && allocation && image);
    16132 
    16133  VMA_DEBUG_LOG("vmaBindImageMemory");
    16134 
    16135  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16136 
    16137  return allocator->BindImageMemory(allocation, image);
    16138 }
    16139 
// Creates a VkBuffer, allocates memory appropriate for it, and binds them
// together. On any intermediate failure the already-created objects are
// destroyed/freed and *pBuffer / *pAllocation are left as VK_NULL_HANDLE.
VkResult vmaCreateBuffer(
    VmaAllocator allocator,
    const VkBufferCreateInfo* pBufferCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkBuffer* pBuffer,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo)
{
    VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);

    // Reject a zero-size buffer up front rather than handing it to Vulkan.
    if(pBufferCreateInfo->size == 0)
    {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    VMA_DEBUG_LOG("vmaCreateBuffer");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    // Ensure outputs are null on every failure path.
    *pBuffer = VK_NULL_HANDLE;
    *pAllocation = VK_NULL_HANDLE;

    // 1. Create VkBuffer.
    VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
        allocator->m_hDevice,
        pBufferCreateInfo,
        allocator->GetAllocationCallbacks(),
        pBuffer);
    if(res >= 0)
    {
        // 2. vkGetBufferMemoryRequirements.
        VkMemoryRequirements vkMemReq = {};
        bool requiresDedicatedAllocation = false;
        bool prefersDedicatedAllocation = false;
        allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
            requiresDedicatedAllocation, prefersDedicatedAllocation);

        // Make sure alignment requirements for specific buffer usages reported
        // in Physical Device Properties are included in alignment reported by memory requirements.
        if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
        {
            VMA_ASSERT(vkMemReq.alignment %
                allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
        }
        if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
        {
            VMA_ASSERT(vkMemReq.alignment %
                allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
        }
        if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
        {
            VMA_ASSERT(vkMemReq.alignment %
                allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
        }

        // 3. Allocate memory using allocator.
        res = allocator->AllocateMemory(
            vkMemReq,
            requiresDedicatedAllocation,
            prefersDedicatedAllocation,
            *pBuffer, // dedicatedBuffer
            VK_NULL_HANDLE, // dedicatedImage
            *pAllocationCreateInfo,
            VMA_SUBALLOCATION_TYPE_BUFFER,
            pAllocation);

#if VMA_RECORDING_ENABLED
        // Record the creation regardless of the allocation result.
        if(allocator->GetRecorder() != VMA_NULL)
        {
            allocator->GetRecorder()->RecordCreateBuffer(
                allocator->GetCurrentFrameIndex(),
                *pBufferCreateInfo,
                *pAllocationCreateInfo,
                *pAllocation);
        }
#endif

        if(res >= 0)
        {
            // 4. Bind buffer with memory.
            res = allocator->BindBufferMemory(*pAllocation, *pBuffer);
            if(res >= 0)
            {
                // All steps succeeded.
                #if VMA_STATS_STRING_ENABLED
                (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
                #endif
                if(pAllocationInfo != VMA_NULL)
                {
                    allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
                }

                return VK_SUCCESS;
            }
            // Bind failed: roll back the allocation and the buffer.
            allocator->FreeMemory(*pAllocation);
            *pAllocation = VK_NULL_HANDLE;
            (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
            *pBuffer = VK_NULL_HANDLE;
            return res;
        }
        // Allocation failed: roll back the buffer only.
        (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
        *pBuffer = VK_NULL_HANDLE;
        return res;
    }
    return res;
}
    16246 
    16247 void vmaDestroyBuffer(
    16248  VmaAllocator allocator,
    16249  VkBuffer buffer,
    16250  VmaAllocation allocation)
    16251 {
    16252  VMA_ASSERT(allocator);
    16253 
    16254  if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
    16255  {
    16256  return;
    16257  }
    16258 
    16259  VMA_DEBUG_LOG("vmaDestroyBuffer");
    16260 
    16261  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16262 
    16263 #if VMA_RECORDING_ENABLED
    16264  if(allocator->GetRecorder() != VMA_NULL)
    16265  {
    16266  allocator->GetRecorder()->RecordDestroyBuffer(
    16267  allocator->GetCurrentFrameIndex(),
    16268  allocation);
    16269  }
    16270 #endif
    16271 
    16272  if(buffer != VK_NULL_HANDLE)
    16273  {
    16274  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
    16275  }
    16276 
    16277  if(allocation != VK_NULL_HANDLE)
    16278  {
    16279  allocator->FreeMemory(allocation);
    16280  }
    16281 }
    16282 
// Creates a VkImage, allocates memory appropriate for it, and binds them
// together. On any intermediate failure the already-created objects are
// destroyed/freed and *pImage / *pAllocation are left as VK_NULL_HANDLE.
VkResult vmaCreateImage(
    VmaAllocator allocator,
    const VkImageCreateInfo* pImageCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkImage* pImage,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo)
{
    VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);

    // Reject degenerate image dimensions up front rather than handing them to Vulkan.
    if(pImageCreateInfo->extent.width == 0 ||
        pImageCreateInfo->extent.height == 0 ||
        pImageCreateInfo->extent.depth == 0 ||
        pImageCreateInfo->mipLevels == 0 ||
        pImageCreateInfo->arrayLayers == 0)
    {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    VMA_DEBUG_LOG("vmaCreateImage");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    // Ensure outputs are null on every failure path.
    *pImage = VK_NULL_HANDLE;
    *pAllocation = VK_NULL_HANDLE;

    // 1. Create VkImage.
    VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
        allocator->m_hDevice,
        pImageCreateInfo,
        allocator->GetAllocationCallbacks(),
        pImage);
    if(res >= 0)
    {
        // Suballocation type depends on tiling: optimal vs. linear images
        // are tracked separately.
        VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
            VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
            VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;

        // 2. Allocate memory using allocator.
        VkMemoryRequirements vkMemReq = {};
        bool requiresDedicatedAllocation = false;
        bool prefersDedicatedAllocation = false;
        allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
            requiresDedicatedAllocation, prefersDedicatedAllocation);

        res = allocator->AllocateMemory(
            vkMemReq,
            requiresDedicatedAllocation,
            prefersDedicatedAllocation,
            VK_NULL_HANDLE, // dedicatedBuffer
            *pImage, // dedicatedImage
            *pAllocationCreateInfo,
            suballocType,
            pAllocation);

#if VMA_RECORDING_ENABLED
        // Record the creation regardless of the allocation result.
        if(allocator->GetRecorder() != VMA_NULL)
        {
            allocator->GetRecorder()->RecordCreateImage(
                allocator->GetCurrentFrameIndex(),
                *pImageCreateInfo,
                *pAllocationCreateInfo,
                *pAllocation);
        }
#endif

        if(res >= 0)
        {
            // 3. Bind image with memory.
            res = allocator->BindImageMemory(*pAllocation, *pImage);
            if(res >= 0)
            {
                // All steps succeeded.
                #if VMA_STATS_STRING_ENABLED
                (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
                #endif
                if(pAllocationInfo != VMA_NULL)
                {
                    allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
                }

                return VK_SUCCESS;
            }
            // Bind failed: roll back the allocation and the image.
            allocator->FreeMemory(*pAllocation);
            *pAllocation = VK_NULL_HANDLE;
            (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
            *pImage = VK_NULL_HANDLE;
            return res;
        }
        // Allocation failed: roll back the image only.
        (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
        *pImage = VK_NULL_HANDLE;
        return res;
    }
    return res;
}
    16378 
    16379 void vmaDestroyImage(
    16380  VmaAllocator allocator,
    16381  VkImage image,
    16382  VmaAllocation allocation)
    16383 {
    16384  VMA_ASSERT(allocator);
    16385 
    16386  if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
    16387  {
    16388  return;
    16389  }
    16390 
    16391  VMA_DEBUG_LOG("vmaDestroyImage");
    16392 
    16393  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16394 
    16395 #if VMA_RECORDING_ENABLED
    16396  if(allocator->GetRecorder() != VMA_NULL)
    16397  {
    16398  allocator->GetRecorder()->RecordDestroyImage(
    16399  allocator->GetCurrentFrameIndex(),
    16400  allocation);
    16401  }
    16402 #endif
    16403 
    16404  if(image != VK_NULL_HANDLE)
    16405  {
    16406  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
    16407  }
    16408  if(allocation != VK_NULL_HANDLE)
    16409  {
    16410  allocator->FreeMemory(allocation);
    16411  }
    16412 }
    16413 
    16414 #endif // #ifdef VMA_IMPLEMENTATION
    PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties
    Definition: vk_mem_alloc.h:1727
    -
    Set this flag if the allocation should have its own memory block.
    Definition: vk_mem_alloc.h:2030
    +Go to the documentation of this file.
    1 //
    2 // Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved.
    3 //
    4 // Permission is hereby granted, free of charge, to any person obtaining a copy
    5 // of this software and associated documentation files (the "Software"), to deal
    6 // in the Software without restriction, including without limitation the rights
    7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    8 // copies of the Software, and to permit persons to whom the Software is
    9 // furnished to do so, subject to the following conditions:
    10 //
    11 // The above copyright notice and this permission notice shall be included in
    12 // all copies or substantial portions of the Software.
    13 //
    14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
    20 // THE SOFTWARE.
    21 //
    22 
    23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
    24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
    25 
    26 #ifdef __cplusplus
    27 extern "C" {
    28 #endif
    29 
    1614 /*
    1615 Define this macro to 0/1 to disable/enable support for recording functionality,
    1616 available through VmaAllocatorCreateInfo::pRecordSettings.
    1617 */
    1618 #ifndef VMA_RECORDING_ENABLED
    1619  #ifdef _WIN32
    1620  #define VMA_RECORDING_ENABLED 1
    1621  #else
    1622  #define VMA_RECORDING_ENABLED 0
    1623  #endif
    1624 #endif
    1625 
    1626 #ifndef NOMINMAX
    1627  #define NOMINMAX // For windows.h
    1628 #endif
    1629 
    1630 #ifndef VULKAN_H_
    1631  #include <vulkan/vulkan.h>
    1632 #endif
    1633 
    1634 #if VMA_RECORDING_ENABLED
    1635  #include <windows.h>
    1636 #endif
    1637 
    1638 #if !defined(VMA_DEDICATED_ALLOCATION)
    1639  #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
    1640  #define VMA_DEDICATED_ALLOCATION 1
    1641  #else
    1642  #define VMA_DEDICATED_ALLOCATION 0
    1643  #endif
    1644 #endif
    1645 
    1655 VK_DEFINE_HANDLE(VmaAllocator)
    1656 
    1657 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
    1659  VmaAllocator allocator,
    1660  uint32_t memoryType,
    1661  VkDeviceMemory memory,
    1662  VkDeviceSize size);
    1664 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
    1665  VmaAllocator allocator,
    1666  uint32_t memoryType,
    1667  VkDeviceMemory memory,
    1668  VkDeviceSize size);
    1669 
    1683 
    1713 
    1716 typedef VkFlags VmaAllocatorCreateFlags;
    1717 
    1722 typedef struct VmaVulkanFunctions {
    1723  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
    1724  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
    1725  PFN_vkAllocateMemory vkAllocateMemory;
    1726  PFN_vkFreeMemory vkFreeMemory;
    1727  PFN_vkMapMemory vkMapMemory;
    1728  PFN_vkUnmapMemory vkUnmapMemory;
    1729  PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
    1730  PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
    1731  PFN_vkBindBufferMemory vkBindBufferMemory;
    1732  PFN_vkBindImageMemory vkBindImageMemory;
    1733  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
    1734  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
    1735  PFN_vkCreateBuffer vkCreateBuffer;
    1736  PFN_vkDestroyBuffer vkDestroyBuffer;
    1737  PFN_vkCreateImage vkCreateImage;
    1738  PFN_vkDestroyImage vkDestroyImage;
    1739  PFN_vkCmdCopyBuffer vkCmdCopyBuffer;
    1740 #if VMA_DEDICATED_ALLOCATION
    1741  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
    1742  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
    1743 #endif
    1745 
    1747 typedef enum VmaRecordFlagBits {
    1754 
    1757 typedef VkFlags VmaRecordFlags;
    1758 
    1760 typedef struct VmaRecordSettings
    1761 {
    1771  const char* pFilePath;
    1773 
    1776 {
    1780 
    1781  VkPhysicalDevice physicalDevice;
    1783 
    1784  VkDevice device;
    1786 
    1789 
    1790  const VkAllocationCallbacks* pAllocationCallbacks;
    1792 
    1832  const VkDeviceSize* pHeapSizeLimit;
    1853 
    1855 VkResult vmaCreateAllocator(
    1856  const VmaAllocatorCreateInfo* pCreateInfo,
    1857  VmaAllocator* pAllocator);
    1858 
    1860 void vmaDestroyAllocator(
    1861  VmaAllocator allocator);
    1862 
    1868  VmaAllocator allocator,
    1869  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
    1870 
    1876  VmaAllocator allocator,
    1877  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
    1878 
    1886  VmaAllocator allocator,
    1887  uint32_t memoryTypeIndex,
    1888  VkMemoryPropertyFlags* pFlags);
    1889 
    1899  VmaAllocator allocator,
    1900  uint32_t frameIndex);
    1901 
    1904 typedef struct VmaStatInfo
    1905 {
    1907  uint32_t blockCount;
    1913  VkDeviceSize usedBytes;
    1915  VkDeviceSize unusedBytes;
    1918 } VmaStatInfo;
    1919 
    1921 typedef struct VmaStats
    1922 {
    1923  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
    1924  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
    1926 } VmaStats;
    1927 
    1929 void vmaCalculateStats(
    1930  VmaAllocator allocator,
    1931  VmaStats* pStats);
    1932 
    1933 #define VMA_STATS_STRING_ENABLED 1
    1934 
    1935 #if VMA_STATS_STRING_ENABLED
    1936 
    1938 
    1940 void vmaBuildStatsString(
    1941  VmaAllocator allocator,
    1942  char** ppStatsString,
    1943  VkBool32 detailedMap);
    1944 
    1945 void vmaFreeStatsString(
    1946  VmaAllocator allocator,
    1947  char* pStatsString);
    1948 
    1949 #endif // #if VMA_STATS_STRING_ENABLED
    1950 
    1959 VK_DEFINE_HANDLE(VmaPool)
    1960 
    1961 typedef enum VmaMemoryUsage
    1962 {
    2011 } VmaMemoryUsage;
    2012 
    2027 
    2082 
    2098 
    2108 
    2115 
    2119 
    2121 {
    2134  VkMemoryPropertyFlags requiredFlags;
    2139  VkMemoryPropertyFlags preferredFlags;
    2147  uint32_t memoryTypeBits;
    2160  void* pUserData;
    2162 
    2179 VkResult vmaFindMemoryTypeIndex(
    2180  VmaAllocator allocator,
    2181  uint32_t memoryTypeBits,
    2182  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2183  uint32_t* pMemoryTypeIndex);
    2184 
    2198  VmaAllocator allocator,
    2199  const VkBufferCreateInfo* pBufferCreateInfo,
    2200  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2201  uint32_t* pMemoryTypeIndex);
    2202 
    2216  VmaAllocator allocator,
    2217  const VkImageCreateInfo* pImageCreateInfo,
    2218  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2219  uint32_t* pMemoryTypeIndex);
    2220 
    2241 
    2258 
    2269 
    2275 
    2278 typedef VkFlags VmaPoolCreateFlags;
    2279 
    2282 typedef struct VmaPoolCreateInfo {
    2297  VkDeviceSize blockSize;
    2326 
    2329 typedef struct VmaPoolStats {
    2332  VkDeviceSize size;
    2335  VkDeviceSize unusedSize;
    2348  VkDeviceSize unusedRangeSizeMax;
    2351  size_t blockCount;
    2352 } VmaPoolStats;
    2353 
    2360 VkResult vmaCreatePool(
    2361  VmaAllocator allocator,
    2362  const VmaPoolCreateInfo* pCreateInfo,
    2363  VmaPool* pPool);
    2364 
    2367 void vmaDestroyPool(
    2368  VmaAllocator allocator,
    2369  VmaPool pool);
    2370 
    2377 void vmaGetPoolStats(
    2378  VmaAllocator allocator,
    2379  VmaPool pool,
    2380  VmaPoolStats* pPoolStats);
    2381 
    2389  VmaAllocator allocator,
    2390  VmaPool pool,
    2391  size_t* pLostAllocationCount);
    2392 
    2407 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool);
    2408 
    2433 VK_DEFINE_HANDLE(VmaAllocation)
    2434 
    2435 
    2437 typedef struct VmaAllocationInfo {
    2442  uint32_t memoryType;
    2451  VkDeviceMemory deviceMemory;
    2456  VkDeviceSize offset;
    2461  VkDeviceSize size;
    2475  void* pUserData;
    2477 
    2488 VkResult vmaAllocateMemory(
    2489  VmaAllocator allocator,
    2490  const VkMemoryRequirements* pVkMemoryRequirements,
    2491  const VmaAllocationCreateInfo* pCreateInfo,
    2492  VmaAllocation* pAllocation,
    2493  VmaAllocationInfo* pAllocationInfo);
    2494 
    2514 VkResult vmaAllocateMemoryPages(
    2515  VmaAllocator allocator,
    2516  const VkMemoryRequirements* pVkMemoryRequirements,
    2517  const VmaAllocationCreateInfo* pCreateInfo,
    2518  size_t allocationCount,
    2519  VmaAllocation* pAllocations,
    2520  VmaAllocationInfo* pAllocationInfo);
    2521 
    2529  VmaAllocator allocator,
    2530  VkBuffer buffer,
    2531  const VmaAllocationCreateInfo* pCreateInfo,
    2532  VmaAllocation* pAllocation,
    2533  VmaAllocationInfo* pAllocationInfo);
    2534 
    2536 VkResult vmaAllocateMemoryForImage(
    2537  VmaAllocator allocator,
    2538  VkImage image,
    2539  const VmaAllocationCreateInfo* pCreateInfo,
    2540  VmaAllocation* pAllocation,
    2541  VmaAllocationInfo* pAllocationInfo);
    2542 
    2547 void vmaFreeMemory(
    2548  VmaAllocator allocator,
    2549  VmaAllocation allocation);
    2550 
    2561 void vmaFreeMemoryPages(
    2562  VmaAllocator allocator,
    2563  size_t allocationCount,
    2564  VmaAllocation* pAllocations);
    2565 
    2586 VkResult vmaResizeAllocation(
    2587  VmaAllocator allocator,
    2588  VmaAllocation allocation,
    2589  VkDeviceSize newSize);
    2590 
    2608  VmaAllocator allocator,
    2609  VmaAllocation allocation,
    2610  VmaAllocationInfo* pAllocationInfo);
    2611 
    2626 VkBool32 vmaTouchAllocation(
    2627  VmaAllocator allocator,
    2628  VmaAllocation allocation);
    2629 
    2644  VmaAllocator allocator,
    2645  VmaAllocation allocation,
    2646  void* pUserData);
    2647 
    2659  VmaAllocator allocator,
    2660  VmaAllocation* pAllocation);
    2661 
    2696 VkResult vmaMapMemory(
    2697  VmaAllocator allocator,
    2698  VmaAllocation allocation,
    2699  void** ppData);
    2700 
    2705 void vmaUnmapMemory(
    2706  VmaAllocator allocator,
    2707  VmaAllocation allocation);
    2708 
    2721 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    2722 
    2735 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    2736 
    2753 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits);
    2754 
    2761 VK_DEFINE_HANDLE(VmaDefragmentationContext)
    2762 
    2763 typedef enum VmaDefragmentationFlagBits {
    2767 typedef VkFlags VmaDefragmentationFlags;
    2768 
    2773 typedef struct VmaDefragmentationInfo2 {
    2797  uint32_t poolCount;
    2818  VkDeviceSize maxCpuBytesToMove;
    2828  VkDeviceSize maxGpuBytesToMove;
    2842  VkCommandBuffer commandBuffer;
    2844 
    2849 typedef struct VmaDefragmentationInfo {
    2854  VkDeviceSize maxBytesToMove;
    2861 
    2863 typedef struct VmaDefragmentationStats {
    2865  VkDeviceSize bytesMoved;
    2867  VkDeviceSize bytesFreed;
    2873 
    2900 VkResult vmaDefragmentationBegin(
    2901  VmaAllocator allocator,
    2902  const VmaDefragmentationInfo2* pInfo,
    2903  VmaDefragmentationStats* pStats,
    2904  VmaDefragmentationContext *pContext);
    2905 
    2911 VkResult vmaDefragmentationEnd(
    2912  VmaAllocator allocator,
    2913  VmaDefragmentationContext context);
    2914 
    2955 VkResult vmaDefragment(
    2956  VmaAllocator allocator,
    2957  VmaAllocation* pAllocations,
    2958  size_t allocationCount,
    2959  VkBool32* pAllocationsChanged,
    2960  const VmaDefragmentationInfo *pDefragmentationInfo,
    2961  VmaDefragmentationStats* pDefragmentationStats);
    2962 
    2975 VkResult vmaBindBufferMemory(
    2976  VmaAllocator allocator,
    2977  VmaAllocation allocation,
    2978  VkBuffer buffer);
    2979 
    2992 VkResult vmaBindImageMemory(
    2993  VmaAllocator allocator,
    2994  VmaAllocation allocation,
    2995  VkImage image);
    2996 
    3023 VkResult vmaCreateBuffer(
    3024  VmaAllocator allocator,
    3025  const VkBufferCreateInfo* pBufferCreateInfo,
    3026  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    3027  VkBuffer* pBuffer,
    3028  VmaAllocation* pAllocation,
    3029  VmaAllocationInfo* pAllocationInfo);
    3030 
    3042 void vmaDestroyBuffer(
    3043  VmaAllocator allocator,
    3044  VkBuffer buffer,
    3045  VmaAllocation allocation);
    3046 
    3048 VkResult vmaCreateImage(
    3049  VmaAllocator allocator,
    3050  const VkImageCreateInfo* pImageCreateInfo,
    3051  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    3052  VkImage* pImage,
    3053  VmaAllocation* pAllocation,
    3054  VmaAllocationInfo* pAllocationInfo);
    3055 
    3067 void vmaDestroyImage(
    3068  VmaAllocator allocator,
    3069  VkImage image,
    3070  VmaAllocation allocation);
    3071 
    3072 #ifdef __cplusplus
    3073 }
    3074 #endif
    3075 
    3076 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
    3077 
    3078 // For Visual Studio IntelliSense.
    3079 #if defined(__cplusplus) && defined(__INTELLISENSE__)
    3080 #define VMA_IMPLEMENTATION
    3081 #endif
    3082 
    3083 #ifdef VMA_IMPLEMENTATION
    3084 #undef VMA_IMPLEMENTATION
    3085 
    3086 #include <cstdint>
    3087 #include <cstdlib>
    3088 #include <cstring>
    3089 
    3090 /*******************************************************************************
    3091 CONFIGURATION SECTION
    3092 
    3093 Define some of these macros before each #include of this header or change them
    3094 here if you need other then default behavior depending on your environment.
    3095 */
    3096 
    3097 /*
    3098 Define this macro to 1 to make the library fetch pointers to Vulkan functions
    3099 internally, like:
    3100 
    3101  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    3102 
    3103 Define to 0 if you are going to provide you own pointers to Vulkan functions via
    3104 VmaAllocatorCreateInfo::pVulkanFunctions.
    3105 */
    3106 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
    3107 #define VMA_STATIC_VULKAN_FUNCTIONS 1
    3108 #endif
    3109 
    3110 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
    3111 //#define VMA_USE_STL_CONTAINERS 1
    3112 
    3113 /* Set this macro to 1 to make the library including and using STL containers:
    3114 std::pair, std::vector, std::list, std::unordered_map.
    3115 
    3116 Set it to 0 or undefined to make the library using its own implementation of
    3117 the containers.
    3118 */
    3119 #if VMA_USE_STL_CONTAINERS
    3120  #define VMA_USE_STL_VECTOR 1
    3121  #define VMA_USE_STL_UNORDERED_MAP 1
    3122  #define VMA_USE_STL_LIST 1
    3123 #endif
    3124 
    3125 #ifndef VMA_USE_STL_SHARED_MUTEX
    3126  // Minimum Visual Studio 2015 Update 2
    3127  #if defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023918
    3128  #define VMA_USE_STL_SHARED_MUTEX 1
    3129  #endif
    3130 #endif
    3131 
    3132 #if VMA_USE_STL_VECTOR
    3133  #include <vector>
    3134 #endif
    3135 
    3136 #if VMA_USE_STL_UNORDERED_MAP
    3137  #include <unordered_map>
    3138 #endif
    3139 
    3140 #if VMA_USE_STL_LIST
    3141  #include <list>
    3142 #endif
    3143 
    3144 /*
    3145 Following headers are used in this CONFIGURATION section only, so feel free to
    3146 remove them if not needed.
    3147 */
    3148 #include <cassert> // for assert
    3149 #include <algorithm> // for min, max
    3150 #include <mutex>
    3151 #include <atomic> // for std::atomic
    3152 
    3153 #ifndef VMA_NULL
    3154  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
    3155  #define VMA_NULL nullptr
    3156 #endif
    3157 
    3158 #if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
    3159 #include <cstdlib>
// Android API < 16 has no aligned_alloc(); emulate it on top of memalign().
// NOTE(review): memalign is declared in <malloc.h>, which is not included in
// this file - presumably provided transitively on Android; confirm in the build.
void *aligned_alloc(size_t alignment, size_t size)
{
    // alignment must be >= sizeof(void*)
    if(alignment < sizeof(void*))
    {
        alignment = sizeof(void*);
    }

    return memalign(alignment, size);
}
    3170 #elif defined(__APPLE__) || defined(__ANDROID__)
    3171 #include <cstdlib>
// Apple and newer Android platforms: emulate C11 aligned_alloc() with
// posix_memalign(), which is available there.
void *aligned_alloc(size_t alignment, size_t size)
{
    // alignment must be >= sizeof(void*)
    if(alignment < sizeof(void*))
    {
        alignment = sizeof(void*);
    }

    void *pointer;
    if(posix_memalign(&pointer, alignment, size) == 0)
        return pointer;
    // posix_memalign failed (out of memory or bad alignment) - report null.
    return VMA_NULL;
}
    3185 #endif
    3186 
    3187 // If your compiler is not compatible with C++11 and definition of
    3188 // aligned_alloc() function is missing, uncommeting following line may help:
    3189 
    3190 //#include <malloc.h>
    3191 
    3192 // Normal assert to check for programmer's errors, especially in Debug configuration.
    3193 #ifndef VMA_ASSERT
    3194  #ifdef _DEBUG
    3195  #define VMA_ASSERT(expr) assert(expr)
    3196  #else
    3197  #define VMA_ASSERT(expr)
    3198  #endif
    3199 #endif
    3200 
    3201 // Assert that will be called very often, like inside data structures e.g. operator[].
    3202 // Making it non-empty can make program slow.
    3203 #ifndef VMA_HEAVY_ASSERT
    3204  #ifdef _DEBUG
    3205  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
    3206  #else
    3207  #define VMA_HEAVY_ASSERT(expr)
    3208  #endif
    3209 #endif
    3210 
    3211 #ifndef VMA_ALIGN_OF
    3212  #define VMA_ALIGN_OF(type) (__alignof(type))
    3213 #endif
    3214 
    3215 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
    3216  #if defined(_WIN32)
    3217  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
    3218  #else
    3219  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
    3220  #endif
    3221 #endif
    3222 
    3223 #ifndef VMA_SYSTEM_FREE
    3224  #if defined(_WIN32)
    3225  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
    3226  #else
    3227  #define VMA_SYSTEM_FREE(ptr) free(ptr)
    3228  #endif
    3229 #endif
    3230 
    3231 #ifndef VMA_MIN
    3232  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
    3233 #endif
    3234 
    3235 #ifndef VMA_MAX
    3236  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
    3237 #endif
    3238 
    3239 #ifndef VMA_SWAP
    3240  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
    3241 #endif
    3242 
    3243 #ifndef VMA_SORT
    3244  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
    3245 #endif
    3246 
    3247 #ifndef VMA_DEBUG_LOG
    3248  #define VMA_DEBUG_LOG(format, ...)
    3249  /*
    3250  #define VMA_DEBUG_LOG(format, ...) do { \
    3251  printf(format, __VA_ARGS__); \
    3252  printf("\n"); \
    3253  } while(false)
    3254  */
    3255 #endif
    3256 
    3257 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
    3258 #if VMA_STATS_STRING_ENABLED
    // Formats `num` as a decimal string into outStr (buffer capacity strLen).
    static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
    {
        const unsigned int value = static_cast<unsigned int>(num);
        snprintf(outStr, strLen, "%u", value);
    }
    // Formats `num` as a decimal string into outStr (buffer capacity strLen).
    static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
    {
        const unsigned long long value = static_cast<unsigned long long>(num);
        snprintf(outStr, strLen, "%llu", value);
    }
    // Formats a pointer value into outStr (buffer capacity strLen) using the
    // platform's %p representation.
    static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
    {
        snprintf(outStr, strLen, "%p", ptr);
    }
    3271 #endif
    3272 
    3273 #ifndef VMA_MUTEX
    // Default mutex implementation used through the VMA_MUTEX macro:
    // a thin delegation wrapper around std::mutex.
    class VmaMutex
    {
    public:
        // Blocks until exclusive ownership of the underlying mutex is acquired.
        void Lock() { m_Mtx.lock(); }
        // Releases ownership acquired by a prior Lock().
        void Unlock() { m_Mtx.unlock(); }
    private:
        std::mutex m_Mtx;
    };
    3282  #define VMA_MUTEX VmaMutex
    3283 #endif
    3284 
    3285 // Read-write mutex, where "read" is shared access, "write" is exclusive access.
    3286 #ifndef VMA_RW_MUTEX
    3287  #if VMA_USE_STL_SHARED_MUTEX
    3288  // Use std::shared_mutex from C++17.
    3289  #include <shared_mutex>
    // Read-write mutex backed by C++17 std::shared_mutex:
    // "read" is shared access, "write" is exclusive access.
    class VmaRWMutex
    {
    public:
        // Shared (reader) lock - multiple readers may hold it at once.
        void LockRead() { m_SharedMtx.lock_shared(); }
        void UnlockRead() { m_SharedMtx.unlock_shared(); }
        // Exclusive (writer) lock.
        void LockWrite() { m_SharedMtx.lock(); }
        void UnlockWrite() { m_SharedMtx.unlock(); }
    private:
        std::shared_mutex m_SharedMtx;
    };
    3300  #define VMA_RW_MUTEX VmaRWMutex
    3301  #elif defined(_WIN32)
    3302  // Use SRWLOCK from WinAPI.
    // Read-write lock implemented with the WinAPI slim reader/writer lock (SRWLOCK).
    class VmaRWMutex
    {
    public:
        // SRWLOCK must be initialized before first use.
        VmaRWMutex() { InitializeSRWLock(&m_Lock); }
        // Shared (reader) lock - multiple readers may hold it at once.
        void LockRead() { AcquireSRWLockShared(&m_Lock); }
        void UnlockRead() { ReleaseSRWLockShared(&m_Lock); }
        // Exclusive (writer) lock.
        void LockWrite() { AcquireSRWLockExclusive(&m_Lock); }
        void UnlockWrite() { ReleaseSRWLockExclusive(&m_Lock); }
    private:
        SRWLOCK m_Lock;
    };
    3314  #define VMA_RW_MUTEX VmaRWMutex
    3315  #else
    3316  // Less efficient fallback: Use normal mutex.
    3317  class VmaRWMutex
    3318  {
    3319  public:
    3320  void LockRead() { m_Mutex.Lock(); }
    3321  void UnlockRead() { m_Mutex.Unlock(); }
    3322  void LockWrite() { m_Mutex.Lock(); }
    3323  void UnlockWrite() { m_Mutex.Unlock(); }
    3324  private:
    3325  VMA_MUTEX m_Mutex;
    3326  };
    3327  #define VMA_RW_MUTEX VmaRWMutex
    3328  #endif // #if VMA_USE_STL_SHARED_MUTEX
    3329 #endif // #ifndef VMA_RW_MUTEX
    3330 
    3331 /*
    3332 If providing your own implementation, you need to implement a subset of std::atomic:
    3333 
    3334 - Constructor(uint32_t desired)
    3335 - uint32_t load() const
    3336 - void store(uint32_t desired)
    3337 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
    3338 */
    3339 #ifndef VMA_ATOMIC_UINT32
    3340  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
    3341 #endif
    3342 
    3343 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
    3344 
    3348  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
    3349 #endif
    3350 
    3351 #ifndef VMA_DEBUG_ALIGNMENT
    3352 
    3356  #define VMA_DEBUG_ALIGNMENT (1)
    3357 #endif
    3358 
    3359 #ifndef VMA_DEBUG_MARGIN
    3360 
    3364  #define VMA_DEBUG_MARGIN (0)
    3365 #endif
    3366 
    3367 #ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
    3368 
    3372  #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
    3373 #endif
    3374 
    3375 #ifndef VMA_DEBUG_DETECT_CORRUPTION
    3376 
    3381  #define VMA_DEBUG_DETECT_CORRUPTION (0)
    3382 #endif
    3383 
    3384 #ifndef VMA_DEBUG_GLOBAL_MUTEX
    3385 
    3389  #define VMA_DEBUG_GLOBAL_MUTEX (0)
    3390 #endif
    3391 
    3392 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
    3393 
    3397  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
    3398 #endif
    3399 
    3400 #ifndef VMA_SMALL_HEAP_MAX_SIZE
    3401  #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
    3403 #endif
    3404 
    3405 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
    3406  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
    3408 #endif
    3409 
    3410 #ifndef VMA_CLASS_NO_COPY
    3411  #define VMA_CLASS_NO_COPY(className) \
    3412  private: \
    3413  className(const className&) = delete; \
    3414  className& operator=(const className&) = delete;
    3415 #endif
    3416 
    3417 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
    3418 
    3419 // Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
    3420 static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;
    3421 
    3422 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
    3423 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
    3424 
    3425 /*******************************************************************************
    3426 END OF CONFIGURATION
    3427 */
    3428 
    3429 static const uint32_t VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET = 0x10000000u;
    3430 
    3431 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
    3432  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
    3433 
    3434 // Returns number of bits set to 1 in (v).
    3435 static inline uint32_t VmaCountBitsSet(uint32_t v)
    3436 {
    3437  uint32_t c = v - ((v >> 1) & 0x55555555);
    3438  c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
    3439  c = ((c >> 4) + c) & 0x0F0F0F0F;
    3440  c = ((c >> 8) + c) & 0x00FF00FF;
    3441  c = ((c >> 16) + c) & 0x0000FFFF;
    3442  return c;
    3443 }
    3444 
    3445 // Aligns given value up to nearest multiply of align value. For example: VmaAlignUp(11, 8) = 16.
    3446 // Use types like uint32_t, uint64_t as T.
    3447 template <typename T>
    3448 static inline T VmaAlignUp(T val, T align)
    3449 {
    3450  return (val + align - 1) / align * align;
    3451 }
    3452 // Aligns given value down to nearest multiply of align value. For example: VmaAlignUp(11, 8) = 8.
    3453 // Use types like uint32_t, uint64_t as T.
    3454 template <typename T>
    3455 static inline T VmaAlignDown(T val, T align)
    3456 {
    3457  return val / align * align;
    3458 }
    3459 
    3460 // Division with mathematical rounding to nearest number.
    3461 template <typename T>
    3462 static inline T VmaRoundDiv(T x, T y)
    3463 {
    3464  return (x + (y / (T)2)) / y;
    3465 }
    3466 
    3467 /*
    3468 Returns true if given number is a power of two.
    3469 T must be unsigned integer number or signed integer but always nonnegative.
    3470 For 0 returns true.
    3471 */
    3472 template <typename T>
    3473 inline bool VmaIsPow2(T x)
    3474 {
    3475  return (x & (x-1)) == 0;
    3476 }
    3477 
    3478 // Returns smallest power of 2 greater or equal to v.
    3479 static inline uint32_t VmaNextPow2(uint32_t v)
    3480 {
    3481  v--;
    3482  v |= v >> 1;
    3483  v |= v >> 2;
    3484  v |= v >> 4;
    3485  v |= v >> 8;
    3486  v |= v >> 16;
    3487  v++;
    3488  return v;
    3489 }
    3490 static inline uint64_t VmaNextPow2(uint64_t v)
    3491 {
    3492  v--;
    3493  v |= v >> 1;
    3494  v |= v >> 2;
    3495  v |= v >> 4;
    3496  v |= v >> 8;
    3497  v |= v >> 16;
    3498  v |= v >> 32;
    3499  v++;
    3500  return v;
    3501 }
    3502 
    3503 // Returns largest power of 2 less or equal to v.
    3504 static inline uint32_t VmaPrevPow2(uint32_t v)
    3505 {
    3506  v |= v >> 1;
    3507  v |= v >> 2;
    3508  v |= v >> 4;
    3509  v |= v >> 8;
    3510  v |= v >> 16;
    3511  v = v ^ (v >> 1);
    3512  return v;
    3513 }
    3514 static inline uint64_t VmaPrevPow2(uint64_t v)
    3515 {
    3516  v |= v >> 1;
    3517  v |= v >> 2;
    3518  v |= v >> 4;
    3519  v |= v >> 8;
    3520  v |= v >> 16;
    3521  v |= v >> 32;
    3522  v = v ^ (v >> 1);
    3523  return v;
    3524 }
    3525 
    3526 static inline bool VmaStrIsEmpty(const char* pStr)
    3527 {
    3528  return pStr == VMA_NULL || *pStr == '\0';
    3529 }
    3530 
    3531 static const char* VmaAlgorithmToStr(uint32_t algorithm)
    3532 {
    3533  switch(algorithm)
    3534  {
    3536  return "Linear";
    3538  return "Buddy";
    3539  case 0:
    3540  return "Default";
    3541  default:
    3542  VMA_ASSERT(0);
    3543  return "";
    3544  }
    3545 }
    3546 
    3547 #ifndef VMA_SORT
    3548 
    3549 template<typename Iterator, typename Compare>
    3550 Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
    3551 {
    3552  Iterator centerValue = end; --centerValue;
    3553  Iterator insertIndex = beg;
    3554  for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
    3555  {
    3556  if(cmp(*memTypeIndex, *centerValue))
    3557  {
    3558  if(insertIndex != memTypeIndex)
    3559  {
    3560  VMA_SWAP(*memTypeIndex, *insertIndex);
    3561  }
    3562  ++insertIndex;
    3563  }
    3564  }
    3565  if(insertIndex != centerValue)
    3566  {
    3567  VMA_SWAP(*insertIndex, *centerValue);
    3568  }
    3569  return insertIndex;
    3570 }
    3571 
    3572 template<typename Iterator, typename Compare>
    3573 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
    3574 {
    3575  if(beg < end)
    3576  {
    3577  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
    3578  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
    3579  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
    3580  }
    3581 }
    3582 
    3583 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
    3584 
    3585 #endif // #ifndef VMA_SORT
    3586 
    3587 /*
    3588 Returns true if two memory blocks occupy overlapping pages.
    3589 ResourceA must be in less memory offset than ResourceB.
    3590 
    3591 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
    3592 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
    3593 */
    3594 static inline bool VmaBlocksOnSamePage(
    3595  VkDeviceSize resourceAOffset,
    3596  VkDeviceSize resourceASize,
    3597  VkDeviceSize resourceBOffset,
    3598  VkDeviceSize pageSize)
    3599 {
    3600  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
    3601  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
    3602  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
    3603  VkDeviceSize resourceBStart = resourceBOffset;
    3604  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
    3605  return resourceAEndPage == resourceBStartPage;
    3606 }
    3607 
// Category of the resource backing a suballocation. Used by
// VmaIsBufferImageGranularityConflict (below) to decide whether two adjacent
// suballocations must be separated by bufferImageGranularity.
enum VmaSuballocationType
{
    VMA_SUBALLOCATION_TYPE_FREE = 0,           // Unused region of a memory block.
    VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,        // Resource kind not known - treated conservatively.
    VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,  // Image with tiling not known.
    VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF // Keeps the enum at 32-bit range.
};
    3618 
/*
Returns true if given suballocation types could conflict and must respect
VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
or linear image and another one is optimal image. If type is unknown, behave
conservatively.
*/
static inline bool VmaIsBufferImageGranularityConflict(
    VmaSuballocationType suballocType1,
    VmaSuballocationType suballocType2)
{
    // Normalize the pair so suballocType1 holds the smaller enum value;
    // the switch below then only needs to handle ordered pairs.
    if(suballocType1 > suballocType2)
    {
        VMA_SWAP(suballocType1, suballocType2);
    }

    switch(suballocType1)
    {
    case VMA_SUBALLOCATION_TYPE_FREE:
        // Free space never conflicts with anything.
        return false;
    case VMA_SUBALLOCATION_TYPE_UNKNOWN:
        // Unknown resource kind: assume the worst.
        return true;
    case VMA_SUBALLOCATION_TYPE_BUFFER:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
        // Unknown tiling: may be optimal, so conflicts with anything image-like.
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
        // Two optimal images never conflict with each other.
        return false;
    default:
        VMA_ASSERT(0);
        return true;
    }
}
    3659 
    3660 static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
    3661 {
    3662  uint32_t* pDst = (uint32_t*)((char*)pData + offset);
    3663  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    3664  for(size_t i = 0; i < numberCount; ++i, ++pDst)
    3665  {
    3666  *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
    3667  }
    3668 }
    3669 
    3670 static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
    3671 {
    3672  const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
    3673  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    3674  for(size_t i = 0; i < numberCount; ++i, ++pSrc)
    3675  {
    3676  if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
    3677  {
    3678  return false;
    3679  }
    3680  }
    3681  return true;
    3682 }
    3683 
    3684 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
    3685 struct VmaMutexLock
    3686 {
    3687  VMA_CLASS_NO_COPY(VmaMutexLock)
    3688 public:
    3689  VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
    3690  m_pMutex(useMutex ? &mutex : VMA_NULL)
    3691  { if(m_pMutex) { m_pMutex->Lock(); } }
    3692  ~VmaMutexLock()
    3693  { if(m_pMutex) { m_pMutex->Unlock(); } }
    3694 private:
    3695  VMA_MUTEX* m_pMutex;
    3696 };
    3697 
    3698 // Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for reading.
    3699 struct VmaMutexLockRead
    3700 {
    3701  VMA_CLASS_NO_COPY(VmaMutexLockRead)
    3702 public:
    3703  VmaMutexLockRead(VMA_RW_MUTEX& mutex, bool useMutex) :
    3704  m_pMutex(useMutex ? &mutex : VMA_NULL)
    3705  { if(m_pMutex) { m_pMutex->LockRead(); } }
    3706  ~VmaMutexLockRead() { if(m_pMutex) { m_pMutex->UnlockRead(); } }
    3707 private:
    3708  VMA_RW_MUTEX* m_pMutex;
    3709 };
    3710 
    3711 // Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for writing.
    3712 struct VmaMutexLockWrite
    3713 {
    3714  VMA_CLASS_NO_COPY(VmaMutexLockWrite)
    3715 public:
    3716  VmaMutexLockWrite(VMA_RW_MUTEX& mutex, bool useMutex) :
    3717  m_pMutex(useMutex ? &mutex : VMA_NULL)
    3718  { if(m_pMutex) { m_pMutex->LockWrite(); } }
    3719  ~VmaMutexLockWrite() { if(m_pMutex) { m_pMutex->UnlockWrite(); } }
    3720 private:
    3721  VMA_RW_MUTEX* m_pMutex;
    3722 };
    3723 
    3724 #if VMA_DEBUG_GLOBAL_MUTEX
    3725  static VMA_MUTEX gDebugGlobalMutex;
    3726  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
    3727 #else
    3728  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
    3729 #endif
    3730 
    3731 // Minimum size of a free suballocation to register it in the free suballocation collection.
    3732 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
    3733 
    3734 /*
    3735 Performs binary search and returns iterator to first element that is greater or
    3736 equal to (key), according to comparison (cmp).
    3737 
    3738 Cmp should return true if first argument is less than second argument.
    3739 
    3740 Returned value is the found element, if present in the collection or place where
    3741 new element with value (key) should be inserted.
    3742 */
    3743 template <typename CmpLess, typename IterT, typename KeyT>
    3744 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpLess cmp)
    3745 {
    3746  size_t down = 0, up = (end - beg);
    3747  while(down < up)
    3748  {
    3749  const size_t mid = (down + up) / 2;
    3750  if(cmp(*(beg+mid), key))
    3751  {
    3752  down = mid + 1;
    3753  }
    3754  else
    3755  {
    3756  up = mid;
    3757  }
    3758  }
    3759  return beg + down;
    3760 }
    3761 
    3762 /*
    3763 Returns true if all pointers in the array are not-null and unique.
    3764 Warning! O(n^2) complexity. Use only inside VMA_HEAVY_ASSERT.
    3765 T must be pointer type, e.g. VmaAllocation, VmaPool.
    3766 */
    3767 template<typename T>
    3768 static bool VmaValidatePointerArray(uint32_t count, const T* arr)
    3769 {
    3770  for(uint32_t i = 0; i < count; ++i)
    3771  {
    3772  const T iPtr = arr[i];
    3773  if(iPtr == VMA_NULL)
    3774  {
    3775  return false;
    3776  }
    3777  for(uint32_t j = i + 1; j < count; ++j)
    3778  {
    3779  if(iPtr == arr[j])
    3780  {
    3781  return false;
    3782  }
    3783  }
    3784  }
    3785  return true;
    3786 }
    3787 
    3789 // Memory allocation
    3790 
    3791 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
    3792 {
    3793  if((pAllocationCallbacks != VMA_NULL) &&
    3794  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
    3795  {
    3796  return (*pAllocationCallbacks->pfnAllocation)(
    3797  pAllocationCallbacks->pUserData,
    3798  size,
    3799  alignment,
    3800  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    3801  }
    3802  else
    3803  {
    3804  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
    3805  }
    3806 }
    3807 
    3808 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
    3809 {
    3810  if((pAllocationCallbacks != VMA_NULL) &&
    3811  (pAllocationCallbacks->pfnFree != VMA_NULL))
    3812  {
    3813  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
    3814  }
    3815  else
    3816  {
    3817  VMA_SYSTEM_FREE(ptr);
    3818  }
    3819 }
    3820 
    3821 template<typename T>
    3822 static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
    3823 {
    3824  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
    3825 }
    3826 
    3827 template<typename T>
    3828 static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
    3829 {
    3830  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
    3831 }
    3832 
    3833 #define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
    3834 
    3835 #define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
    3836 
// Calls the destructor of *ptr, then releases its storage through the
// allocation callbacks. Counterpart of the vma_new macro.
// Note: unlike operator delete, this does not tolerate null - the caller must
// pass a valid pointer.
template<typename T>
static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
{
    ptr->~T();
    VmaFree(pAllocationCallbacks, ptr);
}
    3843 
    3844 template<typename T>
    3845 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
    3846 {
    3847  if(ptr != VMA_NULL)
    3848  {
    3849  for(size_t i = count; i--; )
    3850  {
    3851  ptr[i].~T();
    3852  }
    3853  VmaFree(pAllocationCallbacks, ptr);
    3854  }
    3855 }
    3856 
    3857 // STL-compatible allocator.
    3858 template<typename T>
    3859 class VmaStlAllocator
    3860 {
    3861 public:
    3862  const VkAllocationCallbacks* const m_pCallbacks;
    3863  typedef T value_type;
    3864 
    3865  VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    3866  template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }
    3867 
    3868  T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    3869  void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
    3870 
    3871  template<typename U>
    3872  bool operator==(const VmaStlAllocator<U>& rhs) const
    3873  {
    3874  return m_pCallbacks == rhs.m_pCallbacks;
    3875  }
    3876  template<typename U>
    3877  bool operator!=(const VmaStlAllocator<U>& rhs) const
    3878  {
    3879  return m_pCallbacks != rhs.m_pCallbacks;
    3880  }
    3881 
    3882  VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
    3883 };
    3884 
    3885 #if VMA_USE_STL_VECTOR
    3886 
    3887 #define VmaVector std::vector
    3888 
    3889 template<typename T, typename allocatorT>
    3890 static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
    3891 {
    3892  vec.insert(vec.begin() + index, item);
    3893 }
    3894 
    3895 template<typename T, typename allocatorT>
    3896 static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
    3897 {
    3898  vec.erase(vec.begin() + index);
    3899 }
    3900 
    3901 #else // #if VMA_USE_STL_VECTOR
    3902 
    3903 /* Class with interface compatible with subset of std::vector.
    3904 T must be POD because constructors and destructors are not called and memcpy is
    3905 used for these objects. */
    3906 template<typename T, typename AllocatorT>
    3907 class VmaVector
    3908 {
    3909 public:
    3910  typedef T value_type;
    3911 
    3912  VmaVector(const AllocatorT& allocator) :
    3913  m_Allocator(allocator),
    3914  m_pArray(VMA_NULL),
    3915  m_Count(0),
    3916  m_Capacity(0)
    3917  {
    3918  }
    3919 
    3920  VmaVector(size_t count, const AllocatorT& allocator) :
    3921  m_Allocator(allocator),
    3922  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
    3923  m_Count(count),
    3924  m_Capacity(count)
    3925  {
    3926  }
    3927 
    3928  VmaVector(const VmaVector<T, AllocatorT>& src) :
    3929  m_Allocator(src.m_Allocator),
    3930  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
    3931  m_Count(src.m_Count),
    3932  m_Capacity(src.m_Count)
    3933  {
    3934  if(m_Count != 0)
    3935  {
    3936  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
    3937  }
    3938  }
    3939 
    3940  ~VmaVector()
    3941  {
    3942  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    3943  }
    3944 
    3945  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
    3946  {
    3947  if(&rhs != this)
    3948  {
    3949  resize(rhs.m_Count);
    3950  if(m_Count != 0)
    3951  {
    3952  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
    3953  }
    3954  }
    3955  return *this;
    3956  }
    3957 
    3958  bool empty() const { return m_Count == 0; }
    3959  size_t size() const { return m_Count; }
    3960  T* data() { return m_pArray; }
    3961  const T* data() const { return m_pArray; }
    3962 
    3963  T& operator[](size_t index)
    3964  {
    3965  VMA_HEAVY_ASSERT(index < m_Count);
    3966  return m_pArray[index];
    3967  }
    3968  const T& operator[](size_t index) const
    3969  {
    3970  VMA_HEAVY_ASSERT(index < m_Count);
    3971  return m_pArray[index];
    3972  }
    3973 
    3974  T& front()
    3975  {
    3976  VMA_HEAVY_ASSERT(m_Count > 0);
    3977  return m_pArray[0];
    3978  }
    3979  const T& front() const
    3980  {
    3981  VMA_HEAVY_ASSERT(m_Count > 0);
    3982  return m_pArray[0];
    3983  }
    3984  T& back()
    3985  {
    3986  VMA_HEAVY_ASSERT(m_Count > 0);
    3987  return m_pArray[m_Count - 1];
    3988  }
    3989  const T& back() const
    3990  {
    3991  VMA_HEAVY_ASSERT(m_Count > 0);
    3992  return m_pArray[m_Count - 1];
    3993  }
    3994 
    3995  void reserve(size_t newCapacity, bool freeMemory = false)
    3996  {
    3997  newCapacity = VMA_MAX(newCapacity, m_Count);
    3998 
    3999  if((newCapacity < m_Capacity) && !freeMemory)
    4000  {
    4001  newCapacity = m_Capacity;
    4002  }
    4003 
    4004  if(newCapacity != m_Capacity)
    4005  {
    4006  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator, newCapacity) : VMA_NULL;
    4007  if(m_Count != 0)
    4008  {
    4009  memcpy(newArray, m_pArray, m_Count * sizeof(T));
    4010  }
    4011  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    4012  m_Capacity = newCapacity;
    4013  m_pArray = newArray;
    4014  }
    4015  }
    4016 
    4017  void resize(size_t newCount, bool freeMemory = false)
    4018  {
    4019  size_t newCapacity = m_Capacity;
    4020  if(newCount > m_Capacity)
    4021  {
    4022  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
    4023  }
    4024  else if(freeMemory)
    4025  {
    4026  newCapacity = newCount;
    4027  }
    4028 
    4029  if(newCapacity != m_Capacity)
    4030  {
    4031  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
    4032  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
    4033  if(elementsToCopy != 0)
    4034  {
    4035  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
    4036  }
    4037  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    4038  m_Capacity = newCapacity;
    4039  m_pArray = newArray;
    4040  }
    4041 
    4042  m_Count = newCount;
    4043  }
    4044 
    4045  void clear(bool freeMemory = false)
    4046  {
    4047  resize(0, freeMemory);
    4048  }
    4049 
    4050  void insert(size_t index, const T& src)
    4051  {
    4052  VMA_HEAVY_ASSERT(index <= m_Count);
    4053  const size_t oldCount = size();
    4054  resize(oldCount + 1);
    4055  if(index < oldCount)
    4056  {
    4057  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
    4058  }
    4059  m_pArray[index] = src;
    4060  }
    4061 
    4062  void remove(size_t index)
    4063  {
    4064  VMA_HEAVY_ASSERT(index < m_Count);
    4065  const size_t oldCount = size();
    4066  if(index < oldCount - 1)
    4067  {
    4068  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
    4069  }
    4070  resize(oldCount - 1);
    4071  }
    4072 
    4073  void push_back(const T& src)
    4074  {
    4075  const size_t newIndex = size();
    4076  resize(newIndex + 1);
    4077  m_pArray[newIndex] = src;
    4078  }
    4079 
    4080  void pop_back()
    4081  {
    4082  VMA_HEAVY_ASSERT(m_Count > 0);
    4083  resize(size() - 1);
    4084  }
    4085 
    4086  void push_front(const T& src)
    4087  {
    4088  insert(0, src);
    4089  }
    4090 
    4091  void pop_front()
    4092  {
    4093  VMA_HEAVY_ASSERT(m_Count > 0);
    4094  remove(0);
    4095  }
    4096 
    4097  typedef T* iterator;
    4098 
    4099  iterator begin() { return m_pArray; }
    4100  iterator end() { return m_pArray + m_Count; }
    4101 
    4102 private:
    4103  AllocatorT m_Allocator;
    4104  T* m_pArray;
    4105  size_t m_Count;
    4106  size_t m_Capacity;
    4107 };
    4108 
// Inserts item at the given index of a VmaVector. Counterpart of the
// std::vector overload, used when the custom vector path is enabled.
template<typename T, typename allocatorT>
static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(index, item);
}
    4114 
// Removes the element at the given index of a VmaVector. Counterpart of the
// std::vector overload, used when the custom vector path is enabled.
template<typename T, typename allocatorT>
static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
{
    vec.remove(index);
}
    4120 
    4121 #endif // #if VMA_USE_STL_VECTOR
    4122 
    4123 template<typename CmpLess, typename VectorT>
    4124 size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
    4125 {
    4126  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
    4127  vector.data(),
    4128  vector.data() + vector.size(),
    4129  value,
    4130  CmpLess()) - vector.data();
    4131  VmaVectorInsert(vector, indexToInsert, value);
    4132  return indexToInsert;
    4133 }
    4134 
    4135 template<typename CmpLess, typename VectorT>
    4136 bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
    4137 {
    4138  CmpLess comparator;
    4139  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
    4140  vector.begin(),
    4141  vector.end(),
    4142  value,
    4143  comparator);
    4144  if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
    4145  {
    4146  size_t indexToRemove = it - vector.begin();
    4147  VmaVectorRemove(vector, indexToRemove);
    4148  return true;
    4149  }
    4150  return false;
    4151 }
    4152 
    4153 template<typename CmpLess, typename IterT, typename KeyT>
    4154 IterT VmaVectorFindSorted(const IterT& beg, const IterT& end, const KeyT& value)
    4155 {
    4156  CmpLess comparator;
    4157  IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
    4158  beg, end, value, comparator);
    4159  if(it == end ||
    4160  (!comparator(*it, value) && !comparator(value, *it)))
    4161  {
    4162  return it;
    4163  }
    4164  return end;
    4165 }
    4166 
    4168 // class VmaPoolAllocator
    4169 
    4170 /*
    4171 Allocator for objects of type T using a list of arrays (pools) to speed up
    4172 allocation. Number of elements that can be allocated is not bounded because
    4173 allocator can create multiple blocks.
    4174 */
    4175 template<typename T>
    4176 class VmaPoolAllocator
    4177 {
    4178  VMA_CLASS_NO_COPY(VmaPoolAllocator)
    4179 public:
    4180  VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
    4181  ~VmaPoolAllocator();
    4182  void Clear();
    4183  T* Alloc();
    4184  void Free(T* ptr);
    4185 
    4186 private:
    4187  union Item
    4188  {
    4189  uint32_t NextFreeIndex;
    4190  T Value;
    4191  };
    4192 
    4193  struct ItemBlock
    4194  {
    4195  Item* pItems;
    4196  uint32_t FirstFreeIndex;
    4197  };
    4198 
    4199  const VkAllocationCallbacks* m_pAllocationCallbacks;
    4200  size_t m_ItemsPerBlock;
    4201  VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;
    4202 
    4203  ItemBlock& CreateNewBlock();
    4204 };
    4205 
// Constructs an empty pool allocator; no block is allocated until first Alloc().
template<typename T>
VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemsPerBlock(itemsPerBlock),
    m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
{
    VMA_ASSERT(itemsPerBlock > 0);
}
    4214 
// Destroys all pool blocks; see Clear() for the actual deallocation.
template<typename T>
VmaPoolAllocator<T>::~VmaPoolAllocator()
{
    Clear();
}
    4220 
    4221 template<typename T>
    4222 void VmaPoolAllocator<T>::Clear()
    4223 {
    4224  for(size_t i = m_ItemBlocks.size(); i--; )
    4225  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
    4226  m_ItemBlocks.clear();
    4227 }
    4228 
    4229 template<typename T>
    4230 T* VmaPoolAllocator<T>::Alloc()
    4231 {
    4232  for(size_t i = m_ItemBlocks.size(); i--; )
    4233  {
    4234  ItemBlock& block = m_ItemBlocks[i];
    4235  // This block has some free items: Use first one.
    4236  if(block.FirstFreeIndex != UINT32_MAX)
    4237  {
    4238  Item* const pItem = &block.pItems[block.FirstFreeIndex];
    4239  block.FirstFreeIndex = pItem->NextFreeIndex;
    4240  return &pItem->Value;
    4241  }
    4242  }
    4243 
    4244  // No block has free item: Create new one and use it.
    4245  ItemBlock& newBlock = CreateNewBlock();
    4246  Item* const pItem = &newBlock.pItems[0];
    4247  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
    4248  return &pItem->Value;
    4249 }
    4250 
    4251 template<typename T>
    4252 void VmaPoolAllocator<T>::Free(T* ptr)
    4253 {
    4254  // Search all memory blocks to find ptr.
    4255  for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
    4256  {
    4257  ItemBlock& block = m_ItemBlocks[i];
    4258 
    4259  // Casting to union.
    4260  Item* pItemPtr;
    4261  memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
    4262 
    4263  // Check if pItemPtr is in address range of this block.
    4264  if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
    4265  {
    4266  const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
    4267  pItemPtr->NextFreeIndex = block.FirstFreeIndex;
    4268  block.FirstFreeIndex = index;
    4269  return;
    4270  }
    4271  }
    4272  VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
    4273 }
    4274 
    4275 template<typename T>
    4276 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
    4277 {
    4278  ItemBlock newBlock = {
    4279  vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };
    4280 
    4281  m_ItemBlocks.push_back(newBlock);
    4282 
    4283  // Setup singly-linked list of all free items in this block.
    4284  for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
    4285  newBlock.pItems[i].NextFreeIndex = i + 1;
    4286  newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
    4287  return m_ItemBlocks.back();
    4288 }
    4289 
    4291 // class VmaRawList, VmaList
    4292 
    4293 #if VMA_USE_STL_LIST
    4294 
    4295 #define VmaList std::list
    4296 
    4297 #else // #if VMA_USE_STL_LIST
    4298 
// Node of VmaRawList: doubly-linked, holding the payload by value.
// T must be POD — nodes come from VmaPoolAllocator, which never runs
// constructors or destructors.
template<typename T>
struct VmaListItem
{
    VmaListItem* pPrev;
    VmaListItem* pNext;
    T Value;
};
    4306 
// Doubly linked list.
// Low-level list of VmaListItem nodes backed by VmaPoolAllocator; exposes raw
// node pointers. VmaList below wraps it with an STL-like iterator interface.
template<typename T>
class VmaRawList
{
    VMA_CLASS_NO_COPY(VmaRawList)
public:
    typedef VmaListItem<T> ItemType;

    VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    // Note: does not return nodes to the pool — see definition for rationale.
    ~VmaRawList();
    // Frees all nodes and resets the list to empty.
    void Clear();

    size_t GetCount() const { return m_Count; }
    bool IsEmpty() const { return m_Count == 0; }

    ItemType* Front() { return m_pFront; }
    const ItemType* Front() const { return m_pFront; }
    ItemType* Back() { return m_pBack; }
    const ItemType* Back() const { return m_pBack; }

    // The parameterless overloads append/prepend a node with uninitialized Value.
    ItemType* PushBack();
    ItemType* PushFront();
    ItemType* PushBack(const T& value);
    ItemType* PushFront(const T& value);
    void PopBack();
    void PopFront();

    // Item can be null - it means PushBack.
    ItemType* InsertBefore(ItemType* pItem);
    // Item can be null - it means PushFront.
    ItemType* InsertAfter(ItemType* pItem);

    ItemType* InsertBefore(ItemType* pItem, const T& value);
    ItemType* InsertAfter(ItemType* pItem, const T& value);

    // Unlinks pItem and returns its node to the pool allocator.
    void Remove(ItemType* pItem);

private:
    const VkAllocationCallbacks* const m_pAllocationCallbacks;
    VmaPoolAllocator<ItemType> m_ItemAllocator;
    ItemType* m_pFront;
    ItemType* m_pBack;
    size_t m_Count;
};
    4351 
// Constructs an empty list; node pool uses 128 items per block.
template<typename T>
VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemAllocator(pAllocationCallbacks, 128),
    m_pFront(VMA_NULL),
    m_pBack(VMA_NULL),
    m_Count(0)
{
}
    4361 
// Destructor relies on m_ItemAllocator's destructor to release all node
// memory in bulk, so returning nodes one-by-one would be wasted work.
template<typename T>
VmaRawList<T>::~VmaRawList()
{
    // Intentionally not calling Clear, because that would be unnecessary
    // computations to return all items to m_ItemAllocator as free.
}
    4368 
    4369 template<typename T>
    4370 void VmaRawList<T>::Clear()
    4371 {
    4372  if(IsEmpty() == false)
    4373  {
    4374  ItemType* pItem = m_pBack;
    4375  while(pItem != VMA_NULL)
    4376  {
    4377  ItemType* const pPrevItem = pItem->pPrev;
    4378  m_ItemAllocator.Free(pItem);
    4379  pItem = pPrevItem;
    4380  }
    4381  m_pFront = VMA_NULL;
    4382  m_pBack = VMA_NULL;
    4383  m_Count = 0;
    4384  }
    4385 }
    4386 
    4387 template<typename T>
    4388 VmaListItem<T>* VmaRawList<T>::PushBack()
    4389 {
    4390  ItemType* const pNewItem = m_ItemAllocator.Alloc();
    4391  pNewItem->pNext = VMA_NULL;
    4392  if(IsEmpty())
    4393  {
    4394  pNewItem->pPrev = VMA_NULL;
    4395  m_pFront = pNewItem;
    4396  m_pBack = pNewItem;
    4397  m_Count = 1;
    4398  }
    4399  else
    4400  {
    4401  pNewItem->pPrev = m_pBack;
    4402  m_pBack->pNext = pNewItem;
    4403  m_pBack = pNewItem;
    4404  ++m_Count;
    4405  }
    4406  return pNewItem;
    4407 }
    4408 
    4409 template<typename T>
    4410 VmaListItem<T>* VmaRawList<T>::PushFront()
    4411 {
    4412  ItemType* const pNewItem = m_ItemAllocator.Alloc();
    4413  pNewItem->pPrev = VMA_NULL;
    4414  if(IsEmpty())
    4415  {
    4416  pNewItem->pNext = VMA_NULL;
    4417  m_pFront = pNewItem;
    4418  m_pBack = pNewItem;
    4419  m_Count = 1;
    4420  }
    4421  else
    4422  {
    4423  pNewItem->pNext = m_pFront;
    4424  m_pFront->pPrev = pNewItem;
    4425  m_pFront = pNewItem;
    4426  ++m_Count;
    4427  }
    4428  return pNewItem;
    4429 }
    4430 
    4431 template<typename T>
    4432 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
    4433 {
    4434  ItemType* const pNewItem = PushBack();
    4435  pNewItem->Value = value;
    4436  return pNewItem;
    4437 }
    4438 
    4439 template<typename T>
    4440 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
    4441 {
    4442  ItemType* const pNewItem = PushFront();
    4443  pNewItem->Value = value;
    4444  return pNewItem;
    4445 }
    4446 
    4447 template<typename T>
    4448 void VmaRawList<T>::PopBack()
    4449 {
    4450  VMA_HEAVY_ASSERT(m_Count > 0);
    4451  ItemType* const pBackItem = m_pBack;
    4452  ItemType* const pPrevItem = pBackItem->pPrev;
    4453  if(pPrevItem != VMA_NULL)
    4454  {
    4455  pPrevItem->pNext = VMA_NULL;
    4456  }
    4457  m_pBack = pPrevItem;
    4458  m_ItemAllocator.Free(pBackItem);
    4459  --m_Count;
    4460 }
    4461 
    4462 template<typename T>
    4463 void VmaRawList<T>::PopFront()
    4464 {
    4465  VMA_HEAVY_ASSERT(m_Count > 0);
    4466  ItemType* const pFrontItem = m_pFront;
    4467  ItemType* const pNextItem = pFrontItem->pNext;
    4468  if(pNextItem != VMA_NULL)
    4469  {
    4470  pNextItem->pPrev = VMA_NULL;
    4471  }
    4472  m_pFront = pNextItem;
    4473  m_ItemAllocator.Free(pFrontItem);
    4474  --m_Count;
    4475 }
    4476 
    4477 template<typename T>
    4478 void VmaRawList<T>::Remove(ItemType* pItem)
    4479 {
    4480  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
    4481  VMA_HEAVY_ASSERT(m_Count > 0);
    4482 
    4483  if(pItem->pPrev != VMA_NULL)
    4484  {
    4485  pItem->pPrev->pNext = pItem->pNext;
    4486  }
    4487  else
    4488  {
    4489  VMA_HEAVY_ASSERT(m_pFront == pItem);
    4490  m_pFront = pItem->pNext;
    4491  }
    4492 
    4493  if(pItem->pNext != VMA_NULL)
    4494  {
    4495  pItem->pNext->pPrev = pItem->pPrev;
    4496  }
    4497  else
    4498  {
    4499  VMA_HEAVY_ASSERT(m_pBack == pItem);
    4500  m_pBack = pItem->pPrev;
    4501  }
    4502 
    4503  m_ItemAllocator.Free(pItem);
    4504  --m_Count;
    4505 }
    4506 
    4507 template<typename T>
    4508 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
    4509 {
    4510  if(pItem != VMA_NULL)
    4511  {
    4512  ItemType* const prevItem = pItem->pPrev;
    4513  ItemType* const newItem = m_ItemAllocator.Alloc();
    4514  newItem->pPrev = prevItem;
    4515  newItem->pNext = pItem;
    4516  pItem->pPrev = newItem;
    4517  if(prevItem != VMA_NULL)
    4518  {
    4519  prevItem->pNext = newItem;
    4520  }
    4521  else
    4522  {
    4523  VMA_HEAVY_ASSERT(m_pFront == pItem);
    4524  m_pFront = newItem;
    4525  }
    4526  ++m_Count;
    4527  return newItem;
    4528  }
    4529  else
    4530  return PushBack();
    4531 }
    4532 
    4533 template<typename T>
    4534 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
    4535 {
    4536  if(pItem != VMA_NULL)
    4537  {
    4538  ItemType* const nextItem = pItem->pNext;
    4539  ItemType* const newItem = m_ItemAllocator.Alloc();
    4540  newItem->pNext = nextItem;
    4541  newItem->pPrev = pItem;
    4542  pItem->pNext = newItem;
    4543  if(nextItem != VMA_NULL)
    4544  {
    4545  nextItem->pPrev = newItem;
    4546  }
    4547  else
    4548  {
    4549  VMA_HEAVY_ASSERT(m_pBack == pItem);
    4550  m_pBack = newItem;
    4551  }
    4552  ++m_Count;
    4553  return newItem;
    4554  }
    4555  else
    4556  return PushFront();
    4557 }
    4558 
    4559 template<typename T>
    4560 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
    4561 {
    4562  ItemType* const newItem = InsertBefore(pItem);
    4563  newItem->Value = value;
    4564  return newItem;
    4565 }
    4566 
    4567 template<typename T>
    4568 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
    4569 {
    4570  ItemType* const newItem = InsertAfter(pItem);
    4571  newItem->Value = value;
    4572  return newItem;
    4573 }
    4574 
/* Class with interface compatible with a subset of std::list, built on top of
VmaRawList. Iterators carry a pointer back to their list so mismatched use can
be caught by VMA_HEAVY_ASSERT; end() is represented by a null item pointer. */
template<typename T, typename AllocatorT>
class VmaList
{
    VMA_CLASS_NO_COPY(VmaList)
public:
    // Mutable bidirectional iterator. m_pItem == VMA_NULL denotes end().
    class iterator
    {
    public:
        iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        // Decrementing end() yields the last element (list must be non-empty).
        iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        iterator operator++(int)
        {
            iterator result = *this;
            ++*this;
            return result;
        }
        iterator operator--(int)
        {
            iterator result = *this;
            --*this;
            return result;
        }

        // Comparisons assert both iterators come from the same list.
        bool operator==(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        VmaRawList<T>* m_pList;
        VmaListItem<T>* m_pItem;

        // Private: only VmaList creates positioned iterators.
        iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        friend class VmaList<T, AllocatorT>;
    };

    // Read-only counterpart of iterator; implicitly convertible from iterator.
    class const_iterator
    {
    public:
        const_iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        const_iterator(const iterator& src) :
            m_pList(src.m_pList),
            m_pItem(src.m_pItem)
        {
        }

        const T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        const T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        const_iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        // Decrementing end() yields the last element (list must be non-empty).
        const_iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        const_iterator operator++(int)
        {
            const_iterator result = *this;
            ++*this;
            return result;
        }
        const_iterator operator--(int)
        {
            const_iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        const VmaRawList<T>* m_pList;
        const VmaListItem<T>* m_pItem;

        friend class VmaList<T, AllocatorT>;
    };

    // AllocatorT must expose m_pCallbacks (VkAllocationCallbacks*).
    VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }

    bool empty() const { return m_RawList.IsEmpty(); }
    size_t size() const { return m_RawList.GetCount(); }

    iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
    iterator end() { return iterator(&m_RawList, VMA_NULL); }

    const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
    const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }

    void clear() { m_RawList.Clear(); }
    void push_back(const T& value) { m_RawList.PushBack(value); }
    void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
    // Inserts before 'it'; inserting before end() appends (see VmaRawList::InsertBefore).
    iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }

private:
    VmaRawList<T> m_RawList;
};
    4759 
    4760 #endif // #if VMA_USE_STL_LIST
    4761 
    4763 // class VmaMap
    4764 
    4765 // Unused in this version.
    4766 #if 0
    4767 
    4768 #if VMA_USE_STL_UNORDERED_MAP
    4769 
    4770 #define VmaPair std::pair
    4771 
    4772 #define VMA_MAP_TYPE(KeyT, ValueT) \
    4773  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
    4774 
    4775 #else // #if VMA_USE_STL_UNORDERED_MAP
    4776 
// Minimal replacement for std::pair, used as the element type of VmaMap.
// (This whole section is compiled out — see the enclosing '#if 0'.)
template<typename T1, typename T2>
struct VmaPair
{
    T1 first;
    T2 second;

    VmaPair() : first(), second() { }
    VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
};
    4786 
/* Class compatible with subset of interface of std::unordered_map.
KeyT, ValueT must be POD because they will be stored in VmaVector.
Internally a VmaVector of pairs kept sorted by key (see insert/find below).
(This whole section is compiled out — see the enclosing '#if 0'.)
*/
template<typename KeyT, typename ValueT>
class VmaMap
{
public:
    typedef VmaPair<KeyT, ValueT> PairType;
    // iterator is a raw pointer into the underlying vector.
    typedef PairType* iterator;

    VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }

    iterator begin() { return m_Vector.begin(); }
    iterator end() { return m_Vector.end(); }

    void insert(const PairType& pair);
    // Returns iterator to the pair with matching key, or end() if absent.
    iterator find(const KeyT& key);
    void erase(iterator it);

private:
    VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
};
    4809 
    4810 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
    4811 
// Ordering functor for VmaPair that compares by 'first' only. The second
// overload allows binary search by a bare key (used by VmaMap::find).
template<typename FirstT, typename SecondT>
struct VmaPairFirstLess
{
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
    {
        return lhs.first < rhs.first;
    }
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
    {
        return lhs.first < rhsFirst;
    }
};
    4824 
    4825 template<typename KeyT, typename ValueT>
    4826 void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
    4827 {
    4828  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
    4829  m_Vector.data(),
    4830  m_Vector.data() + m_Vector.size(),
    4831  pair,
    4832  VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
    4833  VmaVectorInsert(m_Vector, indexToInsert, pair);
    4834 }
    4835 
    4836 template<typename KeyT, typename ValueT>
    4837 VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
    4838 {
    4839  PairType* it = VmaBinaryFindFirstNotLess(
    4840  m_Vector.data(),
    4841  m_Vector.data() + m_Vector.size(),
    4842  key,
    4843  VmaPairFirstLess<KeyT, ValueT>());
    4844  if((it != m_Vector.end()) && (it->first == key))
    4845  {
    4846  return it;
    4847  }
    4848  else
    4849  {
    4850  return m_Vector.end();
    4851  }
    4852 }
    4853 
// Removes the element 'it' points at. Since iterator is a raw pointer into
// m_Vector, pointer arithmetic yields the element's index.
template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::erase(iterator it)
{
    VmaVectorRemove(m_Vector, it - m_Vector.begin());
}
    4859 
    4860 #endif // #if VMA_USE_STL_UNORDERED_MAP
    4861 
    4862 #endif // #if 0
    4863 
    4865 
class VmaDeviceMemoryBlock;

// Selects the direction of a host cache maintenance operation on mapped memory
// (presumably vkFlushMappedMemoryRanges vs vkInvalidateMappedMemoryRanges —
// confirm at the call sites, which are outside this chunk).
enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
    4869 
    4870 struct VmaAllocation_T
    4871 {
    4872  VMA_CLASS_NO_COPY(VmaAllocation_T)
    4873 private:
    4874  static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;
    4875 
    4876  enum FLAGS
    4877  {
    4878  FLAG_USER_DATA_STRING = 0x01,
    4879  };
    4880 
    4881 public:
    4882  enum ALLOCATION_TYPE
    4883  {
    4884  ALLOCATION_TYPE_NONE,
    4885  ALLOCATION_TYPE_BLOCK,
    4886  ALLOCATION_TYPE_DEDICATED,
    4887  };
    4888 
    4889  VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
    4890  m_Alignment(1),
    4891  m_Size(0),
    4892  m_pUserData(VMA_NULL),
    4893  m_LastUseFrameIndex(currentFrameIndex),
    4894  m_Type((uint8_t)ALLOCATION_TYPE_NONE),
    4895  m_SuballocationType((uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN),
    4896  m_MapCount(0),
    4897  m_Flags(userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0)
    4898  {
    4899 #if VMA_STATS_STRING_ENABLED
    4900  m_CreationFrameIndex = currentFrameIndex;
    4901  m_BufferImageUsage = 0;
    4902 #endif
    4903  }
    4904 
    4905  ~VmaAllocation_T()
    4906  {
    4907  VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");
    4908 
    4909  // Check if owned string was freed.
    4910  VMA_ASSERT(m_pUserData == VMA_NULL);
    4911  }
    4912 
    4913  void InitBlockAllocation(
    4914  VmaPool hPool,
    4915  VmaDeviceMemoryBlock* block,
    4916  VkDeviceSize offset,
    4917  VkDeviceSize alignment,
    4918  VkDeviceSize size,
    4919  VmaSuballocationType suballocationType,
    4920  bool mapped,
    4921  bool canBecomeLost)
    4922  {
    4923  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
    4924  VMA_ASSERT(block != VMA_NULL);
    4925  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
    4926  m_Alignment = alignment;
    4927  m_Size = size;
    4928  m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
    4929  m_SuballocationType = (uint8_t)suballocationType;
    4930  m_BlockAllocation.m_hPool = hPool;
    4931  m_BlockAllocation.m_Block = block;
    4932  m_BlockAllocation.m_Offset = offset;
    4933  m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
    4934  }
    4935 
    4936  void InitLost()
    4937  {
    4938  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
    4939  VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
    4940  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
    4941  m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
    4942  m_BlockAllocation.m_Block = VMA_NULL;
    4943  m_BlockAllocation.m_Offset = 0;
    4944  m_BlockAllocation.m_CanBecomeLost = true;
    4945  }
    4946 
    4947  void ChangeBlockAllocation(
    4948  VmaAllocator hAllocator,
    4949  VmaDeviceMemoryBlock* block,
    4950  VkDeviceSize offset);
    4951 
    4952  void ChangeSize(VkDeviceSize newSize);
    4953  void ChangeOffset(VkDeviceSize newOffset);
    4954 
    4955  // pMappedData not null means allocation is created with MAPPED flag.
    4956  void InitDedicatedAllocation(
    4957  uint32_t memoryTypeIndex,
    4958  VkDeviceMemory hMemory,
    4959  VmaSuballocationType suballocationType,
    4960  void* pMappedData,
    4961  VkDeviceSize size)
    4962  {
    4963  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
    4964  VMA_ASSERT(hMemory != VK_NULL_HANDLE);
    4965  m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
    4966  m_Alignment = 0;
    4967  m_Size = size;
    4968  m_SuballocationType = (uint8_t)suballocationType;
    4969  m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
    4970  m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
    4971  m_DedicatedAllocation.m_hMemory = hMemory;
    4972  m_DedicatedAllocation.m_pMappedData = pMappedData;
    4973  }
    4974 
    // Basic accessors for the allocation's common state.
    ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
    VkDeviceSize GetAlignment() const { return m_Alignment; }
    VkDeviceSize GetSize() const { return m_Size; }
    // True when pUserData holds an owned string copy (FLAG_USER_DATA_STRING set).
    bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
    void* GetUserData() const { return m_pUserData; }
    void SetUserData(VmaAllocator hAllocator, void* pUserData);
    VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }

    // Valid only for block allocations; asserts otherwise.
    VmaDeviceMemoryBlock* GetBlock() const
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
        return m_BlockAllocation.m_Block;
    }
    VkDeviceSize GetOffset() const;
    VkDeviceMemory GetMemory() const;
    uint32_t GetMemoryTypeIndex() const;
    // True when the allocation was created with the MAPPED flag (top bit of m_MapCount).
    bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
    void* GetMappedData() const;
    bool CanBecomeLost() const;
    VmaPool GetPool() const;

    // Frame index of last use; accessed atomically because the lost-allocation
    // logic below reads and CAS-updates it.
    uint32_t GetLastUseFrameIndex() const
    {
        return m_LastUseFrameIndex.load();
    }
    bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
    {
        return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
    }
    /*
    - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
      makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
    - Else, returns false.

    If hAllocation is already lost, assert - you should not call it then.
    If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
    */
    bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
    5013 
    5014  void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
    5015  {
    5016  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
    5017  outInfo.blockCount = 1;
    5018  outInfo.allocationCount = 1;
    5019  outInfo.unusedRangeCount = 0;
    5020  outInfo.usedBytes = m_Size;
    5021  outInfo.unusedBytes = 0;
    5022  outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
    5023  outInfo.unusedRangeSizeMin = UINT64_MAX;
    5024  outInfo.unusedRangeSizeMax = 0;
    5025  }
    5026 
    // Map/unmap entry points, separated per allocation type.
    void BlockAllocMap();
    void BlockAllocUnmap();
    VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
    void DedicatedAllocUnmap(VmaAllocator hAllocator);

#if VMA_STATS_STRING_ENABLED
    // Extra bookkeeping available only when statistics strings are compiled in.
    uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
    uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }

    // Records buffer/image usage flags; may be set only once (0 means unknown).
    void InitBufferImageUsage(uint32_t bufferImageUsage)
    {
        VMA_ASSERT(m_BufferImageUsage == 0);
        m_BufferImageUsage = bufferImageUsage;
    }

    void PrintParameters(class VmaJsonWriter& json) const;
#endif
    5044 
private:
    VkDeviceSize m_Alignment;
    VkDeviceSize m_Size;
    void* m_pUserData;
    // Frame index of last use; atomic because it is read/CAS-ed by the lost-allocation logic.
    VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
    uint8_t m_Type; // ALLOCATION_TYPE
    uint8_t m_SuballocationType; // VmaSuballocationType
    // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
    // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
    uint8_t m_MapCount;
    uint8_t m_Flags; // enum FLAGS

    // Allocation out of VmaDeviceMemoryBlock.
    struct BlockAllocation
    {
        VmaPool m_hPool; // Null if belongs to general memory.
        VmaDeviceMemoryBlock* m_Block;
        VkDeviceSize m_Offset;
        bool m_CanBecomeLost;
    };

    // Allocation for an object that has its own private VkDeviceMemory.
    struct DedicatedAllocation
    {
        uint32_t m_MemoryTypeIndex;
        VkDeviceMemory m_hMemory;
        void* m_pMappedData; // Not null means memory is mapped.
    };

    // Exactly one member below is active at a time, selected by m_Type.
    union
    {
        // Allocation out of VmaDeviceMemoryBlock.
        BlockAllocation m_BlockAllocation;
        // Allocation for an object that has its own private VkDeviceMemory.
        DedicatedAllocation m_DedicatedAllocation;
    };

#if VMA_STATS_STRING_ENABLED
    uint32_t m_CreationFrameIndex;
    uint32_t m_BufferImageUsage; // 0 if unknown.
#endif

    void FreeUserDataString(VmaAllocator hAllocator);
};
    5089 
    5090 /*
    5091 Represents a region of VmaDeviceMemoryBlock that is either assigned and returned as
    5092 allocated memory block or free.
    5093 */
struct VmaSuballocation
{
    VkDeviceSize offset; // Offset from the start of the memory block, in bytes.
    VkDeviceSize size;   // Size of the region, in bytes.
    VmaAllocation hAllocation; // Allocation occupying this region; null if none (free range).
    VmaSuballocationType type;
};
    5101 
    5102 // Comparator for offsets.
    5103 struct VmaSuballocationOffsetLess
    5104 {
    5105  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
    5106  {
    5107  return lhs.offset < rhs.offset;
    5108  }
    5109 };
    5110 struct VmaSuballocationOffsetGreater
    5111 {
    5112  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
    5113  {
    5114  return lhs.offset > rhs.offset;
    5115  }
    5116 };
    5117 
// List of suballocations that together describe the contents of one memory block.
typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;

// Cost of one additional allocation lost, as equivalent in bytes.
// Used by VmaAllocationRequest::CalcCost to weigh making allocations lost
// against reusing free space. 1048576 = 1 MiB.
static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
    5122 
    5123 /*
    5124 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
    5125 
    5126 If canMakeOtherLost was false:
    5127 - item points to a FREE suballocation.
    5128 - itemsToMakeLostCount is 0.
    5129 
    5130 If canMakeOtherLost was true:
    5131 - item points to first of sequence of suballocations, which are either FREE,
    5132  or point to VmaAllocations that can become lost.
    5133 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
    5134  the requested allocation to succeed.
    5135 */
struct VmaAllocationRequest
{
    VkDeviceSize offset;
    VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
    VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
    VmaSuballocationList::iterator item;
    size_t itemsToMakeLostCount;
    // NOTE(review): not interpreted here — appears to carry data private to the
    // metadata implementation that produced the request; verify in implementations.
    void* customData;

    // Heuristic cost of fulfilling this request: bytes of existing allocations
    // sacrificed, plus a fixed per-allocation penalty (VMA_LOST_ALLOCATION_COST).
    VkDeviceSize CalcCost() const
    {
        return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
    }
};
    5150 
    5151 /*
    5152 Data structure used for bookkeeping of allocations and unused ranges of memory
    5153 in a single VkDeviceMemory block.
    5154 */
class VmaBlockMetadata
{
public:
    VmaBlockMetadata(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata() { }
    virtual void Init(VkDeviceSize size) { m_Size = size; }

    // Validates all data structures inside this object. If not valid, returns false.
    virtual bool Validate() const = 0;
    VkDeviceSize GetSize() const { return m_Size; }
    virtual size_t GetAllocationCount() const = 0;
    virtual VkDeviceSize GetSumFreeSize() const = 0;
    virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
    // Returns true if this block is empty - contains only single free suballocation.
    virtual bool IsEmpty() const = 0;

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
    // Shouldn't modify blockCount.
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
#endif

    // Tries to find a place for suballocation with given parameters inside this block.
    // If succeeded, fills pAllocationRequest and returns true.
    // If failed, returns false.
    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* or VMA_ALLOCATION_INTERNAL_STRATEGY_* flags.
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest) = 0;

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest) = 0;

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;

    virtual VkResult CheckCorruption(const void* pBlockData) = 0;

    // Makes actual allocation based on request. Request must already be checked and valid.
    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation) = 0;

    // Frees suballocation assigned to given memory region.
    virtual void Free(const VmaAllocation allocation) = 0;
    virtual void FreeAtOffset(VkDeviceSize offset) = 0;

    // Tries to resize (grow or shrink) space for given allocation, in place.
    // Default implementation refuses; concrete metadata classes may override.
    virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize) { return false; }

protected:
    const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }

#if VMA_STATS_STRING_ENABLED
    // Helpers for subclasses to emit the common parts of the detailed JSON map.
    void PrintDetailedMap_Begin(class VmaJsonWriter& json,
        VkDeviceSize unusedBytes,
        size_t allocationCount,
        size_t unusedRangeCount) const;
    void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
        VkDeviceSize offset,
        VmaAllocation hAllocation) const;
    void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
        VkDeviceSize offset,
        VkDeviceSize size) const;
    void PrintDetailedMap_End(class VmaJsonWriter& json) const;
#endif

private:
    VkDeviceSize m_Size;
    const VkAllocationCallbacks* m_pAllocationCallbacks;
};
    5240 
// Assertion helper for Validate() implementations: on failure, asserts with the
// stringized condition and makes the enclosing function return false.
#define VMA_VALIDATE(cond) do { if(!(cond)) { \
        VMA_ASSERT(0 && "Validation failed: " #cond); \
        return false; \
    } } while(false)
    5245 
class VmaBlockMetadata_Generic : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
public:
    VmaBlockMetadata_Generic(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Generic();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    // Total entries minus free entries = live allocations.
    virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    virtual bool IsEmpty() const;

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    virtual VkResult CheckCorruption(const void* pBlockData);

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation);
    virtual void FreeAtOffset(VkDeviceSize offset);

    // In-place resize is supported by this metadata type (overrides the
    // always-false base implementation).
    virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize);

    // For defragmentation

    bool IsBufferImageGranularityConflictPossible(
        VkDeviceSize bufferImageGranularity,
        VmaSuballocationType& inOutPrevSuballocType) const;

private:
    friend class VmaDefragmentationAlgorithm_Generic;
    friend class VmaDefragmentationAlgorithm_Fast;

    uint32_t m_FreeCount;
    VkDeviceSize m_SumFreeSize;
    VmaSuballocationList m_Suballocations;
    // Suballocations that are free and have size greater than certain threshold.
    // Sorted by size, ascending.
    VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;

    bool ValidateFreeSuballocationList() const;

    // Checks if requested suballocation with given parameters can be placed in given pFreeSuballocItem.
    // If yes, fills pOffset and returns true. If no, returns false.
    bool CheckAllocation(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        VmaSuballocationType allocType,
        VmaSuballocationList::const_iterator suballocItem,
        bool canMakeOtherLost,
        VkDeviceSize* pOffset,
        size_t* itemsToMakeLostCount,
        VkDeviceSize* pSumFreeSize,
        VkDeviceSize* pSumItemSize) const;
    // Given free suballocation, it merges it with following one, which must also be free.
    void MergeFreeWithNext(VmaSuballocationList::iterator item);
    // Releases given suballocation, making it free.
    // Merges it with adjacent free suballocations if applicable.
    // Returns iterator to new free suballocation at this place.
    VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
    // Given free suballocation, it inserts it into sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
    // Given free suballocation, it removes it from sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
};
    5348 
    5349 /*
    5350 Allocations and their references in internal data structure look like this:
    5351 
    5352 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
    5353 
    5354  0 +-------+
    5355  | |
    5356  | |
    5357  | |
    5358  +-------+
    5359  | Alloc | 1st[m_1stNullItemsBeginCount]
    5360  +-------+
    5361  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    5362  +-------+
    5363  | ... |
    5364  +-------+
    5365  | Alloc | 1st[1st.size() - 1]
    5366  +-------+
    5367  | |
    5368  | |
    5369  | |
    5370 GetSize() +-------+
    5371 
    5372 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
    5373 
    5374  0 +-------+
    5375  | Alloc | 2nd[0]
    5376  +-------+
    5377  | Alloc | 2nd[1]
    5378  +-------+
    5379  | ... |
    5380  +-------+
    5381  | Alloc | 2nd[2nd.size() - 1]
    5382  +-------+
    5383  | |
    5384  | |
    5385  | |
    5386  +-------+
    5387  | Alloc | 1st[m_1stNullItemsBeginCount]
    5388  +-------+
    5389  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    5390  +-------+
    5391  | ... |
    5392  +-------+
    5393  | Alloc | 1st[1st.size() - 1]
    5394  +-------+
    5395  | |
    5396 GetSize() +-------+
    5397 
    5398 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
    5399 
    5400  0 +-------+
    5401  | |
    5402  | |
    5403  | |
    5404  +-------+
    5405  | Alloc | 1st[m_1stNullItemsBeginCount]
    5406  +-------+
    5407  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    5408  +-------+
    5409  | ... |
    5410  +-------+
    5411  | Alloc | 1st[1st.size() - 1]
    5412  +-------+
    5413  | |
    5414  | |
    5415  | |
    5416  +-------+
    5417  | Alloc | 2nd[2nd.size() - 1]
    5418  +-------+
    5419  | ... |
    5420  +-------+
    5421  | Alloc | 2nd[1]
    5422  +-------+
    5423  | Alloc | 2nd[0]
    5424 GetSize() +-------+
    5425 
    5426 */
class VmaBlockMetadata_Linear : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
public:
    VmaBlockMetadata_Linear(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Linear();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    virtual size_t GetAllocationCount() const;
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    virtual bool IsEmpty() const { return GetAllocationCount() == 0; }

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    virtual VkResult CheckCorruption(const void* pBlockData);

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation);
    virtual void FreeAtOffset(VkDeviceSize offset);

private:
    /*
    There are two suballocation vectors, used in ping-pong way.
    The one with index m_1stVectorIndex is called 1st.
    The one with index (m_1stVectorIndex ^ 1) is called 2nd.
    2nd can be non-empty only when 1st is not empty.
    When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
    */
    typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;

    enum SECOND_VECTOR_MODE
    {
        SECOND_VECTOR_EMPTY,
        /*
        Suballocations in 2nd vector are created later than the ones in 1st, but they
        all have smaller offset.
        */
        SECOND_VECTOR_RING_BUFFER,
        /*
        Suballocations in 2nd vector are upper side of double stack.
        They all have offsets higher than those in 1st vector.
        Top of this stack means smaller offsets, but higher indices in this vector.
        */
        SECOND_VECTOR_DOUBLE_STACK,
    };

    VkDeviceSize m_SumFreeSize;
    SuballocationVectorType m_Suballocations0, m_Suballocations1;
    uint32_t m_1stVectorIndex;
    SECOND_VECTOR_MODE m_2ndVectorMode;

    // Convenience accessors that resolve the ping-pong indirection described above.
    SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
    const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }

    // Number of items in 1st vector with hAllocation = null at the beginning.
    size_t m_1stNullItemsBeginCount;
    // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
    size_t m_1stNullItemsMiddleCount;
    // Number of items in 2nd vector with hAllocation = null.
    size_t m_2ndNullItemsCount;

    bool ShouldCompact1st() const;
    void CleanupAfterFree();
};
    5525 
    5526 /*
    5527 - GetSize() is the original size of allocated memory block.
    5528 - m_UsableSize is this size aligned down to a power of two.
    5529  All allocations and calculations happen relative to m_UsableSize.
    5530 - GetUnusableSize() is the difference between them.
    5531  It is repoted as separate, unused range, not available for allocations.
    5532 
    5533 Node at level 0 has size = m_UsableSize.
    5534 Each next level contains nodes with size 2 times smaller than current level.
    5535 m_LevelCount is the maximum number of levels to use in the current object.
    5536 */
class VmaBlockMetadata_Buddy : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
public:
    VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Buddy();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    virtual size_t GetAllocationCount() const { return m_AllocationCount; }
    // The unusable tail (beyond the power-of-two m_UsableSize) counts as free here.
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    // Corruption detection is not supported by the buddy algorithm.
    virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
    virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }

private:
    static const VkDeviceSize MIN_NODE_SIZE = 32;
    static const size_t MAX_LEVELS = 30;

    // Counters accumulated while walking the tree during Validate().
    struct ValidationContext
    {
        size_t calculatedAllocationCount;
        size_t calculatedFreeCount;
        VkDeviceSize calculatedSumFreeSize;

        ValidationContext() :
            calculatedAllocationCount(0),
            calculatedFreeCount(0),
            calculatedSumFreeSize(0) { }
    };

    // Node of the binary buddy tree. Active union member depends on `type`.
    struct Node
    {
        VkDeviceSize offset;
        enum TYPE
        {
            TYPE_FREE,
            TYPE_ALLOCATION,
            TYPE_SPLIT,
            TYPE_COUNT
        } type;
        Node* parent;
        Node* buddy;

        union
        {
            struct
            {
                Node* prev;
                Node* next;
            } free; // Links in the per-level free list (TYPE_FREE).
            struct
            {
                VmaAllocation alloc;
            } allocation; // TYPE_ALLOCATION.
            struct
            {
                Node* leftChild;
            } split; // TYPE_SPLIT.
        };
    };

    // Size of the memory block aligned down to a power of two.
    VkDeviceSize m_UsableSize;
    uint32_t m_LevelCount;

    Node* m_Root;
    // Per-level lists of free nodes.
    struct {
        Node* front;
        Node* back;
    } m_FreeList[MAX_LEVELS];
    // Number of nodes in the tree with type == TYPE_ALLOCATION.
    size_t m_AllocationCount;
    // Number of nodes in the tree with type == TYPE_FREE.
    size_t m_FreeCount;
    // This includes space wasted due to internal fragmentation. Doesn't include unusable size.
    VkDeviceSize m_SumFreeSize;

    VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
    void DeleteNode(Node* node);
    bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
    uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
    // Node size halves with each level: level 0 covers the whole usable size.
    inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
    // Alloc passed just for validation. Can be null.
    void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
    void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
    // Adds node to the front of FreeList at given level.
    // node->type must be FREE.
    // node->free.prev, next can be undefined.
    void AddToFreeListFront(uint32_t level, Node* node);
    // Removes node from FreeList at given level.
    // node->type must be FREE.
    // node->free.prev, next stay untouched.
    void RemoveFromFreeList(uint32_t level, Node* node);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
#endif
};
    5673 
    5674 /*
    5675 Represents a single block of device memory (`VkDeviceMemory`) with all the
    5676 data about its regions (aka suballocations, #VmaAllocation), assigned and free.
    5677 
    5678 Thread-safety: This class must be externally synchronized.
    5679 */
class VmaDeviceMemoryBlock
{
    VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
public:
    // Bookkeeping of suballocations inside this block.
    VmaBlockMetadata* m_pMetadata;

    VmaDeviceMemoryBlock(VmaAllocator hAllocator);

    ~VmaDeviceMemoryBlock()
    {
        VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
        VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    }

    // Always call after construction.
    void Init(
        VmaAllocator hAllocator,
        uint32_t newMemoryTypeIndex,
        VkDeviceMemory newMemory,
        VkDeviceSize newSize,
        uint32_t id,
        uint32_t algorithm);
    // Always call before destruction.
    void Destroy(VmaAllocator allocator);

    VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    uint32_t GetId() const { return m_Id; }
    void* GetMappedData() const { return m_pMappedData; }

    // Validates all data structures inside this object. If not valid, returns false.
    bool Validate() const;

    VkResult CheckCorruption(VmaAllocator hAllocator);

    // Reference-counted map/unmap of the whole block's memory.
    // ppData can be null.
    VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
    void Unmap(VmaAllocator hAllocator, uint32_t count);

    VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
    VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);

    VkResult BindBufferMemory(
        const VmaAllocator hAllocator,
        const VmaAllocation hAllocation,
        VkBuffer hBuffer);
    VkResult BindImageMemory(
        const VmaAllocator hAllocator,
        const VmaAllocation hAllocation,
        VkImage hImage);

private:
    uint32_t m_MemoryTypeIndex;
    uint32_t m_Id;
    VkDeviceMemory m_hMemory;

    /*
    Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
    Also protects m_MapCount, m_pMappedData.
    Allocations, deallocations, any change in m_pMetadata is protected by parent's VmaBlockVector::m_Mutex.
    */
    VMA_MUTEX m_Mutex;
    uint32_t m_MapCount;
    void* m_pMappedData;
};
    5745 
    5746 struct VmaPointerLess
    5747 {
    5748  bool operator()(const void* lhs, const void* rhs) const
    5749  {
    5750  return lhs < rhs;
    5751  }
    5752 };
    5753 
// One planned move of defragmentation: copy `size` bytes from
// (srcBlockIndex, srcOffset) to (dstBlockIndex, dstOffset).
struct VmaDefragmentationMove
{
    size_t srcBlockIndex;
    size_t dstBlockIndex;
    VkDeviceSize srcOffset;
    VkDeviceSize dstOffset;
    VkDeviceSize size;
};
    5762 
    5763 class VmaDefragmentationAlgorithm;
    5764 
    5765 /*
    5766 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
    5767 Vulkan memory type.
    5768 
    5769 Synchronized internally with a mutex.
    5770 */
struct VmaBlockVector
{
    VMA_CLASS_NO_COPY(VmaBlockVector)
public:
    VmaBlockVector(
        VmaAllocator hAllocator,
        uint32_t memoryTypeIndex,
        VkDeviceSize preferredBlockSize,
        size_t minBlockCount,
        size_t maxBlockCount,
        VkDeviceSize bufferImageGranularity,
        uint32_t frameInUseCount,
        bool isCustomPool,
        bool explicitBlockSize,
        uint32_t algorithm);
    ~VmaBlockVector();

    // Ensures at least m_MinBlockCount blocks exist, creating them if necessary.
    VkResult CreateMinBlocks();

    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
    VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
    uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
    uint32_t GetAlgorithm() const { return m_Algorithm; }

    void GetPoolStats(VmaPoolStats* pStats);

    bool IsEmpty() const { return m_Blocks.empty(); }
    bool IsCorruptionDetectionEnabled() const;

    // Allocates `allocationCount` allocations at once; results are written to
    // pAllocations. On failure none of the allocations is kept (see AllocatePage).
    VkResult Allocate(
        VmaPool hCurrentPool,
        uint32_t currentFrameIndex,
        VkDeviceSize size,
        VkDeviceSize alignment,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        size_t allocationCount,
        VmaAllocation* pAllocations);

    void Free(
        VmaAllocation hAllocation);

    // Adds statistics of this BlockVector to pStats.
    void AddStats(VmaStats* pStats);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json);
#endif

    void MakePoolAllocationsLost(
        uint32_t currentFrameIndex,
        size_t* pLostAllocationCount);
    VkResult CheckCorruption();

    // Saves results in pCtx->res.
    // CPU and GPU byte/allocation budgets are in/out: decremented by the amount
    // actually consumed by this vector's defragmentation pass.
    void Defragment(
        class VmaBlockVectorDefragmentationContext* pCtx,
        VmaDefragmentationStats* pStats,
        VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
        VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
        VkCommandBuffer commandBuffer);
    void DefragmentationEnd(
        class VmaBlockVectorDefragmentationContext* pCtx,
        VmaDefragmentationStats* pStats);

    ////////////////////////////////////////////////////////////////////////////////
    // To be used only while the m_Mutex is locked. Used during defragmentation.

    size_t GetBlockCount() const { return m_Blocks.size(); }
    VmaDeviceMemoryBlock* GetBlock(size_t index) const { return m_Blocks[index]; }
    size_t CalcAllocationCount() const;
    bool IsBufferImageGranularityConflictPossible() const;

private:
    friend class VmaDefragmentationAlgorithm_Generic;

    const VmaAllocator m_hAllocator;
    const uint32_t m_MemoryTypeIndex;
    const VkDeviceSize m_PreferredBlockSize;
    const size_t m_MinBlockCount;
    const size_t m_MaxBlockCount;
    const VkDeviceSize m_BufferImageGranularity;
    const uint32_t m_FrameInUseCount;
    const bool m_IsCustomPool;
    const bool m_ExplicitBlockSize;
    const uint32_t m_Algorithm;
    /* There can be at most one allocation that is completely empty - a
    hysteresis to avoid pessimistic case of alternating creation and destruction
    of a VkDeviceMemory. */
    bool m_HasEmptyBlock;
    VMA_RW_MUTEX m_Mutex;
    // Incrementally sorted by sumFreeSize, ascending.
    VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
    uint32_t m_NextBlockId;

    VkDeviceSize CalcMaxBlockSize() const;

    // Finds and removes given block from vector.
    void Remove(VmaDeviceMemoryBlock* pBlock);

    // Performs single step in sorting m_Blocks. They may not be fully sorted
    // after this call.
    void IncrementallySortBlocks();

    // Allocates a single allocation; called by Allocate() once per requested page.
    VkResult AllocatePage(
        VmaPool hCurrentPool,
        uint32_t currentFrameIndex,
        VkDeviceSize size,
        VkDeviceSize alignment,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    // To be used only without CAN_MAKE_OTHER_LOST flag.
    VkResult AllocateFromBlock(
        VmaDeviceMemoryBlock* pBlock,
        VmaPool hCurrentPool,
        uint32_t currentFrameIndex,
        VkDeviceSize size,
        VkDeviceSize alignment,
        VmaAllocationCreateFlags allocFlags,
        void* pUserData,
        VmaSuballocationType suballocType,
        uint32_t strategy,
        VmaAllocation* pAllocation);

    VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);

    // Saves result to pCtx->res.
    void ApplyDefragmentationMovesCpu(
        class VmaBlockVectorDefragmentationContext* pDefragCtx,
        const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves);
    // Saves result to pCtx->res.
    void ApplyDefragmentationMovesGpu(
        class VmaBlockVectorDefragmentationContext* pDefragCtx,
        const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
        VkCommandBuffer commandBuffer);

    /*
    Used during defragmentation. pDefragmentationStats is optional. It's in/out
    - updated with new data.
    */
    void FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats);
};
    5916 
// Implementation of the VmaPool handle: a custom memory pool, which is just a
// VmaBlockVector configured from VmaPoolCreateInfo plus a numeric id.
struct VmaPool_T
{
    VMA_CLASS_NO_COPY(VmaPool_T)
public:
    VmaBlockVector m_BlockVector;

    VmaPool_T(
        VmaAllocator hAllocator,
        const VmaPoolCreateInfo& createInfo,
        VkDeviceSize preferredBlockSize);
    ~VmaPool_T();

    uint32_t GetId() const { return m_Id; }
    // Id can be assigned only once, right after creation.
    void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }

#if VMA_STATS_STRING_ENABLED
    //void PrintDetailedMap(class VmaStringBuilder& sb);
#endif

private:
    uint32_t m_Id;
};
    5939 
    5940 /*
    5941 Performs defragmentation:
    5942 
    5943 - Updates `pBlockVector->m_pMetadata`.
    5944 - Updates allocations by calling ChangeBlockAllocation() or ChangeOffset().
    5945 - Does not move actual data, only returns requested moves as `moves`.
    5946 */
// Abstract base class for defragmentation algorithms (see the _Generic and
// _Fast implementations below). Usage: register allocations via AddAllocation()
// or AddAll(), then call Defragment() to compute the list of moves.
class VmaDefragmentationAlgorithm
{
    VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm)
public:
    VmaDefragmentationAlgorithm(
        VmaAllocator hAllocator,
        VmaBlockVector* pBlockVector,
        uint32_t currentFrameIndex) :
        m_hAllocator(hAllocator),
        m_pBlockVector(pBlockVector),
        m_CurrentFrameIndex(currentFrameIndex)
    {
    }
    virtual ~VmaDefragmentationAlgorithm()
    {
    }

    // pChanged is optional out-parameter: set to VK_TRUE if the allocation was moved.
    virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) = 0;
    // Consider every allocation in the block vector movable.
    virtual void AddAll() = 0;

    // Appends computed moves to `moves`, respecting the given budgets.
    virtual VkResult Defragment(
        VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove) = 0;

    virtual VkDeviceSize GetBytesMoved() const = 0;
    virtual uint32_t GetAllocationsMoved() const = 0;

protected:
    VmaAllocator const m_hAllocator;
    VmaBlockVector* const m_pBlockVector;
    const uint32_t m_CurrentFrameIndex;

    // Pairs an allocation with its optional "was changed" output flag.
    struct AllocationInfo
    {
        VmaAllocation m_hAllocation;
        VkBool32* m_pChanged;

        AllocationInfo() :
            m_hAllocation(VK_NULL_HANDLE),
            m_pChanged(VMA_NULL)
        {
        }
        AllocationInfo(VmaAllocation hAlloc, VkBool32* pChanged) :
            m_hAllocation(hAlloc),
            m_pChanged(pChanged)
        {
        }
    };
};
    5997 
// General-purpose defragmentation algorithm: works per registered allocation,
// moving larger allocations first into blocks chosen by
// BlockInfoCompareMoveDestination, over multiple rounds (see DefragmentRound).
class VmaDefragmentationAlgorithm_Generic : public VmaDefragmentationAlgorithm
{
    VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Generic)
public:
    VmaDefragmentationAlgorithm_Generic(
        VmaAllocator hAllocator,
        VmaBlockVector* pBlockVector,
        uint32_t currentFrameIndex,
        bool overlappingMoveSupported);
    virtual ~VmaDefragmentationAlgorithm_Generic();

    virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
    virtual void AddAll() { m_AllAllocations = true; }

    virtual VkResult Defragment(
        VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove);

    virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
    virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }

private:
    uint32_t m_AllocationCount;
    bool m_AllAllocations;

    VkDeviceSize m_BytesMoved;
    uint32_t m_AllocationsMoved;

    // Comparator: sorts allocations by size, descending.
    struct AllocationInfoSizeGreater
    {
        bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
        {
            return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
        }
    };

    // Comparator: sorts allocations by offset, descending.
    struct AllocationInfoOffsetGreater
    {
        bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
        {
            return lhs.m_hAllocation->GetOffset() > rhs.m_hAllocation->GetOffset();
        }
    };

    // Per-block bookkeeping: the block, its index in the original vector, and
    // the subset of its allocations registered for defragmentation.
    struct BlockInfo
    {
        size_t m_OriginalBlockIndex;
        VmaDeviceMemoryBlock* m_pBlock;
        bool m_HasNonMovableAllocations;
        VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;

        BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
            m_OriginalBlockIndex(SIZE_MAX),
            m_pBlock(VMA_NULL),
            // Pessimistic default until CalcHasNonMovableAllocations() is called.
            m_HasNonMovableAllocations(true),
            m_Allocations(pAllocationCallbacks)
        {
        }

        // The block has non-movable allocations when not all of its allocations
        // were registered for defragmentation.
        void CalcHasNonMovableAllocations()
        {
            const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
            const size_t defragmentAllocCount = m_Allocations.size();
            m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
        }

        void SortAllocationsBySizeDescending()
        {
            VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
        }

        void SortAllocationsByOffsetDescending()
        {
            VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoOffsetGreater());
        }
    };

    // Heterogeneous comparator for binary search of BlockInfo by block pointer.
    struct BlockPointerLess
    {
        bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
        {
            return pLhsBlockInfo->m_pBlock < pRhsBlock;
        }
        bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
        {
            return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
        }
    };

    // 1. Blocks with some non-movable allocations go first.
    // 2. Blocks with smaller sumFreeSize go first.
    struct BlockInfoCompareMoveDestination
    {
        bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
        {
            if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
            {
                return true;
            }
            if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
            {
                return false;
            }
            if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
            {
                return true;
            }
            return false;
        }
    };

    typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
    BlockInfoVector m_Blocks;

    // One pass of the algorithm; Defragment() calls it repeatedly until no
    // further progress can be made or a budget is exhausted.
    VkResult DefragmentRound(
        VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove);

    size_t CalcBlocksWithNonMovableCount() const;

    static bool MoveMakesSense(
        size_t dstBlockIndex, VkDeviceSize dstOffset,
        size_t srcBlockIndex, VkDeviceSize srcOffset);
};
    6124 
// Fast defragmentation algorithm: processes whole blocks rather than tracking
// individual allocations (AddAllocation only counts them), compacting
// suballocations with the help of a small free-space database.
class VmaDefragmentationAlgorithm_Fast : public VmaDefragmentationAlgorithm
{
    VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Fast)
public:
    VmaDefragmentationAlgorithm_Fast(
        VmaAllocator hAllocator,
        VmaBlockVector* pBlockVector,
        uint32_t currentFrameIndex,
        bool overlappingMoveSupported);
    virtual ~VmaDefragmentationAlgorithm_Fast();

    // Individual allocations are not remembered, only counted.
    virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) { ++m_AllocationCount; }
    virtual void AddAll() { m_AllAllocations = true; }

    virtual VkResult Defragment(
        VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove);

    virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
    virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }

private:
    struct BlockInfo
    {
        size_t origBlockIndex;
    };

    // Fixed-capacity (MAX_COUNT) registry of known free regions. Register()
    // remembers a region, evicting the smallest entry if full; Fetch() hands
    // out a region that fits a given size/alignment and shrinks or invalidates it.
    class FreeSpaceDatabase
    {
    public:
        FreeSpaceDatabase()
        {
            // Mark all entries invalid (blockInfoIndex == SIZE_MAX).
            FreeSpace s = {};
            s.blockInfoIndex = SIZE_MAX;
            for(size_t i = 0; i < MAX_COUNT; ++i)
            {
                m_FreeSpaces[i] = s;
            }
        }

        void Register(size_t blockInfoIndex, VkDeviceSize offset, VkDeviceSize size)
        {
            // Regions below the threshold are not worth tracking.
            if(size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
            {
                return;
            }

            // Find first invalid or the smallest structure.
            size_t bestIndex = SIZE_MAX;
            for(size_t i = 0; i < MAX_COUNT; ++i)
            {
                // Empty structure.
                if(m_FreeSpaces[i].blockInfoIndex == SIZE_MAX)
                {
                    bestIndex = i;
                    break;
                }
                if(m_FreeSpaces[i].size < size &&
                    (bestIndex == SIZE_MAX || m_FreeSpaces[bestIndex].size > m_FreeSpaces[i].size))
                {
                    bestIndex = i;
                }
            }

            // Overwrite the chosen slot; if all slots hold larger regions, drop this one.
            if(bestIndex != SIZE_MAX)
            {
                m_FreeSpaces[bestIndex].blockInfoIndex = blockInfoIndex;
                m_FreeSpaces[bestIndex].offset = offset;
                m_FreeSpaces[bestIndex].size = size;
            }
        }

        // Returns true and fills the out-parameters if a registered region can
        // hold `size` bytes at `alignment`. Prefers the region leaving the most
        // space after the allocation.
        bool Fetch(VkDeviceSize alignment, VkDeviceSize size,
            size_t& outBlockInfoIndex, VkDeviceSize& outDstOffset)
        {
            size_t bestIndex = SIZE_MAX;
            VkDeviceSize bestFreeSpaceAfter = 0;
            for(size_t i = 0; i < MAX_COUNT; ++i)
            {
                // Structure is valid.
                if(m_FreeSpaces[i].blockInfoIndex != SIZE_MAX)
                {
                    const VkDeviceSize dstOffset = VmaAlignUp(m_FreeSpaces[i].offset, alignment);
                    // Allocation fits into this structure.
                    if(dstOffset + size <= m_FreeSpaces[i].offset + m_FreeSpaces[i].size)
                    {
                        const VkDeviceSize freeSpaceAfter = (m_FreeSpaces[i].offset + m_FreeSpaces[i].size) -
                            (dstOffset + size);
                        if(bestIndex == SIZE_MAX || freeSpaceAfter > bestFreeSpaceAfter)
                        {
                            bestIndex = i;
                            bestFreeSpaceAfter = freeSpaceAfter;
                        }
                    }
                }
            }

            if(bestIndex != SIZE_MAX)
            {
                outBlockInfoIndex = m_FreeSpaces[bestIndex].blockInfoIndex;
                outDstOffset = VmaAlignUp(m_FreeSpaces[bestIndex].offset, alignment);

                if(bestFreeSpaceAfter >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
                {
                    // Leave this structure for remaining empty space.
                    const VkDeviceSize alignmentPlusSize = (outDstOffset - m_FreeSpaces[bestIndex].offset) + size;
                    m_FreeSpaces[bestIndex].offset += alignmentPlusSize;
                    m_FreeSpaces[bestIndex].size -= alignmentPlusSize;
                }
                else
                {
                    // This structure becomes invalid.
                    m_FreeSpaces[bestIndex].blockInfoIndex = SIZE_MAX;
                }

                return true;
            }

            return false;
        }

    private:
        static const size_t MAX_COUNT = 4;

        struct FreeSpace
        {
            size_t blockInfoIndex; // SIZE_MAX means this structure is invalid.
            VkDeviceSize offset;
            VkDeviceSize size;
        } m_FreeSpaces[MAX_COUNT];
    };

    const bool m_OverlappingMoveSupported;

    uint32_t m_AllocationCount;
    bool m_AllAllocations;

    VkDeviceSize m_BytesMoved;
    uint32_t m_AllocationsMoved;

    VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> > m_BlockInfos;

    void PreprocessMetadata();
    void PostprocessMetadata();
    void InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc);
};
    6272 
// Per-block state used during defragmentation of one block vector:
// a flag word plus the temporary VkBuffer bound to the block (used for
// GPU-side copies; VK_NULL_HANDLE when not created).
struct VmaBlockDefragmentationContext
{
private:
    VMA_CLASS_NO_COPY(VmaBlockDefragmentationContext)
public:
    enum BLOCK_FLAG
    {
        BLOCK_FLAG_USED = 0x00000001,
    };
    uint32_t flags;
    VkBuffer hBuffer;

    VmaBlockDefragmentationContext() :
        flags(0),
        hBuffer(VK_NULL_HANDLE)
    {
    }
};
    6291 
// Defragmentation state for a single VmaBlockVector (one default pool or one
// custom pool). Owns the chosen algorithm object; collects allocations between
// construction and Begin(), which creates the algorithm and feeds them to it.
class VmaBlockVectorDefragmentationContext
{
    VMA_CLASS_NO_COPY(VmaBlockVectorDefragmentationContext)
public:
    // Result of this block vector's defragmentation, written by VmaBlockVector::Defragment.
    VkResult res;
    // True while m_pBlockVector's mutex is held by this context.
    bool mutexLocked;
    // One entry per block of the vector (see VmaBlockDefragmentationContext).
    VmaVector< VmaBlockDefragmentationContext, VmaStlAllocator<VmaBlockDefragmentationContext> > blockContexts;

    VmaBlockVectorDefragmentationContext(
        VmaAllocator hAllocator,
        VmaPool hCustomPool, // Optional.
        VmaBlockVector* pBlockVector,
        uint32_t currFrameIndex,
        uint32_t flags);
    ~VmaBlockVectorDefragmentationContext();

    VmaPool GetCustomPool() const { return m_hCustomPool; }
    VmaBlockVector* GetBlockVector() const { return m_pBlockVector; }
    VmaDefragmentationAlgorithm* GetAlgorithm() const { return m_pAlgorithm; }

    void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
    void AddAll() { m_AllAllocations = true; }

    // Creates m_pAlgorithm and passes the collected allocations to it.
    void Begin(bool overlappingMoveSupported);

private:
    const VmaAllocator m_hAllocator;
    // Null if not from custom pool.
    const VmaPool m_hCustomPool;
    // Redundant, for convenience not to fetch from m_hCustomPool->m_BlockVector or m_hAllocator->m_pBlockVectors.
    VmaBlockVector* const m_pBlockVector;
    const uint32_t m_CurrFrameIndex;
    const uint32_t m_AlgorithmFlags;
    // Owner of this object.
    VmaDefragmentationAlgorithm* m_pAlgorithm;

    struct AllocInfo
    {
        VmaAllocation hAlloc;
        VkBool32* pChanged;
    };
    // Used between constructor and Begin.
    VmaVector< AllocInfo, VmaStlAllocator<AllocInfo> > m_Allocations;
    bool m_AllAllocations;
};
    6337 
// Implementation of the VmaDefragmentationContext handle: top-level state of a
// whole defragmentation operation, spanning any number of default pools and
// custom pools. Owns the per-block-vector contexts.
struct VmaDefragmentationContext_T
{
private:
    VMA_CLASS_NO_COPY(VmaDefragmentationContext_T)
public:
    VmaDefragmentationContext_T(
        VmaAllocator hAllocator,
        uint32_t currFrameIndex,
        uint32_t flags,
        VmaDefragmentationStats* pStats);
    ~VmaDefragmentationContext_T();

    void AddPools(uint32_t poolCount, VmaPool* pPools);
    // pAllocationsChanged is optional: per-allocation VK_TRUE/VK_FALSE output.
    void AddAllocations(
        uint32_t allocationCount,
        VmaAllocation* pAllocations,
        VkBool32* pAllocationsChanged);

    /*
    Returns:
    - `VK_SUCCESS` if succeeded and object can be destroyed immediately.
    - `VK_NOT_READY` if succeeded but the object must remain alive until vmaDefragmentationEnd().
    - Negative value if error occurred and object can be destroyed immediately.
    */
    VkResult Defragment(
        VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
        VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
        VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats);

private:
    const VmaAllocator m_hAllocator;
    const uint32_t m_CurrFrameIndex;
    const uint32_t m_Flags;
    VmaDefragmentationStats* const m_pStats;
    // Owner of these objects.
    VmaBlockVectorDefragmentationContext* m_DefaultPoolContexts[VK_MAX_MEMORY_TYPES];
    // Owner of these objects.
    VmaVector< VmaBlockVectorDefragmentationContext*, VmaStlAllocator<VmaBlockVectorDefragmentationContext*> > m_CustomPoolContexts;
};
    6377 
    6378 #if VMA_RECORDING_ENABLED
    6379 
    6380 class VmaRecorder
    6381 {
    6382 public:
    6383  VmaRecorder();
    6384  VkResult Init(const VmaRecordSettings& settings, bool useMutex);
    6385  void WriteConfiguration(
    6386  const VkPhysicalDeviceProperties& devProps,
    6387  const VkPhysicalDeviceMemoryProperties& memProps,
    6388  bool dedicatedAllocationExtensionEnabled);
    6389  ~VmaRecorder();
    6390 
    6391  void RecordCreateAllocator(uint32_t frameIndex);
    6392  void RecordDestroyAllocator(uint32_t frameIndex);
    6393  void RecordCreatePool(uint32_t frameIndex,
    6394  const VmaPoolCreateInfo& createInfo,
    6395  VmaPool pool);
    6396  void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
    6397  void RecordAllocateMemory(uint32_t frameIndex,
    6398  const VkMemoryRequirements& vkMemReq,
    6399  const VmaAllocationCreateInfo& createInfo,
    6400  VmaAllocation allocation);
    6401  void RecordAllocateMemoryPages(uint32_t frameIndex,
    6402  const VkMemoryRequirements& vkMemReq,
    6403  const VmaAllocationCreateInfo& createInfo,
    6404  uint64_t allocationCount,
    6405  const VmaAllocation* pAllocations);
    6406  void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
    6407  const VkMemoryRequirements& vkMemReq,
    6408  bool requiresDedicatedAllocation,
    6409  bool prefersDedicatedAllocation,
    6410  const VmaAllocationCreateInfo& createInfo,
    6411  VmaAllocation allocation);
    6412  void RecordAllocateMemoryForImage(uint32_t frameIndex,
    6413  const VkMemoryRequirements& vkMemReq,
    6414  bool requiresDedicatedAllocation,
    6415  bool prefersDedicatedAllocation,
    6416  const VmaAllocationCreateInfo& createInfo,
    6417  VmaAllocation allocation);
    6418  void RecordFreeMemory(uint32_t frameIndex,
    6419  VmaAllocation allocation);
    6420  void RecordFreeMemoryPages(uint32_t frameIndex,
    6421  uint64_t allocationCount,
    6422  const VmaAllocation* pAllocations);
    6423  void RecordResizeAllocation(
    6424  uint32_t frameIndex,
    6425  VmaAllocation allocation,
    6426  VkDeviceSize newSize);
    6427  void RecordSetAllocationUserData(uint32_t frameIndex,
    6428  VmaAllocation allocation,
    6429  const void* pUserData);
    6430  void RecordCreateLostAllocation(uint32_t frameIndex,
    6431  VmaAllocation allocation);
    6432  void RecordMapMemory(uint32_t frameIndex,
    6433  VmaAllocation allocation);
    6434  void RecordUnmapMemory(uint32_t frameIndex,
    6435  VmaAllocation allocation);
    6436  void RecordFlushAllocation(uint32_t frameIndex,
    6437  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    6438  void RecordInvalidateAllocation(uint32_t frameIndex,
    6439  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    6440  void RecordCreateBuffer(uint32_t frameIndex,
    6441  const VkBufferCreateInfo& bufCreateInfo,
    6442  const VmaAllocationCreateInfo& allocCreateInfo,
    6443  VmaAllocation allocation);
    6444  void RecordCreateImage(uint32_t frameIndex,
    6445  const VkImageCreateInfo& imageCreateInfo,
    6446  const VmaAllocationCreateInfo& allocCreateInfo,
    6447  VmaAllocation allocation);
    6448  void RecordDestroyBuffer(uint32_t frameIndex,
    6449  VmaAllocation allocation);
    6450  void RecordDestroyImage(uint32_t frameIndex,
    6451  VmaAllocation allocation);
    6452  void RecordTouchAllocation(uint32_t frameIndex,
    6453  VmaAllocation allocation);
    6454  void RecordGetAllocationInfo(uint32_t frameIndex,
    6455  VmaAllocation allocation);
    6456  void RecordMakePoolAllocationsLost(uint32_t frameIndex,
    6457  VmaPool pool);
    6458  void RecordDefragmentationBegin(uint32_t frameIndex,
    6459  const VmaDefragmentationInfo2& info,
    6461  void RecordDefragmentationEnd(uint32_t frameIndex,
    6463 
    6464 private:
    6465  struct CallParams
    6466  {
    6467  uint32_t threadId;
    6468  double time;
    6469  };
    6470 
    6471  class UserDataString
    6472  {
    6473  public:
    6474  UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
    6475  const char* GetString() const { return m_Str; }
    6476 
    6477  private:
    6478  char m_PtrStr[17];
    6479  const char* m_Str;
    6480  };
    6481 
    6482  bool m_UseMutex;
    6483  VmaRecordFlags m_Flags;
    6484  FILE* m_File;
    6485  VMA_MUTEX m_FileMutex;
    6486  int64_t m_Freq;
    6487  int64_t m_StartCounter;
    6488 
    6489  void GetBasicParams(CallParams& outParams);
    6490 
    6491  // T must be a pointer type, e.g. VmaAllocation, VmaPool.
    6492  template<typename T>
    6493  void PrintPointerList(uint64_t count, const T* pItems)
    6494  {
    6495  if(count)
    6496  {
    6497  fprintf(m_File, "%p", pItems[0]);
    6498  for(uint64_t i = 1; i < count; ++i)
    6499  {
    6500  fprintf(m_File, " %p", pItems[i]);
    6501  }
    6502  }
    6503  }
    6504 
    6505  void PrintPointerList(uint64_t count, const VmaAllocation* pItems);
    6506  void Flush();
    6507 };
    6508 
    6509 #endif // #if VMA_RECORDING_ENABLED
    6510 
    6511 // Main allocator object.
    6512 struct VmaAllocator_T
    6513 {
    6514  VMA_CLASS_NO_COPY(VmaAllocator_T)
    6515 public:
    6516  bool m_UseMutex;
    6517  bool m_UseKhrDedicatedAllocation;
    6518  VkDevice m_hDevice;
    6519  bool m_AllocationCallbacksSpecified;
    6520  VkAllocationCallbacks m_AllocationCallbacks;
    6521  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
    6522 
    6523  // Number of bytes free out of limit, or VK_WHOLE_SIZE if no limit for that heap.
    6524  VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
    6525  VMA_MUTEX m_HeapSizeLimitMutex;
    6526 
    6527  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
    6528  VkPhysicalDeviceMemoryProperties m_MemProps;
    6529 
    6530  // Default pools.
    6531  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
    6532 
    6533  // Each vector is sorted by memory (handle value).
    6534  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
    6535  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
    6536  VMA_RW_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
    6537 
    6538  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
    6539  VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
    6540  ~VmaAllocator_T();
    6541 
    6542  const VkAllocationCallbacks* GetAllocationCallbacks() const
    6543  {
    6544  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
    6545  }
    6546  const VmaVulkanFunctions& GetVulkanFunctions() const
    6547  {
    6548  return m_VulkanFunctions;
    6549  }
    6550 
    6551  VkDeviceSize GetBufferImageGranularity() const
    6552  {
    6553  return VMA_MAX(
    6554  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
    6555  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
    6556  }
    6557 
    6558  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
    6559  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
    6560 
    6561  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
    6562  {
    6563  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
    6564  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
    6565  }
    6566  // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
    6567  bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
    6568  {
    6569  return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
    6570  VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    6571  }
    6572  // Minimum alignment for all allocations in specific memory type.
    6573  VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
    6574  {
    6575  return IsMemoryTypeNonCoherent(memTypeIndex) ?
    6576  VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
    6577  (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
    6578  }
    6579 
    6580  bool IsIntegratedGpu() const
    6581  {
    6582  return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
    6583  }
    6584 
    6585 #if VMA_RECORDING_ENABLED
    6586  VmaRecorder* GetRecorder() const { return m_pRecorder; }
    6587 #endif
    6588 
    6589  void GetBufferMemoryRequirements(
    6590  VkBuffer hBuffer,
    6591  VkMemoryRequirements& memReq,
    6592  bool& requiresDedicatedAllocation,
    6593  bool& prefersDedicatedAllocation) const;
    6594  void GetImageMemoryRequirements(
    6595  VkImage hImage,
    6596  VkMemoryRequirements& memReq,
    6597  bool& requiresDedicatedAllocation,
    6598  bool& prefersDedicatedAllocation) const;
    6599 
    6600  // Main allocation function.
    6601  VkResult AllocateMemory(
    6602  const VkMemoryRequirements& vkMemReq,
    6603  bool requiresDedicatedAllocation,
    6604  bool prefersDedicatedAllocation,
    6605  VkBuffer dedicatedBuffer,
    6606  VkImage dedicatedImage,
    6607  const VmaAllocationCreateInfo& createInfo,
    6608  VmaSuballocationType suballocType,
    6609  size_t allocationCount,
    6610  VmaAllocation* pAllocations);
    6611 
    6612  // Main deallocation function.
    6613  void FreeMemory(
    6614  size_t allocationCount,
    6615  const VmaAllocation* pAllocations);
    6616 
    6617  VkResult ResizeAllocation(
    6618  const VmaAllocation alloc,
    6619  VkDeviceSize newSize);
    6620 
    6621  void CalculateStats(VmaStats* pStats);
    6622 
    6623 #if VMA_STATS_STRING_ENABLED
    6624  void PrintDetailedMap(class VmaJsonWriter& json);
    6625 #endif
    6626 
    6627  VkResult DefragmentationBegin(
    6628  const VmaDefragmentationInfo2& info,
    6629  VmaDefragmentationStats* pStats,
    6630  VmaDefragmentationContext* pContext);
    6631  VkResult DefragmentationEnd(
    6632  VmaDefragmentationContext context);
    6633 
    6634  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
    6635  bool TouchAllocation(VmaAllocation hAllocation);
    6636 
    6637  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
    6638  void DestroyPool(VmaPool pool);
    6639  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
    6640 
    6641  void SetCurrentFrameIndex(uint32_t frameIndex);
    6642  uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }
    6643 
    6644  void MakePoolAllocationsLost(
    6645  VmaPool hPool,
    6646  size_t* pLostAllocationCount);
    6647  VkResult CheckPoolCorruption(VmaPool hPool);
    6648  VkResult CheckCorruption(uint32_t memoryTypeBits);
    6649 
    6650  void CreateLostAllocation(VmaAllocation* pAllocation);
    6651 
    6652  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
    6653  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
    6654 
    6655  VkResult Map(VmaAllocation hAllocation, void** ppData);
    6656  void Unmap(VmaAllocation hAllocation);
    6657 
    6658  VkResult BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer);
    6659  VkResult BindImageMemory(VmaAllocation hAllocation, VkImage hImage);
    6660 
    6661  void FlushOrInvalidateAllocation(
    6662  VmaAllocation hAllocation,
    6663  VkDeviceSize offset, VkDeviceSize size,
    6664  VMA_CACHE_OPERATION op);
    6665 
    6666  void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);
    6667 
    6668 private:
    6669  VkDeviceSize m_PreferredLargeHeapBlockSize;
    6670 
    6671  VkPhysicalDevice m_PhysicalDevice;
    6672  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
    6673 
    6674  VMA_RW_MUTEX m_PoolsMutex;
    6675  // Protected by m_PoolsMutex. Sorted by pointer value.
    6676  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
    6677  uint32_t m_NextPoolId;
    6678 
    6679  VmaVulkanFunctions m_VulkanFunctions;
    6680 
    6681 #if VMA_RECORDING_ENABLED
    6682  VmaRecorder* m_pRecorder;
    6683 #endif
    6684 
    6685  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
    6686 
    6687  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
    6688 
    6689  VkResult AllocateMemoryOfType(
    6690  VkDeviceSize size,
    6691  VkDeviceSize alignment,
    6692  bool dedicatedAllocation,
    6693  VkBuffer dedicatedBuffer,
    6694  VkImage dedicatedImage,
    6695  const VmaAllocationCreateInfo& createInfo,
    6696  uint32_t memTypeIndex,
    6697  VmaSuballocationType suballocType,
    6698  size_t allocationCount,
    6699  VmaAllocation* pAllocations);
    6700 
    6701  // Helper function only to be used inside AllocateDedicatedMemory.
    6702  VkResult AllocateDedicatedMemoryPage(
    6703  VkDeviceSize size,
    6704  VmaSuballocationType suballocType,
    6705  uint32_t memTypeIndex,
    6706  const VkMemoryAllocateInfo& allocInfo,
    6707  bool map,
    6708  bool isUserDataString,
    6709  void* pUserData,
    6710  VmaAllocation* pAllocation);
    6711 
    6712  // Allocates and registers new VkDeviceMemory specifically for dedicated allocations.
    6713  VkResult AllocateDedicatedMemory(
    6714  VkDeviceSize size,
    6715  VmaSuballocationType suballocType,
    6716  uint32_t memTypeIndex,
    6717  bool map,
    6718  bool isUserDataString,
    6719  void* pUserData,
    6720  VkBuffer dedicatedBuffer,
    6721  VkImage dedicatedImage,
    6722  size_t allocationCount,
    6723  VmaAllocation* pAllocations);
    6724 
    6725  // Tries to free pMemory as Dedicated Memory. Returns true if found and freed.
    6726  void FreeDedicatedMemory(VmaAllocation allocation);
    6727 };
    6728 
    6730 // Memory allocation #2 after VmaAllocator_T definition
    6731 
// Convenience overload: allocates raw memory through the callbacks stored in the allocator object.
static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
{
    return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
}
    6736 
// Convenience overload: frees memory obtained from VmaMalloc(hAllocator, ...).
static void VmaFree(VmaAllocator hAllocator, void* ptr)
{
    VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
}
    6741 
// Allocates (but does not construct) storage for a single T using the allocator's callbacks.
template<typename T>
static T* VmaAllocate(VmaAllocator hAllocator)
{
    return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
}
    6747 
// Allocates (but does not construct) storage for `count` contiguous Ts using the allocator's callbacks.
template<typename T>
static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
{
    return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
}
    6753 
    6754 template<typename T>
    6755 static void vma_delete(VmaAllocator hAllocator, T* ptr)
    6756 {
    6757  if(ptr != VMA_NULL)
    6758  {
    6759  ptr->~T();
    6760  VmaFree(hAllocator, ptr);
    6761  }
    6762 }
    6763 
    6764 template<typename T>
    6765 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
    6766 {
    6767  if(ptr != VMA_NULL)
    6768  {
    6769  for(size_t i = count; i--; )
    6770  ptr[i].~T();
    6771  VmaFree(hAllocator, ptr);
    6772  }
    6773 }
    6774 
    6776 // VmaStringBuilder
    6777 
    6778 #if VMA_STATS_STRING_ENABLED
    6779 
// Simple growable character buffer used to build the stats string.
// The data is NOT null-terminated; use GetLength() together with GetData().
class VmaStringBuilder
{
public:
    VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
    size_t GetLength() const { return m_Data.size(); }
    const char* GetData() const { return m_Data.data(); }

    void Add(char ch) { m_Data.push_back(ch); }
    void Add(const char* pStr);          // Appends a null-terminated string (terminator not stored).
    void AddNewLine() { Add('\n'); }
    void AddNumber(uint32_t num);        // Appends decimal representation.
    void AddNumber(uint64_t num);        // Appends decimal representation.
    void AddPointer(const void* ptr);    // Appends textual representation of a pointer value.

private:
    // Character storage, allocated through the owning allocator's callbacks.
    VmaVector< char, VmaStlAllocator<char> > m_Data;
};
    6797 
    6798 void VmaStringBuilder::Add(const char* pStr)
    6799 {
    6800  const size_t strLen = strlen(pStr);
    6801  if(strLen > 0)
    6802  {
    6803  const size_t oldCount = m_Data.size();
    6804  m_Data.resize(oldCount + strLen);
    6805  memcpy(m_Data.data() + oldCount, pStr, strLen);
    6806  }
    6807 }
    6808 
void VmaStringBuilder::AddNumber(uint32_t num)
{
    // 10 decimal digits max for uint32_t + null terminator.
    char buf[11];
    VmaUint32ToStr(buf, sizeof(buf), num);
    Add(buf);
}
    6815 
void VmaStringBuilder::AddNumber(uint64_t num)
{
    // 20 decimal digits max for uint64_t + null terminator.
    char buf[21];
    VmaUint64ToStr(buf, sizeof(buf), num);
    Add(buf);
}
    6822 
void VmaStringBuilder::AddPointer(const void* ptr)
{
    // Large enough for a 64-bit pointer rendered by VmaPtrToStr + null terminator.
    char buf[21];
    VmaPtrToStr(buf, sizeof(buf), ptr);
    Add(buf);
}
    6829 
    6830 #endif // #if VMA_STATS_STRING_ENABLED
    6831 
    6833 // VmaJsonWriter
    6834 
    6835 #if VMA_STATS_STRING_ENABLED
    6836 
// Streaming writer that emits well-formed JSON into a VmaStringBuilder.
// Nesting is tracked on an explicit stack; inside an object, written values
// must alternate string key / value (enforced with asserts in BeginValue).
class VmaJsonWriter
{
    VMA_CLASS_NO_COPY(VmaJsonWriter)
public:
    VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
    ~VmaJsonWriter();

    // singleLine == true suppresses newlines/indentation inside the collection.
    void BeginObject(bool singleLine = false);
    void EndObject();

    void BeginArray(bool singleLine = false);
    void EndArray();

    // Writes a complete quoted string value (or object key).
    void WriteString(const char* pStr);
    // Begin/Continue.../End allow building one string value from multiple pieces.
    void BeginString(const char* pStr = VMA_NULL);
    void ContinueString(const char* pStr);
    void ContinueString(uint32_t n);
    void ContinueString(uint64_t n);
    void ContinueString_Pointer(const void* ptr);
    void EndString(const char* pStr = VMA_NULL);

    void WriteNumber(uint32_t n);
    void WriteNumber(uint64_t n);
    void WriteBool(bool b);
    void WriteNull();

private:
    // Indentation unit, repeated once per nesting level.
    static const char* const INDENT;

    enum COLLECTION_TYPE
    {
        COLLECTION_TYPE_OBJECT,
        COLLECTION_TYPE_ARRAY,
    };
    // One entry per currently open object/array.
    struct StackItem
    {
        COLLECTION_TYPE type;
        uint32_t valueCount;     // Values written so far; in objects, keys also count.
        bool singleLineMode;
    };

    VmaStringBuilder& m_SB;      // Output sink (not owned).
    VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
    bool m_InsideString;         // True between BeginString and EndString.

    // Emits separator (", " or ": ") and indentation before the next value.
    void BeginValue(bool isString);
    void WriteIndent(bool oneLess = false);
};
    6885 
// Indentation unit emitted once per open nesting level by WriteIndent().
const char* const VmaJsonWriter::INDENT = " ";
    6887 
// Binds the writer to an output string builder; the stack uses the provided
// allocation callbacks for its storage.
VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
    m_SB(sb),
    m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
    m_InsideString(false)
{
}
    6894 
VmaJsonWriter::~VmaJsonWriter()
{
    // The document must be complete on destruction: no open string and no
    // unclosed object/array.
    VMA_ASSERT(!m_InsideString);
    VMA_ASSERT(m_Stack.empty());
}
    6900 
    6901 void VmaJsonWriter::BeginObject(bool singleLine)
    6902 {
    6903  VMA_ASSERT(!m_InsideString);
    6904 
    6905  BeginValue(false);
    6906  m_SB.Add('{');
    6907 
    6908  StackItem item;
    6909  item.type = COLLECTION_TYPE_OBJECT;
    6910  item.valueCount = 0;
    6911  item.singleLineMode = singleLine;
    6912  m_Stack.push_back(item);
    6913 }
    6914 
    6915 void VmaJsonWriter::EndObject()
    6916 {
    6917  VMA_ASSERT(!m_InsideString);
    6918 
    6919  WriteIndent(true);
    6920  m_SB.Add('}');
    6921 
    6922  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
    6923  m_Stack.pop_back();
    6924 }
    6925 
    6926 void VmaJsonWriter::BeginArray(bool singleLine)
    6927 {
    6928  VMA_ASSERT(!m_InsideString);
    6929 
    6930  BeginValue(false);
    6931  m_SB.Add('[');
    6932 
    6933  StackItem item;
    6934  item.type = COLLECTION_TYPE_ARRAY;
    6935  item.valueCount = 0;
    6936  item.singleLineMode = singleLine;
    6937  m_Stack.push_back(item);
    6938 }
    6939 
    6940 void VmaJsonWriter::EndArray()
    6941 {
    6942  VMA_ASSERT(!m_InsideString);
    6943 
    6944  WriteIndent(true);
    6945  m_SB.Add(']');
    6946 
    6947  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
    6948  m_Stack.pop_back();
    6949 }
    6950 
// Writes a complete quoted string value (or object key) in one call.
void VmaJsonWriter::WriteString(const char* pStr)
{
    BeginString(pStr);
    EndString();
}
    6956 
    6957 void VmaJsonWriter::BeginString(const char* pStr)
    6958 {
    6959  VMA_ASSERT(!m_InsideString);
    6960 
    6961  BeginValue(true);
    6962  m_SB.Add('"');
    6963  m_InsideString = true;
    6964  if(pStr != VMA_NULL && pStr[0] != '\0')
    6965  {
    6966  ContinueString(pStr);
    6967  }
    6968 }
    6969 
    6970 void VmaJsonWriter::ContinueString(const char* pStr)
    6971 {
    6972  VMA_ASSERT(m_InsideString);
    6973 
    6974  const size_t strLen = strlen(pStr);
    6975  for(size_t i = 0; i < strLen; ++i)
    6976  {
    6977  char ch = pStr[i];
    6978  if(ch == '\\')
    6979  {
    6980  m_SB.Add("\\\\");
    6981  }
    6982  else if(ch == '"')
    6983  {
    6984  m_SB.Add("\\\"");
    6985  }
    6986  else if(ch >= 32)
    6987  {
    6988  m_SB.Add(ch);
    6989  }
    6990  else switch(ch)
    6991  {
    6992  case '\b':
    6993  m_SB.Add("\\b");
    6994  break;
    6995  case '\f':
    6996  m_SB.Add("\\f");
    6997  break;
    6998  case '\n':
    6999  m_SB.Add("\\n");
    7000  break;
    7001  case '\r':
    7002  m_SB.Add("\\r");
    7003  break;
    7004  case '\t':
    7005  m_SB.Add("\\t");
    7006  break;
    7007  default:
    7008  VMA_ASSERT(0 && "Character not currently supported.");
    7009  break;
    7010  }
    7011  }
    7012 }
    7013 
// Appends the decimal representation of n to the string being written.
void VmaJsonWriter::ContinueString(uint32_t n)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddNumber(n);
}
    7019 
// Appends the decimal representation of n to the string being written.
void VmaJsonWriter::ContinueString(uint64_t n)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddNumber(n);
}
    7025 
// Appends the textual representation of a pointer value to the string being written.
void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddPointer(ptr);
}
    7031 
    7032 void VmaJsonWriter::EndString(const char* pStr)
    7033 {
    7034  VMA_ASSERT(m_InsideString);
    7035  if(pStr != VMA_NULL && pStr[0] != '\0')
    7036  {
    7037  ContinueString(pStr);
    7038  }
    7039  m_SB.Add('"');
    7040  m_InsideString = false;
    7041 }
    7042 
// Writes an unquoted JSON number value.
void VmaJsonWriter::WriteNumber(uint32_t n)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.AddNumber(n);
}
    7049 
// Writes an unquoted JSON number value.
void VmaJsonWriter::WriteNumber(uint64_t n)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.AddNumber(n);
}
    7056 
    7057 void VmaJsonWriter::WriteBool(bool b)
    7058 {
    7059  VMA_ASSERT(!m_InsideString);
    7060  BeginValue(false);
    7061  m_SB.Add(b ? "true" : "false");
    7062 }
    7063 
// Writes the JSON literal `null`.
void VmaJsonWriter::WriteNull()
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.Add("null");
}
    7070 
// Emits whatever separator/indentation must precede the next value in the
// current collection, and advances that collection's value counter.
// Inside an object, even counts mean the next item is a key (must be a
// string) and odd counts mean it is the key's value (prefixed with ": ").
void VmaJsonWriter::BeginValue(bool isString)
{
    if(!m_Stack.empty())
    {
        StackItem& currItem = m_Stack.back();
        // A key position in an object must receive a string.
        if(currItem.type == COLLECTION_TYPE_OBJECT &&
            currItem.valueCount % 2 == 0)
        {
            VMA_ASSERT(isString);
        }

        if(currItem.type == COLLECTION_TYPE_OBJECT &&
            currItem.valueCount % 2 != 0)
        {
            // Value following its key: separate with ": " on the same line.
            m_SB.Add(": ");
        }
        else if(currItem.valueCount > 0)
        {
            // Subsequent element/key: comma, then newline + indent.
            m_SB.Add(", ");
            WriteIndent();
        }
        else
        {
            // First element/key of the collection: newline + indent only.
            WriteIndent();
        }
        ++currItem.valueCount;
    }
}
    7099 
    7100 void VmaJsonWriter::WriteIndent(bool oneLess)
    7101 {
    7102  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
    7103  {
    7104  m_SB.AddNewLine();
    7105 
    7106  size_t count = m_Stack.size();
    7107  if(count > 0 && oneLess)
    7108  {
    7109  --count;
    7110  }
    7111  for(size_t i = 0; i < count; ++i)
    7112  {
    7113  m_SB.Add(INDENT);
    7114  }
    7115  }
    7116 }
    7117 
    7118 #endif // #if VMA_STATS_STRING_ENABLED
    7119 
    7121 
// Replaces the allocation's user data. When the allocation was created with
// the "user data is a string" flag, the allocation owns a private copy of the
// string: the old copy is freed and the new string duplicated. Otherwise the
// opaque pointer is stored as-is (no ownership taken).
void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
{
    if(IsUserDataString())
    {
        // Setting the same string pointer that is already stored would be
        // use-after-free below, hence the assert.
        VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);

        FreeUserDataString(hAllocator);

        if(pUserData != VMA_NULL)
        {
            const char* const newStrSrc = (char*)pUserData;
            const size_t newStrLen = strlen(newStrSrc);
            // +1 to copy the null terminator as well.
            char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
            memcpy(newStrDst, newStrSrc, newStrLen + 1);
            m_pUserData = newStrDst;
        }
    }
    else
    {
        m_pUserData = pUserData;
    }
}
    7144 
// Re-points a block allocation at a (possibly different) memory block/offset,
// e.g. during defragmentation.
void VmaAllocation_T::ChangeBlockAllocation(
    VmaAllocator hAllocator,
    VmaDeviceMemoryBlock* block,
    VkDeviceSize offset)
{
    VMA_ASSERT(block != VMA_NULL);
    VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);

    // Move mapping reference counter from old block to new block.
    if(block != m_BlockAllocation.m_Block)
    {
        // Strip the persistent-map flag bit to get the actual reference count;
        // a persistent mapping counts as one extra reference.
        uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
        if(IsPersistentMap())
            ++mapRefCount;
        m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
        block->Map(hAllocator, mapRefCount, VMA_NULL);
    }

    m_BlockAllocation.m_Block = block;
    m_BlockAllocation.m_Offset = offset;
}
    7166 
// Updates the cached size after a successful in-place resize.
void VmaAllocation_T::ChangeSize(VkDeviceSize newSize)
{
    VMA_ASSERT(newSize > 0);
    m_Size = newSize;
}
    7172 
// Updates the offset within the owning block (block allocations only).
void VmaAllocation_T::ChangeOffset(VkDeviceSize newOffset)
{
    VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    m_BlockAllocation.m_Offset = newOffset;
}
    7178 
    7179 VkDeviceSize VmaAllocation_T::GetOffset() const
    7180 {
    7181  switch(m_Type)
    7182  {
    7183  case ALLOCATION_TYPE_BLOCK:
    7184  return m_BlockAllocation.m_Offset;
    7185  case ALLOCATION_TYPE_DEDICATED:
    7186  return 0;
    7187  default:
    7188  VMA_ASSERT(0);
    7189  return 0;
    7190  }
    7191 }
    7192 
    7193 VkDeviceMemory VmaAllocation_T::GetMemory() const
    7194 {
    7195  switch(m_Type)
    7196  {
    7197  case ALLOCATION_TYPE_BLOCK:
    7198  return m_BlockAllocation.m_Block->GetDeviceMemory();
    7199  case ALLOCATION_TYPE_DEDICATED:
    7200  return m_DedicatedAllocation.m_hMemory;
    7201  default:
    7202  VMA_ASSERT(0);
    7203  return VK_NULL_HANDLE;
    7204  }
    7205 }
    7206 
    7207 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
    7208 {
    7209  switch(m_Type)
    7210  {
    7211  case ALLOCATION_TYPE_BLOCK:
    7212  return m_BlockAllocation.m_Block->GetMemoryTypeIndex();
    7213  case ALLOCATION_TYPE_DEDICATED:
    7214  return m_DedicatedAllocation.m_MemoryTypeIndex;
    7215  default:
    7216  VMA_ASSERT(0);
    7217  return UINT32_MAX;
    7218  }
    7219 }
    7220 
    7221 void* VmaAllocation_T::GetMappedData() const
    7222 {
    7223  switch(m_Type)
    7224  {
    7225  case ALLOCATION_TYPE_BLOCK:
    7226  if(m_MapCount != 0)
    7227  {
    7228  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
    7229  VMA_ASSERT(pBlockData != VMA_NULL);
    7230  return (char*)pBlockData + m_BlockAllocation.m_Offset;
    7231  }
    7232  else
    7233  {
    7234  return VMA_NULL;
    7235  }
    7236  break;
    7237  case ALLOCATION_TYPE_DEDICATED:
    7238  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
    7239  return m_DedicatedAllocation.m_pMappedData;
    7240  default:
    7241  VMA_ASSERT(0);
    7242  return VMA_NULL;
    7243  }
    7244 }
    7245 
    7246 bool VmaAllocation_T::CanBecomeLost() const
    7247 {
    7248  switch(m_Type)
    7249  {
    7250  case ALLOCATION_TYPE_BLOCK:
    7251  return m_BlockAllocation.m_CanBecomeLost;
    7252  case ALLOCATION_TYPE_DEDICATED:
    7253  return false;
    7254  default:
    7255  VMA_ASSERT(0);
    7256  return false;
    7257  }
    7258 }
    7259 
// Custom pool the block allocation belongs to (block allocations only).
VmaPool VmaAllocation_T::GetPool() const
{
    VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    return m_BlockAllocation.m_hPool;
}
    7265 
// Tries to atomically mark this allocation as lost. Returns true on success.
// Fails (returns false) if the allocation was used within the last
// `frameInUseCount` frames relative to `currentFrameIndex`.
bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
{
    VMA_ASSERT(CanBecomeLost());

    /*
    Warning: This is a carefully designed algorithm.
    Do not modify unless you really know what you're doing :)
    */
    // Compare-and-exchange loop on the atomic last-use frame index: retry
    // until we either observe a disqualifying value or succeed in writing
    // VMA_FRAME_INDEX_LOST.
    uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
    for(;;)
    {
        if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
        {
            // Already lost - caller should not have asked again.
            VMA_ASSERT(0);
            return false;
        }
        else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
        {
            // Still potentially in use by the GPU; cannot be lost yet.
            return false;
        }
        else // Last use time earlier than current time.
        {
            if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
            {
                // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
                // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
                return true;
            }
        }
    }
}
    7297 
    7298 #if VMA_STATS_STRING_ENABLED
    7299 
    7300 // Correspond to values of enum VmaSuballocationType.
// Correspond to values of enum VmaSuballocationType.
// Used only for human-readable output in the JSON stats string.
static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
    "FREE",
    "UNKNOWN",
    "BUFFER",
    "IMAGE_UNKNOWN",
    "IMAGE_LINEAR",
    "IMAGE_OPTIMAL",
};
    7309 
// Writes this allocation's properties as key/value pairs into an already-open
// JSON object. Optional fields (UserData, Usage) are emitted only when set.
void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
{
    json.WriteString("Type");
    json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);

    json.WriteString("Size");
    json.WriteNumber(m_Size);

    if(m_pUserData != VMA_NULL)
    {
        json.WriteString("UserData");
        if(IsUserDataString())
        {
            // Owned string copy - print its contents.
            json.WriteString((const char*)m_pUserData);
        }
        else
        {
            // Opaque pointer - print its value.
            json.BeginString();
            json.ContinueString_Pointer(m_pUserData);
            json.EndString();
        }
    }

    json.WriteString("CreationFrameIndex");
    json.WriteNumber(m_CreationFrameIndex);

    json.WriteString("LastUseFrameIndex");
    json.WriteNumber(GetLastUseFrameIndex());

    if(m_BufferImageUsage != 0)
    {
        json.WriteString("Usage");
        json.WriteNumber(m_BufferImageUsage);
    }
}
    7345 
    7346 #endif
    7347 
    7348 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
    7349 {
    7350  VMA_ASSERT(IsUserDataString());
    7351  if(m_pUserData != VMA_NULL)
    7352  {
    7353  char* const oldStr = (char*)m_pUserData;
    7354  const size_t oldStrLen = strlen(oldStr);
    7355  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
    7356  m_pUserData = VMA_NULL;
    7357  }
    7358 }
    7359 
    7360 void VmaAllocation_T::BlockAllocMap()
    7361 {
    7362  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
    7363 
    7364  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
    7365  {
    7366  ++m_MapCount;
    7367  }
    7368  else
    7369  {
    7370  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
    7371  }
    7372 }
    7373 
    7374 void VmaAllocation_T::BlockAllocUnmap()
    7375 {
    7376  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
    7377 
    7378  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
    7379  {
    7380  --m_MapCount;
    7381  }
    7382  else
    7383  {
    7384  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
    7385  }
    7386 }
    7387 
// Maps a dedicated allocation's VkDeviceMemory for CPU access, with reference
// counting: the first call performs vkMapMemory, subsequent calls just return
// the cached pointer and bump the count. Returns VK_ERROR_MEMORY_MAP_FAILED
// if the count (excluding the persistent-map flag bit) would exceed 0x7F.
VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
{
    VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);

    if(m_MapCount != 0)
    {
        if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
        {
            // Already mapped - reuse the cached pointer.
            VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
            *ppData = m_DedicatedAllocation.m_pMappedData;
            ++m_MapCount;
            return VK_SUCCESS;
        }
        else
        {
            VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
            return VK_ERROR_MEMORY_MAP_FAILED;
        }
    }
    else
    {
        // First mapping: map the whole VkDeviceMemory object.
        VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
            hAllocator->m_hDevice,
            m_DedicatedAllocation.m_hMemory,
            0, // offset
            VK_WHOLE_SIZE,
            0, // flags
            ppData);
        if(result == VK_SUCCESS)
        {
            m_DedicatedAllocation.m_pMappedData = *ppData;
            m_MapCount = 1;
        }
        return result;
    }
}
    7424 
// Decrements the mapping reference count of a dedicated allocation and calls
// vkUnmapMemory when the count reaches zero.
void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
{
    VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);

    // Persistent-map flag bit is excluded from the reference count.
    if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
    {
        --m_MapCount;
        if(m_MapCount == 0)
        {
            m_DedicatedAllocation.m_pMappedData = VMA_NULL;
            (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
                hAllocator->m_hDevice,
                m_DedicatedAllocation.m_hMemory);
        }
    }
    else
    {
        VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
    }
}
    7445 
    7446 #if VMA_STATS_STRING_ENABLED
    7447 
// Serializes one VmaStatInfo as a JSON object. Min/Avg/Max sub-objects are
// only emitted when there is more than one allocation / unused range, since
// with a single item they would just repeat the totals.
static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
{
    json.BeginObject();

    json.WriteString("Blocks");
    json.WriteNumber(stat.blockCount);

    json.WriteString("Allocations");
    json.WriteNumber(stat.allocationCount);

    json.WriteString("UnusedRanges");
    json.WriteNumber(stat.unusedRangeCount);

    json.WriteString("UsedBytes");
    json.WriteNumber(stat.usedBytes);

    json.WriteString("UnusedBytes");
    json.WriteNumber(stat.unusedBytes);

    if(stat.allocationCount > 1)
    {
        json.WriteString("AllocationSize");
        json.BeginObject(true);
        json.WriteString("Min");
        json.WriteNumber(stat.allocationSizeMin);
        json.WriteString("Avg");
        json.WriteNumber(stat.allocationSizeAvg);
        json.WriteString("Max");
        json.WriteNumber(stat.allocationSizeMax);
        json.EndObject();
    }

    if(stat.unusedRangeCount > 1)
    {
        json.WriteString("UnusedRangeSize");
        json.BeginObject(true);
        json.WriteString("Min");
        json.WriteNumber(stat.unusedRangeSizeMin);
        json.WriteString("Avg");
        json.WriteNumber(stat.unusedRangeSizeAvg);
        json.WriteString("Max");
        json.WriteNumber(stat.unusedRangeSizeMax);
        json.EndObject();
    }

    json.EndObject();
}
    7495 
    7496 #endif // #if VMA_STATS_STRING_ENABLED
    7497 
// Comparator ordering suballocation-list iterators by suballocation size.
// The heterogeneous overload (iterator vs. raw size) enables binary search
// (e.g. lower_bound) over m_FreeSuballocationsBySize with a plain VkDeviceSize key.
struct VmaSuballocationItemSizeLess
{
    bool operator()(
        const VmaSuballocationList::iterator lhs,
        const VmaSuballocationList::iterator rhs) const
    {
        return lhs->size < rhs->size;
    }
    bool operator()(
        const VmaSuballocationList::iterator lhs,
        VkDeviceSize rhsSize) const
    {
        return lhs->size < rhsSize;
    }
};
    7513 
    7514 
    7516 // class VmaBlockMetadata
    7517 
// Base metadata object starts with zero size; Init(size) sets the real value.
VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
    m_Size(0),
    m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
{
}
    7523 
    7524 #if VMA_STATS_STRING_ENABLED
    7525 
// Opens the per-block JSON object and its "Suballocations" array.
// Must be paired with PrintDetailedMap_End after the suballocation entries.
void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
    VkDeviceSize unusedBytes,
    size_t allocationCount,
    size_t unusedRangeCount) const
{
    json.BeginObject();

    json.WriteString("TotalBytes");
    json.WriteNumber(GetSize());

    json.WriteString("UnusedBytes");
    json.WriteNumber(unusedBytes);

    json.WriteString("Allocations");
    json.WriteNumber((uint64_t)allocationCount);

    json.WriteString("UnusedRanges");
    json.WriteNumber((uint64_t)unusedRangeCount);

    json.WriteString("Suballocations");
    json.BeginArray();
}
    7548 
// Emits one used-suballocation entry into the open "Suballocations" array.
void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
    VkDeviceSize offset,
    VmaAllocation hAllocation) const
{
    // Single-line object to keep the per-suballocation output compact.
    json.BeginObject(true);

    json.WriteString("Offset");
    json.WriteNumber(offset);

    // Type/Size/UserData/frame indices come from the allocation itself.
    hAllocation->PrintParameters(json);

    json.EndObject();
}
    7562 
// Emits one free-range entry (type FREE) into the open "Suballocations" array.
void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
    VkDeviceSize offset,
    VkDeviceSize size) const
{
    // Single-line object to keep the per-range output compact.
    json.BeginObject(true);

    json.WriteString("Offset");
    json.WriteNumber(offset);

    json.WriteString("Type");
    json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);

    json.WriteString("Size");
    json.WriteNumber(size);

    json.EndObject();
}
    7580 
// Closes the "Suballocations" array and the per-block object opened by
// PrintDetailedMap_Begin.
void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
{
    json.EndArray();
    json.EndObject();
}
    7586 
    7587 #endif // #if VMA_STATS_STRING_ENABLED
    7588 
    7590 // class VmaBlockMetadata_Generic
    7591 
// Generic (free-list based) metadata: starts empty; Init(size) creates the
// initial single free suballocation covering the whole block.
VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_FreeCount(0),
    m_SumFreeSize(0),
    m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
{
}
    7600 
// Containers clean up via their own destructors; nothing else to release.
VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
{
}
    7604 
// Initializes metadata for a block of the given size: one free suballocation
// spanning the entire block, registered in the by-size list.
void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
{
    VmaBlockMetadata::Init(size);

    m_FreeCount = 1;
    m_SumFreeSize = size;

    VmaSuballocation suballoc = {};
    suballoc.offset = 0;
    suballoc.size = size;
    suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    suballoc.hAllocation = VK_NULL_HANDLE;

    // Whole-block free range is always large enough to be registered by size.
    VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
    m_Suballocations.push_back(suballoc);
    VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
    --suballocItem;
    m_FreeSuballocationsBySize.push_back(suballocItem);
}
    7624 
    7625 bool VmaBlockMetadata_Generic::Validate() const
    7626 {
    7627  VMA_VALIDATE(!m_Suballocations.empty());
    7628 
    7629  // Expected offset of new suballocation as calculated from previous ones.
    7630  VkDeviceSize calculatedOffset = 0;
    7631  // Expected number of free suballocations as calculated from traversing their list.
    7632  uint32_t calculatedFreeCount = 0;
    7633  // Expected sum size of free suballocations as calculated from traversing their list.
    7634  VkDeviceSize calculatedSumFreeSize = 0;
    7635  // Expected number of free suballocations that should be registered in
    7636  // m_FreeSuballocationsBySize calculated from traversing their list.
    7637  size_t freeSuballocationsToRegister = 0;
    7638  // True if previous visited suballocation was free.
    7639  bool prevFree = false;
    7640 
    7641  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    7642  suballocItem != m_Suballocations.cend();
    7643  ++suballocItem)
    7644  {
    7645  const VmaSuballocation& subAlloc = *suballocItem;
    7646 
    7647  // Actual offset of this suballocation doesn't match expected one.
    7648  VMA_VALIDATE(subAlloc.offset == calculatedOffset);
    7649 
    7650  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
    7651  // Two adjacent free suballocations are invalid. They should be merged.
    7652  VMA_VALIDATE(!prevFree || !currFree);
    7653 
    7654  VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));
    7655 
    7656  if(currFree)
    7657  {
    7658  calculatedSumFreeSize += subAlloc.size;
    7659  ++calculatedFreeCount;
    7660  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    7661  {
    7662  ++freeSuballocationsToRegister;
    7663  }
    7664 
    7665  // Margin required between allocations - every free space must be at least that large.
    7666  VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
    7667  }
    7668  else
    7669  {
    7670  VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
    7671  VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);
    7672 
    7673  // Margin required between allocations - previous allocation must be free.
    7674  VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
    7675  }
    7676 
    7677  calculatedOffset += subAlloc.size;
    7678  prevFree = currFree;
    7679  }
    7680 
    7681  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
    7682  // match expected one.
    7683  VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);
    7684 
    7685  VkDeviceSize lastSize = 0;
    7686  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
    7687  {
    7688  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
    7689 
    7690  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
    7691  VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
    7692  // They must be sorted by size ascending.
    7693  VMA_VALIDATE(suballocItem->size >= lastSize);
    7694 
    7695  lastSize = suballocItem->size;
    7696  }
    7697 
    7698  // Check if totals match calculated values.
    7699  VMA_VALIDATE(ValidateFreeSuballocationList());
    7700  VMA_VALIDATE(calculatedOffset == GetSize());
    7701  VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
    7702  VMA_VALIDATE(calculatedFreeCount == m_FreeCount);
    7703 
    7704  return true;
    7705 }
    7706 
    7707 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
    7708 {
    7709  if(!m_FreeSuballocationsBySize.empty())
    7710  {
    7711  return m_FreeSuballocationsBySize.back()->size;
    7712  }
    7713  else
    7714  {
    7715  return 0;
    7716  }
    7717 }
    7718 
    7719 bool VmaBlockMetadata_Generic::IsEmpty() const
    7720 {
    7721  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
    7722 }
    7723 
    7724 void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
    7725 {
    7726  outInfo.blockCount = 1;
    7727 
    7728  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
    7729  outInfo.allocationCount = rangeCount - m_FreeCount;
    7730  outInfo.unusedRangeCount = m_FreeCount;
    7731 
    7732  outInfo.unusedBytes = m_SumFreeSize;
    7733  outInfo.usedBytes = GetSize() - outInfo.unusedBytes;
    7734 
    7735  outInfo.allocationSizeMin = UINT64_MAX;
    7736  outInfo.allocationSizeMax = 0;
    7737  outInfo.unusedRangeSizeMin = UINT64_MAX;
    7738  outInfo.unusedRangeSizeMax = 0;
    7739 
    7740  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    7741  suballocItem != m_Suballocations.cend();
    7742  ++suballocItem)
    7743  {
    7744  const VmaSuballocation& suballoc = *suballocItem;
    7745  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    7746  {
    7747  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    7748  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
    7749  }
    7750  else
    7751  {
    7752  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
    7753  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
    7754  }
    7755  }
    7756 }
    7757 
    7758 void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
    7759 {
    7760  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
    7761 
    7762  inoutStats.size += GetSize();
    7763  inoutStats.unusedSize += m_SumFreeSize;
    7764  inoutStats.allocationCount += rangeCount - m_FreeCount;
    7765  inoutStats.unusedRangeCount += m_FreeCount;
    7766  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
    7767 }
    7768 
    7769 #if VMA_STATS_STRING_ENABLED
    7770 
    7771 void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
    7772 {
    7773  PrintDetailedMap_Begin(json,
    7774  m_SumFreeSize, // unusedBytes
    7775  m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
    7776  m_FreeCount); // unusedRangeCount
    7777 
    7778  size_t i = 0;
    7779  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    7780  suballocItem != m_Suballocations.cend();
    7781  ++suballocItem, ++i)
    7782  {
    7783  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    7784  {
    7785  PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
    7786  }
    7787  else
    7788  {
    7789  PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
    7790  }
    7791  }
    7792 
    7793  PrintDetailedMap_End(json);
    7794 }
    7795 
    7796 #endif // #if VMA_STATS_STRING_ENABLED
    7797 
    7798 bool VmaBlockMetadata_Generic::CreateAllocationRequest(
    7799  uint32_t currentFrameIndex,
    7800  uint32_t frameInUseCount,
    7801  VkDeviceSize bufferImageGranularity,
    7802  VkDeviceSize allocSize,
    7803  VkDeviceSize allocAlignment,
    7804  bool upperAddress,
    7805  VmaSuballocationType allocType,
    7806  bool canMakeOtherLost,
    7807  uint32_t strategy,
    7808  VmaAllocationRequest* pAllocationRequest)
    7809 {
    7810  VMA_ASSERT(allocSize > 0);
    7811  VMA_ASSERT(!upperAddress);
    7812  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    7813  VMA_ASSERT(pAllocationRequest != VMA_NULL);
    7814  VMA_HEAVY_ASSERT(Validate());
    7815 
    7816  // There is not enough total free space in this block to fullfill the request: Early return.
    7817  if(canMakeOtherLost == false &&
    7818  m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
    7819  {
    7820  return false;
    7821  }
    7822 
    7823  // New algorithm, efficiently searching freeSuballocationsBySize.
    7824  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
    7825  if(freeSuballocCount > 0)
    7826  {
    7828  {
    7829  // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
    7830  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
    7831  m_FreeSuballocationsBySize.data(),
    7832  m_FreeSuballocationsBySize.data() + freeSuballocCount,
    7833  allocSize + 2 * VMA_DEBUG_MARGIN,
    7834  VmaSuballocationItemSizeLess());
    7835  size_t index = it - m_FreeSuballocationsBySize.data();
    7836  for(; index < freeSuballocCount; ++index)
    7837  {
    7838  if(CheckAllocation(
    7839  currentFrameIndex,
    7840  frameInUseCount,
    7841  bufferImageGranularity,
    7842  allocSize,
    7843  allocAlignment,
    7844  allocType,
    7845  m_FreeSuballocationsBySize[index],
    7846  false, // canMakeOtherLost
    7847  &pAllocationRequest->offset,
    7848  &pAllocationRequest->itemsToMakeLostCount,
    7849  &pAllocationRequest->sumFreeSize,
    7850  &pAllocationRequest->sumItemSize))
    7851  {
    7852  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
    7853  return true;
    7854  }
    7855  }
    7856  }
    7857  else if(strategy == VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET)
    7858  {
    7859  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
    7860  it != m_Suballocations.end();
    7861  ++it)
    7862  {
    7863  if(it->type == VMA_SUBALLOCATION_TYPE_FREE && CheckAllocation(
    7864  currentFrameIndex,
    7865  frameInUseCount,
    7866  bufferImageGranularity,
    7867  allocSize,
    7868  allocAlignment,
    7869  allocType,
    7870  it,
    7871  false, // canMakeOtherLost
    7872  &pAllocationRequest->offset,
    7873  &pAllocationRequest->itemsToMakeLostCount,
    7874  &pAllocationRequest->sumFreeSize,
    7875  &pAllocationRequest->sumItemSize))
    7876  {
    7877  pAllocationRequest->item = it;
    7878  return true;
    7879  }
    7880  }
    7881  }
    7882  else // WORST_FIT, FIRST_FIT
    7883  {
    7884  // Search staring from biggest suballocations.
    7885  for(size_t index = freeSuballocCount; index--; )
    7886  {
    7887  if(CheckAllocation(
    7888  currentFrameIndex,
    7889  frameInUseCount,
    7890  bufferImageGranularity,
    7891  allocSize,
    7892  allocAlignment,
    7893  allocType,
    7894  m_FreeSuballocationsBySize[index],
    7895  false, // canMakeOtherLost
    7896  &pAllocationRequest->offset,
    7897  &pAllocationRequest->itemsToMakeLostCount,
    7898  &pAllocationRequest->sumFreeSize,
    7899  &pAllocationRequest->sumItemSize))
    7900  {
    7901  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
    7902  return true;
    7903  }
    7904  }
    7905  }
    7906  }
    7907 
    7908  if(canMakeOtherLost)
    7909  {
    7910  // Brute-force algorithm. TODO: Come up with something better.
    7911 
    7912  pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
    7913  pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;
    7914 
    7915  VmaAllocationRequest tmpAllocRequest = {};
    7916  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
    7917  suballocIt != m_Suballocations.end();
    7918  ++suballocIt)
    7919  {
    7920  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
    7921  suballocIt->hAllocation->CanBecomeLost())
    7922  {
    7923  if(CheckAllocation(
    7924  currentFrameIndex,
    7925  frameInUseCount,
    7926  bufferImageGranularity,
    7927  allocSize,
    7928  allocAlignment,
    7929  allocType,
    7930  suballocIt,
    7931  canMakeOtherLost,
    7932  &tmpAllocRequest.offset,
    7933  &tmpAllocRequest.itemsToMakeLostCount,
    7934  &tmpAllocRequest.sumFreeSize,
    7935  &tmpAllocRequest.sumItemSize))
    7936  {
    7937  tmpAllocRequest.item = suballocIt;
    7938 
    7939  if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost() ||
    7941  {
    7942  *pAllocationRequest = tmpAllocRequest;
    7943  }
    7944  }
    7945  }
    7946  }
    7947 
    7948  if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
    7949  {
    7950  return true;
    7951  }
    7952  }
    7953 
    7954  return false;
    7955 }
    7956 
// Makes lost the existing allocations that pAllocationRequest reported as
// needing to be sacrificed (itemsToMakeLostCount of them), freeing their
// suballocations so the request can then be fulfilled.
// Returns false if any of those allocations refuses to become lost
// (its MakeLost() fails); the request cannot proceed in that case.
bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VmaAllocationRequest* pAllocationRequest)
{
    while(pAllocationRequest->itemsToMakeLostCount > 0)
    {
        // Step over a free item to reach the next used suballocation that
        // must be made lost.
        if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            ++pAllocationRequest->item;
        }
        VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
        VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
        VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
        if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
        {
            // FreeSuballocation may merge the freed item with free neighbors;
            // continue from the iterator of the resulting free item.
            pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
            --pAllocationRequest->itemsToMakeLostCount;
        }
        else
        {
            return false;
        }
    }

    VMA_HEAVY_ASSERT(Validate());
    VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
    VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);

    return true;
}
    7988 
    7989 uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    7990 {
    7991  uint32_t lostAllocationCount = 0;
    7992  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
    7993  it != m_Suballocations.end();
    7994  ++it)
    7995  {
    7996  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
    7997  it->hAllocation->CanBecomeLost() &&
    7998  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    7999  {
    8000  it = FreeSuballocation(it);
    8001  ++lostAllocationCount;
    8002  }
    8003  }
    8004  return lostAllocationCount;
    8005 }
    8006 
    8007 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
    8008 {
    8009  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
    8010  it != m_Suballocations.end();
    8011  ++it)
    8012  {
    8013  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
    8014  {
    8015  if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
    8016  {
    8017  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    8018  return VK_ERROR_VALIDATION_FAILED_EXT;
    8019  }
    8020  if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
    8021  {
    8022  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    8023  return VK_ERROR_VALIDATION_FAILED_EXT;
    8024  }
    8025  }
    8026  }
    8027 
    8028  return VK_SUCCESS;
    8029 }
    8030 
// Commits an allocation previously found by CreateAllocationRequest: turns
// the free suballocation pointed to by request.item into a used one of
// exactly allocSize bytes at request.offset, inserting new free
// suballocations for any leftover space before and/or after it, and updates
// m_FreeCount / m_SumFreeSize accordingly.
void VmaBlockMetadata_Generic::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    bool upperAddress,
    VmaAllocation hAllocation)
{
    // This metadata type does not support upper-address allocation.
    VMA_ASSERT(!upperAddress);
    VMA_ASSERT(request.item != m_Suballocations.end());
    VmaSuballocation& suballoc = *request.item;
    // Given suballocation is a free block.
    VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    // Given offset is inside this suballocation.
    VMA_ASSERT(request.offset >= suballoc.offset);
    const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
    VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
    const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;

    // Unregister this free suballocation from m_FreeSuballocationsBySize and update
    // it to become used.
    UnregisterFreeSuballocation(request.item);

    suballoc.offset = request.offset;
    suballoc.size = allocSize;
    suballoc.type = type;
    suballoc.hAllocation = hAllocation;

    // If there are any free bytes remaining at the end, insert new free suballocation after current one.
    if(paddingEnd)
    {
        VmaSuballocation paddingSuballoc = {};
        paddingSuballoc.offset = request.offset + allocSize;
        paddingSuballoc.size = paddingEnd;
        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
        VmaSuballocationList::iterator next = request.item;
        ++next;
        const VmaSuballocationList::iterator paddingEndItem =
            m_Suballocations.insert(next, paddingSuballoc);
        RegisterFreeSuballocation(paddingEndItem);
    }

    // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
    if(paddingBegin)
    {
        VmaSuballocation paddingSuballoc = {};
        paddingSuballoc.offset = request.offset - paddingBegin;
        paddingSuballoc.size = paddingBegin;
        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
        const VmaSuballocationList::iterator paddingBeginItem =
            m_Suballocations.insert(request.item, paddingSuballoc);
        RegisterFreeSuballocation(paddingBeginItem);
    }

    // Update totals.
    // One free suballocation was consumed; each nonzero padding re-adds one.
    m_FreeCount = m_FreeCount - 1;
    if(paddingBegin > 0)
    {
        ++m_FreeCount;
    }
    if(paddingEnd > 0)
    {
        ++m_FreeCount;
    }
    // Only the allocated bytes leave the free pool; the paddings stay free.
    m_SumFreeSize -= allocSize;
}
    8096 
    8097 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
    8098 {
    8099  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
    8100  suballocItem != m_Suballocations.end();
    8101  ++suballocItem)
    8102  {
    8103  VmaSuballocation& suballoc = *suballocItem;
    8104  if(suballoc.hAllocation == allocation)
    8105  {
    8106  FreeSuballocation(suballocItem);
    8107  VMA_HEAVY_ASSERT(Validate());
    8108  return;
    8109  }
    8110  }
    8111  VMA_ASSERT(0 && "Not found!");
    8112 }
    8113 
    8114 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
    8115 {
    8116  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
    8117  suballocItem != m_Suballocations.end();
    8118  ++suballocItem)
    8119  {
    8120  VmaSuballocation& suballoc = *suballocItem;
    8121  if(suballoc.offset == offset)
    8122  {
    8123  FreeSuballocation(suballocItem);
    8124  return;
    8125  }
    8126  }
    8127  VMA_ASSERT(0 && "Not found!");
    8128 }
    8129 
// Tries to change the size of an existing allocation in place, without
// moving it. Shrinking always succeeds (leftover space becomes/extends a
// free suballocation after the allocation). Growing succeeds only when the
// immediately following suballocation is free and large enough, including
// the debug margin. Returns false when the resize cannot be done in place;
// asserts if alloc is not found in this block at all.
bool VmaBlockMetadata_Generic::ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize)
{
    typedef VmaSuballocationList::iterator iter_type;
    for(iter_type suballocItem = m_Suballocations.begin();
        suballocItem != m_Suballocations.end();
        ++suballocItem)
    {
        VmaSuballocation& suballoc = *suballocItem;
        if(suballoc.hAllocation == alloc)
        {
            iter_type nextItem = suballocItem;
            ++nextItem;

            // Should have been ensured on higher level.
            VMA_ASSERT(newSize != alloc->GetSize() && newSize > 0);

            // Shrinking.
            if(newSize < alloc->GetSize())
            {
                const VkDeviceSize sizeDiff = suballoc.size - newSize;

                // There is next item.
                if(nextItem != m_Suballocations.end())
                {
                    // Next item is free.
                    if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
                    {
                        // Grow this next item backward.
                        // Unregister/re-register because its size (the sort key
                        // of m_FreeSuballocationsBySize) changes.
                        UnregisterFreeSuballocation(nextItem);
                        nextItem->offset -= sizeDiff;
                        nextItem->size += sizeDiff;
                        RegisterFreeSuballocation(nextItem);
                    }
                    // Next item is not free.
                    else
                    {
                        // Create free item after current one.
                        VmaSuballocation newFreeSuballoc;
                        newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
                        newFreeSuballoc.offset = suballoc.offset + newSize;
                        newFreeSuballoc.size = sizeDiff;
                        newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
                        iter_type newFreeSuballocIt = m_Suballocations.insert(nextItem, newFreeSuballoc);
                        RegisterFreeSuballocation(newFreeSuballocIt);

                        ++m_FreeCount;
                    }
                }
                // This is the last item.
                else
                {
                    // Create free item at the end.
                    VmaSuballocation newFreeSuballoc;
                    newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
                    newFreeSuballoc.offset = suballoc.offset + newSize;
                    newFreeSuballoc.size = sizeDiff;
                    newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
                    m_Suballocations.push_back(newFreeSuballoc);

                    iter_type newFreeSuballocIt = m_Suballocations.end();
                    RegisterFreeSuballocation(--newFreeSuballocIt);

                    ++m_FreeCount;
                }

                suballoc.size = newSize;
                m_SumFreeSize += sizeDiff;
            }
            // Growing.
            else
            {
                const VkDeviceSize sizeDiff = newSize - suballoc.size;

                // There is next item.
                if(nextItem != m_Suballocations.end())
                {
                    // Next item is free.
                    if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
                    {
                        // There is not enough free space, including margin.
                        if(nextItem->size < sizeDiff + VMA_DEBUG_MARGIN)
                        {
                            return false;
                        }

                        // There is more free space than required.
                        if(nextItem->size > sizeDiff)
                        {
                            // Move and shrink this next item.
                            UnregisterFreeSuballocation(nextItem);
                            nextItem->offset += sizeDiff;
                            nextItem->size -= sizeDiff;
                            RegisterFreeSuballocation(nextItem);
                        }
                        // There is exactly the amount of free space required.
                        else
                        {
                            // Remove this next free item.
                            UnregisterFreeSuballocation(nextItem);
                            m_Suballocations.erase(nextItem);
                            --m_FreeCount;
                        }
                    }
                    // Next item is not free - there is no space to grow.
                    else
                    {
                        return false;
                    }
                }
                // This is the last item - there is no space to grow.
                else
                {
                    return false;
                }

                suballoc.size = newSize;
                m_SumFreeSize -= sizeDiff;
            }

            // We cannot call Validate() here because alloc object is updated to new size outside of this call.
            return true;
        }
    }
    VMA_ASSERT(0 && "Not found!");
    return false;
}
    8256 
    8257 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
    8258 {
    8259  VkDeviceSize lastSize = 0;
    8260  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
    8261  {
    8262  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
    8263 
    8264  VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
    8265  VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
    8266  VMA_VALIDATE(it->size >= lastSize);
    8267  lastSize = it->size;
    8268  }
    8269  return true;
    8270 }
    8271 
// Checks whether an allocation of allocSize bytes with allocAlignment and
// allocType can be placed starting at suballocItem, honoring
// VMA_DEBUG_MARGIN and bufferImageGranularity. On success fills *pOffset
// with the final aligned offset and returns true.
// When canMakeOtherLost is true, the candidate range may span multiple
// consecutive suballocations whose allocations can become lost;
// *itemsToMakeLostCount, *pSumFreeSize and *pSumItemSize then describe the
// cost of that choice (used by CalcCost on the request).
// When canMakeOtherLost is false, suballocItem must be a single free
// suballocation that fully contains the request.
bool VmaBlockMetadata_Generic::CheckAllocation(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    VmaSuballocationType allocType,
    VmaSuballocationList::const_iterator suballocItem,
    bool canMakeOtherLost,
    VkDeviceSize* pOffset,
    size_t* itemsToMakeLostCount,
    VkDeviceSize* pSumFreeSize,
    VkDeviceSize* pSumItemSize) const
{
    VMA_ASSERT(allocSize > 0);
    VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(suballocItem != m_Suballocations.cend());
    VMA_ASSERT(pOffset != VMA_NULL);

    *itemsToMakeLostCount = 0;
    *pSumFreeSize = 0;
    *pSumItemSize = 0;

    if(canMakeOtherLost)
    {
        if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            *pSumFreeSize = suballocItem->size;
        }
        else
        {
            // Starting item is used: it must itself be lost-able and old
            // enough (outside the in-use frame window) to be sacrificed.
            if(suballocItem->hAllocation->CanBecomeLost() &&
                suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
            {
                ++*itemsToMakeLostCount;
                *pSumItemSize = suballocItem->size;
            }
            else
            {
                return false;
            }
        }

        // Remaining size is too small for this request: Early return.
        if(GetSize() - suballocItem->offset < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation.
        *pOffset = suballocItem->offset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if(VMA_DEBUG_MARGIN > 0)
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        *pOffset = VmaAlignUp(*pOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        // Now that we have final *pOffset, check if we are past suballocItem.
        // If yes, return false - this function should be called for another suballocItem as starting point.
        if(*pOffset >= suballocItem->offset + suballocItem->size)
        {
            return false;
        }

        // Calculate padding at the beginning based on current offset.
        const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;

        // Calculate required margin at the end.
        const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;

        const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
        // Another early return check.
        if(suballocItem->offset + totalSize > GetSize())
        {
            return false;
        }

        // Advance lastSuballocItem until desired size is reached.
        // Update itemsToMakeLostCount.
        VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
        if(totalSize > suballocItem->size)
        {
            VkDeviceSize remainingSize = totalSize - suballocItem->size;
            while(remainingSize > 0)
            {
                ++lastSuballocItem;
                if(lastSuballocItem == m_Suballocations.cend())
                {
                    return false;
                }
                if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
                {
                    *pSumFreeSize += lastSuballocItem->size;
                }
                else
                {
                    VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
                    if(lastSuballocItem->hAllocation->CanBecomeLost() &&
                        lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                    {
                        ++*itemsToMakeLostCount;
                        *pSumItemSize += lastSuballocItem->size;
                    }
                    else
                    {
                        return false;
                    }
                }
                remainingSize = (lastSuballocItem->size < remainingSize) ?
                    remainingSize - lastSuballocItem->size : 0;
            }
        }

        // Check next suballocations for BufferImageGranularity conflicts.
        // If conflict exists, we must mark more allocations lost or fail.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
                        if(nextSuballoc.hAllocation->CanBecomeLost() &&
                            nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                        {
                            ++*itemsToMakeLostCount;
                        }
                        else
                        {
                            return false;
                        }
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }
    else
    {
        const VmaSuballocation& suballoc = *suballocItem;
        VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

        *pSumFreeSize = suballoc.size;

        // Size of this suballocation is too small for this request: Early return.
        if(suballoc.size < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation.
        *pOffset = suballoc.offset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if(VMA_DEBUG_MARGIN > 0)
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        *pOffset = VmaAlignUp(*pOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        // Calculate padding at the beginning based on current offset.
        const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;

        // Calculate required margin at the end.
        const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;

        // Fail if requested size plus margin before and after is bigger than size of this suballocation.
        if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
        {
            return false;
        }

        // Check next suballocations for BufferImageGranularity conflicts.
        // If conflict exists, allocation cannot be made here.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        return false;
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }

    // All tests passed: Success. pOffset is already filled.
    return true;
}
    8545 
    8546 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
    8547 {
    8548  VMA_ASSERT(item != m_Suballocations.end());
    8549  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    8550 
    8551  VmaSuballocationList::iterator nextItem = item;
    8552  ++nextItem;
    8553  VMA_ASSERT(nextItem != m_Suballocations.end());
    8554  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
    8555 
    8556  item->size += nextItem->size;
    8557  --m_FreeCount;
    8558  m_Suballocations.erase(nextItem);
    8559 }
    8560 
// Marks the given suballocation as free, updates m_FreeCount/m_SumFreeSize,
// and merges it with adjacent free suballocations so two free items are
// never neighbors. Returns the iterator of the resulting free suballocation
// (the previous item when a backward merge happened).
VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
{
    // Change this suballocation to be marked as free.
    VmaSuballocation& suballoc = *suballocItem;
    suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    suballoc.hAllocation = VK_NULL_HANDLE;

    // Update totals.
    ++m_FreeCount;
    m_SumFreeSize += suballoc.size;

    // Merge with previous and/or next suballocation if it's also free.
    bool mergeWithNext = false;
    bool mergeWithPrev = false;

    VmaSuballocationList::iterator nextItem = suballocItem;
    ++nextItem;
    if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
    {
        mergeWithNext = true;
    }

    VmaSuballocationList::iterator prevItem = suballocItem;
    if(suballocItem != m_Suballocations.begin())
    {
        --prevItem;
        if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            mergeWithPrev = true;
        }
    }

    // Neighbors must be unregistered from m_FreeSuballocationsBySize before
    // merging because their size (the sort key) is about to change.
    if(mergeWithNext)
    {
        UnregisterFreeSuballocation(nextItem);
        MergeFreeWithNext(suballocItem);
    }

    if(mergeWithPrev)
    {
        UnregisterFreeSuballocation(prevItem);
        MergeFreeWithNext(prevItem);
        // Register the merged (previous) item with its new, larger size.
        RegisterFreeSuballocation(prevItem);
        return prevItem;
    }
    else
    {
        RegisterFreeSuballocation(suballocItem);
        return suballocItem;
    }
}
    8612 
    8613 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
    8614 {
    8615  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    8616  VMA_ASSERT(item->size > 0);
    8617 
    8618  // You may want to enable this validation at the beginning or at the end of
    8619  // this function, depending on what do you want to check.
    8620  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    8621 
    8622  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    8623  {
    8624  if(m_FreeSuballocationsBySize.empty())
    8625  {
    8626  m_FreeSuballocationsBySize.push_back(item);
    8627  }
    8628  else
    8629  {
    8630  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
    8631  }
    8632  }
    8633 
    8634  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    8635 }
    8636 
    8637 
    8638 void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
    8639 {
    8640  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    8641  VMA_ASSERT(item->size > 0);
    8642 
    8643  // You may want to enable this validation at the beginning or at the end of
    8644  // this function, depending on what do you want to check.
    8645  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    8646 
    8647  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    8648  {
    8649  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
    8650  m_FreeSuballocationsBySize.data(),
    8651  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
    8652  item,
    8653  VmaSuballocationItemSizeLess());
    8654  for(size_t index = it - m_FreeSuballocationsBySize.data();
    8655  index < m_FreeSuballocationsBySize.size();
    8656  ++index)
    8657  {
    8658  if(m_FreeSuballocationsBySize[index] == item)
    8659  {
    8660  VmaVectorRemove(m_FreeSuballocationsBySize, index);
    8661  return;
    8662  }
    8663  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
    8664  }
    8665  VMA_ASSERT(0 && "Not found.");
    8666  }
    8667 
    8668  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    8669 }
    8670 
    8671 bool VmaBlockMetadata_Generic::IsBufferImageGranularityConflictPossible(
    8672  VkDeviceSize bufferImageGranularity,
    8673  VmaSuballocationType& inOutPrevSuballocType) const
    8674 {
    8675  if(bufferImageGranularity == 1 || IsEmpty())
    8676  {
    8677  return false;
    8678  }
    8679 
    8680  VkDeviceSize minAlignment = VK_WHOLE_SIZE;
    8681  bool typeConflictFound = false;
    8682  for(VmaSuballocationList::const_iterator it = m_Suballocations.cbegin();
    8683  it != m_Suballocations.cend();
    8684  ++it)
    8685  {
    8686  const VmaSuballocationType suballocType = it->type;
    8687  if(suballocType != VMA_SUBALLOCATION_TYPE_FREE)
    8688  {
    8689  minAlignment = VMA_MIN(minAlignment, it->hAllocation->GetAlignment());
    8690  if(VmaIsBufferImageGranularityConflict(inOutPrevSuballocType, suballocType))
    8691  {
    8692  typeConflictFound = true;
    8693  }
    8694  inOutPrevSuballocType = suballocType;
    8695  }
    8696  }
    8697 
    8698  return typeConflictFound || minAlignment >= bufferImageGranularity;
    8699 }
    8700 
    8702 // class VmaBlockMetadata_Linear
    8703 
// Constructs empty linear metadata: no suballocations, vector 0 acting as the
// "1st" vector, and the 2nd vector unused (SECOND_VECTOR_EMPTY).
VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_SumFreeSize(0),
    m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_1stVectorIndex(0), // m_Suballocations0 initially serves as the 1st vector.
    m_2ndVectorMode(SECOND_VECTOR_EMPTY),
    m_1stNullItemsBeginCount(0),
    m_1stNullItemsMiddleCount(0),
    m_2ndNullItemsCount(0)
{
}
    8716 
// Trivial destructor; the suballocation vectors release their storage through
// their own destructors.
VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
{
}
    8720 
// Initializes metadata for a block of the given total size. The whole block
// starts free, so the free-size sum equals the block size.
void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
{
    VmaBlockMetadata::Init(size);
    m_SumFreeSize = size;
}
    8726 
// Checks internal consistency of the linear metadata: vector/mode agreement,
// null-item bookkeeping counters, ascending offsets with debug margins, and
// that m_SumFreeSize matches block size minus the sum of used sizes.
// Returns true when everything is consistent; each VMA_VALIDATE returns false
// from this function on its first failed check.
bool VmaBlockMetadata_Linear::Validate() const
{
    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    // The 2nd vector is non-empty exactly when a second-vector mode is active.
    VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
    // A ring buffer cannot have an empty 1st vector while 2nd is non-empty.
    VMA_VALIDATE(!suballocations1st.empty() ||
        suballocations2nd.empty() ||
        m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);

    if(!suballocations1st.empty())
    {
        // Null item at the beginning should be accounted into m_1stNullItemsBeginCount.
        VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
        // Null item at the end should be just pop_back().
        VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
    }
    if(!suballocations2nd.empty())
    {
        // Null item at the end should be just pop_back().
        VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
    }

    // Null-item counters can never exceed the vectors they describe.
    VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
    VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());

    VkDeviceSize sumUsedSize = 0;
    const size_t suballoc1stCount = suballocations1st.size();
    // Minimum admissible offset for the next suballocation encountered;
    // starts past the leading debug margin.
    VkDeviceSize offset = VMA_DEBUG_MARGIN;

    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        // In ring-buffer mode the 2nd vector occupies the lowest addresses,
        // so it is walked first, in forward order.
        const size_t suballoc2ndCount = suballocations2nd.size();
        size_t nullItem2ndCount = 0;
        for(size_t i = 0; i < suballoc2ndCount; ++i)
        {
            const VmaSuballocation& suballoc = suballocations2nd[i];
            const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

            // A free item must have no allocation handle, and vice versa.
            VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
            // Offsets must be ascending with at least the debug margin between items.
            VMA_VALIDATE(suballoc.offset >= offset);

            if(!currFree)
            {
                // The allocation object must agree with the metadata entry.
                VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
                VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
                sumUsedSize += suballoc.size;
            }
            else
            {
                ++nullItem2ndCount;
            }

            offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
        }

        VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
    }

    // Leading entries of the 1st vector must all be nulls (free, no handle).
    for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
    {
        const VmaSuballocation& suballoc = suballocations1st[i];
        VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
            suballoc.hAllocation == VK_NULL_HANDLE);
    }

    size_t nullItem1stCount = m_1stNullItemsBeginCount;

    for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
    {
        const VmaSuballocation& suballoc = suballocations1st[i];
        const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

        VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
        VMA_VALIDATE(suballoc.offset >= offset);
        // NOTE(review): the left side is always true because the loop starts
        // at i == m_1stNullItemsBeginCount, so this check is trivially
        // satisfied — possibly a leftover; confirm the intended condition.
        VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);

        if(!currFree)
        {
            VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
            VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
            sumUsedSize += suballoc.size;
        }
        else
        {
            ++nullItem1stCount;
        }

        offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
    }
    VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);

    if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        // In double-stack mode the 2nd vector grows down from the end of the
        // block, so iterate it backwards to keep ascending address order.
        const size_t suballoc2ndCount = suballocations2nd.size();
        size_t nullItem2ndCount = 0;
        for(size_t i = suballoc2ndCount; i--; )
        {
            const VmaSuballocation& suballoc = suballocations2nd[i];
            const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

            VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
            VMA_VALIDATE(suballoc.offset >= offset);

            if(!currFree)
            {
                VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
                VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
                sumUsedSize += suballoc.size;
            }
            else
            {
                ++nullItem2ndCount;
            }

            offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
        }

        VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
    }

    // The walk must end within the block, and the free-size bookkeeping must
    // match what was observed.
    VMA_VALIDATE(offset <= GetSize());
    VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);

    return true;
}
    8853 
    8854 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
    8855 {
    8856  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
    8857  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
    8858 }
    8859 
    8860 VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
    8861 {
    8862  const VkDeviceSize size = GetSize();
    8863 
    8864  /*
    8865  We don't consider gaps inside allocation vectors with freed allocations because
    8866  they are not suitable for reuse in linear allocator. We consider only space that
    8867  is available for new allocations.
    8868  */
    8869  if(IsEmpty())
    8870  {
    8871  return size;
    8872  }
    8873 
    8874  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8875 
    8876  switch(m_2ndVectorMode)
    8877  {
    8878  case SECOND_VECTOR_EMPTY:
    8879  /*
    8880  Available space is after end of 1st, as well as before beginning of 1st (which
    8881  whould make it a ring buffer).
    8882  */
    8883  {
    8884  const size_t suballocations1stCount = suballocations1st.size();
    8885  VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
    8886  const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
    8887  const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
    8888  return VMA_MAX(
    8889  firstSuballoc.offset,
    8890  size - (lastSuballoc.offset + lastSuballoc.size));
    8891  }
    8892  break;
    8893 
    8894  case SECOND_VECTOR_RING_BUFFER:
    8895  /*
    8896  Available space is only between end of 2nd and beginning of 1st.
    8897  */
    8898  {
    8899  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8900  const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
    8901  const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
    8902  return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
    8903  }
    8904  break;
    8905 
    8906  case SECOND_VECTOR_DOUBLE_STACK:
    8907  /*
    8908  Available space is only between end of 1st and top of 2nd.
    8909  */
    8910  {
    8911  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8912  const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
    8913  const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
    8914  return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
    8915  }
    8916  break;
    8917 
    8918  default:
    8919  VMA_ASSERT(0);
    8920  return 0;
    8921  }
    8922 }
    8923 
    8924 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
    8925 {
    8926  const VkDeviceSize size = GetSize();
    8927  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8928  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8929  const size_t suballoc1stCount = suballocations1st.size();
    8930  const size_t suballoc2ndCount = suballocations2nd.size();
    8931 
    8932  outInfo.blockCount = 1;
    8933  outInfo.allocationCount = (uint32_t)GetAllocationCount();
    8934  outInfo.unusedRangeCount = 0;
    8935  outInfo.usedBytes = 0;
    8936  outInfo.allocationSizeMin = UINT64_MAX;
    8937  outInfo.allocationSizeMax = 0;
    8938  outInfo.unusedRangeSizeMin = UINT64_MAX;
    8939  outInfo.unusedRangeSizeMax = 0;
    8940 
    8941  VkDeviceSize lastOffset = 0;
    8942 
    8943  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8944  {
    8945  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    8946  size_t nextAlloc2ndIndex = 0;
    8947  while(lastOffset < freeSpace2ndTo1stEnd)
    8948  {
    8949  // Find next non-null allocation or move nextAllocIndex to the end.
    8950  while(nextAlloc2ndIndex < suballoc2ndCount &&
    8951  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8952  {
    8953  ++nextAlloc2ndIndex;
    8954  }
    8955 
    8956  // Found non-null allocation.
    8957  if(nextAlloc2ndIndex < suballoc2ndCount)
    8958  {
    8959  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8960 
    8961  // 1. Process free space before this allocation.
    8962  if(lastOffset < suballoc.offset)
    8963  {
    8964  // There is free space from lastOffset to suballoc.offset.
    8965  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8966  ++outInfo.unusedRangeCount;
    8967  outInfo.unusedBytes += unusedRangeSize;
    8968  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8969  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8970  }
    8971 
    8972  // 2. Process this allocation.
    8973  // There is allocation with suballoc.offset, suballoc.size.
    8974  outInfo.usedBytes += suballoc.size;
    8975  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    8976  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
    8977 
    8978  // 3. Prepare for next iteration.
    8979  lastOffset = suballoc.offset + suballoc.size;
    8980  ++nextAlloc2ndIndex;
    8981  }
    8982  // We are at the end.
    8983  else
    8984  {
    8985  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    8986  if(lastOffset < freeSpace2ndTo1stEnd)
    8987  {
    8988  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    8989  ++outInfo.unusedRangeCount;
    8990  outInfo.unusedBytes += unusedRangeSize;
    8991  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8992  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8993  }
    8994 
    8995  // End of loop.
    8996  lastOffset = freeSpace2ndTo1stEnd;
    8997  }
    8998  }
    8999  }
    9000 
    9001  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    9002  const VkDeviceSize freeSpace1stTo2ndEnd =
    9003  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    9004  while(lastOffset < freeSpace1stTo2ndEnd)
    9005  {
    9006  // Find next non-null allocation or move nextAllocIndex to the end.
    9007  while(nextAlloc1stIndex < suballoc1stCount &&
    9008  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    9009  {
    9010  ++nextAlloc1stIndex;
    9011  }
    9012 
    9013  // Found non-null allocation.
    9014  if(nextAlloc1stIndex < suballoc1stCount)
    9015  {
    9016  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    9017 
    9018  // 1. Process free space before this allocation.
    9019  if(lastOffset < suballoc.offset)
    9020  {
    9021  // There is free space from lastOffset to suballoc.offset.
    9022  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9023  ++outInfo.unusedRangeCount;
    9024  outInfo.unusedBytes += unusedRangeSize;
    9025  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    9026  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    9027  }
    9028 
    9029  // 2. Process this allocation.
    9030  // There is allocation with suballoc.offset, suballoc.size.
    9031  outInfo.usedBytes += suballoc.size;
    9032  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    9033  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
    9034 
    9035  // 3. Prepare for next iteration.
    9036  lastOffset = suballoc.offset + suballoc.size;
    9037  ++nextAlloc1stIndex;
    9038  }
    9039  // We are at the end.
    9040  else
    9041  {
    9042  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    9043  if(lastOffset < freeSpace1stTo2ndEnd)
    9044  {
    9045  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    9046  ++outInfo.unusedRangeCount;
    9047  outInfo.unusedBytes += unusedRangeSize;
    9048  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    9049  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    9050  }
    9051 
    9052  // End of loop.
    9053  lastOffset = freeSpace1stTo2ndEnd;
    9054  }
    9055  }
    9056 
    9057  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    9058  {
    9059  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    9060  while(lastOffset < size)
    9061  {
    9062  // Find next non-null allocation or move nextAllocIndex to the end.
    9063  while(nextAlloc2ndIndex != SIZE_MAX &&
    9064  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    9065  {
    9066  --nextAlloc2ndIndex;
    9067  }
    9068 
    9069  // Found non-null allocation.
    9070  if(nextAlloc2ndIndex != SIZE_MAX)
    9071  {
    9072  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    9073 
    9074  // 1. Process free space before this allocation.
    9075  if(lastOffset < suballoc.offset)
    9076  {
    9077  // There is free space from lastOffset to suballoc.offset.
    9078  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9079  ++outInfo.unusedRangeCount;
    9080  outInfo.unusedBytes += unusedRangeSize;
    9081  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    9082  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    9083  }
    9084 
    9085  // 2. Process this allocation.
    9086  // There is allocation with suballoc.offset, suballoc.size.
    9087  outInfo.usedBytes += suballoc.size;
    9088  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    9089  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
    9090 
    9091  // 3. Prepare for next iteration.
    9092  lastOffset = suballoc.offset + suballoc.size;
    9093  --nextAlloc2ndIndex;
    9094  }
    9095  // We are at the end.
    9096  else
    9097  {
    9098  // There is free space from lastOffset to size.
    9099  if(lastOffset < size)
    9100  {
    9101  const VkDeviceSize unusedRangeSize = size - lastOffset;
    9102  ++outInfo.unusedRangeCount;
    9103  outInfo.unusedBytes += unusedRangeSize;
    9104  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    9105  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    9106  }
    9107 
    9108  // End of loop.
    9109  lastOffset = size;
    9110  }
    9111  }
    9112  }
    9113 
    9114  outInfo.unusedBytes = size - outInfo.usedBytes;
    9115 }
    9116 
    9117 void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
    9118 {
    9119  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    9120  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9121  const VkDeviceSize size = GetSize();
    9122  const size_t suballoc1stCount = suballocations1st.size();
    9123  const size_t suballoc2ndCount = suballocations2nd.size();
    9124 
    9125  inoutStats.size += size;
    9126 
    9127  VkDeviceSize lastOffset = 0;
    9128 
    9129  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    9130  {
    9131  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    9132  size_t nextAlloc2ndIndex = m_1stNullItemsBeginCount;
    9133  while(lastOffset < freeSpace2ndTo1stEnd)
    9134  {
    9135  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    9136  while(nextAlloc2ndIndex < suballoc2ndCount &&
    9137  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    9138  {
    9139  ++nextAlloc2ndIndex;
    9140  }
    9141 
    9142  // Found non-null allocation.
    9143  if(nextAlloc2ndIndex < suballoc2ndCount)
    9144  {
    9145  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    9146 
    9147  // 1. Process free space before this allocation.
    9148  if(lastOffset < suballoc.offset)
    9149  {
    9150  // There is free space from lastOffset to suballoc.offset.
    9151  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9152  inoutStats.unusedSize += unusedRangeSize;
    9153  ++inoutStats.unusedRangeCount;
    9154  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    9155  }
    9156 
    9157  // 2. Process this allocation.
    9158  // There is allocation with suballoc.offset, suballoc.size.
    9159  ++inoutStats.allocationCount;
    9160 
    9161  // 3. Prepare for next iteration.
    9162  lastOffset = suballoc.offset + suballoc.size;
    9163  ++nextAlloc2ndIndex;
    9164  }
    9165  // We are at the end.
    9166  else
    9167  {
    9168  if(lastOffset < freeSpace2ndTo1stEnd)
    9169  {
    9170  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    9171  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    9172  inoutStats.unusedSize += unusedRangeSize;
    9173  ++inoutStats.unusedRangeCount;
    9174  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    9175  }
    9176 
    9177  // End of loop.
    9178  lastOffset = freeSpace2ndTo1stEnd;
    9179  }
    9180  }
    9181  }
    9182 
    9183  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    9184  const VkDeviceSize freeSpace1stTo2ndEnd =
    9185  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    9186  while(lastOffset < freeSpace1stTo2ndEnd)
    9187  {
    9188  // Find next non-null allocation or move nextAllocIndex to the end.
    9189  while(nextAlloc1stIndex < suballoc1stCount &&
    9190  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    9191  {
    9192  ++nextAlloc1stIndex;
    9193  }
    9194 
    9195  // Found non-null allocation.
    9196  if(nextAlloc1stIndex < suballoc1stCount)
    9197  {
    9198  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    9199 
    9200  // 1. Process free space before this allocation.
    9201  if(lastOffset < suballoc.offset)
    9202  {
    9203  // There is free space from lastOffset to suballoc.offset.
    9204  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9205  inoutStats.unusedSize += unusedRangeSize;
    9206  ++inoutStats.unusedRangeCount;
    9207  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    9208  }
    9209 
    9210  // 2. Process this allocation.
    9211  // There is allocation with suballoc.offset, suballoc.size.
    9212  ++inoutStats.allocationCount;
    9213 
    9214  // 3. Prepare for next iteration.
    9215  lastOffset = suballoc.offset + suballoc.size;
    9216  ++nextAlloc1stIndex;
    9217  }
    9218  // We are at the end.
    9219  else
    9220  {
    9221  if(lastOffset < freeSpace1stTo2ndEnd)
    9222  {
    9223  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    9224  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    9225  inoutStats.unusedSize += unusedRangeSize;
    9226  ++inoutStats.unusedRangeCount;
    9227  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    9228  }
    9229 
    9230  // End of loop.
    9231  lastOffset = freeSpace1stTo2ndEnd;
    9232  }
    9233  }
    9234 
    9235  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    9236  {
    9237  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    9238  while(lastOffset < size)
    9239  {
    9240  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    9241  while(nextAlloc2ndIndex != SIZE_MAX &&
    9242  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    9243  {
    9244  --nextAlloc2ndIndex;
    9245  }
    9246 
    9247  // Found non-null allocation.
    9248  if(nextAlloc2ndIndex != SIZE_MAX)
    9249  {
    9250  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    9251 
    9252  // 1. Process free space before this allocation.
    9253  if(lastOffset < suballoc.offset)
    9254  {
    9255  // There is free space from lastOffset to suballoc.offset.
    9256  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9257  inoutStats.unusedSize += unusedRangeSize;
    9258  ++inoutStats.unusedRangeCount;
    9259  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    9260  }
    9261 
    9262  // 2. Process this allocation.
    9263  // There is allocation with suballoc.offset, suballoc.size.
    9264  ++inoutStats.allocationCount;
    9265 
    9266  // 3. Prepare for next iteration.
    9267  lastOffset = suballoc.offset + suballoc.size;
    9268  --nextAlloc2ndIndex;
    9269  }
    9270  // We are at the end.
    9271  else
    9272  {
    9273  if(lastOffset < size)
    9274  {
    9275  // There is free space from lastOffset to size.
    9276  const VkDeviceSize unusedRangeSize = size - lastOffset;
    9277  inoutStats.unusedSize += unusedRangeSize;
    9278  ++inoutStats.unusedRangeCount;
    9279  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    9280  }
    9281 
    9282  // End of loop.
    9283  lastOffset = size;
    9284  }
    9285  }
    9286  }
    9287 }
    9288 
    9289 #if VMA_STATS_STRING_ENABLED
    9290 void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
    9291 {
    9292  const VkDeviceSize size = GetSize();
    9293  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    9294  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9295  const size_t suballoc1stCount = suballocations1st.size();
    9296  const size_t suballoc2ndCount = suballocations2nd.size();
    9297 
    9298  // FIRST PASS
    9299 
    9300  size_t unusedRangeCount = 0;
    9301  VkDeviceSize usedBytes = 0;
    9302 
    9303  VkDeviceSize lastOffset = 0;
    9304 
    9305  size_t alloc2ndCount = 0;
    9306  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    9307  {
    9308  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    9309  size_t nextAlloc2ndIndex = 0;
    9310  while(lastOffset < freeSpace2ndTo1stEnd)
    9311  {
    9312  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    9313  while(nextAlloc2ndIndex < suballoc2ndCount &&
    9314  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    9315  {
    9316  ++nextAlloc2ndIndex;
    9317  }
    9318 
    9319  // Found non-null allocation.
    9320  if(nextAlloc2ndIndex < suballoc2ndCount)
    9321  {
    9322  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    9323 
    9324  // 1. Process free space before this allocation.
    9325  if(lastOffset < suballoc.offset)
    9326  {
    9327  // There is free space from lastOffset to suballoc.offset.
    9328  ++unusedRangeCount;
    9329  }
    9330 
    9331  // 2. Process this allocation.
    9332  // There is allocation with suballoc.offset, suballoc.size.
    9333  ++alloc2ndCount;
    9334  usedBytes += suballoc.size;
    9335 
    9336  // 3. Prepare for next iteration.
    9337  lastOffset = suballoc.offset + suballoc.size;
    9338  ++nextAlloc2ndIndex;
    9339  }
    9340  // We are at the end.
    9341  else
    9342  {
    9343  if(lastOffset < freeSpace2ndTo1stEnd)
    9344  {
    9345  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    9346  ++unusedRangeCount;
    9347  }
    9348 
    9349  // End of loop.
    9350  lastOffset = freeSpace2ndTo1stEnd;
    9351  }
    9352  }
    9353  }
    9354 
    9355  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    9356  size_t alloc1stCount = 0;
    9357  const VkDeviceSize freeSpace1stTo2ndEnd =
    9358  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    9359  while(lastOffset < freeSpace1stTo2ndEnd)
    9360  {
    9361  // Find next non-null allocation or move nextAllocIndex to the end.
    9362  while(nextAlloc1stIndex < suballoc1stCount &&
    9363  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    9364  {
    9365  ++nextAlloc1stIndex;
    9366  }
    9367 
    9368  // Found non-null allocation.
    9369  if(nextAlloc1stIndex < suballoc1stCount)
    9370  {
    9371  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    9372 
    9373  // 1. Process free space before this allocation.
    9374  if(lastOffset < suballoc.offset)
    9375  {
    9376  // There is free space from lastOffset to suballoc.offset.
    9377  ++unusedRangeCount;
    9378  }
    9379 
    9380  // 2. Process this allocation.
    9381  // There is allocation with suballoc.offset, suballoc.size.
    9382  ++alloc1stCount;
    9383  usedBytes += suballoc.size;
    9384 
    9385  // 3. Prepare for next iteration.
    9386  lastOffset = suballoc.offset + suballoc.size;
    9387  ++nextAlloc1stIndex;
    9388  }
    9389  // We are at the end.
    9390  else
    9391  {
    9392  if(lastOffset < size)
    9393  {
    9394  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    9395  ++unusedRangeCount;
    9396  }
    9397 
    9398  // End of loop.
    9399  lastOffset = freeSpace1stTo2ndEnd;
    9400  }
    9401  }
    9402 
    9403  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    9404  {
    9405  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    9406  while(lastOffset < size)
    9407  {
    9408  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    9409  while(nextAlloc2ndIndex != SIZE_MAX &&
    9410  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    9411  {
    9412  --nextAlloc2ndIndex;
    9413  }
    9414 
    9415  // Found non-null allocation.
    9416  if(nextAlloc2ndIndex != SIZE_MAX)
    9417  {
    9418  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    9419 
    9420  // 1. Process free space before this allocation.
    9421  if(lastOffset < suballoc.offset)
    9422  {
    9423  // There is free space from lastOffset to suballoc.offset.
    9424  ++unusedRangeCount;
    9425  }
    9426 
    9427  // 2. Process this allocation.
    9428  // There is allocation with suballoc.offset, suballoc.size.
    9429  ++alloc2ndCount;
    9430  usedBytes += suballoc.size;
    9431 
    9432  // 3. Prepare for next iteration.
    9433  lastOffset = suballoc.offset + suballoc.size;
    9434  --nextAlloc2ndIndex;
    9435  }
    9436  // We are at the end.
    9437  else
    9438  {
    9439  if(lastOffset < size)
    9440  {
    9441  // There is free space from lastOffset to size.
    9442  ++unusedRangeCount;
    9443  }
    9444 
    9445  // End of loop.
    9446  lastOffset = size;
    9447  }
    9448  }
    9449  }
    9450 
    9451  const VkDeviceSize unusedBytes = size - usedBytes;
    9452  PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);
    9453 
    9454  // SECOND PASS
    9455  lastOffset = 0;
    9456 
    9457  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    9458  {
    9459  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    9460  size_t nextAlloc2ndIndex = 0;
    9461  while(lastOffset < freeSpace2ndTo1stEnd)
    9462  {
    9463  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    9464  while(nextAlloc2ndIndex < suballoc2ndCount &&
    9465  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    9466  {
    9467  ++nextAlloc2ndIndex;
    9468  }
    9469 
    9470  // Found non-null allocation.
    9471  if(nextAlloc2ndIndex < suballoc2ndCount)
    9472  {
    9473  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    9474 
    9475  // 1. Process free space before this allocation.
    9476  if(lastOffset < suballoc.offset)
    9477  {
    9478  // There is free space from lastOffset to suballoc.offset.
    9479  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9480  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    9481  }
    9482 
    9483  // 2. Process this allocation.
    9484  // There is allocation with suballoc.offset, suballoc.size.
    9485  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
    9486 
    9487  // 3. Prepare for next iteration.
    9488  lastOffset = suballoc.offset + suballoc.size;
    9489  ++nextAlloc2ndIndex;
    9490  }
    9491  // We are at the end.
    9492  else
    9493  {
    9494  if(lastOffset < freeSpace2ndTo1stEnd)
    9495  {
    9496  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    9497  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    9498  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    9499  }
    9500 
    9501  // End of loop.
    9502  lastOffset = freeSpace2ndTo1stEnd;
    9503  }
    9504  }
    9505  }
    9506 
    9507  nextAlloc1stIndex = m_1stNullItemsBeginCount;
    9508  while(lastOffset < freeSpace1stTo2ndEnd)
    9509  {
    9510  // Find next non-null allocation or move nextAllocIndex to the end.
    9511  while(nextAlloc1stIndex < suballoc1stCount &&
    9512  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    9513  {
    9514  ++nextAlloc1stIndex;
    9515  }
    9516 
    9517  // Found non-null allocation.
    9518  if(nextAlloc1stIndex < suballoc1stCount)
    9519  {
    9520  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    9521 
    9522  // 1. Process free space before this allocation.
    9523  if(lastOffset < suballoc.offset)
    9524  {
    9525  // There is free space from lastOffset to suballoc.offset.
    9526  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9527  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    9528  }
    9529 
    9530  // 2. Process this allocation.
    9531  // There is allocation with suballoc.offset, suballoc.size.
    9532  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
    9533 
    9534  // 3. Prepare for next iteration.
    9535  lastOffset = suballoc.offset + suballoc.size;
    9536  ++nextAlloc1stIndex;
    9537  }
    9538  // We are at the end.
    9539  else
    9540  {
    9541  if(lastOffset < freeSpace1stTo2ndEnd)
    9542  {
    9543  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    9544  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    9545  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    9546  }
    9547 
    9548  // End of loop.
    9549  lastOffset = freeSpace1stTo2ndEnd;
    9550  }
    9551  }
    9552 
    9553  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    9554  {
    9555  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    9556  while(lastOffset < size)
    9557  {
    9558  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    9559  while(nextAlloc2ndIndex != SIZE_MAX &&
    9560  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    9561  {
    9562  --nextAlloc2ndIndex;
    9563  }
    9564 
    9565  // Found non-null allocation.
    9566  if(nextAlloc2ndIndex != SIZE_MAX)
    9567  {
    9568  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    9569 
    9570  // 1. Process free space before this allocation.
    9571  if(lastOffset < suballoc.offset)
    9572  {
    9573  // There is free space from lastOffset to suballoc.offset.
    9574  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9575  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    9576  }
    9577 
    9578  // 2. Process this allocation.
    9579  // There is allocation with suballoc.offset, suballoc.size.
    9580  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
    9581 
    9582  // 3. Prepare for next iteration.
    9583  lastOffset = suballoc.offset + suballoc.size;
    9584  --nextAlloc2ndIndex;
    9585  }
    9586  // We are at the end.
    9587  else
    9588  {
    9589  if(lastOffset < size)
    9590  {
    9591  // There is free space from lastOffset to size.
    9592  const VkDeviceSize unusedRangeSize = size - lastOffset;
    9593  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    9594  }
    9595 
    9596  // End of loop.
    9597  lastOffset = size;
    9598  }
    9599  }
    9600  }
    9601 
    9602  PrintDetailedMap_End(json);
    9603 }
    9604 #endif // #if VMA_STATS_STRING_ENABLED
    9605 
// Tries to find space for a new allocation inside this linear block without
// modifying any state. On success, fills *pAllocationRequest (offset, free/lost
// size sums, number of allocations that must be made lost) and returns true.
// upperAddress == true allocates from the top of the block (double-stack mode);
// otherwise allocation proceeds at the end of the 1st vector or wraps around
// into the 2nd vector (ring-buffer mode). Note: 'strategy' is not referenced by
// this algorithm - the linear allocator has only one placement strategy.
bool VmaBlockMetadata_Linear::CreateAllocationRequest(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    bool upperAddress,
    VmaSuballocationType allocType,
    bool canMakeOtherLost,
    uint32_t strategy,
    VmaAllocationRequest* pAllocationRequest)
{
    VMA_ASSERT(allocSize > 0);
    VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(pAllocationRequest != VMA_NULL);
    VMA_HEAVY_ASSERT(Validate());

    const VkDeviceSize size = GetSize();
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(upperAddress)
    {
        // Double-stack mode is mutually exclusive with ring-buffer mode.
        if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
        {
            VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
            return false;
        }

        // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
        if(allocSize > size)
        {
            return false;
        }
        VkDeviceSize resultBaseOffset = size - allocSize;
        if(!suballocations2nd.empty())
        {
            const VmaSuballocation& lastSuballoc = suballocations2nd.back();
            resultBaseOffset = lastSuballoc.offset - allocSize;
            // Not enough room below the current top of the upper stack.
            if(allocSize > lastSuballoc.offset)
            {
                return false;
            }
        }

        // Start from offset equal to end of free space.
        VkDeviceSize resultOffset = resultBaseOffset;

        // Apply VMA_DEBUG_MARGIN at the end.
        if(VMA_DEBUG_MARGIN > 0)
        {
            if(resultOffset < VMA_DEBUG_MARGIN)
            {
                return false;
            }
            resultOffset -= VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        // Growing downward, so the offset is aligned DOWN here (and UP in the
        // lower-address paths below).
        resultOffset = VmaAlignDown(resultOffset, allocAlignment);

        // Check next suballocations from 2nd for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1 && !suballocations2nd.empty())
        {
            bool bufferImageGranularityConflict = false;
            for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
            {
                const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
                if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
            }
        }

        // There is enough free space.
        const VkDeviceSize endOf1st = !suballocations1st.empty() ?
            suballocations1st.back().offset + suballocations1st.back().size :
            0;
        if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
        {
            // Check previous suballocations for BufferImageGranularity conflicts.
            // If conflict exists, allocation cannot be made here.
            if(bufferImageGranularity > 1)
            {
                for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
                {
                    const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
                    if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
                    {
                        if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
                        {
                            return false;
                        }
                    }
                    else
                    {
                        // Already on next page.
                        break;
                    }
                }
            }

            // All tests passed: Success.
            pAllocationRequest->offset = resultOffset;
            // Free space between end of 1st vector and end of this allocation.
            pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
            pAllocationRequest->sumItemSize = 0;
            // pAllocationRequest->item unused.
            pAllocationRequest->itemsToMakeLostCount = 0;
            return true;
        }
    }
    else // !upperAddress
    {
        if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
        {
            // Try to allocate at the end of 1st vector.

            VkDeviceSize resultBaseOffset = 0;
            if(!suballocations1st.empty())
            {
                const VmaSuballocation& lastSuballoc = suballocations1st.back();
                resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
            }

            // Start from offset equal to beginning of free space.
            VkDeviceSize resultOffset = resultBaseOffset;

            // Apply VMA_DEBUG_MARGIN at the beginning.
            if(VMA_DEBUG_MARGIN > 0)
            {
                resultOffset += VMA_DEBUG_MARGIN;
            }

            // Apply alignment.
            resultOffset = VmaAlignUp(resultOffset, allocAlignment);

            // Check previous suballocations for BufferImageGranularity conflicts.
            // Make bigger alignment if necessary.
            if(bufferImageGranularity > 1 && !suballocations1st.empty())
            {
                bool bufferImageGranularityConflict = false;
                for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
                {
                    const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
                    if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
                    {
                        if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                        {
                            bufferImageGranularityConflict = true;
                            break;
                        }
                    }
                    else
                        // Already on previous page.
                        break;
                }
                if(bufferImageGranularityConflict)
                {
                    resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
                }
            }

            // In double-stack mode the upper stack's bottom limits us; otherwise
            // the end of the block does.
            const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
                suballocations2nd.back().offset : size;

            // There is enough free space at the end after alignment.
            if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
            {
                // Check next suballocations for BufferImageGranularity conflicts.
                // If conflict exists, allocation cannot be made here.
                if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
                {
                    for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
                    {
                        const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
                        if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                        {
                            if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                            {
                                return false;
                            }
                        }
                        else
                        {
                            // Already on previous page.
                            break;
                        }
                    }
                }

                // All tests passed: Success.
                pAllocationRequest->offset = resultOffset;
                pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
                pAllocationRequest->sumItemSize = 0;
                // pAllocationRequest->item unused.
                pAllocationRequest->itemsToMakeLostCount = 0;
                return true;
            }
        }

        // Wrap-around to end of 2nd vector. Try to allocate there, watching for the
        // beginning of 1st vector as the end of free space.
        if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
        {
            VMA_ASSERT(!suballocations1st.empty());

            VkDeviceSize resultBaseOffset = 0;
            if(!suballocations2nd.empty())
            {
                const VmaSuballocation& lastSuballoc = suballocations2nd.back();
                resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
            }

            // Start from offset equal to beginning of free space.
            VkDeviceSize resultOffset = resultBaseOffset;

            // Apply VMA_DEBUG_MARGIN at the beginning.
            if(VMA_DEBUG_MARGIN > 0)
            {
                resultOffset += VMA_DEBUG_MARGIN;
            }

            // Apply alignment.
            resultOffset = VmaAlignUp(resultOffset, allocAlignment);

            // Check previous suballocations for BufferImageGranularity conflicts.
            // Make bigger alignment if necessary.
            if(bufferImageGranularity > 1 && !suballocations2nd.empty())
            {
                bool bufferImageGranularityConflict = false;
                for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
                {
                    const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
                    if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
                    {
                        if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                        {
                            bufferImageGranularityConflict = true;
                            break;
                        }
                    }
                    else
                        // Already on previous page.
                        break;
                }
                if(bufferImageGranularityConflict)
                {
                    resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
                }
            }

            pAllocationRequest->itemsToMakeLostCount = 0;
            pAllocationRequest->sumItemSize = 0;
            size_t index1st = m_1stNullItemsBeginCount;

            // Optionally make colliding allocations at the beginning of the 1st
            // vector lost, so the wrap-around allocation can fit before them.
            if(canMakeOtherLost)
            {
                while(index1st < suballocations1st.size() &&
                    resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
                {
                    // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
                    const VmaSuballocation& suballoc = suballocations1st[index1st];
                    if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
                    {
                        // No problem.
                    }
                    else
                    {
                        VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
                        if(suballoc.hAllocation->CanBecomeLost() &&
                            suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                        {
                            ++pAllocationRequest->itemsToMakeLostCount;
                            pAllocationRequest->sumItemSize += suballoc.size;
                        }
                        else
                        {
                            return false;
                        }
                    }
                    ++index1st;
                }

                // Check next suballocations for BufferImageGranularity conflicts.
                // If conflict exists, we must mark more allocations lost or fail.
                if(bufferImageGranularity > 1)
                {
                    while(index1st < suballocations1st.size())
                    {
                        const VmaSuballocation& suballoc = suballocations1st[index1st];
                        if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
                        {
                            if(suballoc.hAllocation != VK_NULL_HANDLE)
                            {
                                // Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
                                if(suballoc.hAllocation->CanBecomeLost() &&
                                    suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                                {
                                    ++pAllocationRequest->itemsToMakeLostCount;
                                    pAllocationRequest->sumItemSize += suballoc.size;
                                }
                                else
                                {
                                    return false;
                                }
                            }
                        }
                        else
                        {
                            // Already on next page.
                            break;
                        }
                        ++index1st;
                    }
                }
            }

            // There is enough free space at the end after alignment.
            // NOTE(review): the first disjunct uses strict '<' against 'size'
            // while the second uses '<=' against the next offset - presumably
            // intentional (filling exactly to 'size' in ring-buffer mode would
            // be ambiguous), but worth confirming.
            if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN < size) ||
                (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
            {
                // Check next suballocations for BufferImageGranularity conflicts.
                // If conflict exists, allocation cannot be made here.
                if(bufferImageGranularity > 1)
                {
                    for(size_t nextSuballocIndex = index1st;
                        nextSuballocIndex < suballocations1st.size();
                        nextSuballocIndex++)
                    {
                        const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
                        if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                        {
                            if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                            {
                                return false;
                            }
                        }
                        else
                        {
                            // Already on next page.
                            break;
                        }
                    }
                }

                // All tests passed: Success.
                pAllocationRequest->offset = resultOffset;
                // Free space up to the first surviving 1st-vector item (or end
                // of block), minus the sizes of items that will be made lost.
                pAllocationRequest->sumFreeSize =
                    (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
                    - resultBaseOffset
                    - pAllocationRequest->sumItemSize;
                // pAllocationRequest->item unused.
                return true;
            }
        }
    }

    return false;
}
    9978 
    9979 bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
    9980  uint32_t currentFrameIndex,
    9981  uint32_t frameInUseCount,
    9982  VmaAllocationRequest* pAllocationRequest)
    9983 {
    9984  if(pAllocationRequest->itemsToMakeLostCount == 0)
    9985  {
    9986  return true;
    9987  }
    9988 
    9989  VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);
    9990 
    9991  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    9992  size_t index1st = m_1stNullItemsBeginCount;
    9993  size_t madeLostCount = 0;
    9994  while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
    9995  {
    9996  VMA_ASSERT(index1st < suballocations1st.size());
    9997  VmaSuballocation& suballoc = suballocations1st[index1st];
    9998  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    9999  {
    10000  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
    10001  VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
    10002  if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    10003  {
    10004  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    10005  suballoc.hAllocation = VK_NULL_HANDLE;
    10006  m_SumFreeSize += suballoc.size;
    10007  ++m_1stNullItemsMiddleCount;
    10008  ++madeLostCount;
    10009  }
    10010  else
    10011  {
    10012  return false;
    10013  }
    10014  }
    10015  ++index1st;
    10016  }
    10017 
    10018  CleanupAfterFree();
    10019  //VMA_HEAVY_ASSERT(Validate()); // Already called by ClanupAfterFree().
    10020 
    10021  return true;
    10022 }
    10023 
    10024 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    10025 {
    10026  uint32_t lostAllocationCount = 0;
    10027 
    10028  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    10029  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
    10030  {
    10031  VmaSuballocation& suballoc = suballocations1st[i];
    10032  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
    10033  suballoc.hAllocation->CanBecomeLost() &&
    10034  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    10035  {
    10036  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    10037  suballoc.hAllocation = VK_NULL_HANDLE;
    10038  ++m_1stNullItemsMiddleCount;
    10039  m_SumFreeSize += suballoc.size;
    10040  ++lostAllocationCount;
    10041  }
    10042  }
    10043 
    10044  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    10045  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
    10046  {
    10047  VmaSuballocation& suballoc = suballocations2nd[i];
    10048  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
    10049  suballoc.hAllocation->CanBecomeLost() &&
    10050  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    10051  {
    10052  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    10053  suballoc.hAllocation = VK_NULL_HANDLE;
    10054  ++m_2ndNullItemsCount;
    10055  ++lostAllocationCount;
    10056  }
    10057  }
    10058 
    10059  if(lostAllocationCount)
    10060  {
    10061  CleanupAfterFree();
    10062  }
    10063 
    10064  return lostAllocationCount;
    10065 }
    10066 
    10067 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
    10068 {
    10069  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    10070  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
    10071  {
    10072  const VmaSuballocation& suballoc = suballocations1st[i];
    10073  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    10074  {
    10075  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
    10076  {
    10077  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    10078  return VK_ERROR_VALIDATION_FAILED_EXT;
    10079  }
    10080  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
    10081  {
    10082  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    10083  return VK_ERROR_VALIDATION_FAILED_EXT;
    10084  }
    10085  }
    10086  }
    10087 
    10088  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    10089  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
    10090  {
    10091  const VmaSuballocation& suballoc = suballocations2nd[i];
    10092  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    10093  {
    10094  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
    10095  {
    10096  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    10097  return VK_ERROR_VALIDATION_FAILED_EXT;
    10098  }
    10099  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
    10100  {
    10101  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    10102  return VK_ERROR_VALIDATION_FAILED_EXT;
    10103  }
    10104  }
    10105  }
    10106 
    10107  return VK_SUCCESS;
    10108 }
    10109 
    10110 void VmaBlockMetadata_Linear::Alloc(
    10111  const VmaAllocationRequest& request,
    10112  VmaSuballocationType type,
    10113  VkDeviceSize allocSize,
    10114  bool upperAddress,
    10115  VmaAllocation hAllocation)
    10116 {
    10117  const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };
    10118 
    10119  if(upperAddress)
    10120  {
    10121  VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
    10122  "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
    10123  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    10124  suballocations2nd.push_back(newSuballoc);
    10125  m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
    10126  }
    10127  else
    10128  {
    10129  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    10130 
    10131  // First allocation.
    10132  if(suballocations1st.empty())
    10133  {
    10134  suballocations1st.push_back(newSuballoc);
    10135  }
    10136  else
    10137  {
    10138  // New allocation at the end of 1st vector.
    10139  if(request.offset >= suballocations1st.back().offset + suballocations1st.back().size)
    10140  {
    10141  // Check if it fits before the end of the block.
    10142  VMA_ASSERT(request.offset + allocSize <= GetSize());
    10143  suballocations1st.push_back(newSuballoc);
    10144  }
    10145  // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
    10146  else if(request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset)
    10147  {
    10148  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    10149 
    10150  switch(m_2ndVectorMode)
    10151  {
    10152  case SECOND_VECTOR_EMPTY:
    10153  // First allocation from second part ring buffer.
    10154  VMA_ASSERT(suballocations2nd.empty());
    10155  m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
    10156  break;
    10157  case SECOND_VECTOR_RING_BUFFER:
    10158  // 2-part ring buffer is already started.
    10159  VMA_ASSERT(!suballocations2nd.empty());
    10160  break;
    10161  case SECOND_VECTOR_DOUBLE_STACK:
    10162  VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
    10163  break;
    10164  default:
    10165  VMA_ASSERT(0);
    10166  }
    10167 
    10168  suballocations2nd.push_back(newSuballoc);
    10169  }
    10170  else
    10171  {
    10172  VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
    10173  }
    10174  }
    10175  }
    10176 
    10177  m_SumFreeSize -= newSuballoc.size;
    10178 }
    10179 
    10180 void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
    10181 {
    10182  FreeAtOffset(allocation->GetOffset());
    10183 }
    10184 
// Frees the suballocation at the given offset. Tries cheap fast paths first
// (first item of the 1st vector, last item of whichever vector is "on top"),
// then falls back to binary search in the middle of the 1st and 2nd vectors.
// Asserts if the offset does not match any live suballocation.
void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
{
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(!suballocations1st.empty())
    {
        // First allocation: Mark it as next empty at the beginning.
        // (Index m_1stNullItemsBeginCount is the first potentially-live item.)
        VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
        if(firstSuballoc.offset == offset)
        {
            firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
            firstSuballoc.hAllocation = VK_NULL_HANDLE;
            m_SumFreeSize += firstSuballoc.size;
            ++m_1stNullItemsBeginCount;
            CleanupAfterFree();
            return;
        }
    }

    // Last allocation in 2-part ring buffer or top of upper stack (same logic).
    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
        m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        VmaSuballocation& lastSuballoc = suballocations2nd.back();
        if(lastSuballoc.offset == offset)
        {
            m_SumFreeSize += lastSuballoc.size;
            suballocations2nd.pop_back();
            CleanupAfterFree();
            return;
        }
    }
    // Last allocation in 1st vector.
    else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
    {
        VmaSuballocation& lastSuballoc = suballocations1st.back();
        if(lastSuballoc.offset == offset)
        {
            m_SumFreeSize += lastSuballoc.size;
            suballocations1st.pop_back();
            CleanupAfterFree();
            return;
        }
    }

    // Item from the middle of 1st vector.
    {
        VmaSuballocation refSuballoc;
        refSuballoc.offset = offset;
        // Rest of members stays uninitialized intentionally for better performance.
        // The 1st vector is sorted by increasing offset past the leading nulls.
        SuballocationVectorType::iterator it = VmaVectorFindSorted<VmaSuballocationOffsetLess>(
            suballocations1st.begin() + m_1stNullItemsBeginCount,
            suballocations1st.end(),
            refSuballoc);
        if(it != suballocations1st.end())
        {
            // Keep the slot as a null item; CleanupAfterFree() compacts later.
            it->type = VMA_SUBALLOCATION_TYPE_FREE;
            it->hAllocation = VK_NULL_HANDLE;
            ++m_1stNullItemsMiddleCount;
            m_SumFreeSize += it->size;
            CleanupAfterFree();
            return;
        }
    }

    if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
    {
        // Item from the middle of 2nd vector.
        VmaSuballocation refSuballoc;
        refSuballoc.offset = offset;
        // Rest of members stays uninitialized intentionally for better performance.
        // Ring-buffer mode keeps 2nd sorted ascending by offset; double-stack
        // mode keeps it descending, hence the different comparator.
        SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
            VmaVectorFindSorted<VmaSuballocationOffsetLess>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc) :
            VmaVectorFindSorted<VmaSuballocationOffsetGreater>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc);
        if(it != suballocations2nd.end())
        {
            it->type = VMA_SUBALLOCATION_TYPE_FREE;
            it->hAllocation = VK_NULL_HANDLE;
            ++m_2ndNullItemsCount;
            m_SumFreeSize += it->size;
            CleanupAfterFree();
            return;
        }
    }

    VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
}
    10273 
    10274 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
    10275 {
    10276  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
    10277  const size_t suballocCount = AccessSuballocations1st().size();
    10278  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
    10279 }
    10280 
// Housekeeping performed after every successful free: prunes null (freed)
// items from both suballocation vectors, optionally compacts the 1st vector
// (see ShouldCompact1st), and normalizes vector roles when one of them
// becomes empty.
void VmaBlockMetadata_Linear::CleanupAfterFree()
{
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(IsEmpty())
    {
        // No live allocations at all - reset to the pristine initial state.
        suballocations1st.clear();
        suballocations2nd.clear();
        m_1stNullItemsBeginCount = 0;
        m_1stNullItemsMiddleCount = 0;
        m_2ndNullItemsCount = 0;
        m_2ndVectorMode = SECOND_VECTOR_EMPTY;
    }
    else
    {
        const size_t suballoc1stCount = suballocations1st.size();
        const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
        VMA_ASSERT(nullItem1stCount <= suballoc1stCount);

        // Find more null items at the beginning of 1st vector.
        // (Each one moves from the "middle" count to the "begin" count.)
        while(m_1stNullItemsBeginCount < suballoc1stCount &&
            suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
        {
            ++m_1stNullItemsBeginCount;
            --m_1stNullItemsMiddleCount;
        }

        // Find more null items at the end of 1st vector and drop them entirely.
        while(m_1stNullItemsMiddleCount > 0 &&
            suballocations1st.back().hAllocation == VK_NULL_HANDLE)
        {
            --m_1stNullItemsMiddleCount;
            suballocations1st.pop_back();
        }

        // Find more null items at the end of 2nd vector and drop them entirely.
        while(m_2ndNullItemsCount > 0 &&
            suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
        {
            --m_2ndNullItemsCount;
            suballocations2nd.pop_back();
        }

        if(ShouldCompact1st())
        {
            // Compact: shift all live items of the 1st vector to the front
            // (preserving their order), then shrink the vector.
            const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
            size_t srcIndex = m_1stNullItemsBeginCount;
            for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
            {
                // Skip over null items to find the next live one.
                while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
                {
                    ++srcIndex;
                }
                if(dstIndex != srcIndex)
                {
                    suballocations1st[dstIndex] = suballocations1st[srcIndex];
                }
                ++srcIndex;
            }
            suballocations1st.resize(nonNullItemCount);
            m_1stNullItemsBeginCount = 0;
            m_1stNullItemsMiddleCount = 0;
        }

        // 2nd vector became empty.
        if(suballocations2nd.empty())
        {
            m_2ndVectorMode = SECOND_VECTOR_EMPTY;
        }

        // 1st vector became empty (only a leading run of null items remains).
        if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
        {
            suballocations1st.clear();
            m_1stNullItemsBeginCount = 0;

            if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
            {
                // Swap 1st with 2nd. Now 2nd is empty.
                m_2ndVectorMode = SECOND_VECTOR_EMPTY;
                m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
                // Leading null items of the former 2nd vector become the new
                // "begin" null run of the 1st vector.
                while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
                    suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
                {
                    ++m_1stNullItemsBeginCount;
                    --m_1stNullItemsMiddleCount;
                }
                m_2ndNullItemsCount = 0;
                // Swap the roles of the two vectors.
                m_1stVectorIndex ^= 1;
            }
        }
    }

    VMA_HEAVY_ASSERT(Validate());
}
    10377 
    10378 
    10380 // class VmaBlockMetadata_Buddy
    10381 
// Creates empty buddy metadata. m_SumFreeSize and the tree root are set up
// later in Init(), once the usable block size is known. m_FreeCount starts at
// 1 because after Init() the whole block is one free root node.
VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_Root(VMA_NULL),
    m_AllocationCount(0),
    m_FreeCount(1),
    m_SumFreeSize(0)
{
    // No free nodes yet - all per-level free-list heads/tails start null.
    memset(m_FreeList, 0, sizeof(m_FreeList));
}
    10391 
    10392 VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
    10393 {
    10394  DeleteNode(m_Root);
    10395 }
    10396 
    10397 void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
    10398 {
    10399  VmaBlockMetadata::Init(size);
    10400 
    10401  m_UsableSize = VmaPrevPow2(size);
    10402  m_SumFreeSize = m_UsableSize;
    10403 
    10404  // Calculate m_LevelCount.
    10405  m_LevelCount = 1;
    10406  while(m_LevelCount < MAX_LEVELS &&
    10407  LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
    10408  {
    10409  ++m_LevelCount;
    10410  }
    10411 
    10412  Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
    10413  rootNode->offset = 0;
    10414  rootNode->type = Node::TYPE_FREE;
    10415  rootNode->parent = VMA_NULL;
    10416  rootNode->buddy = VMA_NULL;
    10417 
    10418  m_Root = rootNode;
    10419  AddToFreeListFront(0, rootNode);
    10420 }
    10421 
// Checks internal consistency of the whole buddy tree and of the per-level
// free lists. Returns false (via VMA_VALIDATE) on the first inconsistency.
bool VmaBlockMetadata_Buddy::Validate() const
{
    // Validate tree.
    ValidationContext ctx;
    if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
    {
        VMA_VALIDATE(false && "ValidateNode failed.");
    }
    // Counters accumulated while walking the tree must match the cached ones.
    VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
    VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);

    // Validate free node lists.
    for(uint32_t level = 0; level < m_LevelCount; ++level)
    {
        // A non-empty list must start with a node that has no predecessor.
        VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
            m_FreeList[level].front->free.prev == VMA_NULL);

        for(Node* node = m_FreeList[level].front;
            node != VMA_NULL;
            node = node->free.next)
        {
            VMA_VALIDATE(node->type == Node::TYPE_FREE);

            // Doubly-linked list invariants: the last node must be the list's
            // back pointer; otherwise next->prev must point back to this node.
            if(node->free.next == VMA_NULL)
            {
                VMA_VALIDATE(m_FreeList[level].back == node);
            }
            else
            {
                VMA_VALIDATE(node->free.next->free.prev == node);
            }
        }
    }

    // Validate that free lists at higher levels are empty.
    for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
    {
        VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
    }

    return true;
}
    10464 
    10465 VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
    10466 {
    10467  for(uint32_t level = 0; level < m_LevelCount; ++level)
    10468  {
    10469  if(m_FreeList[level].front != VMA_NULL)
    10470  {
    10471  return LevelToNodeSize(level);
    10472  }
    10473  }
    10474  return 0;
    10475 }
    10476 
    10477 void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
    10478 {
    10479  const VkDeviceSize unusableSize = GetUnusableSize();
    10480 
    10481  outInfo.blockCount = 1;
    10482 
    10483  outInfo.allocationCount = outInfo.unusedRangeCount = 0;
    10484  outInfo.usedBytes = outInfo.unusedBytes = 0;
    10485 
    10486  outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
    10487  outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
    10488  outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.
    10489 
    10490  CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));
    10491 
    10492  if(unusableSize > 0)
    10493  {
    10494  ++outInfo.unusedRangeCount;
    10495  outInfo.unusedBytes += unusableSize;
    10496  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
    10497  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
    10498  }
    10499 }
    10500 
    10501 void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
    10502 {
    10503  const VkDeviceSize unusableSize = GetUnusableSize();
    10504 
    10505  inoutStats.size += GetSize();
    10506  inoutStats.unusedSize += m_SumFreeSize + unusableSize;
    10507  inoutStats.allocationCount += m_AllocationCount;
    10508  inoutStats.unusedRangeCount += m_FreeCount;
    10509  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
    10510 
    10511  if(unusableSize > 0)
    10512  {
    10513  ++inoutStats.unusedRangeCount;
    10514  // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations.
    10515  }
    10516 }
    10517 
    10518 #if VMA_STATS_STRING_ENABLED
    10519 
    10520 void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
    10521 {
    10522  // TODO optimize
    10523  VmaStatInfo stat;
    10524  CalcAllocationStatInfo(stat);
    10525 
    10526  PrintDetailedMap_Begin(
    10527  json,
    10528  stat.unusedBytes,
    10529  stat.allocationCount,
    10530  stat.unusedRangeCount);
    10531 
    10532  PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));
    10533 
    10534  const VkDeviceSize unusableSize = GetUnusableSize();
    10535  if(unusableSize > 0)
    10536  {
    10537  PrintDetailedMap_UnusedRange(json,
    10538  m_UsableSize, // offset
    10539  unusableSize); // size
    10540  }
    10541 
    10542  PrintDetailedMap_End(json);
    10543 }
    10544 
    10545 #endif // #if VMA_STATS_STRING_ENABLED
    10546 
// Tries to find a free node that can hold allocSize with allocAlignment.
// On success fills *pAllocationRequest (the found level is smuggled through
// customData for Alloc()) and returns true.
// currentFrameIndex, frameInUseCount, canMakeOtherLost and strategy are
// ignored - lost allocations and allocation strategies are not supported by
// the buddy algorithm.
bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    bool upperAddress,
    VmaSuballocationType allocType,
    bool canMakeOtherLost,
    uint32_t strategy,
    VmaAllocationRequest* pAllocationRequest)
{
    VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");

    // Simple way to respect bufferImageGranularity. May be optimized some day.
    // Whenever it might be an OPTIMAL image...
    if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
        allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
        allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
    {
        allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
        allocSize = VMA_MAX(allocSize, bufferImageGranularity);
    }

    if(allocSize > m_UsableSize)
    {
        return false;
    }

    // Search from the best-fit level (smallest node that fits) up toward
    // larger nodes: iterates level = targetLevel, targetLevel-1, ..., 0.
    // A larger node found here will be split down by Alloc().
    const uint32_t targetLevel = AllocSizeToLevel(allocSize);
    for(uint32_t level = targetLevel + 1; level--; )
    {
        for(Node* freeNode = m_FreeList[level].front;
            freeNode != VMA_NULL;
            freeNode = freeNode->free.next)
        {
            // Node offsets are multiples of the node size, so checking the
            // offset directly suffices for the alignment requirement.
            if(freeNode->offset % allocAlignment == 0)
            {
                pAllocationRequest->offset = freeNode->offset;
                pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
                pAllocationRequest->sumItemSize = 0;
                pAllocationRequest->itemsToMakeLostCount = 0;
                // Pass the level of the found node to Alloc().
                pAllocationRequest->customData = (void*)(uintptr_t)level;
                return true;
            }
        }
    }

    return false;
}
    10597 
    10598 bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
    10599  uint32_t currentFrameIndex,
    10600  uint32_t frameInUseCount,
    10601  VmaAllocationRequest* pAllocationRequest)
    10602 {
    10603  /*
    10604  Lost allocations are not supported in buddy allocator at the moment.
    10605  Support might be added in the future.
    10606  */
    10607  return pAllocationRequest->itemsToMakeLostCount == 0;
    10608 }
    10609 
    10610 uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    10611 {
    10612  /*
    10613  Lost allocations are not supported in buddy allocator at the moment.
    10614  Support might be added in the future.
    10615  */
    10616  return 0;
    10617 }
    10618 
// Commits the allocation found by CreateAllocationRequest: locates the free
// node at request.offset on the level stored in request.customData, splits it
// down to the best-fit level, and converts the final node to an allocation.
void VmaBlockMetadata_Buddy::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    bool upperAddress,
    VmaAllocation hAllocation)
{
    const uint32_t targetLevel = AllocSizeToLevel(allocSize);
    // Level at which CreateAllocationRequest found the free node.
    uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;

    // Find the free node with the requested offset in this level's free list.
    Node* currNode = m_FreeList[currLevel].front;
    VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
    while(currNode->offset != request.offset)
    {
        currNode = currNode->free.next;
        VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
    }

    // Go down, splitting free nodes.
    while(currLevel < targetLevel)
    {
        // currNode is already first free node at currLevel.
        // Remove it from list of free nodes at this currLevel.
        RemoveFromFreeList(currLevel, currNode);

        const uint32_t childrenLevel = currLevel + 1;

        // Create two free sub-nodes.
        Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
        Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();

        leftChild->offset = currNode->offset;
        leftChild->type = Node::TYPE_FREE;
        leftChild->parent = currNode;
        leftChild->buddy = rightChild;

        // Right child starts one child-node-size past the left child.
        rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
        rightChild->type = Node::TYPE_FREE;
        rightChild->parent = currNode;
        rightChild->buddy = leftChild;

        // Convert current currNode to split type.
        currNode->type = Node::TYPE_SPLIT;
        currNode->split.leftChild = leftChild;

        // Add child nodes to free list. Order is important!
        // (Left child must end up at the front so the loop picks it next.)
        AddToFreeListFront(childrenLevel, rightChild);
        AddToFreeListFront(childrenLevel, leftChild);

        ++m_FreeCount;
        //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2.
        ++currLevel;
        currNode = m_FreeList[currLevel].front;

        /*
        We can be sure that currNode, as left child of node previously split,
        also fullfills the alignment requirement.
        */
    }

    // Remove from free list.
    VMA_ASSERT(currLevel == targetLevel &&
        currNode != VMA_NULL &&
        currNode->type == Node::TYPE_FREE);
    RemoveFromFreeList(currLevel, currNode);

    // Convert to allocation node.
    currNode->type = Node::TYPE_ALLOCATION;
    currNode->allocation.alloc = hAllocation;

    ++m_AllocationCount;
    --m_FreeCount;
    m_SumFreeSize -= allocSize;
}
    10693 
    10694 void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
    10695 {
    10696  if(node->type == Node::TYPE_SPLIT)
    10697  {
    10698  DeleteNode(node->split.leftChild->buddy);
    10699  DeleteNode(node->split.leftChild);
    10700  }
    10701 
    10702  vma_delete(GetAllocationCallbacks(), node);
    10703 }
    10704 
    10705 bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
    10706 {
    10707  VMA_VALIDATE(level < m_LevelCount);
    10708  VMA_VALIDATE(curr->parent == parent);
    10709  VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
    10710  VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
    10711  switch(curr->type)
    10712  {
    10713  case Node::TYPE_FREE:
    10714  // curr->free.prev, next are validated separately.
    10715  ctx.calculatedSumFreeSize += levelNodeSize;
    10716  ++ctx.calculatedFreeCount;
    10717  break;
    10718  case Node::TYPE_ALLOCATION:
    10719  ++ctx.calculatedAllocationCount;
    10720  ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
    10721  VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE);
    10722  break;
    10723  case Node::TYPE_SPLIT:
    10724  {
    10725  const uint32_t childrenLevel = level + 1;
    10726  const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
    10727  const Node* const leftChild = curr->split.leftChild;
    10728  VMA_VALIDATE(leftChild != VMA_NULL);
    10729  VMA_VALIDATE(leftChild->offset == curr->offset);
    10730  if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
    10731  {
    10732  VMA_VALIDATE(false && "ValidateNode for left child failed.");
    10733  }
    10734  const Node* const rightChild = leftChild->buddy;
    10735  VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
    10736  if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
    10737  {
    10738  VMA_VALIDATE(false && "ValidateNode for right child failed.");
    10739  }
    10740  }
    10741  break;
    10742  default:
    10743  return false;
    10744  }
    10745 
    10746  return true;
    10747 }
    10748 
    10749 uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
    10750 {
    10751  // I know this could be optimized somehow e.g. by using std::log2p1 from C++20.
    10752  uint32_t level = 0;
    10753  VkDeviceSize currLevelNodeSize = m_UsableSize;
    10754  VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
    10755  while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
    10756  {
    10757  ++level;
    10758  currLevelNodeSize = nextLevelNodeSize;
    10759  nextLevelNodeSize = currLevelNodeSize >> 1;
    10760  }
    10761  return level;
    10762 }
    10763 
    10764 void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
    10765 {
    10766  // Find node and level.
    10767  Node* node = m_Root;
    10768  VkDeviceSize nodeOffset = 0;
    10769  uint32_t level = 0;
    10770  VkDeviceSize levelNodeSize = LevelToNodeSize(0);
    10771  while(node->type == Node::TYPE_SPLIT)
    10772  {
    10773  const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
    10774  if(offset < nodeOffset + nextLevelSize)
    10775  {
    10776  node = node->split.leftChild;
    10777  }
    10778  else
    10779  {
    10780  node = node->split.leftChild->buddy;
    10781  nodeOffset += nextLevelSize;
    10782  }
    10783  ++level;
    10784  levelNodeSize = nextLevelSize;
    10785  }
    10786 
    10787  VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
    10788  VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);
    10789 
    10790  ++m_FreeCount;
    10791  --m_AllocationCount;
    10792  m_SumFreeSize += alloc->GetSize();
    10793 
    10794  node->type = Node::TYPE_FREE;
    10795 
    10796  // Join free nodes if possible.
    10797  while(level > 0 && node->buddy->type == Node::TYPE_FREE)
    10798  {
    10799  RemoveFromFreeList(level, node->buddy);
    10800  Node* const parent = node->parent;
    10801 
    10802  vma_delete(GetAllocationCallbacks(), node->buddy);
    10803  vma_delete(GetAllocationCallbacks(), node);
    10804  parent->type = Node::TYPE_FREE;
    10805 
    10806  node = parent;
    10807  --level;
    10808  //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
    10809  --m_FreeCount;
    10810  }
    10811 
    10812  AddToFreeListFront(level, node);
    10813 }
    10814 
    10815 void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
    10816 {
    10817  switch(node->type)
    10818  {
    10819  case Node::TYPE_FREE:
    10820  ++outInfo.unusedRangeCount;
    10821  outInfo.unusedBytes += levelNodeSize;
    10822  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
    10823  outInfo.unusedRangeSizeMin = VMA_MAX(outInfo.unusedRangeSizeMin, levelNodeSize);
    10824  break;
    10825  case Node::TYPE_ALLOCATION:
    10826  {
    10827  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
    10828  ++outInfo.allocationCount;
    10829  outInfo.usedBytes += allocSize;
    10830  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
    10831  outInfo.allocationSizeMin = VMA_MAX(outInfo.allocationSizeMin, allocSize);
    10832 
    10833  const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
    10834  if(unusedRangeSize > 0)
    10835  {
    10836  ++outInfo.unusedRangeCount;
    10837  outInfo.unusedBytes += unusedRangeSize;
    10838  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
    10839  outInfo.unusedRangeSizeMin = VMA_MAX(outInfo.unusedRangeSizeMin, unusedRangeSize);
    10840  }
    10841  }
    10842  break;
    10843  case Node::TYPE_SPLIT:
    10844  {
    10845  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
    10846  const Node* const leftChild = node->split.leftChild;
    10847  CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
    10848  const Node* const rightChild = leftChild->buddy;
    10849  CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
    10850  }
    10851  break;
    10852  default:
    10853  VMA_ASSERT(0);
    10854  }
    10855 }
    10856 
    10857 void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
    10858 {
    10859  VMA_ASSERT(node->type == Node::TYPE_FREE);
    10860 
    10861  // List is empty.
    10862  Node* const frontNode = m_FreeList[level].front;
    10863  if(frontNode == VMA_NULL)
    10864  {
    10865  VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
    10866  node->free.prev = node->free.next = VMA_NULL;
    10867  m_FreeList[level].front = m_FreeList[level].back = node;
    10868  }
    10869  else
    10870  {
    10871  VMA_ASSERT(frontNode->free.prev == VMA_NULL);
    10872  node->free.prev = VMA_NULL;
    10873  node->free.next = frontNode;
    10874  frontNode->free.prev = node;
    10875  m_FreeList[level].front = node;
    10876  }
    10877 }
    10878 
    10879 void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
    10880 {
    10881  VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
    10882 
    10883  // It is at the front.
    10884  if(node->free.prev == VMA_NULL)
    10885  {
    10886  VMA_ASSERT(m_FreeList[level].front == node);
    10887  m_FreeList[level].front = node->free.next;
    10888  }
    10889  else
    10890  {
    10891  Node* const prevFreeNode = node->free.prev;
    10892  VMA_ASSERT(prevFreeNode->free.next == node);
    10893  prevFreeNode->free.next = node->free.next;
    10894  }
    10895 
    10896  // It is at the back.
    10897  if(node->free.next == VMA_NULL)
    10898  {
    10899  VMA_ASSERT(m_FreeList[level].back == node);
    10900  m_FreeList[level].back = node->free.prev;
    10901  }
    10902  else
    10903  {
    10904  Node* const nextFreeNode = node->free.next;
    10905  VMA_ASSERT(nextFreeNode->free.prev == node);
    10906  nextFreeNode->free.prev = node->free.prev;
    10907  }
    10908 }
    10909 
    10910 #if VMA_STATS_STRING_ENABLED
    10911 void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
    10912 {
    10913  switch(node->type)
    10914  {
    10915  case Node::TYPE_FREE:
    10916  PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
    10917  break;
    10918  case Node::TYPE_ALLOCATION:
    10919  {
    10920  PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
    10921  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
    10922  if(allocSize < levelNodeSize)
    10923  {
    10924  PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize);
    10925  }
    10926  }
    10927  break;
    10928  case Node::TYPE_SPLIT:
    10929  {
    10930  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
    10931  const Node* const leftChild = node->split.leftChild;
    10932  PrintDetailedMapNode(json, leftChild, childrenNodeSize);
    10933  const Node* const rightChild = leftChild->buddy;
    10934  PrintDetailedMapNode(json, rightChild, childrenNodeSize);
    10935  }
    10936  break;
    10937  default:
    10938  VMA_ASSERT(0);
    10939  }
    10940 }
    10941 #endif // #if VMA_STATS_STRING_ENABLED
    10942 
    10943 
    10945 // class VmaDeviceMemoryBlock
    10946 
// Creates an uninitialized block - the real setup (memory handle, metadata)
// happens later in Init(). hAllocator is currently unused here.
VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
    m_pMetadata(VMA_NULL),
    m_MemoryTypeIndex(UINT32_MAX),
    m_Id(0),
    m_hMemory(VK_NULL_HANDLE),
    m_MapCount(0),
    m_pMappedData(VMA_NULL)
{
}
    10956 
    10957 void VmaDeviceMemoryBlock::Init(
    10958  VmaAllocator hAllocator,
    10959  uint32_t newMemoryTypeIndex,
    10960  VkDeviceMemory newMemory,
    10961  VkDeviceSize newSize,
    10962  uint32_t id,
    10963  uint32_t algorithm)
    10964 {
    10965  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    10966 
    10967  m_MemoryTypeIndex = newMemoryTypeIndex;
    10968  m_Id = id;
    10969  m_hMemory = newMemory;
    10970 
    10971  switch(algorithm)
    10972  {
    10974  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
    10975  break;
    10977  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
    10978  break;
    10979  default:
    10980  VMA_ASSERT(0);
    10981  // Fall-through.
    10982  case 0:
    10983  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
    10984  }
    10985  m_pMetadata->Init(newSize);
    10986 }
    10987 
// Returns the underlying VkDeviceMemory to the allocator and destroys the
// metadata. The block must be empty - every allocation freed beforehand.
void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
{
    // This is the most important assert in the entire library.
    // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
    VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");

    VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
    allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
    m_hMemory = VK_NULL_HANDLE;

    vma_delete(allocator, m_pMetadata);
    m_pMetadata = VMA_NULL;
}
    11001 
// Basic sanity check of the block itself, then full validation of its
// metadata structure.
bool VmaDeviceMemoryBlock::Validate() const
{
    // The block must own Vulkan memory and have a non-zero size.
    VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
        (m_pMetadata->GetSize() != 0));

    return m_pMetadata->Validate();
}
    11009 
    11010 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
    11011 {
    11012  void* pData = nullptr;
    11013  VkResult res = Map(hAllocator, 1, &pData);
    11014  if(res != VK_SUCCESS)
    11015  {
    11016  return res;
    11017  }
    11018 
    11019  res = m_pMetadata->CheckCorruption(pData);
    11020 
    11021  Unmap(hAllocator, 1);
    11022 
    11023  return res;
    11024 }
    11025 
    11026 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
    11027 {
    11028  if(count == 0)
    11029  {
    11030  return VK_SUCCESS;
    11031  }
    11032 
    11033  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    11034  if(m_MapCount != 0)
    11035  {
    11036  m_MapCount += count;
    11037  VMA_ASSERT(m_pMappedData != VMA_NULL);
    11038  if(ppData != VMA_NULL)
    11039  {
    11040  *ppData = m_pMappedData;
    11041  }
    11042  return VK_SUCCESS;
    11043  }
    11044  else
    11045  {
    11046  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
    11047  hAllocator->m_hDevice,
    11048  m_hMemory,
    11049  0, // offset
    11050  VK_WHOLE_SIZE,
    11051  0, // flags
    11052  &m_pMappedData);
    11053  if(result == VK_SUCCESS)
    11054  {
    11055  if(ppData != VMA_NULL)
    11056  {
    11057  *ppData = m_pMappedData;
    11058  }
    11059  m_MapCount = count;
    11060  }
    11061  return result;
    11062  }
    11063 }
    11064 
    11065 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
    11066 {
    11067  if(count == 0)
    11068  {
    11069  return;
    11070  }
    11071 
    11072  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    11073  if(m_MapCount >= count)
    11074  {
    11075  m_MapCount -= count;
    11076  if(m_MapCount == 0)
    11077  {
    11078  m_pMappedData = VMA_NULL;
    11079  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
    11080  }
    11081  }
    11082  else
    11083  {
    11084  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
    11085  }
    11086 }
    11087 
    11088 VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
    11089 {
    11090  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
    11091  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
    11092 
    11093  void* pData;
    11094  VkResult res = Map(hAllocator, 1, &pData);
    11095  if(res != VK_SUCCESS)
    11096  {
    11097  return res;
    11098  }
    11099 
    11100  VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
    11101  VmaWriteMagicValue(pData, allocOffset + allocSize);
    11102 
    11103  Unmap(hAllocator, 1);
    11104 
    11105  return VK_SUCCESS;
    11106 }
    11107 
    11108 VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
    11109 {
    11110  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
    11111  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
    11112 
    11113  void* pData;
    11114  VkResult res = Map(hAllocator, 1, &pData);
    11115  if(res != VK_SUCCESS)
    11116  {
    11117  return res;
    11118  }
    11119 
    11120  if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
    11121  {
    11122  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
    11123  }
    11124  else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
    11125  {
    11126  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
    11127  }
    11128 
    11129  Unmap(hAllocator, 1);
    11130 
    11131  return VK_SUCCESS;
    11132 }
    11133 
    11134 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
    11135  const VmaAllocator hAllocator,
    11136  const VmaAllocation hAllocation,
    11137  VkBuffer hBuffer)
    11138 {
    11139  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
    11140  hAllocation->GetBlock() == this);
    11141  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
    11142  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    11143  return hAllocator->GetVulkanFunctions().vkBindBufferMemory(
    11144  hAllocator->m_hDevice,
    11145  hBuffer,
    11146  m_hMemory,
    11147  hAllocation->GetOffset());
    11148 }
    11149 
    11150 VkResult VmaDeviceMemoryBlock::BindImageMemory(
    11151  const VmaAllocator hAllocator,
    11152  const VmaAllocation hAllocation,
    11153  VkImage hImage)
    11154 {
    11155  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
    11156  hAllocation->GetBlock() == this);
    11157  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
    11158  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    11159  return hAllocator->GetVulkanFunctions().vkBindImageMemory(
    11160  hAllocator->m_hDevice,
    11161  hImage,
    11162  m_hMemory,
    11163  hAllocation->GetOffset());
    11164 }
    11165 
    11166 static void InitStatInfo(VmaStatInfo& outInfo)
    11167 {
    11168  memset(&outInfo, 0, sizeof(outInfo));
    11169  outInfo.allocationSizeMin = UINT64_MAX;
    11170  outInfo.unusedRangeSizeMin = UINT64_MAX;
    11171 }
    11172 
// Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
// Counters and byte totals are summed; min/max fields are merged so the
// result covers both inputs. Average fields are NOT maintained here —
// call VmaPostprocessCalcStatInfo() once after the final add.
static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
{
    inoutInfo.blockCount += srcInfo.blockCount;
    inoutInfo.allocationCount += srcInfo.allocationCount;
    inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
    inoutInfo.usedBytes += srcInfo.usedBytes;
    inoutInfo.unusedBytes += srcInfo.unusedBytes;
    inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
    inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
    inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
    inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
}
    11186 
    11187 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
    11188 {
    11189  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
    11190  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
    11191  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
    11192  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
    11193 }
    11194 
// Constructs a custom pool. All configuration is forwarded to the embedded
// block vector, which owns the pool's VkDeviceMemory blocks. No Vulkan
// allocation happens here.
VmaPool_T::VmaPool_T(
    VmaAllocator hAllocator,
    const VmaPoolCreateInfo& createInfo,
    VkDeviceSize preferredBlockSize) :
    m_BlockVector(
        hAllocator,
        createInfo.memoryTypeIndex,
        // blockSize == 0 means "let the library choose" — fall back to the
        // allocator-computed preferred size for this memory type.
        createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
        createInfo.minBlockCount,
        createInfo.maxBlockCount,
        // Granularity of 1 effectively disables buffer/image granularity handling.
        (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
        createInfo.frameInUseCount,
        true, // isCustomPool
        createInfo.blockSize != 0, // explicitBlockSize
        createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm
    m_Id(0)
{
}
    11213 
VmaPool_T::~VmaPool_T()
{
    // Nothing to do explicitly: m_BlockVector's destructor releases all
    // remaining device memory blocks.
}
    11217 
    11218 #if VMA_STATS_STRING_ENABLED
    11219 
    11220 #endif // #if VMA_STATS_STRING_ENABLED
    11221 
// Stores configuration only; no Vulkan work happens here. Blocks are created
// lazily during allocation, or eagerly via CreateMinBlocks().
VmaBlockVector::VmaBlockVector(
    VmaAllocator hAllocator,
    uint32_t memoryTypeIndex,
    VkDeviceSize preferredBlockSize,
    size_t minBlockCount,
    size_t maxBlockCount,
    VkDeviceSize bufferImageGranularity,
    uint32_t frameInUseCount,
    bool isCustomPool,
    bool explicitBlockSize,
    uint32_t algorithm) :
    m_hAllocator(hAllocator),
    m_MemoryTypeIndex(memoryTypeIndex),
    m_PreferredBlockSize(preferredBlockSize),
    m_MinBlockCount(minBlockCount),
    m_MaxBlockCount(maxBlockCount),
    m_BufferImageGranularity(bufferImageGranularity),
    m_FrameInUseCount(frameInUseCount),
    m_IsCustomPool(isCustomPool),
    m_ExplicitBlockSize(explicitBlockSize),
    m_Algorithm(algorithm),
    // Starts with no empty block; toggled by allocation/free bookkeeping.
    m_HasEmptyBlock(false),
    m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
    m_NextBlockId(0)
{
}
    11248 
    11249 VmaBlockVector::~VmaBlockVector()
    11250 {
    11251  for(size_t i = m_Blocks.size(); i--; )
    11252  {
    11253  m_Blocks[i]->Destroy(m_hAllocator);
    11254  vma_delete(m_hAllocator, m_Blocks[i]);
    11255  }
    11256 }
    11257 
    11258 VkResult VmaBlockVector::CreateMinBlocks()
    11259 {
    11260  for(size_t i = 0; i < m_MinBlockCount; ++i)
    11261  {
    11262  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
    11263  if(res != VK_SUCCESS)
    11264  {
    11265  return res;
    11266  }
    11267  }
    11268  return VK_SUCCESS;
    11269 }
    11270 
    11271 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
    11272 {
    11273  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
    11274 
    11275  const size_t blockCount = m_Blocks.size();
    11276 
    11277  pStats->size = 0;
    11278  pStats->unusedSize = 0;
    11279  pStats->allocationCount = 0;
    11280  pStats->unusedRangeCount = 0;
    11281  pStats->unusedRangeSizeMax = 0;
    11282  pStats->blockCount = blockCount;
    11283 
    11284  for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    11285  {
    11286  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    11287  VMA_ASSERT(pBlock);
    11288  VMA_HEAVY_ASSERT(pBlock->Validate());
    11289  pBlock->m_pMetadata->AddPoolStats(*pStats);
    11290  }
    11291 }
    11292 
    11293 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
    11294 {
    11295  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    11296  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
    11297  (VMA_DEBUG_MARGIN > 0) &&
    11298  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
    11299 }
    11300 
// Upper bound on retries in VmaBlockVector::AllocatePage when allocating with
// "can make other lost": bounds the loop if other threads keep touching the
// allocations we try to make lost.
static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
    11302 
    11303 VkResult VmaBlockVector::Allocate(
    11304  VmaPool hCurrentPool,
    11305  uint32_t currentFrameIndex,
    11306  VkDeviceSize size,
    11307  VkDeviceSize alignment,
    11308  const VmaAllocationCreateInfo& createInfo,
    11309  VmaSuballocationType suballocType,
    11310  size_t allocationCount,
    11311  VmaAllocation* pAllocations)
    11312 {
    11313  size_t allocIndex;
    11314  VkResult res = VK_SUCCESS;
    11315 
    11316  {
    11317  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
    11318  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
    11319  {
    11320  res = AllocatePage(
    11321  hCurrentPool,
    11322  currentFrameIndex,
    11323  size,
    11324  alignment,
    11325  createInfo,
    11326  suballocType,
    11327  pAllocations + allocIndex);
    11328  if(res != VK_SUCCESS)
    11329  {
    11330  break;
    11331  }
    11332  }
    11333  }
    11334 
    11335  if(res != VK_SUCCESS)
    11336  {
    11337  // Free all already created allocations.
    11338  while(allocIndex--)
    11339  {
    11340  Free(pAllocations[allocIndex]);
    11341  }
    11342  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
    11343  }
    11344 
    11345  return res;
    11346 }
    11347 
    11348 VkResult VmaBlockVector::AllocatePage(
    11349  VmaPool hCurrentPool,
    11350  uint32_t currentFrameIndex,
    11351  VkDeviceSize size,
    11352  VkDeviceSize alignment,
    11353  const VmaAllocationCreateInfo& createInfo,
    11354  VmaSuballocationType suballocType,
    11355  VmaAllocation* pAllocation)
    11356 {
    11357  const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
    11358  bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
    11359  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    11360  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
    11361  const bool canCreateNewBlock =
    11362  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
    11363  (m_Blocks.size() < m_MaxBlockCount);
    11364  uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
    11365 
    11366  // If linearAlgorithm is used, canMakeOtherLost is available only when used as ring buffer.
    11367  // Which in turn is available only when maxBlockCount = 1.
    11368  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
    11369  {
    11370  canMakeOtherLost = false;
    11371  }
    11372 
    11373  // Upper address can only be used with linear allocator and within single memory block.
    11374  if(isUpperAddress &&
    11375  (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
    11376  {
    11377  return VK_ERROR_FEATURE_NOT_PRESENT;
    11378  }
    11379 
    11380  // Validate strategy.
    11381  switch(strategy)
    11382  {
    11383  case 0:
    11385  break;
    11389  break;
    11390  default:
    11391  return VK_ERROR_FEATURE_NOT_PRESENT;
    11392  }
    11393 
    11394  // Early reject: requested allocation size is larger that maximum block size for this block vector.
    11395  if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
    11396  {
    11397  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    11398  }
    11399 
    11400  /*
    11401  Under certain condition, this whole section can be skipped for optimization, so
    11402  we move on directly to trying to allocate with canMakeOtherLost. That's the case
    11403  e.g. for custom pools with linear algorithm.
    11404  */
    11405  if(!canMakeOtherLost || canCreateNewBlock)
    11406  {
    11407  // 1. Search existing allocations. Try to allocate without making other allocations lost.
    11408  VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
    11410 
    11411  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
    11412  {
    11413  // Use only last block.
    11414  if(!m_Blocks.empty())
    11415  {
    11416  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
    11417  VMA_ASSERT(pCurrBlock);
    11418  VkResult res = AllocateFromBlock(
    11419  pCurrBlock,
    11420  hCurrentPool,
    11421  currentFrameIndex,
    11422  size,
    11423  alignment,
    11424  allocFlagsCopy,
    11425  createInfo.pUserData,
    11426  suballocType,
    11427  strategy,
    11428  pAllocation);
    11429  if(res == VK_SUCCESS)
    11430  {
    11431  VMA_DEBUG_LOG(" Returned from last block #%u", (uint32_t)(m_Blocks.size() - 1));
    11432  return VK_SUCCESS;
    11433  }
    11434  }
    11435  }
    11436  else
    11437  {
    11439  {
    11440  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
    11441  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
    11442  {
    11443  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    11444  VMA_ASSERT(pCurrBlock);
    11445  VkResult res = AllocateFromBlock(
    11446  pCurrBlock,
    11447  hCurrentPool,
    11448  currentFrameIndex,
    11449  size,
    11450  alignment,
    11451  allocFlagsCopy,
    11452  createInfo.pUserData,
    11453  suballocType,
    11454  strategy,
    11455  pAllocation);
    11456  if(res == VK_SUCCESS)
    11457  {
    11458  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
    11459  return VK_SUCCESS;
    11460  }
    11461  }
    11462  }
    11463  else // WORST_FIT, FIRST_FIT
    11464  {
    11465  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
    11466  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    11467  {
    11468  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    11469  VMA_ASSERT(pCurrBlock);
    11470  VkResult res = AllocateFromBlock(
    11471  pCurrBlock,
    11472  hCurrentPool,
    11473  currentFrameIndex,
    11474  size,
    11475  alignment,
    11476  allocFlagsCopy,
    11477  createInfo.pUserData,
    11478  suballocType,
    11479  strategy,
    11480  pAllocation);
    11481  if(res == VK_SUCCESS)
    11482  {
    11483  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
    11484  return VK_SUCCESS;
    11485  }
    11486  }
    11487  }
    11488  }
    11489 
    11490  // 2. Try to create new block.
    11491  if(canCreateNewBlock)
    11492  {
    11493  // Calculate optimal size for new block.
    11494  VkDeviceSize newBlockSize = m_PreferredBlockSize;
    11495  uint32_t newBlockSizeShift = 0;
    11496  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
    11497 
    11498  if(!m_ExplicitBlockSize)
    11499  {
    11500  // Allocate 1/8, 1/4, 1/2 as first blocks.
    11501  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
    11502  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
    11503  {
    11504  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
    11505  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
    11506  {
    11507  newBlockSize = smallerNewBlockSize;
    11508  ++newBlockSizeShift;
    11509  }
    11510  else
    11511  {
    11512  break;
    11513  }
    11514  }
    11515  }
    11516 
    11517  size_t newBlockIndex = 0;
    11518  VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
    11519  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
    11520  if(!m_ExplicitBlockSize)
    11521  {
    11522  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
    11523  {
    11524  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
    11525  if(smallerNewBlockSize >= size)
    11526  {
    11527  newBlockSize = smallerNewBlockSize;
    11528  ++newBlockSizeShift;
    11529  res = CreateBlock(newBlockSize, &newBlockIndex);
    11530  }
    11531  else
    11532  {
    11533  break;
    11534  }
    11535  }
    11536  }
    11537 
    11538  if(res == VK_SUCCESS)
    11539  {
    11540  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
    11541  VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
    11542 
    11543  res = AllocateFromBlock(
    11544  pBlock,
    11545  hCurrentPool,
    11546  currentFrameIndex,
    11547  size,
    11548  alignment,
    11549  allocFlagsCopy,
    11550  createInfo.pUserData,
    11551  suballocType,
    11552  strategy,
    11553  pAllocation);
    11554  if(res == VK_SUCCESS)
    11555  {
    11556  VMA_DEBUG_LOG(" Created new block Size=%llu", newBlockSize);
    11557  return VK_SUCCESS;
    11558  }
    11559  else
    11560  {
    11561  // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
    11562  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    11563  }
    11564  }
    11565  }
    11566  }
    11567 
    11568  // 3. Try to allocate from existing blocks with making other allocations lost.
    11569  if(canMakeOtherLost)
    11570  {
    11571  uint32_t tryIndex = 0;
    11572  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
    11573  {
    11574  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
    11575  VmaAllocationRequest bestRequest = {};
    11576  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
    11577 
    11578  // 1. Search existing allocations.
    11580  {
    11581  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
    11582  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
    11583  {
    11584  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    11585  VMA_ASSERT(pCurrBlock);
    11586  VmaAllocationRequest currRequest = {};
    11587  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
    11588  currentFrameIndex,
    11589  m_FrameInUseCount,
    11590  m_BufferImageGranularity,
    11591  size,
    11592  alignment,
    11593  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
    11594  suballocType,
    11595  canMakeOtherLost,
    11596  strategy,
    11597  &currRequest))
    11598  {
    11599  const VkDeviceSize currRequestCost = currRequest.CalcCost();
    11600  if(pBestRequestBlock == VMA_NULL ||
    11601  currRequestCost < bestRequestCost)
    11602  {
    11603  pBestRequestBlock = pCurrBlock;
    11604  bestRequest = currRequest;
    11605  bestRequestCost = currRequestCost;
    11606 
    11607  if(bestRequestCost == 0)
    11608  {
    11609  break;
    11610  }
    11611  }
    11612  }
    11613  }
    11614  }
    11615  else // WORST_FIT, FIRST_FIT
    11616  {
    11617  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
    11618  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    11619  {
    11620  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    11621  VMA_ASSERT(pCurrBlock);
    11622  VmaAllocationRequest currRequest = {};
    11623  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
    11624  currentFrameIndex,
    11625  m_FrameInUseCount,
    11626  m_BufferImageGranularity,
    11627  size,
    11628  alignment,
    11629  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
    11630  suballocType,
    11631  canMakeOtherLost,
    11632  strategy,
    11633  &currRequest))
    11634  {
    11635  const VkDeviceSize currRequestCost = currRequest.CalcCost();
    11636  if(pBestRequestBlock == VMA_NULL ||
    11637  currRequestCost < bestRequestCost ||
    11639  {
    11640  pBestRequestBlock = pCurrBlock;
    11641  bestRequest = currRequest;
    11642  bestRequestCost = currRequestCost;
    11643 
    11644  if(bestRequestCost == 0 ||
    11646  {
    11647  break;
    11648  }
    11649  }
    11650  }
    11651  }
    11652  }
    11653 
    11654  if(pBestRequestBlock != VMA_NULL)
    11655  {
    11656  if(mapped)
    11657  {
    11658  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
    11659  if(res != VK_SUCCESS)
    11660  {
    11661  return res;
    11662  }
    11663  }
    11664 
    11665  if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
    11666  currentFrameIndex,
    11667  m_FrameInUseCount,
    11668  &bestRequest))
    11669  {
    11670  // We no longer have an empty Allocation.
    11671  if(pBestRequestBlock->m_pMetadata->IsEmpty())
    11672  {
    11673  m_HasEmptyBlock = false;
    11674  }
    11675  // Allocate from this pBlock.
    11676  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
    11677  pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, isUpperAddress, *pAllocation);
    11678  (*pAllocation)->InitBlockAllocation(
    11679  hCurrentPool,
    11680  pBestRequestBlock,
    11681  bestRequest.offset,
    11682  alignment,
    11683  size,
    11684  suballocType,
    11685  mapped,
    11686  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
    11687  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
    11688  VMA_DEBUG_LOG(" Returned from existing allocation #%u", (uint32_t)blockIndex);
    11689  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
    11690  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    11691  {
    11692  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
    11693  }
    11694  if(IsCorruptionDetectionEnabled())
    11695  {
    11696  VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
    11697  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
    11698  }
    11699  return VK_SUCCESS;
    11700  }
    11701  // else: Some allocations must have been touched while we are here. Next try.
    11702  }
    11703  else
    11704  {
    11705  // Could not find place in any of the blocks - break outer loop.
    11706  break;
    11707  }
    11708  }
    11709  /* Maximum number of tries exceeded - a very unlike event when many other
    11710  threads are simultaneously touching allocations making it impossible to make
    11711  lost at the same time as we try to allocate. */
    11712  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
    11713  {
    11714  return VK_ERROR_TOO_MANY_OBJECTS;
    11715  }
    11716  }
    11717 
    11718  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    11719 }
    11720 
// Returns hAllocation's range to its owning block's metadata and maintains the
// "at most one empty block" heuristic. Destruction of a block that became
// redundant is deferred until after the mutex is released.
void VmaBlockVector::Free(
    VmaAllocation hAllocation)
{
    VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;

    // Scope for lock.
    {
        VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);

        VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();

        // With corruption detection active, validate the magic values around
        // this allocation before its range is recycled.
        if(IsCorruptionDetectionEnabled())
        {
            VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
            VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
        }

        // A persistently mapped allocation holds one map reference on the block.
        if(hAllocation->IsPersistentMap())
        {
            pBlock->Unmap(m_hAllocator, 1);
        }

        pBlock->m_pMetadata->Free(hAllocation);
        VMA_HEAVY_ASSERT(pBlock->Validate());

        // NOTE(review): memTypeIndex is not a local here; harmless only because
        // VMA_DEBUG_LOG expands to nothing by default — would not compile with
        // logging enabled. Confirm against the macro definition.
        VMA_DEBUG_LOG("  Freed from MemoryTypeIndex=%u", memTypeIndex);

        // pBlock became empty after this deallocation.
        if(pBlock->m_pMetadata->IsEmpty())
        {
            // Already has empty Allocation. We don't want to have two, so delete this one.
            if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
            {
                pBlockToDelete = pBlock;
                Remove(pBlock);
            }
            // We now have first empty block.
            else
            {
                m_HasEmptyBlock = true;
            }
        }
        // pBlock didn't become empty, but we have another empty block - find and free that one.
        // (This is optional, heuristics.)
        else if(m_HasEmptyBlock)
        {
            VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
            if(pLastBlock->m_pMetadata->IsEmpty() && m_Blocks.size() > m_MinBlockCount)
            {
                pBlockToDelete = pLastBlock;
                m_Blocks.pop_back();
                m_HasEmptyBlock = false;
            }
        }

        // Keep blocks loosely sorted by free space (no-op for linear pools).
        IncrementallySortBlocks();
    }

    // Destruction of a free Allocation. Deferred until this point, outside of mutex
    // lock, for performance reason.
    if(pBlockToDelete != VMA_NULL)
    {
        VMA_DEBUG_LOG("    Deleted empty allocation");
        pBlockToDelete->Destroy(m_hAllocator);
        vma_delete(m_hAllocator, pBlockToDelete);
    }
}
    11788 
    11789 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
    11790 {
    11791  VkDeviceSize result = 0;
    11792  for(size_t i = m_Blocks.size(); i--; )
    11793  {
    11794  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
    11795  if(result >= m_PreferredBlockSize)
    11796  {
    11797  break;
    11798  }
    11799  }
    11800  return result;
    11801 }
    11802 
    11803 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
    11804 {
    11805  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    11806  {
    11807  if(m_Blocks[blockIndex] == pBlock)
    11808  {
    11809  VmaVectorRemove(m_Blocks, blockIndex);
    11810  return;
    11811  }
    11812  }
    11813  VMA_ASSERT(0);
    11814 }
    11815 
    11816 void VmaBlockVector::IncrementallySortBlocks()
    11817 {
    11818  if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
    11819  {
    11820  // Bubble sort only until first swap.
    11821  for(size_t i = 1; i < m_Blocks.size(); ++i)
    11822  {
    11823  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
    11824  {
    11825  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
    11826  return;
    11827  }
    11828  }
    11829  }
    11830 }
    11831 
// Attempts a single allocation of `size` bytes (with `alignment`) from
// `pBlock` without making any existing allocations lost. On success fills
// *pAllocation and returns VK_SUCCESS; returns VK_ERROR_OUT_OF_DEVICE_MEMORY
// when the block has no suitable free range; propagates Map() failures when
// VMA_ALLOCATION_CREATE_MAPPED_BIT is requested.
VkResult VmaBlockVector::AllocateFromBlock(
    VmaDeviceMemoryBlock* pBlock,
    VmaPool hCurrentPool,
    uint32_t currentFrameIndex,
    VkDeviceSize size,
    VkDeviceSize alignment,
    VmaAllocationCreateFlags allocFlags,
    void* pUserData,
    VmaSuballocationType suballocType,
    uint32_t strategy,
    VmaAllocation* pAllocation)
{
    // The caller must have stripped CAN_MAKE_OTHER_LOST: this path never evicts.
    VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
    const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
    const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;

    VmaAllocationRequest currRequest = {};
    if(pBlock->m_pMetadata->CreateAllocationRequest(
        currentFrameIndex,
        m_FrameInUseCount,
        m_BufferImageGranularity,
        size,
        alignment,
        isUpperAddress,
        suballocType,
        false, // canMakeOtherLost
        strategy,
        &currRequest))
    {
        // Allocate from pCurrBlock.
        VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);

        // Persistent mapping requested: add one map reference up front so the
        // allocation stays mapped for its whole lifetime.
        if(mapped)
        {
            VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
            if(res != VK_SUCCESS)
            {
                return res;
            }
        }

        // We no longer have an empty Allocation.
        if(pBlock->m_pMetadata->IsEmpty())
        {
            m_HasEmptyBlock = false;
        }

        *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
        pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, isUpperAddress, *pAllocation);
        (*pAllocation)->InitBlockAllocation(
            hCurrentPool,
            pBlock,
            currRequest.offset,
            alignment,
            size,
            suballocType,
            mapped,
            (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
        VMA_HEAVY_ASSERT(pBlock->Validate());
        (*pAllocation)->SetUserData(m_hAllocator, pUserData);
        // Optionally fill the new range with a debug pattern.
        if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
        {
            m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
        }
        // Stamp magic numbers in the debug margins, validated again on free.
        if(IsCorruptionDetectionEnabled())
        {
            VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
            VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
        }
        return VK_SUCCESS;
    }
    return VK_ERROR_OUT_OF_DEVICE_MEMORY;
}
    11906 
    11907 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
    11908 {
    11909  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    11910  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
    11911  allocInfo.allocationSize = blockSize;
    11912  VkDeviceMemory mem = VK_NULL_HANDLE;
    11913  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
    11914  if(res < 0)
    11915  {
    11916  return res;
    11917  }
    11918 
    11919  // New VkDeviceMemory successfully created.
    11920 
    11921  // Create new Allocation for it.
    11922  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
    11923  pBlock->Init(
    11924  m_hAllocator,
    11925  m_MemoryTypeIndex,
    11926  mem,
    11927  allocInfo.allocationSize,
    11928  m_NextBlockId++,
    11929  m_Algorithm);
    11930 
    11931  m_Blocks.push_back(pBlock);
    11932  if(pNewBlockIndex != VMA_NULL)
    11933  {
    11934  *pNewBlockIndex = m_Blocks.size() - 1;
    11935  }
    11936 
    11937  return VK_SUCCESS;
    11938 }
    11939 
    11940 void VmaBlockVector::ApplyDefragmentationMovesCpu(
    11941  class VmaBlockVectorDefragmentationContext* pDefragCtx,
    11942  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves)
    11943 {
    11944  const size_t blockCount = m_Blocks.size();
    11945  const bool isNonCoherent = m_hAllocator->IsMemoryTypeNonCoherent(m_MemoryTypeIndex);
    11946 
    11947  enum BLOCK_FLAG
    11948  {
    11949  BLOCK_FLAG_USED = 0x00000001,
    11950  BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION = 0x00000002,
    11951  };
    11952 
    11953  struct BlockInfo
    11954  {
    11955  uint32_t flags;
    11956  void* pMappedData;
    11957  };
    11958  VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> >
    11959  blockInfo(blockCount, VmaStlAllocator<BlockInfo>(m_hAllocator->GetAllocationCallbacks()));
    11960  memset(blockInfo.data(), 0, blockCount * sizeof(BlockInfo));
    11961 
    11962  // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
    11963  const size_t moveCount = moves.size();
    11964  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
    11965  {
    11966  const VmaDefragmentationMove& move = moves[moveIndex];
    11967  blockInfo[move.srcBlockIndex].flags |= BLOCK_FLAG_USED;
    11968  blockInfo[move.dstBlockIndex].flags |= BLOCK_FLAG_USED;
    11969  }
    11970 
    11971  VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
    11972 
    11973  // Go over all blocks. Get mapped pointer or map if necessary.
    11974  for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
    11975  {
    11976  BlockInfo& currBlockInfo = blockInfo[blockIndex];
    11977  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
    11978  if((currBlockInfo.flags & BLOCK_FLAG_USED) != 0)
    11979  {
    11980  currBlockInfo.pMappedData = pBlock->GetMappedData();
    11981  // It is not originally mapped - map it.
    11982  if(currBlockInfo.pMappedData == VMA_NULL)
    11983  {
    11984  pDefragCtx->res = pBlock->Map(m_hAllocator, 1, &currBlockInfo.pMappedData);
    11985  if(pDefragCtx->res == VK_SUCCESS)
    11986  {
    11987  currBlockInfo.flags |= BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION;
    11988  }
    11989  }
    11990  }
    11991  }
    11992 
    11993  // Go over all moves. Do actual data transfer.
    11994  if(pDefragCtx->res == VK_SUCCESS)
    11995  {
    11996  const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
    11997  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
    11998 
    11999  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
    12000  {
    12001  const VmaDefragmentationMove& move = moves[moveIndex];
    12002 
    12003  const BlockInfo& srcBlockInfo = blockInfo[move.srcBlockIndex];
    12004  const BlockInfo& dstBlockInfo = blockInfo[move.dstBlockIndex];
    12005 
    12006  VMA_ASSERT(srcBlockInfo.pMappedData && dstBlockInfo.pMappedData);
    12007 
    12008  // Invalidate source.
    12009  if(isNonCoherent)
    12010  {
    12011  VmaDeviceMemoryBlock* const pSrcBlock = m_Blocks[move.srcBlockIndex];
    12012  memRange.memory = pSrcBlock->GetDeviceMemory();
    12013  memRange.offset = VmaAlignDown(move.srcOffset, nonCoherentAtomSize);
    12014  memRange.size = VMA_MIN(
    12015  VmaAlignUp(move.size + (move.srcOffset - memRange.offset), nonCoherentAtomSize),
    12016  pSrcBlock->m_pMetadata->GetSize() - memRange.offset);
    12017  (*m_hAllocator->GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
    12018  }
    12019 
    12020  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
    12021  memmove(
    12022  reinterpret_cast<char*>(dstBlockInfo.pMappedData) + move.dstOffset,
    12023  reinterpret_cast<char*>(srcBlockInfo.pMappedData) + move.srcOffset,
    12024  static_cast<size_t>(move.size));
    12025 
    12026  if(IsCorruptionDetectionEnabled())
    12027  {
    12028  VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset - VMA_DEBUG_MARGIN);
    12029  VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset + move.size);
    12030  }
    12031 
    12032  // Flush destination.
    12033  if(isNonCoherent)
    12034  {
    12035  VmaDeviceMemoryBlock* const pDstBlock = m_Blocks[move.dstBlockIndex];
    12036  memRange.memory = pDstBlock->GetDeviceMemory();
    12037  memRange.offset = VmaAlignDown(move.dstOffset, nonCoherentAtomSize);
    12038  memRange.size = VMA_MIN(
    12039  VmaAlignUp(move.size + (move.dstOffset - memRange.offset), nonCoherentAtomSize),
    12040  pDstBlock->m_pMetadata->GetSize() - memRange.offset);
    12041  (*m_hAllocator->GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
    12042  }
    12043  }
    12044  }
    12045 
    12046  // Go over all blocks in reverse order. Unmap those that were mapped just for defragmentation.
    12047  // Regardless of pCtx->res == VK_SUCCESS.
    12048  for(size_t blockIndex = blockCount; blockIndex--; )
    12049  {
    12050  const BlockInfo& currBlockInfo = blockInfo[blockIndex];
    12051  if((currBlockInfo.flags & BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION) != 0)
    12052  {
    12053  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
    12054  pBlock->Unmap(m_hAllocator, 1);
    12055  }
    12056  }
    12057 }
    12058 
    12059 void VmaBlockVector::ApplyDefragmentationMovesGpu(
    12060  class VmaBlockVectorDefragmentationContext* pDefragCtx,
    12061  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    12062  VkCommandBuffer commandBuffer)
    12063 {
    12064  const size_t blockCount = m_Blocks.size();
    12065 
    12066  pDefragCtx->blockContexts.resize(blockCount);
    12067  memset(pDefragCtx->blockContexts.data(), 0, blockCount * sizeof(VmaBlockDefragmentationContext));
    12068 
    12069  // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
    12070  const size_t moveCount = moves.size();
    12071  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
    12072  {
    12073  const VmaDefragmentationMove& move = moves[moveIndex];
    12074  pDefragCtx->blockContexts[move.srcBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
    12075  pDefragCtx->blockContexts[move.dstBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
    12076  }
    12077 
    12078  VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
    12079 
    12080  // Go over all blocks. Create and bind buffer for whole block if necessary.
    12081  {
    12082  VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    12083  bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT |
    12084  VK_BUFFER_USAGE_TRANSFER_DST_BIT;
    12085 
    12086  for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
    12087  {
    12088  VmaBlockDefragmentationContext& currBlockCtx = pDefragCtx->blockContexts[blockIndex];
    12089  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
    12090  if((currBlockCtx.flags & VmaBlockDefragmentationContext::BLOCK_FLAG_USED) != 0)
    12091  {
    12092  bufCreateInfo.size = pBlock->m_pMetadata->GetSize();
    12093  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkCreateBuffer)(
    12094  m_hAllocator->m_hDevice, &bufCreateInfo, m_hAllocator->GetAllocationCallbacks(), &currBlockCtx.hBuffer);
    12095  if(pDefragCtx->res == VK_SUCCESS)
    12096  {
    12097  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkBindBufferMemory)(
    12098  m_hAllocator->m_hDevice, currBlockCtx.hBuffer, pBlock->GetDeviceMemory(), 0);
    12099  }
    12100  }
    12101  }
    12102  }
    12103 
    12104  // Go over all moves. Post data transfer commands to command buffer.
    12105  if(pDefragCtx->res == VK_SUCCESS)
    12106  {
    12107  const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
    12108  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
    12109 
    12110  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
    12111  {
    12112  const VmaDefragmentationMove& move = moves[moveIndex];
    12113 
    12114  const VmaBlockDefragmentationContext& srcBlockCtx = pDefragCtx->blockContexts[move.srcBlockIndex];
    12115  const VmaBlockDefragmentationContext& dstBlockCtx = pDefragCtx->blockContexts[move.dstBlockIndex];
    12116 
    12117  VMA_ASSERT(srcBlockCtx.hBuffer && dstBlockCtx.hBuffer);
    12118 
    12119  VkBufferCopy region = {
    12120  move.srcOffset,
    12121  move.dstOffset,
    12122  move.size };
    12123  (*m_hAllocator->GetVulkanFunctions().vkCmdCopyBuffer)(
    12124  commandBuffer, srcBlockCtx.hBuffer, dstBlockCtx.hBuffer, 1, &region);
    12125  }
    12126  }
    12127 
    12128  // Save buffers to defrag context for later destruction.
    12129  if(pDefragCtx->res == VK_SUCCESS && moveCount > 0)
    12130  {
    12131  pDefragCtx->res = VK_NOT_READY;
    12132  }
    12133 }
    12134 
    12135 void VmaBlockVector::FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats)
    12136 {
    12137  m_HasEmptyBlock = false;
    12138  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    12139  {
    12140  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
    12141  if(pBlock->m_pMetadata->IsEmpty())
    12142  {
    12143  if(m_Blocks.size() > m_MinBlockCount)
    12144  {
    12145  if(pDefragmentationStats != VMA_NULL)
    12146  {
    12147  ++pDefragmentationStats->deviceMemoryBlocksFreed;
    12148  pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
    12149  }
    12150 
    12151  VmaVectorRemove(m_Blocks, blockIndex);
    12152  pBlock->Destroy(m_hAllocator);
    12153  vma_delete(m_hAllocator, pBlock);
    12154  }
    12155  else
    12156  {
    12157  m_HasEmptyBlock = true;
    12158  }
    12159  }
    12160  }
    12161 }
    12162 
    12163 #if VMA_STATS_STRING_ENABLED
    12164 
/*
Serializes this block vector as a JSON object into `json`, under a read lock.
Custom pools emit their configuration (memory type, block size, min/max/current
block counts, frame-in-use count, algorithm); default pools emit only
"PreferredBlockSize". Then every block's detailed map is written under "Blocks",
keyed by the block's numeric id.
*/
void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
{
    // Read access is sufficient: only statistics are gathered.
    VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);

    json.BeginObject();

    if(m_IsCustomPool)
    {
        json.WriteString("MemoryTypeIndex");
        json.WriteNumber(m_MemoryTypeIndex);

        json.WriteString("BlockSize");
        json.WriteNumber(m_PreferredBlockSize);

        json.WriteString("BlockCount");
        json.BeginObject(true);
        // Min/Max are only emitted when they constrain anything.
        if(m_MinBlockCount > 0)
        {
            json.WriteString("Min");
            json.WriteNumber((uint64_t)m_MinBlockCount);
        }
        if(m_MaxBlockCount < SIZE_MAX)
        {
            json.WriteString("Max");
            json.WriteNumber((uint64_t)m_MaxBlockCount);
        }
        json.WriteString("Cur");
        json.WriteNumber((uint64_t)m_Blocks.size());
        json.EndObject();

        if(m_FrameInUseCount > 0)
        {
            json.WriteString("FrameInUseCount");
            json.WriteNumber(m_FrameInUseCount);
        }

        // 0 means the default algorithm - omitted from output.
        if(m_Algorithm != 0)
        {
            json.WriteString("Algorithm");
            json.WriteString(VmaAlgorithmToStr(m_Algorithm));
        }
    }
    else
    {
        json.WriteString("PreferredBlockSize");
        json.WriteNumber(m_PreferredBlockSize);
    }

    json.WriteString("Blocks");
    json.BeginObject();
    for(size_t i = 0; i < m_Blocks.size(); ++i)
    {
        // Key is the block's unique id, value is its detailed metadata map.
        json.BeginString();
        json.ContinueString(m_Blocks[i]->GetId());
        json.EndString();

        m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
    }
    json.EndObject();

    json.EndObject();
}
    12227 
    12228 #endif // #if VMA_STATS_STRING_ENABLED
    12229 
    12230 void VmaBlockVector::Defragment(
    12231  class VmaBlockVectorDefragmentationContext* pCtx,
    12232  VmaDefragmentationStats* pStats,
    12233  VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
    12234  VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
    12235  VkCommandBuffer commandBuffer)
    12236 {
    12237  pCtx->res = VK_SUCCESS;
    12238 
    12239  const VkMemoryPropertyFlags memPropFlags =
    12240  m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags;
    12241  const bool isHostVisible = (memPropFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0;
    12242  const bool isHostCoherent = (memPropFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0;
    12243 
    12244  const bool canDefragmentOnCpu = maxCpuBytesToMove > 0 && maxCpuAllocationsToMove > 0 &&
    12245  isHostVisible;
    12246  const bool canDefragmentOnGpu = maxGpuBytesToMove > 0 && maxGpuAllocationsToMove > 0 &&
    12247  (VMA_DEBUG_DETECT_CORRUPTION == 0 || !(isHostVisible && isHostCoherent));
    12248 
    12249  // There are options to defragment this memory type.
    12250  if(canDefragmentOnCpu || canDefragmentOnGpu)
    12251  {
    12252  bool defragmentOnGpu;
    12253  // There is only one option to defragment this memory type.
    12254  if(canDefragmentOnGpu != canDefragmentOnCpu)
    12255  {
    12256  defragmentOnGpu = canDefragmentOnGpu;
    12257  }
    12258  // Both options are available: Heuristics to choose the best one.
    12259  else
    12260  {
    12261  defragmentOnGpu = (memPropFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0 ||
    12262  m_hAllocator->IsIntegratedGpu();
    12263  }
    12264 
    12265  bool overlappingMoveSupported = !defragmentOnGpu;
    12266 
    12267  if(m_hAllocator->m_UseMutex)
    12268  {
    12269  m_Mutex.LockWrite();
    12270  pCtx->mutexLocked = true;
    12271  }
    12272 
    12273  pCtx->Begin(overlappingMoveSupported);
    12274 
    12275  // Defragment.
    12276 
    12277  const VkDeviceSize maxBytesToMove = defragmentOnGpu ? maxGpuBytesToMove : maxCpuBytesToMove;
    12278  const uint32_t maxAllocationsToMove = defragmentOnGpu ? maxGpuAllocationsToMove : maxCpuAllocationsToMove;
    12279  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> > moves =
    12280  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >(VmaStlAllocator<VmaDefragmentationMove>(m_hAllocator->GetAllocationCallbacks()));
    12281  pCtx->res = pCtx->GetAlgorithm()->Defragment(moves, maxBytesToMove, maxAllocationsToMove);
    12282 
    12283  // Accumulate statistics.
    12284  if(pStats != VMA_NULL)
    12285  {
    12286  const VkDeviceSize bytesMoved = pCtx->GetAlgorithm()->GetBytesMoved();
    12287  const uint32_t allocationsMoved = pCtx->GetAlgorithm()->GetAllocationsMoved();
    12288  pStats->bytesMoved += bytesMoved;
    12289  pStats->allocationsMoved += allocationsMoved;
    12290  VMA_ASSERT(bytesMoved <= maxBytesToMove);
    12291  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
    12292  if(defragmentOnGpu)
    12293  {
    12294  maxGpuBytesToMove -= bytesMoved;
    12295  maxGpuAllocationsToMove -= allocationsMoved;
    12296  }
    12297  else
    12298  {
    12299  maxCpuBytesToMove -= bytesMoved;
    12300  maxCpuAllocationsToMove -= allocationsMoved;
    12301  }
    12302  }
    12303 
    12304  if(pCtx->res >= VK_SUCCESS)
    12305  {
    12306  if(defragmentOnGpu)
    12307  {
    12308  ApplyDefragmentationMovesGpu(pCtx, moves, commandBuffer);
    12309  }
    12310  else
    12311  {
    12312  ApplyDefragmentationMovesCpu(pCtx, moves);
    12313  }
    12314  }
    12315  }
    12316 }
    12317 
    12318 void VmaBlockVector::DefragmentationEnd(
    12319  class VmaBlockVectorDefragmentationContext* pCtx,
    12320  VmaDefragmentationStats* pStats)
    12321 {
    12322  // Destroy buffers.
    12323  for(size_t blockIndex = pCtx->blockContexts.size(); blockIndex--; )
    12324  {
    12325  VmaBlockDefragmentationContext& blockCtx = pCtx->blockContexts[blockIndex];
    12326  if(blockCtx.hBuffer)
    12327  {
    12328  (*m_hAllocator->GetVulkanFunctions().vkDestroyBuffer)(
    12329  m_hAllocator->m_hDevice, blockCtx.hBuffer, m_hAllocator->GetAllocationCallbacks());
    12330  }
    12331  }
    12332 
    12333  if(pCtx->res >= VK_SUCCESS)
    12334  {
    12335  FreeEmptyBlocks(pStats);
    12336  }
    12337 
    12338  if(pCtx->mutexLocked)
    12339  {
    12340  VMA_ASSERT(m_hAllocator->m_UseMutex);
    12341  m_Mutex.UnlockWrite();
    12342  }
    12343 }
    12344 
    12345 size_t VmaBlockVector::CalcAllocationCount() const
    12346 {
    12347  size_t result = 0;
    12348  for(size_t i = 0; i < m_Blocks.size(); ++i)
    12349  {
    12350  result += m_Blocks[i]->m_pMetadata->GetAllocationCount();
    12351  }
    12352  return result;
    12353 }
    12354 
    12355 bool VmaBlockVector::IsBufferImageGranularityConflictPossible() const
    12356 {
    12357  if(m_BufferImageGranularity == 1)
    12358  {
    12359  return false;
    12360  }
    12361  VmaSuballocationType lastSuballocType = VMA_SUBALLOCATION_TYPE_FREE;
    12362  for(size_t i = 0, count = m_Blocks.size(); i < count; ++i)
    12363  {
    12364  VmaDeviceMemoryBlock* const pBlock = m_Blocks[i];
    12365  VMA_ASSERT(m_Algorithm == 0);
    12366  VmaBlockMetadata_Generic* const pMetadata = (VmaBlockMetadata_Generic*)pBlock->m_pMetadata;
    12367  if(pMetadata->IsBufferImageGranularityConflictPossible(m_BufferImageGranularity, lastSuballocType))
    12368  {
    12369  return true;
    12370  }
    12371  }
    12372  return false;
    12373 }
    12374 
    12375 void VmaBlockVector::MakePoolAllocationsLost(
    12376  uint32_t currentFrameIndex,
    12377  size_t* pLostAllocationCount)
    12378 {
    12379  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
    12380  size_t lostAllocationCount = 0;
    12381  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    12382  {
    12383  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    12384  VMA_ASSERT(pBlock);
    12385  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
    12386  }
    12387  if(pLostAllocationCount != VMA_NULL)
    12388  {
    12389  *pLostAllocationCount = lostAllocationCount;
    12390  }
    12391 }
    12392 
    12393 VkResult VmaBlockVector::CheckCorruption()
    12394 {
    12395  if(!IsCorruptionDetectionEnabled())
    12396  {
    12397  return VK_ERROR_FEATURE_NOT_PRESENT;
    12398  }
    12399 
    12400  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
    12401  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    12402  {
    12403  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    12404  VMA_ASSERT(pBlock);
    12405  VkResult res = pBlock->CheckCorruption(m_hAllocator);
    12406  if(res != VK_SUCCESS)
    12407  {
    12408  return res;
    12409  }
    12410  }
    12411  return VK_SUCCESS;
    12412 }
    12413 
    12414 void VmaBlockVector::AddStats(VmaStats* pStats)
    12415 {
    12416  const uint32_t memTypeIndex = m_MemoryTypeIndex;
    12417  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
    12418 
    12419  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
    12420 
    12421  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    12422  {
    12423  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    12424  VMA_ASSERT(pBlock);
    12425  VMA_HEAVY_ASSERT(pBlock->Validate());
    12426  VmaStatInfo allocationStatInfo;
    12427  pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
    12428  VmaAddStatInfo(pStats->total, allocationStatInfo);
    12429  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
    12430  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    12431  }
    12432 }
    12433 
    12435 // VmaDefragmentationAlgorithm_Generic members definition
    12436 
    12437 VmaDefragmentationAlgorithm_Generic::VmaDefragmentationAlgorithm_Generic(
    12438  VmaAllocator hAllocator,
    12439  VmaBlockVector* pBlockVector,
    12440  uint32_t currentFrameIndex,
    12441  bool overlappingMoveSupported) :
    12442  VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
    12443  m_AllAllocations(false),
    12444  m_AllocationCount(0),
    12445  m_BytesMoved(0),
    12446  m_AllocationsMoved(0),
    12447  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
    12448 {
    12449  // Create block info for each block.
    12450  const size_t blockCount = m_pBlockVector->m_Blocks.size();
    12451  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    12452  {
    12453  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
    12454  pBlockInfo->m_OriginalBlockIndex = blockIndex;
    12455  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
    12456  m_Blocks.push_back(pBlockInfo);
    12457  }
    12458 
    12459  // Sort them by m_pBlock pointer value.
    12460  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
    12461 }
    12462 
    12463 VmaDefragmentationAlgorithm_Generic::~VmaDefragmentationAlgorithm_Generic()
    12464 {
    12465  for(size_t i = m_Blocks.size(); i--; )
    12466  {
    12467  vma_delete(m_hAllocator, m_Blocks[i]);
    12468  }
    12469 }
    12470 
    12471 void VmaDefragmentationAlgorithm_Generic::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
    12472 {
    12473  // Now as we are inside VmaBlockVector::m_Mutex, we can make final check if this allocation was not lost.
    12474  if(hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
    12475  {
    12476  VmaDeviceMemoryBlock* pBlock = hAlloc->GetBlock();
    12477  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
    12478  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
    12479  {
    12480  AllocationInfo allocInfo = AllocationInfo(hAlloc, pChanged);
    12481  (*it)->m_Allocations.push_back(allocInfo);
    12482  }
    12483  else
    12484  {
    12485  VMA_ASSERT(0);
    12486  }
    12487 
    12488  ++m_AllocationCount;
    12489  }
    12490 }
    12491 
/*
Performs one round of defragmentation.

Walks candidate allocations from the most "source" block (back of m_Blocks)
towards the front, and for each one tries to find a better place in a
preceding block (or earlier in the same block, as judged by MoveMakesSense).
Successful relocations are applied to the metadata immediately and recorded
in `moves` for later data transfer; m_BytesMoved / m_AllocationsMoved are
updated accordingly.

Returns VK_SUCCESS when no more work can be done in this round or when the
maxBytesToMove / maxAllocationsToMove limits are reached. Requires that
m_Blocks is already sorted from most "destination" to most "source"
(done by Defragment()).
*/
VkResult VmaDefragmentationAlgorithm_Generic::DefragmentRound(
    VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    VkDeviceSize maxBytesToMove,
    uint32_t maxAllocationsToMove)
{
    if(m_Blocks.empty())
    {
        return VK_SUCCESS;
    }

    // This is a choice based on research.
    // Option 1:
    uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT;
    // Option 2:
    //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT;
    // Option 3:
    //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT;

    size_t srcBlockMinIndex = 0;
    // When FAST_ALGORITHM, move allocations from only last out of blocks that contain non-movable allocations.
    /*
    if(m_AlgorithmFlags & VMA_DEFRAGMENTATION_FAST_ALGORITHM_BIT)
    {
        const size_t blocksWithNonMovableCount = CalcBlocksWithNonMovableCount();
        if(blocksWithNonMovableCount > 0)
        {
            srcBlockMinIndex = blocksWithNonMovableCount - 1;
        }
    }
    */

    // srcAllocIndex == SIZE_MAX means "not yet positioned within this block".
    size_t srcBlockIndex = m_Blocks.size() - 1;
    size_t srcAllocIndex = SIZE_MAX;
    for(;;)
    {
        // 1. Find next allocation to move.
        // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
        // 1.2. Then start from last to first m_Allocations.
        while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
        {
            if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
            {
                // Finished: no more allocations to process.
                if(srcBlockIndex == srcBlockMinIndex)
                {
                    return VK_SUCCESS;
                }
                else
                {
                    --srcBlockIndex;
                    srcAllocIndex = SIZE_MAX;
                }
            }
            else
            {
                srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
            }
        }

        BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
        AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];

        const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
        const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
        const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
        const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();

        // 2. Try to find new place for this allocation in preceding or current block.
        for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
        {
            BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
            VmaAllocationRequest dstAllocRequest;
            if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
                m_CurrentFrameIndex,
                m_pBlockVector->GetFrameInUseCount(),
                m_pBlockVector->GetBufferImageGranularity(),
                size,
                alignment,
                false, // upperAddress
                suballocType,
                false, // canMakeOtherLost
                strategy,
                &dstAllocRequest) &&
            MoveMakesSense(
                dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
            {
                // canMakeOtherLost was false, so nothing may be scheduled to become lost.
                VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);

                // Reached limit on number of allocations or bytes to move.
                if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
                    (m_BytesMoved + size > maxBytesToMove))
                {
                    return VK_SUCCESS;
                }

                // Record the move for the caller to perform the actual data copy.
                VmaDefragmentationMove move;
                move.srcBlockIndex = pSrcBlockInfo->m_OriginalBlockIndex;
                move.dstBlockIndex = pDstBlockInfo->m_OriginalBlockIndex;
                move.srcOffset = srcOffset;
                move.dstOffset = dstAllocRequest.offset;
                move.size = size;
                moves.push_back(move);

                // Apply the relocation to metadata right away.
                pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
                    dstAllocRequest,
                    suballocType,
                    size,
                    false, // upperAddress
                    allocInfo.m_hAllocation);
                pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);

                allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);

                if(allocInfo.m_pChanged != VMA_NULL)
                {
                    *allocInfo.m_pChanged = VK_TRUE;
                }

                ++m_AllocationsMoved;
                m_BytesMoved += size;

                VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);

                break;
            }
        }

        // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round.

        if(srcAllocIndex > 0)
        {
            --srcAllocIndex;
        }
        else
        {
            if(srcBlockIndex > 0)
            {
                --srcBlockIndex;
                srcAllocIndex = SIZE_MAX;
            }
            else
            {
                return VK_SUCCESS;
            }
        }
    }
}
    12639 
    12640 size_t VmaDefragmentationAlgorithm_Generic::CalcBlocksWithNonMovableCount() const
    12641 {
    12642  size_t result = 0;
    12643  for(size_t i = 0; i < m_Blocks.size(); ++i)
    12644  {
    12645  if(m_Blocks[i]->m_HasNonMovableAllocations)
    12646  {
    12647  ++result;
    12648  }
    12649  }
    12650  return result;
    12651 }
    12652 
/*
Main entry point of the generic algorithm.

Prepares each block's candidate list (when m_AllAllocations is set, every
non-free suballocation becomes a candidate), computes per-block flags, sorts
blocks from most "destination" to most "source", then runs up to two
DefragmentRound() passes within the given byte/allocation limits.
Appends the computed moves to `moves` and returns the last round's result.
*/
VkResult VmaDefragmentationAlgorithm_Generic::Defragment(
    VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    VkDeviceSize maxBytesToMove,
    uint32_t maxAllocationsToMove)
{
    // Nothing registered and not operating on all allocations - nothing to do.
    if(!m_AllAllocations && m_AllocationCount == 0)
    {
        return VK_SUCCESS;
    }

    const size_t blockCount = m_Blocks.size();
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo* pBlockInfo = m_Blocks[blockIndex];

        if(m_AllAllocations)
        {
            // Populate candidates from the block's own suballocation list.
            // NOTE: cast assumes generic metadata (the only kind this algorithm serves).
            VmaBlockMetadata_Generic* pMetadata = (VmaBlockMetadata_Generic*)pBlockInfo->m_pBlock->m_pMetadata;
            for(VmaSuballocationList::const_iterator it = pMetadata->m_Suballocations.begin();
                it != pMetadata->m_Suballocations.end();
                ++it)
            {
                if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
                {
                    AllocationInfo allocInfo = AllocationInfo(it->hAllocation, VMA_NULL);
                    pBlockInfo->m_Allocations.push_back(allocInfo);
                }
            }
        }

        pBlockInfo->CalcHasNonMovableAllocations();

        // This is a choice based on research.
        // Option 1:
        pBlockInfo->SortAllocationsByOffsetDescending();
        // Option 2:
        //pBlockInfo->SortAllocationsBySizeDescending();
    }

    // Sort m_Blocks this time by the main criterium, from most "destination" to most "source" blocks.
    VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());

    // This is a choice based on research.
    const uint32_t roundCount = 2;

    // Execute defragmentation rounds (the main part).
    VkResult result = VK_SUCCESS;
    for(uint32_t round = 0; (round < roundCount) && (result == VK_SUCCESS); ++round)
    {
        result = DefragmentRound(moves, maxBytesToMove, maxAllocationsToMove);
    }

    return result;
}
    12707 
    12708 bool VmaDefragmentationAlgorithm_Generic::MoveMakesSense(
    12709  size_t dstBlockIndex, VkDeviceSize dstOffset,
    12710  size_t srcBlockIndex, VkDeviceSize srcOffset)
    12711 {
    12712  if(dstBlockIndex < srcBlockIndex)
    12713  {
    12714  return true;
    12715  }
    12716  if(dstBlockIndex > srcBlockIndex)
    12717  {
    12718  return false;
    12719  }
    12720  if(dstOffset < srcOffset)
    12721  {
    12722  return true;
    12723  }
    12724  return false;
    12725 }
    12726 
    12728 // VmaDefragmentationAlgorithm_Fast
    12729 
/*
Constructor of the fast defragmentation algorithm.
Only stores parameters and initializes counters; the per-block info list
(m_BlockInfos) is filled later in Defragment(). `overlappingMoveSupported`
records whether moves may overlap (true for the CPU/memmove path).
*/
VmaDefragmentationAlgorithm_Fast::VmaDefragmentationAlgorithm_Fast(
    VmaAllocator hAllocator,
    VmaBlockVector* pBlockVector,
    uint32_t currentFrameIndex,
    bool overlappingMoveSupported) :
    VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
    m_OverlappingMoveSupported(overlappingMoveSupported),
    m_AllocationCount(0),
    m_AllAllocations(false),
    m_BytesMoved(0),
    m_AllocationsMoved(0),
    m_BlockInfos(VmaStlAllocator<BlockInfo>(hAllocator->GetAllocationCallbacks()))
{
    // This algorithm does not support a debug margin between allocations.
    VMA_ASSERT(VMA_DEBUG_MARGIN == 0);

}
    12746 
// Intentionally empty: all members clean up via their own destructors.
VmaDefragmentationAlgorithm_Fast::~VmaDefragmentationAlgorithm_Fast()
{
}
    12750 
    12751 VkResult VmaDefragmentationAlgorithm_Fast::Defragment(
    12752  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    12753  VkDeviceSize maxBytesToMove,
    12754  uint32_t maxAllocationsToMove)
    12755 {
    12756  VMA_ASSERT(m_AllAllocations || m_pBlockVector->CalcAllocationCount() == m_AllocationCount);
    12757 
    12758  const size_t blockCount = m_pBlockVector->GetBlockCount();
    12759  if(blockCount == 0 || maxBytesToMove == 0 || maxAllocationsToMove == 0)
    12760  {
    12761  return VK_SUCCESS;
    12762  }
    12763 
    12764  PreprocessMetadata();
    12765 
    12766  // Sort blocks in order from most destination.
    12767 
    12768  m_BlockInfos.resize(blockCount);
    12769  for(size_t i = 0; i < blockCount; ++i)
    12770  {
    12771  m_BlockInfos[i].origBlockIndex = i;
    12772  }
    12773 
    12774  VMA_SORT(m_BlockInfos.begin(), m_BlockInfos.end(), [this](const BlockInfo& lhs, const BlockInfo& rhs) -> bool {
    12775  return m_pBlockVector->GetBlock(lhs.origBlockIndex)->m_pMetadata->GetSumFreeSize() <
    12776  m_pBlockVector->GetBlock(rhs.origBlockIndex)->m_pMetadata->GetSumFreeSize();
    12777  });
    12778 
    12779  // THE MAIN ALGORITHM
    12780 
    12781  FreeSpaceDatabase freeSpaceDb;
    12782 
    12783  size_t dstBlockInfoIndex = 0;
    12784  size_t dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
    12785  VmaDeviceMemoryBlock* pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
    12786  VmaBlockMetadata_Generic* pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
    12787  VkDeviceSize dstBlockSize = pDstMetadata->GetSize();
    12788  VkDeviceSize dstOffset = 0;
    12789 
    12790  bool end = false;
    12791  for(size_t srcBlockInfoIndex = 0; !end && srcBlockInfoIndex < blockCount; ++srcBlockInfoIndex)
    12792  {
    12793  const size_t srcOrigBlockIndex = m_BlockInfos[srcBlockInfoIndex].origBlockIndex;
    12794  VmaDeviceMemoryBlock* const pSrcBlock = m_pBlockVector->GetBlock(srcOrigBlockIndex);
    12795  VmaBlockMetadata_Generic* const pSrcMetadata = (VmaBlockMetadata_Generic*)pSrcBlock->m_pMetadata;
    12796  for(VmaSuballocationList::iterator srcSuballocIt = pSrcMetadata->m_Suballocations.begin();
    12797  !end && srcSuballocIt != pSrcMetadata->m_Suballocations.end(); )
    12798  {
    12799  VmaAllocation_T* const pAlloc = srcSuballocIt->hAllocation;
    12800  const VkDeviceSize srcAllocAlignment = pAlloc->GetAlignment();
    12801  const VkDeviceSize srcAllocSize = srcSuballocIt->size;
    12802  if(m_AllocationsMoved == maxAllocationsToMove ||
    12803  m_BytesMoved + srcAllocSize > maxBytesToMove)
    12804  {
    12805  end = true;
    12806  break;
    12807  }
    12808  const VkDeviceSize srcAllocOffset = srcSuballocIt->offset;
    12809 
    12810  // Try to place it in one of free spaces from the database.
    12811  size_t freeSpaceInfoIndex;
    12812  VkDeviceSize dstAllocOffset;
    12813  if(freeSpaceDb.Fetch(srcAllocAlignment, srcAllocSize,
    12814  freeSpaceInfoIndex, dstAllocOffset))
    12815  {
    12816  size_t freeSpaceOrigBlockIndex = m_BlockInfos[freeSpaceInfoIndex].origBlockIndex;
    12817  VmaDeviceMemoryBlock* pFreeSpaceBlock = m_pBlockVector->GetBlock(freeSpaceOrigBlockIndex);
    12818  VmaBlockMetadata_Generic* pFreeSpaceMetadata = (VmaBlockMetadata_Generic*)pFreeSpaceBlock->m_pMetadata;
    12819  VkDeviceSize freeSpaceBlockSize = pFreeSpaceMetadata->GetSize();
    12820 
    12821  // Same block
    12822  if(freeSpaceInfoIndex == srcBlockInfoIndex)
    12823  {
    12824  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
    12825 
    12826  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
    12827 
    12828  VmaSuballocation suballoc = *srcSuballocIt;
    12829  suballoc.offset = dstAllocOffset;
    12830  suballoc.hAllocation->ChangeOffset(dstAllocOffset);
    12831  m_BytesMoved += srcAllocSize;
    12832  ++m_AllocationsMoved;
    12833 
    12834  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
    12835  ++nextSuballocIt;
    12836  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
    12837  srcSuballocIt = nextSuballocIt;
    12838 
    12839  InsertSuballoc(pFreeSpaceMetadata, suballoc);
    12840 
    12841  VmaDefragmentationMove move = {
    12842  srcOrigBlockIndex, freeSpaceOrigBlockIndex,
    12843  srcAllocOffset, dstAllocOffset,
    12844  srcAllocSize };
    12845  moves.push_back(move);
    12846  }
    12847  // Different block
    12848  else
    12849  {
    12850  // MOVE OPTION 2: Move the allocation to a different block.
    12851 
    12852  VMA_ASSERT(freeSpaceInfoIndex < srcBlockInfoIndex);
    12853 
    12854  VmaSuballocation suballoc = *srcSuballocIt;
    12855  suballoc.offset = dstAllocOffset;
    12856  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pFreeSpaceBlock, dstAllocOffset);
    12857  m_BytesMoved += srcAllocSize;
    12858  ++m_AllocationsMoved;
    12859 
    12860  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
    12861  ++nextSuballocIt;
    12862  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
    12863  srcSuballocIt = nextSuballocIt;
    12864 
    12865  InsertSuballoc(pFreeSpaceMetadata, suballoc);
    12866 
    12867  VmaDefragmentationMove move = {
    12868  srcOrigBlockIndex, freeSpaceOrigBlockIndex,
    12869  srcAllocOffset, dstAllocOffset,
    12870  srcAllocSize };
    12871  moves.push_back(move);
    12872  }
    12873  }
    12874  else
    12875  {
    12876  dstAllocOffset = VmaAlignUp(dstOffset, srcAllocAlignment);
    12877 
    12878  // If the allocation doesn't fit before the end of dstBlock, forward to next block.
    12879  while(dstBlockInfoIndex < srcBlockInfoIndex &&
    12880  dstAllocOffset + srcAllocSize > dstBlockSize)
    12881  {
    12882  // But before that, register remaining free space at the end of dst block.
    12883  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, dstBlockSize - dstOffset);
    12884 
    12885  ++dstBlockInfoIndex;
    12886  dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
    12887  pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
    12888  pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
    12889  dstBlockSize = pDstMetadata->GetSize();
    12890  dstOffset = 0;
    12891  dstAllocOffset = 0;
    12892  }
    12893 
    12894  // Same block
    12895  if(dstBlockInfoIndex == srcBlockInfoIndex)
    12896  {
    12897  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
    12898 
    12899  const bool overlap = dstAllocOffset + srcAllocSize > srcAllocOffset;
    12900 
    12901  bool skipOver = overlap;
    12902  if(overlap && m_OverlappingMoveSupported && dstAllocOffset < srcAllocOffset)
    12903  {
    12904  // If destination and source place overlap, skip if it would move it
    12905  // by only < 1/64 of its size.
    12906  skipOver = (srcAllocOffset - dstAllocOffset) * 64 < srcAllocSize;
    12907  }
    12908 
    12909  if(skipOver)
    12910  {
    12911  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, srcAllocOffset - dstOffset);
    12912 
    12913  dstOffset = srcAllocOffset + srcAllocSize;
    12914  ++srcSuballocIt;
    12915  }
    12916  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
    12917  else
    12918  {
    12919  srcSuballocIt->offset = dstAllocOffset;
    12920  srcSuballocIt->hAllocation->ChangeOffset(dstAllocOffset);
    12921  dstOffset = dstAllocOffset + srcAllocSize;
    12922  m_BytesMoved += srcAllocSize;
    12923  ++m_AllocationsMoved;
    12924  ++srcSuballocIt;
    12925  VmaDefragmentationMove move = {
    12926  srcOrigBlockIndex, dstOrigBlockIndex,
    12927  srcAllocOffset, dstAllocOffset,
    12928  srcAllocSize };
    12929  moves.push_back(move);
    12930  }
    12931  }
    12932  // Different block
    12933  else
    12934  {
    12935  // MOVE OPTION 2: Move the allocation to a different block.
    12936 
    12937  VMA_ASSERT(dstBlockInfoIndex < srcBlockInfoIndex);
    12938  VMA_ASSERT(dstAllocOffset + srcAllocSize <= dstBlockSize);
    12939 
    12940  VmaSuballocation suballoc = *srcSuballocIt;
    12941  suballoc.offset = dstAllocOffset;
    12942  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlock, dstAllocOffset);
    12943  dstOffset = dstAllocOffset + srcAllocSize;
    12944  m_BytesMoved += srcAllocSize;
    12945  ++m_AllocationsMoved;
    12946 
    12947  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
    12948  ++nextSuballocIt;
    12949  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
    12950  srcSuballocIt = nextSuballocIt;
    12951 
    12952  pDstMetadata->m_Suballocations.push_back(suballoc);
    12953 
    12954  VmaDefragmentationMove move = {
    12955  srcOrigBlockIndex, dstOrigBlockIndex,
    12956  srcAllocOffset, dstAllocOffset,
    12957  srcAllocSize };
    12958  moves.push_back(move);
    12959  }
    12960  }
    12961  }
    12962  }
    12963 
    12964  m_BlockInfos.clear();
    12965 
    12966  PostprocessMetadata();
    12967 
    12968  return VK_SUCCESS;
    12969 }
    12970 
    12971 void VmaDefragmentationAlgorithm_Fast::PreprocessMetadata()
    12972 {
    12973  const size_t blockCount = m_pBlockVector->GetBlockCount();
    12974  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    12975  {
    12976  VmaBlockMetadata_Generic* const pMetadata =
    12977  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
    12978  pMetadata->m_FreeCount = 0;
    12979  pMetadata->m_SumFreeSize = pMetadata->GetSize();
    12980  pMetadata->m_FreeSuballocationsBySize.clear();
    12981  for(VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
    12982  it != pMetadata->m_Suballocations.end(); )
    12983  {
    12984  if(it->type == VMA_SUBALLOCATION_TYPE_FREE)
    12985  {
    12986  VmaSuballocationList::iterator nextIt = it;
    12987  ++nextIt;
    12988  pMetadata->m_Suballocations.erase(it);
    12989  it = nextIt;
    12990  }
    12991  else
    12992  {
    12993  ++it;
    12994  }
    12995  }
    12996  }
    12997 }
    12998 
    12999 void VmaDefragmentationAlgorithm_Fast::PostprocessMetadata()
    13000 {
    13001  const size_t blockCount = m_pBlockVector->GetBlockCount();
    13002  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    13003  {
    13004  VmaBlockMetadata_Generic* const pMetadata =
    13005  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
    13006  const VkDeviceSize blockSize = pMetadata->GetSize();
    13007 
    13008  // No allocations in this block - entire area is free.
    13009  if(pMetadata->m_Suballocations.empty())
    13010  {
    13011  pMetadata->m_FreeCount = 1;
    13012  //pMetadata->m_SumFreeSize is already set to blockSize.
    13013  VmaSuballocation suballoc = {
    13014  0, // offset
    13015  blockSize, // size
    13016  VMA_NULL, // hAllocation
    13017  VMA_SUBALLOCATION_TYPE_FREE };
    13018  pMetadata->m_Suballocations.push_back(suballoc);
    13019  pMetadata->RegisterFreeSuballocation(pMetadata->m_Suballocations.begin());
    13020  }
    13021  // There are some allocations in this block.
    13022  else
    13023  {
    13024  VkDeviceSize offset = 0;
    13025  VmaSuballocationList::iterator it;
    13026  for(it = pMetadata->m_Suballocations.begin();
    13027  it != pMetadata->m_Suballocations.end();
    13028  ++it)
    13029  {
    13030  VMA_ASSERT(it->type != VMA_SUBALLOCATION_TYPE_FREE);
    13031  VMA_ASSERT(it->offset >= offset);
    13032 
    13033  // Need to insert preceding free space.
    13034  if(it->offset > offset)
    13035  {
    13036  ++pMetadata->m_FreeCount;
    13037  const VkDeviceSize freeSize = it->offset - offset;
    13038  VmaSuballocation suballoc = {
    13039  offset, // offset
    13040  freeSize, // size
    13041  VMA_NULL, // hAllocation
    13042  VMA_SUBALLOCATION_TYPE_FREE };
    13043  VmaSuballocationList::iterator precedingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
    13044  if(freeSize >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    13045  {
    13046  pMetadata->m_FreeSuballocationsBySize.push_back(precedingFreeIt);
    13047  }
    13048  }
    13049 
    13050  pMetadata->m_SumFreeSize -= it->size;
    13051  offset = it->offset + it->size;
    13052  }
    13053 
    13054  // Need to insert trailing free space.
    13055  if(offset < blockSize)
    13056  {
    13057  ++pMetadata->m_FreeCount;
    13058  const VkDeviceSize freeSize = blockSize - offset;
    13059  VmaSuballocation suballoc = {
    13060  offset, // offset
    13061  freeSize, // size
    13062  VMA_NULL, // hAllocation
    13063  VMA_SUBALLOCATION_TYPE_FREE };
    13064  VMA_ASSERT(it == pMetadata->m_Suballocations.end());
    13065  VmaSuballocationList::iterator trailingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
    13066  if(freeSize > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    13067  {
    13068  pMetadata->m_FreeSuballocationsBySize.push_back(trailingFreeIt);
    13069  }
    13070  }
    13071 
    13072  VMA_SORT(
    13073  pMetadata->m_FreeSuballocationsBySize.begin(),
    13074  pMetadata->m_FreeSuballocationsBySize.end(),
    13075  VmaSuballocationItemSizeLess());
    13076  }
    13077 
    13078  VMA_HEAVY_ASSERT(pMetadata->Validate());
    13079  }
    13080 }
    13081 
    13082 void VmaDefragmentationAlgorithm_Fast::InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc)
    13083 {
    13084  // TODO: Optimize somehow. Remember iterator instead of searching for it linearly.
    13085  VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
    13086  while(it != pMetadata->m_Suballocations.end())
    13087  {
    13088  if(it->offset < suballoc.offset)
    13089  {
    13090  ++it;
    13091  }
    13092  }
    13093  pMetadata->m_Suballocations.insert(it, suballoc);
    13094 }
    13095 
    13097 // VmaBlockVectorDefragmentationContext
    13098 
// Per-block-vector defragmentation context. hCustomPool may be VMA_NULL when
// the context serves a default (per-memory-type) pool rather than a custom
// one. The algorithm object (m_pAlgorithm) is created later, in Begin().
VmaBlockVectorDefragmentationContext::VmaBlockVectorDefragmentationContext(
    VmaAllocator hAllocator,
    VmaPool hCustomPool,
    VmaBlockVector* pBlockVector,
    uint32_t currFrameIndex,
    uint32_t algorithmFlags) :
    res(VK_SUCCESS),
    mutexLocked(false),
    blockContexts(VmaStlAllocator<VmaBlockDefragmentationContext>(hAllocator->GetAllocationCallbacks())),
    m_hAllocator(hAllocator),
    m_hCustomPool(hCustomPool),
    m_pBlockVector(pBlockVector),
    m_CurrFrameIndex(currFrameIndex),
    m_AlgorithmFlags(algorithmFlags),
    m_pAlgorithm(VMA_NULL),
    m_Allocations(VmaStlAllocator<AllocInfo>(hAllocator->GetAllocationCallbacks())),
    m_AllAllocations(false)
{
}
    13118 
VmaBlockVectorDefragmentationContext::~VmaBlockVectorDefragmentationContext()
{
    // Destroy the defragmentation algorithm instance created in Begin().
    vma_delete(m_hAllocator, m_pAlgorithm);
}
    13123 
    13124 void VmaBlockVectorDefragmentationContext::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
    13125 {
    13126  AllocInfo info = { hAlloc, pChanged };
    13127  m_Allocations.push_back(info);
    13128 }
    13129 
    13130 void VmaBlockVectorDefragmentationContext::Begin(bool overlappingMoveSupported)
    13131 {
    13132  const bool allAllocations = m_AllAllocations ||
    13133  m_Allocations.size() == m_pBlockVector->CalcAllocationCount();
    13134 
    13135  /********************************
    13136  HERE IS THE CHOICE OF DEFRAGMENTATION ALGORITHM.
    13137  ********************************/
    13138 
    13139  /*
    13140  Fast algorithm is supported only when certain criteria are met:
    13141  - VMA_DEBUG_MARGIN is 0.
    13142  - All allocations in this block vector are moveable.
    13143  - There is no possibility of image/buffer granularity conflict.
    13144  */
    13145  if(VMA_DEBUG_MARGIN == 0 &&
    13146  allAllocations &&
    13147  !m_pBlockVector->IsBufferImageGranularityConflictPossible())
    13148  {
    13149  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Fast)(
    13150  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
    13151  }
    13152  else
    13153  {
    13154  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Generic)(
    13155  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
    13156  }
    13157 
    13158  if(allAllocations)
    13159  {
    13160  m_pAlgorithm->AddAll();
    13161  }
    13162  else
    13163  {
    13164  for(size_t i = 0, count = m_Allocations.size(); i < count; ++i)
    13165  {
    13166  m_pAlgorithm->AddAllocation(m_Allocations[i].hAlloc, m_Allocations[i].pChanged);
    13167  }
    13168  }
    13169 }
    13170 
    13172 // VmaDefragmentationContext
    13173 
// Top-level defragmentation context spanning default pools (one slot per
// memory type) and any number of custom pools.
VmaDefragmentationContext_T::VmaDefragmentationContext_T(
    VmaAllocator hAllocator,
    uint32_t currFrameIndex,
    uint32_t flags,
    VmaDefragmentationStats* pStats) :
    m_hAllocator(hAllocator),
    m_CurrFrameIndex(currFrameIndex),
    m_Flags(flags),
    m_pStats(pStats),
    m_CustomPoolContexts(VmaStlAllocator<VmaBlockVectorDefragmentationContext*>(hAllocator->GetAllocationCallbacks()))
{
    // Default-pool contexts are created lazily in AddAllocations();
    // start with every slot null.
    memset(m_DefaultPoolContexts, 0, sizeof(m_DefaultPoolContexts));
}
    13187 
    13188 VmaDefragmentationContext_T::~VmaDefragmentationContext_T()
    13189 {
    13190  for(size_t i = m_CustomPoolContexts.size(); i--; )
    13191  {
    13192  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[i];
    13193  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
    13194  vma_delete(m_hAllocator, pBlockVectorCtx);
    13195  }
    13196  for(size_t i = m_hAllocator->m_MemProps.memoryTypeCount; i--; )
    13197  {
    13198  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[i];
    13199  if(pBlockVectorCtx)
    13200  {
    13201  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
    13202  vma_delete(m_hAllocator, pBlockVectorCtx);
    13203  }
    13204  }
    13205 }
    13206 
    13207 void VmaDefragmentationContext_T::AddPools(uint32_t poolCount, VmaPool* pPools)
    13208 {
    13209  for(uint32_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
    13210  {
    13211  VmaPool pool = pPools[poolIndex];
    13212  VMA_ASSERT(pool);
    13213  // Pools with algorithm other than default are not defragmented.
    13214  if(pool->m_BlockVector.GetAlgorithm() == 0)
    13215  {
    13216  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
    13217 
    13218  for(size_t i = m_CustomPoolContexts.size(); i--; )
    13219  {
    13220  if(m_CustomPoolContexts[i]->GetCustomPool() == pool)
    13221  {
    13222  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
    13223  break;
    13224  }
    13225  }
    13226 
    13227  if(!pBlockVectorDefragCtx)
    13228  {
    13229  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
    13230  m_hAllocator,
    13231  pool,
    13232  &pool->m_BlockVector,
    13233  m_CurrFrameIndex,
    13234  m_Flags);
    13235  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
    13236  }
    13237 
    13238  pBlockVectorDefragCtx->AddAll();
    13239  }
    13240  }
    13241 }
    13242 
    13243 void VmaDefragmentationContext_T::AddAllocations(
    13244  uint32_t allocationCount,
    13245  VmaAllocation* pAllocations,
    13246  VkBool32* pAllocationsChanged)
    13247 {
    13248  // Dispatch pAllocations among defragmentators. Create them when necessary.
    13249  for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
    13250  {
    13251  const VmaAllocation hAlloc = pAllocations[allocIndex];
    13252  VMA_ASSERT(hAlloc);
    13253  // DedicatedAlloc cannot be defragmented.
    13254  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
    13255  // Lost allocation cannot be defragmented.
    13256  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
    13257  {
    13258  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
    13259 
    13260  const VmaPool hAllocPool = hAlloc->GetPool();
    13261  // This allocation belongs to custom pool.
    13262  if(hAllocPool != VK_NULL_HANDLE)
    13263  {
    13264  // Pools with algorithm other than default are not defragmented.
    13265  if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
    13266  {
    13267  for(size_t i = m_CustomPoolContexts.size(); i--; )
    13268  {
    13269  if(m_CustomPoolContexts[i]->GetCustomPool() == hAllocPool)
    13270  {
    13271  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
    13272  break;
    13273  }
    13274  }
    13275  if(!pBlockVectorDefragCtx)
    13276  {
    13277  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
    13278  m_hAllocator,
    13279  hAllocPool,
    13280  &hAllocPool->m_BlockVector,
    13281  m_CurrFrameIndex,
    13282  m_Flags);
    13283  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
    13284  }
    13285  }
    13286  }
    13287  // This allocation belongs to default pool.
    13288  else
    13289  {
    13290  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
    13291  pBlockVectorDefragCtx = m_DefaultPoolContexts[memTypeIndex];
    13292  if(!pBlockVectorDefragCtx)
    13293  {
    13294  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
    13295  m_hAllocator,
    13296  VMA_NULL, // hCustomPool
    13297  m_hAllocator->m_pBlockVectors[memTypeIndex],
    13298  m_CurrFrameIndex,
    13299  m_Flags);
    13300  m_DefaultPoolContexts[memTypeIndex] = pBlockVectorDefragCtx;
    13301  }
    13302  }
    13303 
    13304  if(pBlockVectorDefragCtx)
    13305  {
    13306  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
    13307  &pAllocationsChanged[allocIndex] : VMA_NULL;
    13308  pBlockVectorDefragCtx->AddAllocation(hAlloc, pChanged);
    13309  }
    13310  }
    13311  }
    13312 }
    13313 
    13314 VkResult VmaDefragmentationContext_T::Defragment(
    13315  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
    13316  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
    13317  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats)
    13318 {
    13319  if(pStats)
    13320  {
    13321  memset(pStats, 0, sizeof(VmaDefragmentationStats));
    13322  }
    13323 
    13324  if(commandBuffer == VK_NULL_HANDLE)
    13325  {
    13326  maxGpuBytesToMove = 0;
    13327  maxGpuAllocationsToMove = 0;
    13328  }
    13329 
    13330  VkResult res = VK_SUCCESS;
    13331 
    13332  // Process default pools.
    13333  for(uint32_t memTypeIndex = 0;
    13334  memTypeIndex < m_hAllocator->GetMemoryTypeCount() && res >= VK_SUCCESS;
    13335  ++memTypeIndex)
    13336  {
    13337  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
    13338  if(pBlockVectorCtx)
    13339  {
    13340  VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
    13341  pBlockVectorCtx->GetBlockVector()->Defragment(
    13342  pBlockVectorCtx,
    13343  pStats,
    13344  maxCpuBytesToMove, maxCpuAllocationsToMove,
    13345  maxGpuBytesToMove, maxGpuAllocationsToMove,
    13346  commandBuffer);
    13347  if(pBlockVectorCtx->res != VK_SUCCESS)
    13348  {
    13349  res = pBlockVectorCtx->res;
    13350  }
    13351  }
    13352  }
    13353 
    13354  // Process custom pools.
    13355  for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
    13356  customCtxIndex < customCtxCount && res >= VK_SUCCESS;
    13357  ++customCtxIndex)
    13358  {
    13359  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
    13360  VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
    13361  pBlockVectorCtx->GetBlockVector()->Defragment(
    13362  pBlockVectorCtx,
    13363  pStats,
    13364  maxCpuBytesToMove, maxCpuAllocationsToMove,
    13365  maxGpuBytesToMove, maxGpuAllocationsToMove,
    13366  commandBuffer);
    13367  if(pBlockVectorCtx->res != VK_SUCCESS)
    13368  {
    13369  res = pBlockVectorCtx->res;
    13370  }
    13371  }
    13372 
    13373  return res;
    13374 }
    13375 
    13377 // VmaRecorder
    13378 
    13379 #if VMA_RECORDING_ENABLED
    13380 
// Constructs an inactive recorder; the file is opened and the QPC timing
// baseline captured later, in Init().
VmaRecorder::VmaRecorder() :
    m_UseMutex(true),
    m_Flags(0),
    m_File(VMA_NULL),
    m_Freq(INT64_MAX),
    m_StartCounter(INT64_MAX)
{
}
    13389 
    13390 VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
    13391 {
    13392  m_UseMutex = useMutex;
    13393  m_Flags = settings.flags;
    13394 
    13395  QueryPerformanceFrequency((LARGE_INTEGER*)&m_Freq);
    13396  QueryPerformanceCounter((LARGE_INTEGER*)&m_StartCounter);
    13397 
    13398  // Open file for writing.
    13399  errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
    13400  if(err != 0)
    13401  {
    13402  return VK_ERROR_INITIALIZATION_FAILED;
    13403  }
    13404 
    13405  // Write header.
    13406  fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
    13407  fprintf(m_File, "%s\n", "1,5");
    13408 
    13409  return VK_SUCCESS;
    13410 }
    13411 
VmaRecorder::~VmaRecorder()
{
    // Close the recording file if Init() opened one successfully.
    if(m_File != VMA_NULL)
    {
        fclose(m_File);
    }
}
    13419 
    13420 void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
    13421 {
    13422  CallParams callParams;
    13423  GetBasicParams(callParams);
    13424 
    13425  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13426  fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
    13427  Flush();
    13428 }
    13429 
    13430 void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
    13431 {
    13432  CallParams callParams;
    13433  GetBasicParams(callParams);
    13434 
    13435  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13436  fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
    13437  Flush();
    13438 }
    13439 
    13440 void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
    13441 {
    13442  CallParams callParams;
    13443  GetBasicParams(callParams);
    13444 
    13445  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13446  fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
    13447  createInfo.memoryTypeIndex,
    13448  createInfo.flags,
    13449  createInfo.blockSize,
    13450  (uint64_t)createInfo.minBlockCount,
    13451  (uint64_t)createInfo.maxBlockCount,
    13452  createInfo.frameInUseCount,
    13453  pool);
    13454  Flush();
    13455 }
    13456 
    13457 void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
    13458 {
    13459  CallParams callParams;
    13460  GetBasicParams(callParams);
    13461 
    13462  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13463  fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
    13464  pool);
    13465  Flush();
    13466 }
    13467 
    13468 void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
    13469  const VkMemoryRequirements& vkMemReq,
    13470  const VmaAllocationCreateInfo& createInfo,
    13471  VmaAllocation allocation)
    13472 {
    13473  CallParams callParams;
    13474  GetBasicParams(callParams);
    13475 
    13476  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13477  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    13478  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    13479  vkMemReq.size,
    13480  vkMemReq.alignment,
    13481  vkMemReq.memoryTypeBits,
    13482  createInfo.flags,
    13483  createInfo.usage,
    13484  createInfo.requiredFlags,
    13485  createInfo.preferredFlags,
    13486  createInfo.memoryTypeBits,
    13487  createInfo.pool,
    13488  allocation,
    13489  userDataStr.GetString());
    13490  Flush();
    13491 }
    13492 
    13493 void VmaRecorder::RecordAllocateMemoryPages(uint32_t frameIndex,
    13494  const VkMemoryRequirements& vkMemReq,
    13495  const VmaAllocationCreateInfo& createInfo,
    13496  uint64_t allocationCount,
    13497  const VmaAllocation* pAllocations)
    13498 {
    13499  CallParams callParams;
    13500  GetBasicParams(callParams);
    13501 
    13502  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13503  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    13504  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryPages,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,", callParams.threadId, callParams.time, frameIndex,
    13505  vkMemReq.size,
    13506  vkMemReq.alignment,
    13507  vkMemReq.memoryTypeBits,
    13508  createInfo.flags,
    13509  createInfo.usage,
    13510  createInfo.requiredFlags,
    13511  createInfo.preferredFlags,
    13512  createInfo.memoryTypeBits,
    13513  createInfo.pool);
    13514  PrintPointerList(allocationCount, pAllocations);
    13515  fprintf(m_File, ",%s\n", userDataStr.GetString());
    13516  Flush();
    13517 }
    13518 
    13519 void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
    13520  const VkMemoryRequirements& vkMemReq,
    13521  bool requiresDedicatedAllocation,
    13522  bool prefersDedicatedAllocation,
    13523  const VmaAllocationCreateInfo& createInfo,
    13524  VmaAllocation allocation)
    13525 {
    13526  CallParams callParams;
    13527  GetBasicParams(callParams);
    13528 
    13529  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13530  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    13531  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    13532  vkMemReq.size,
    13533  vkMemReq.alignment,
    13534  vkMemReq.memoryTypeBits,
    13535  requiresDedicatedAllocation ? 1 : 0,
    13536  prefersDedicatedAllocation ? 1 : 0,
    13537  createInfo.flags,
    13538  createInfo.usage,
    13539  createInfo.requiredFlags,
    13540  createInfo.preferredFlags,
    13541  createInfo.memoryTypeBits,
    13542  createInfo.pool,
    13543  allocation,
    13544  userDataStr.GetString());
    13545  Flush();
    13546 }
    13547 
    13548 void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
    13549  const VkMemoryRequirements& vkMemReq,
    13550  bool requiresDedicatedAllocation,
    13551  bool prefersDedicatedAllocation,
    13552  const VmaAllocationCreateInfo& createInfo,
    13553  VmaAllocation allocation)
    13554 {
    13555  CallParams callParams;
    13556  GetBasicParams(callParams);
    13557 
    13558  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13559  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    13560  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    13561  vkMemReq.size,
    13562  vkMemReq.alignment,
    13563  vkMemReq.memoryTypeBits,
    13564  requiresDedicatedAllocation ? 1 : 0,
    13565  prefersDedicatedAllocation ? 1 : 0,
    13566  createInfo.flags,
    13567  createInfo.usage,
    13568  createInfo.requiredFlags,
    13569  createInfo.preferredFlags,
    13570  createInfo.memoryTypeBits,
    13571  createInfo.pool,
    13572  allocation,
    13573  userDataStr.GetString());
    13574  Flush();
    13575 }
    13576 
    13577 void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
    13578  VmaAllocation allocation)
    13579 {
    13580  CallParams callParams;
    13581  GetBasicParams(callParams);
    13582 
    13583  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13584  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
    13585  allocation);
    13586  Flush();
    13587 }
    13588 
    13589 void VmaRecorder::RecordFreeMemoryPages(uint32_t frameIndex,
    13590  uint64_t allocationCount,
    13591  const VmaAllocation* pAllocations)
    13592 {
    13593  CallParams callParams;
    13594  GetBasicParams(callParams);
    13595 
    13596  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13597  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemoryPages,", callParams.threadId, callParams.time, frameIndex);
    13598  PrintPointerList(allocationCount, pAllocations);
    13599  fprintf(m_File, "\n");
    13600  Flush();
    13601 }
    13602 
    13603 void VmaRecorder::RecordResizeAllocation(
    13604  uint32_t frameIndex,
    13605  VmaAllocation allocation,
    13606  VkDeviceSize newSize)
    13607 {
    13608  CallParams callParams;
    13609  GetBasicParams(callParams);
    13610 
    13611  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13612  fprintf(m_File, "%u,%.3f,%u,vmaResizeAllocation,%p,%llu\n", callParams.threadId, callParams.time, frameIndex,
    13613  allocation, newSize);
    13614  Flush();
    13615 }
    13616 
    13617 void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
    13618  VmaAllocation allocation,
    13619  const void* pUserData)
    13620 {
    13621  CallParams callParams;
    13622  GetBasicParams(callParams);
    13623 
    13624  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13625  UserDataString userDataStr(
    13626  allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
    13627  pUserData);
    13628  fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    13629  allocation,
    13630  userDataStr.GetString());
    13631  Flush();
    13632 }
    13633 
    13634 void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
    13635  VmaAllocation allocation)
    13636 {
    13637  CallParams callParams;
    13638  GetBasicParams(callParams);
    13639 
    13640  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13641  fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
    13642  allocation);
    13643  Flush();
    13644 }
    13645 
    13646 void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
    13647  VmaAllocation allocation)
    13648 {
    13649  CallParams callParams;
    13650  GetBasicParams(callParams);
    13651 
    13652  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13653  fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
    13654  allocation);
    13655  Flush();
    13656 }
    13657 
    13658 void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
    13659  VmaAllocation allocation)
    13660 {
    13661  CallParams callParams;
    13662  GetBasicParams(callParams);
    13663 
    13664  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13665  fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
    13666  allocation);
    13667  Flush();
    13668 }
    13669 
    13670 void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
    13671  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    13672 {
    13673  CallParams callParams;
    13674  GetBasicParams(callParams);
    13675 
    13676  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13677  fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
    13678  allocation,
    13679  offset,
    13680  size);
    13681  Flush();
    13682 }
    13683 
    13684 void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
    13685  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    13686 {
    13687  CallParams callParams;
    13688  GetBasicParams(callParams);
    13689 
    13690  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13691  fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
    13692  allocation,
    13693  offset,
    13694  size);
    13695  Flush();
    13696 }
    13697 
    13698 void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
    13699  const VkBufferCreateInfo& bufCreateInfo,
    13700  const VmaAllocationCreateInfo& allocCreateInfo,
    13701  VmaAllocation allocation)
    13702 {
    13703  CallParams callParams;
    13704  GetBasicParams(callParams);
    13705 
    13706  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13707  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
    13708  fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    13709  bufCreateInfo.flags,
    13710  bufCreateInfo.size,
    13711  bufCreateInfo.usage,
    13712  bufCreateInfo.sharingMode,
    13713  allocCreateInfo.flags,
    13714  allocCreateInfo.usage,
    13715  allocCreateInfo.requiredFlags,
    13716  allocCreateInfo.preferredFlags,
    13717  allocCreateInfo.memoryTypeBits,
    13718  allocCreateInfo.pool,
    13719  allocation,
    13720  userDataStr.GetString());
    13721  Flush();
    13722 }
    13723 
    13724 void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
    13725  const VkImageCreateInfo& imageCreateInfo,
    13726  const VmaAllocationCreateInfo& allocCreateInfo,
    13727  VmaAllocation allocation)
    13728 {
    13729  CallParams callParams;
    13730  GetBasicParams(callParams);
    13731 
    13732  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13733  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
    13734  fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    13735  imageCreateInfo.flags,
    13736  imageCreateInfo.imageType,
    13737  imageCreateInfo.format,
    13738  imageCreateInfo.extent.width,
    13739  imageCreateInfo.extent.height,
    13740  imageCreateInfo.extent.depth,
    13741  imageCreateInfo.mipLevels,
    13742  imageCreateInfo.arrayLayers,
    13743  imageCreateInfo.samples,
    13744  imageCreateInfo.tiling,
    13745  imageCreateInfo.usage,
    13746  imageCreateInfo.sharingMode,
    13747  imageCreateInfo.initialLayout,
    13748  allocCreateInfo.flags,
    13749  allocCreateInfo.usage,
    13750  allocCreateInfo.requiredFlags,
    13751  allocCreateInfo.preferredFlags,
    13752  allocCreateInfo.memoryTypeBits,
    13753  allocCreateInfo.pool,
    13754  allocation,
    13755  userDataStr.GetString());
    13756  Flush();
    13757 }
    13758 
    13759 void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
    13760  VmaAllocation allocation)
    13761 {
    13762  CallParams callParams;
    13763  GetBasicParams(callParams);
    13764 
    13765  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13766  fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
    13767  allocation);
    13768  Flush();
    13769 }
    13770 
    13771 void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
    13772  VmaAllocation allocation)
    13773 {
    13774  CallParams callParams;
    13775  GetBasicParams(callParams);
    13776 
    13777  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13778  fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
    13779  allocation);
    13780  Flush();
    13781 }
    13782 
    13783 void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
    13784  VmaAllocation allocation)
    13785 {
    13786  CallParams callParams;
    13787  GetBasicParams(callParams);
    13788 
    13789  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13790  fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
    13791  allocation);
    13792  Flush();
    13793 }
    13794 
    13795 void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
    13796  VmaAllocation allocation)
    13797 {
    13798  CallParams callParams;
    13799  GetBasicParams(callParams);
    13800 
    13801  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13802  fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
    13803  allocation);
    13804  Flush();
    13805 }
    13806 
    13807 void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
    13808  VmaPool pool)
    13809 {
    13810  CallParams callParams;
    13811  GetBasicParams(callParams);
    13812 
    13813  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13814  fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
    13815  pool);
    13816  Flush();
    13817 }
    13818 
    13819 void VmaRecorder::RecordDefragmentationBegin(uint32_t frameIndex,
    13820  const VmaDefragmentationInfo2& info,
    13822 {
    13823  CallParams callParams;
    13824  GetBasicParams(callParams);
    13825 
    13826  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13827  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationBegin,%u,", callParams.threadId, callParams.time, frameIndex,
    13828  info.flags);
    13829  PrintPointerList(info.allocationCount, info.pAllocations);
    13830  fprintf(m_File, ",");
    13831  PrintPointerList(info.poolCount, info.pPools);
    13832  fprintf(m_File, ",%llu,%u,%llu,%u,%p,%p\n",
    13833  info.maxCpuBytesToMove,
    13835  info.maxGpuBytesToMove,
    13837  info.commandBuffer,
    13838  ctx);
    13839  Flush();
    13840 }
    13841 
    13842 void VmaRecorder::RecordDefragmentationEnd(uint32_t frameIndex,
    13844 {
    13845  CallParams callParams;
    13846  GetBasicParams(callParams);
    13847 
    13848  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13849  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationEnd,%p\n", callParams.threadId, callParams.time, frameIndex,
    13850  ctx);
    13851  Flush();
    13852 }
    13853 
    13854 VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
    13855 {
    13856  if(pUserData != VMA_NULL)
    13857  {
    13858  if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
    13859  {
    13860  m_Str = (const char*)pUserData;
    13861  }
    13862  else
    13863  {
    13864  sprintf_s(m_PtrStr, "%p", pUserData);
    13865  m_Str = m_PtrStr;
    13866  }
    13867  }
    13868  else
    13869  {
    13870  m_Str = "";
    13871  }
    13872 }
    13873 
    13874 void VmaRecorder::WriteConfiguration(
    13875  const VkPhysicalDeviceProperties& devProps,
    13876  const VkPhysicalDeviceMemoryProperties& memProps,
    13877  bool dedicatedAllocationExtensionEnabled)
    13878 {
    13879  fprintf(m_File, "Config,Begin\n");
    13880 
    13881  fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
    13882  fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
    13883  fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
    13884  fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
    13885  fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
    13886  fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);
    13887 
    13888  fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
    13889  fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
    13890  fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);
    13891 
    13892  fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
    13893  for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
    13894  {
    13895  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
    13896  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
    13897  }
    13898  fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
    13899  for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
    13900  {
    13901  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
    13902  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
    13903  }
    13904 
    13905  fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);
    13906 
    13907  fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
    13908  fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
    13909  fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
    13910  fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
    13911  fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
    13912  fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
    13913  fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
    13914  fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
    13915  fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
    13916 
    13917  fprintf(m_File, "Config,End\n");
    13918 }
    13919 
    13920 void VmaRecorder::GetBasicParams(CallParams& outParams)
    13921 {
    13922  outParams.threadId = GetCurrentThreadId();
    13923 
    13924  LARGE_INTEGER counter;
    13925  QueryPerformanceCounter(&counter);
    13926  outParams.time = (double)(counter.QuadPart - m_StartCounter) / (double)m_Freq;
    13927 }
    13928 
    13929 void VmaRecorder::PrintPointerList(uint64_t count, const VmaAllocation* pItems)
    13930 {
    13931  if(count)
    13932  {
    13933  fprintf(m_File, "%p", pItems[0]);
    13934  for(uint64_t i = 1; i < count; ++i)
    13935  {
    13936  fprintf(m_File, " %p", pItems[i]);
    13937  }
    13938  }
    13939 }
    13940 
    13941 void VmaRecorder::Flush()
    13942 {
    13943  if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
    13944  {
    13945  fflush(m_File);
    13946  }
    13947 }
    13948 
    13949 #endif // #if VMA_RECORDING_ENABLED
    13950 
    13952 // VmaAllocator_T
    13953 
    13954 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
    13955  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
    13956  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
    13957  m_hDevice(pCreateInfo->device),
    13958  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
    13959  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
    13960  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
    13961  m_PreferredLargeHeapBlockSize(0),
    13962  m_PhysicalDevice(pCreateInfo->physicalDevice),
    13963  m_CurrentFrameIndex(0),
    13964  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
    13965  m_NextPoolId(0)
    13967  ,m_pRecorder(VMA_NULL)
    13968 #endif
    13969 {
    13970  if(VMA_DEBUG_DETECT_CORRUPTION)
    13971  {
    13972  // Needs to be multiply of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
    13973  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
    13974  }
    13975 
    13976  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
    13977 
    13978 #if !(VMA_DEDICATED_ALLOCATION)
    13980  {
    13981  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
    13982  }
    13983 #endif
    13984 
    13985  memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks));
    13986  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
    13987  memset(&m_MemProps, 0, sizeof(m_MemProps));
    13988 
    13989  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
    13990  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
    13991 
    13992  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    13993  {
    13994  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
    13995  }
    13996 
    13997  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
    13998  {
    13999  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
    14000  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
    14001  }
    14002 
    14003  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
    14004 
    14005  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
    14006  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
    14007 
    14008  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
    14009  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
    14010  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
    14011  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
    14012 
    14013  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
    14014  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
    14015 
    14016  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
    14017  {
    14018  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
    14019  {
    14020  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
    14021  if(limit != VK_WHOLE_SIZE)
    14022  {
    14023  m_HeapSizeLimit[heapIndex] = limit;
    14024  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
    14025  {
    14026  m_MemProps.memoryHeaps[heapIndex].size = limit;
    14027  }
    14028  }
    14029  }
    14030  }
    14031 
    14032  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    14033  {
    14034  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
    14035 
    14036  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
    14037  this,
    14038  memTypeIndex,
    14039  preferredBlockSize,
    14040  0,
    14041  SIZE_MAX,
    14042  GetBufferImageGranularity(),
    14043  pCreateInfo->frameInUseCount,
    14044  false, // isCustomPool
    14045  false, // explicitBlockSize
    14046  false); // linearAlgorithm
    14047  // No need to call m_pBlockVectors[memTypeIndex][blockVectorTypeIndex]->CreateMinBlocks here,
    14048  // becase minBlockCount is 0.
    14049  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
    14050 
    14051  }
    14052 }
    14053 
    14054 VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
    14055 {
    14056  VkResult res = VK_SUCCESS;
    14057 
    14058  if(pCreateInfo->pRecordSettings != VMA_NULL &&
    14059  !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
    14060  {
    14061 #if VMA_RECORDING_ENABLED
    14062  m_pRecorder = vma_new(this, VmaRecorder)();
    14063  res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
    14064  if(res != VK_SUCCESS)
    14065  {
    14066  return res;
    14067  }
    14068  m_pRecorder->WriteConfiguration(
    14069  m_PhysicalDeviceProperties,
    14070  m_MemProps,
    14071  m_UseKhrDedicatedAllocation);
    14072  m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
    14073 #else
    14074  VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
    14075  return VK_ERROR_FEATURE_NOT_PRESENT;
    14076 #endif
    14077  }
    14078 
    14079  return res;
    14080 }
    14081 
    14082 VmaAllocator_T::~VmaAllocator_T()
    14083 {
    14084 #if VMA_RECORDING_ENABLED
    14085  if(m_pRecorder != VMA_NULL)
    14086  {
    14087  m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
    14088  vma_delete(this, m_pRecorder);
    14089  }
    14090 #endif
    14091 
    14092  VMA_ASSERT(m_Pools.empty());
    14093 
    14094  for(size_t i = GetMemoryTypeCount(); i--; )
    14095  {
    14096  vma_delete(this, m_pDedicatedAllocations[i]);
    14097  vma_delete(this, m_pBlockVectors[i]);
    14098  }
    14099 }
    14100 
// Populates m_VulkanFunctions in up to two passes:
// 1. When VMA_STATIC_VULKAN_FUNCTIONS == 1, take the statically linked
//    entry points (and fetch the KHR dedicated-allocation pair via
//    vkGetDeviceProcAddr when that extension is in use).
// 2. Overlay any non-null pointers the user supplied in pVulkanFunctions,
//    which therefore take precedence over the static ones.
// Finally asserts that every required pointer ended up non-null.
void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
{
#if VMA_STATIC_VULKAN_FUNCTIONS == 1
    m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
    m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
    m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
    m_VulkanFunctions.vkMapMemory = &vkMapMemory;
    m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
    m_VulkanFunctions.vkFlushMappedMemoryRanges = &vkFlushMappedMemoryRanges;
    m_VulkanFunctions.vkInvalidateMappedMemoryRanges = &vkInvalidateMappedMemoryRanges;
    m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
    m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
    m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
    m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
    m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
    m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
    m_VulkanFunctions.vkCreateImage = &vkCreateImage;
    m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
    m_VulkanFunctions.vkCmdCopyBuffer = &vkCmdCopyBuffer;
#if VMA_DEDICATED_ALLOCATION
    // Extension functions are not statically exported — resolve them from the device.
    if(m_UseKhrDedicatedAllocation)
    {
        m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
            (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
        m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
            (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
    }
#endif // #if VMA_DEDICATED_ALLOCATION
#endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1

// Copies a single user-supplied pointer only if it is non-null, so users may
// override any subset of the statically imported functions.
#define VMA_COPY_IF_NOT_NULL(funcName) \
    if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;

    if(pVulkanFunctions != VMA_NULL)
    {
        VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
        VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
        VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
        VMA_COPY_IF_NOT_NULL(vkFreeMemory);
        VMA_COPY_IF_NOT_NULL(vkMapMemory);
        VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
        VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
        VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
        VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
        VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
        VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
        VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
        VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
        VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
        VMA_COPY_IF_NOT_NULL(vkCreateImage);
        VMA_COPY_IF_NOT_NULL(vkDestroyImage);
        VMA_COPY_IF_NOT_NULL(vkCmdCopyBuffer);
#if VMA_DEDICATED_ALLOCATION
        VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
        VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
#endif
    }

#undef VMA_COPY_IF_NOT_NULL

    // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
    // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
    VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCmdCopyBuffer != VMA_NULL);
#if VMA_DEDICATED_ALLOCATION
    if(m_UseKhrDedicatedAllocation)
    {
        VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
        VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
    }
#endif
}
    14189 
    14190 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
    14191 {
    14192  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    14193  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
    14194  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
    14195  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
    14196 }
    14197 
    14198 VkResult VmaAllocator_T::AllocateMemoryOfType(
    14199  VkDeviceSize size,
    14200  VkDeviceSize alignment,
    14201  bool dedicatedAllocation,
    14202  VkBuffer dedicatedBuffer,
    14203  VkImage dedicatedImage,
    14204  const VmaAllocationCreateInfo& createInfo,
    14205  uint32_t memTypeIndex,
    14206  VmaSuballocationType suballocType,
    14207  size_t allocationCount,
    14208  VmaAllocation* pAllocations)
    14209 {
    14210  VMA_ASSERT(pAllocations != VMA_NULL);
    14211  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, AllocationCount=%zu, Size=%llu", memTypeIndex, allocationCount, vkMemReq.size);
    14212 
    14213  VmaAllocationCreateInfo finalCreateInfo = createInfo;
    14214 
    14215  // If memory type is not HOST_VISIBLE, disable MAPPED.
    14216  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
    14217  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    14218  {
    14219  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
    14220  }
    14221 
    14222  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
    14223  VMA_ASSERT(blockVector);
    14224 
    14225  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
    14226  bool preferDedicatedMemory =
    14227  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
    14228  dedicatedAllocation ||
    14229  // Heuristics: Allocate dedicated memory if requested size if greater than half of preferred block size.
    14230  size > preferredBlockSize / 2;
    14231 
    14232  if(preferDedicatedMemory &&
    14233  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
    14234  finalCreateInfo.pool == VK_NULL_HANDLE)
    14235  {
    14237  }
    14238 
    14239  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
    14240  {
    14241  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    14242  {
    14243  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    14244  }
    14245  else
    14246  {
    14247  return AllocateDedicatedMemory(
    14248  size,
    14249  suballocType,
    14250  memTypeIndex,
    14251  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
    14252  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
    14253  finalCreateInfo.pUserData,
    14254  dedicatedBuffer,
    14255  dedicatedImage,
    14256  allocationCount,
    14257  pAllocations);
    14258  }
    14259  }
    14260  else
    14261  {
    14262  VkResult res = blockVector->Allocate(
    14263  VK_NULL_HANDLE, // hCurrentPool
    14264  m_CurrentFrameIndex.load(),
    14265  size,
    14266  alignment,
    14267  finalCreateInfo,
    14268  suballocType,
    14269  allocationCount,
    14270  pAllocations);
    14271  if(res == VK_SUCCESS)
    14272  {
    14273  return res;
    14274  }
    14275 
    14276  // 5. Try dedicated memory.
    14277  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    14278  {
    14279  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    14280  }
    14281  else
    14282  {
    14283  res = AllocateDedicatedMemory(
    14284  size,
    14285  suballocType,
    14286  memTypeIndex,
    14287  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
    14288  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
    14289  finalCreateInfo.pUserData,
    14290  dedicatedBuffer,
    14291  dedicatedImage,
    14292  allocationCount,
    14293  pAllocations);
    14294  if(res == VK_SUCCESS)
    14295  {
    14296  // Succeeded: AllocateDedicatedMemory function already filld pMemory, nothing more to do here.
    14297  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
    14298  return VK_SUCCESS;
    14299  }
    14300  else
    14301  {
    14302  // Everything failed: Return error code.
    14303  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
    14304  return res;
    14305  }
    14306  }
    14307  }
    14308 }
    14309 
    14310 VkResult VmaAllocator_T::AllocateDedicatedMemory(
    14311  VkDeviceSize size,
    14312  VmaSuballocationType suballocType,
    14313  uint32_t memTypeIndex,
    14314  bool map,
    14315  bool isUserDataString,
    14316  void* pUserData,
    14317  VkBuffer dedicatedBuffer,
    14318  VkImage dedicatedImage,
    14319  size_t allocationCount,
    14320  VmaAllocation* pAllocations)
    14321 {
    14322  VMA_ASSERT(allocationCount > 0 && pAllocations);
    14323 
    14324  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    14325  allocInfo.memoryTypeIndex = memTypeIndex;
    14326  allocInfo.allocationSize = size;
    14327 
    14328 #if VMA_DEDICATED_ALLOCATION
    14329  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
    14330  if(m_UseKhrDedicatedAllocation)
    14331  {
    14332  if(dedicatedBuffer != VK_NULL_HANDLE)
    14333  {
    14334  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
    14335  dedicatedAllocInfo.buffer = dedicatedBuffer;
    14336  allocInfo.pNext = &dedicatedAllocInfo;
    14337  }
    14338  else if(dedicatedImage != VK_NULL_HANDLE)
    14339  {
    14340  dedicatedAllocInfo.image = dedicatedImage;
    14341  allocInfo.pNext = &dedicatedAllocInfo;
    14342  }
    14343  }
    14344 #endif // #if VMA_DEDICATED_ALLOCATION
    14345 
    14346  size_t allocIndex;
    14347  VkResult res;
    14348  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
    14349  {
    14350  res = AllocateDedicatedMemoryPage(
    14351  size,
    14352  suballocType,
    14353  memTypeIndex,
    14354  allocInfo,
    14355  map,
    14356  isUserDataString,
    14357  pUserData,
    14358  pAllocations + allocIndex);
    14359  if(res != VK_SUCCESS)
    14360  {
    14361  break;
    14362  }
    14363  }
    14364 
    14365  if(res == VK_SUCCESS)
    14366  {
    14367  // Register them in m_pDedicatedAllocations.
    14368  {
    14369  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    14370  AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
    14371  VMA_ASSERT(pDedicatedAllocations);
    14372  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
    14373  {
    14374  VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, pAllocations[allocIndex]);
    14375  }
    14376  }
    14377 
    14378  VMA_DEBUG_LOG(" Allocated DedicatedMemory Count=%zu, MemoryTypeIndex=#%u", allocationCount, memTypeIndex);
    14379  }
    14380  else
    14381  {
    14382  // Free all already created allocations.
    14383  while(allocIndex--)
    14384  {
    14385  VmaAllocation currAlloc = pAllocations[allocIndex];
    14386  VkDeviceMemory hMemory = currAlloc->GetMemory();
    14387 
    14388  /*
    14389  There is no need to call this, because Vulkan spec allows to skip vkUnmapMemory
    14390  before vkFreeMemory.
    14391 
    14392  if(currAlloc->GetMappedData() != VMA_NULL)
    14393  {
    14394  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
    14395  }
    14396  */
    14397 
    14398  FreeVulkanMemory(memTypeIndex, currAlloc->GetSize(), hMemory);
    14399 
    14400  currAlloc->SetUserData(this, VMA_NULL);
    14401  vma_delete(this, currAlloc);
    14402  }
    14403 
    14404  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
    14405  }
    14406 
    14407  return res;
    14408 }
    14409 
    14410 VkResult VmaAllocator_T::AllocateDedicatedMemoryPage(
    14411  VkDeviceSize size,
    14412  VmaSuballocationType suballocType,
    14413  uint32_t memTypeIndex,
    14414  const VkMemoryAllocateInfo& allocInfo,
    14415  bool map,
    14416  bool isUserDataString,
    14417  void* pUserData,
    14418  VmaAllocation* pAllocation)
    14419 {
    14420  VkDeviceMemory hMemory = VK_NULL_HANDLE;
    14421  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
    14422  if(res < 0)
    14423  {
    14424  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
    14425  return res;
    14426  }
    14427 
    14428  void* pMappedData = VMA_NULL;
    14429  if(map)
    14430  {
    14431  res = (*m_VulkanFunctions.vkMapMemory)(
    14432  m_hDevice,
    14433  hMemory,
    14434  0,
    14435  VK_WHOLE_SIZE,
    14436  0,
    14437  &pMappedData);
    14438  if(res < 0)
    14439  {
    14440  VMA_DEBUG_LOG(" vkMapMemory FAILED");
    14441  FreeVulkanMemory(memTypeIndex, size, hMemory);
    14442  return res;
    14443  }
    14444  }
    14445 
    14446  *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load(), isUserDataString);
    14447  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
    14448  (*pAllocation)->SetUserData(this, pUserData);
    14449  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    14450  {
    14451  FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
    14452  }
    14453 
    14454  return VK_SUCCESS;
    14455 }
    14456 
    14457 void VmaAllocator_T::GetBufferMemoryRequirements(
    14458  VkBuffer hBuffer,
    14459  VkMemoryRequirements& memReq,
    14460  bool& requiresDedicatedAllocation,
    14461  bool& prefersDedicatedAllocation) const
    14462 {
    14463 #if VMA_DEDICATED_ALLOCATION
    14464  if(m_UseKhrDedicatedAllocation)
    14465  {
    14466  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
    14467  memReqInfo.buffer = hBuffer;
    14468 
    14469  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
    14470 
    14471  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
    14472  memReq2.pNext = &memDedicatedReq;
    14473 
    14474  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
    14475 
    14476  memReq = memReq2.memoryRequirements;
    14477  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
    14478  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    14479  }
    14480  else
    14481 #endif // #if VMA_DEDICATED_ALLOCATION
    14482  {
    14483  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
    14484  requiresDedicatedAllocation = false;
    14485  prefersDedicatedAllocation = false;
    14486  }
    14487 }
    14488 
    14489 void VmaAllocator_T::GetImageMemoryRequirements(
    14490  VkImage hImage,
    14491  VkMemoryRequirements& memReq,
    14492  bool& requiresDedicatedAllocation,
    14493  bool& prefersDedicatedAllocation) const
    14494 {
    14495 #if VMA_DEDICATED_ALLOCATION
    14496  if(m_UseKhrDedicatedAllocation)
    14497  {
    14498  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
    14499  memReqInfo.image = hImage;
    14500 
    14501  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
    14502 
    14503  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
    14504  memReq2.pNext = &memDedicatedReq;
    14505 
    14506  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
    14507 
    14508  memReq = memReq2.memoryRequirements;
    14509  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
    14510  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    14511  }
    14512  else
    14513 #endif // #if VMA_DEDICATED_ALLOCATION
    14514  {
    14515  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
    14516  requiresDedicatedAllocation = false;
    14517  prefersDedicatedAllocation = false;
    14518  }
    14519 }
    14520 
    14521 VkResult VmaAllocator_T::AllocateMemory(
    14522  const VkMemoryRequirements& vkMemReq,
    14523  bool requiresDedicatedAllocation,
    14524  bool prefersDedicatedAllocation,
    14525  VkBuffer dedicatedBuffer,
    14526  VkImage dedicatedImage,
    14527  const VmaAllocationCreateInfo& createInfo,
    14528  VmaSuballocationType suballocType,
    14529  size_t allocationCount,
    14530  VmaAllocation* pAllocations)
    14531 {
    14532  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
    14533 
    14534  VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
    14535 
    14536  if(vkMemReq.size == 0)
    14537  {
    14538  return VK_ERROR_VALIDATION_FAILED_EXT;
    14539  }
    14540  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
    14541  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    14542  {
    14543  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
    14544  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    14545  }
    14546  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
    14548  {
    14549  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
    14550  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    14551  }
    14552  if(requiresDedicatedAllocation)
    14553  {
    14554  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    14555  {
    14556  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
    14557  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    14558  }
    14559  if(createInfo.pool != VK_NULL_HANDLE)
    14560  {
    14561  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
    14562  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    14563  }
    14564  }
    14565  if((createInfo.pool != VK_NULL_HANDLE) &&
    14566  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
    14567  {
    14568  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
    14569  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    14570  }
    14571 
    14572  if(createInfo.pool != VK_NULL_HANDLE)
    14573  {
    14574  const VkDeviceSize alignmentForPool = VMA_MAX(
    14575  vkMemReq.alignment,
    14576  GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
    14577  return createInfo.pool->m_BlockVector.Allocate(
    14578  createInfo.pool,
    14579  m_CurrentFrameIndex.load(),
    14580  vkMemReq.size,
    14581  alignmentForPool,
    14582  createInfo,
    14583  suballocType,
    14584  allocationCount,
    14585  pAllocations);
    14586  }
    14587  else
    14588  {
    14589  // Bit mask of memory Vulkan types acceptable for this allocation.
    14590  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
    14591  uint32_t memTypeIndex = UINT32_MAX;
    14592  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
    14593  if(res == VK_SUCCESS)
    14594  {
    14595  VkDeviceSize alignmentForMemType = VMA_MAX(
    14596  vkMemReq.alignment,
    14597  GetMemoryTypeMinAlignment(memTypeIndex));
    14598 
    14599  res = AllocateMemoryOfType(
    14600  vkMemReq.size,
    14601  alignmentForMemType,
    14602  requiresDedicatedAllocation || prefersDedicatedAllocation,
    14603  dedicatedBuffer,
    14604  dedicatedImage,
    14605  createInfo,
    14606  memTypeIndex,
    14607  suballocType,
    14608  allocationCount,
    14609  pAllocations);
    14610  // Succeeded on first try.
    14611  if(res == VK_SUCCESS)
    14612  {
    14613  return res;
    14614  }
    14615  // Allocation from this memory type failed. Try other compatible memory types.
    14616  else
    14617  {
    14618  for(;;)
    14619  {
    14620  // Remove old memTypeIndex from list of possibilities.
    14621  memoryTypeBits &= ~(1u << memTypeIndex);
    14622  // Find alternative memTypeIndex.
    14623  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
    14624  if(res == VK_SUCCESS)
    14625  {
    14626  alignmentForMemType = VMA_MAX(
    14627  vkMemReq.alignment,
    14628  GetMemoryTypeMinAlignment(memTypeIndex));
    14629 
    14630  res = AllocateMemoryOfType(
    14631  vkMemReq.size,
    14632  alignmentForMemType,
    14633  requiresDedicatedAllocation || prefersDedicatedAllocation,
    14634  dedicatedBuffer,
    14635  dedicatedImage,
    14636  createInfo,
    14637  memTypeIndex,
    14638  suballocType,
    14639  allocationCount,
    14640  pAllocations);
    14641  // Allocation from this alternative memory type succeeded.
    14642  if(res == VK_SUCCESS)
    14643  {
    14644  return res;
    14645  }
    14646  // else: Allocation from this memory type failed. Try next one - next loop iteration.
    14647  }
    14648  // No other matching memory type index could be found.
    14649  else
    14650  {
    14651  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
    14652  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    14653  }
    14654  }
    14655  }
    14656  }
    14657  // Can't find any single memory type maching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
    14658  else
    14659  return res;
    14660  }
    14661 }
    14662 
    14663 void VmaAllocator_T::FreeMemory(
    14664  size_t allocationCount,
    14665  const VmaAllocation* pAllocations)
    14666 {
    14667  VMA_ASSERT(pAllocations);
    14668 
    14669  for(size_t allocIndex = allocationCount; allocIndex--; )
    14670  {
    14671  VmaAllocation allocation = pAllocations[allocIndex];
    14672 
    14673  if(allocation != VK_NULL_HANDLE)
    14674  {
    14675  if(TouchAllocation(allocation))
    14676  {
    14677  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    14678  {
    14679  FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
    14680  }
    14681 
    14682  switch(allocation->GetType())
    14683  {
    14684  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    14685  {
    14686  VmaBlockVector* pBlockVector = VMA_NULL;
    14687  VmaPool hPool = allocation->GetPool();
    14688  if(hPool != VK_NULL_HANDLE)
    14689  {
    14690  pBlockVector = &hPool->m_BlockVector;
    14691  }
    14692  else
    14693  {
    14694  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    14695  pBlockVector = m_pBlockVectors[memTypeIndex];
    14696  }
    14697  pBlockVector->Free(allocation);
    14698  }
    14699  break;
    14700  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    14701  FreeDedicatedMemory(allocation);
    14702  break;
    14703  default:
    14704  VMA_ASSERT(0);
    14705  }
    14706  }
    14707 
    14708  allocation->SetUserData(this, VMA_NULL);
    14709  vma_delete(this, allocation);
    14710  }
    14711  }
    14712 }
    14713 
    14714 VkResult VmaAllocator_T::ResizeAllocation(
    14715  const VmaAllocation alloc,
    14716  VkDeviceSize newSize)
    14717 {
    14718  if(newSize == 0 || alloc->GetLastUseFrameIndex() == VMA_FRAME_INDEX_LOST)
    14719  {
    14720  return VK_ERROR_VALIDATION_FAILED_EXT;
    14721  }
    14722  if(newSize == alloc->GetSize())
    14723  {
    14724  return VK_SUCCESS;
    14725  }
    14726 
    14727  switch(alloc->GetType())
    14728  {
    14729  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    14730  return VK_ERROR_FEATURE_NOT_PRESENT;
    14731  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    14732  if(alloc->GetBlock()->m_pMetadata->ResizeAllocation(alloc, newSize))
    14733  {
    14734  alloc->ChangeSize(newSize);
    14735  VMA_HEAVY_ASSERT(alloc->GetBlock()->m_pMetadata->Validate());
    14736  return VK_SUCCESS;
    14737  }
    14738  else
    14739  {
    14740  return VK_ERROR_OUT_OF_POOL_MEMORY;
    14741  }
    14742  default:
    14743  VMA_ASSERT(0);
    14744  return VK_ERROR_VALIDATION_FAILED_EXT;
    14745  }
    14746 }
    14747 
    14748 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
    14749 {
    14750  // Initialize.
    14751  InitStatInfo(pStats->total);
    14752  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
    14753  InitStatInfo(pStats->memoryType[i]);
    14754  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    14755  InitStatInfo(pStats->memoryHeap[i]);
    14756 
    14757  // Process default pools.
    14758  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    14759  {
    14760  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
    14761  VMA_ASSERT(pBlockVector);
    14762  pBlockVector->AddStats(pStats);
    14763  }
    14764 
    14765  // Process custom pools.
    14766  {
    14767  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
    14768  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
    14769  {
    14770  m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
    14771  }
    14772  }
    14773 
    14774  // Process dedicated allocations.
    14775  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    14776  {
    14777  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    14778  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    14779  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
    14780  VMA_ASSERT(pDedicatedAllocVector);
    14781  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
    14782  {
    14783  VmaStatInfo allocationStatInfo;
    14784  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
    14785  VmaAddStatInfo(pStats->total, allocationStatInfo);
    14786  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
    14787  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    14788  }
    14789  }
    14790 
    14791  // Postprocess.
    14792  VmaPostprocessCalcStatInfo(pStats->total);
    14793  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
    14794  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
    14795  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
    14796  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
    14797 }
    14798 
// 4098 == 0x1002, the PCI vendor ID of AMD.
// NOTE(review): not referenced within this excerpt — presumably used elsewhere
// to enable vendor-specific behavior; confirm at the use site.
    14799 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
    14800 
    14801 VkResult VmaAllocator_T::DefragmentationBegin(
    14802  const VmaDefragmentationInfo2& info,
    14803  VmaDefragmentationStats* pStats,
    14804  VmaDefragmentationContext* pContext)
    14805 {
    14806  if(info.pAllocationsChanged != VMA_NULL)
    14807  {
    14808  memset(info.pAllocationsChanged, 0, info.allocationCount * sizeof(VkBool32));
    14809  }
    14810 
    14811  *pContext = vma_new(this, VmaDefragmentationContext_T)(
    14812  this, m_CurrentFrameIndex.load(), info.flags, pStats);
    14813 
    14814  (*pContext)->AddPools(info.poolCount, info.pPools);
    14815  (*pContext)->AddAllocations(
    14817 
    14818  VkResult res = (*pContext)->Defragment(
    14821  info.commandBuffer, pStats);
    14822 
    14823  if(res != VK_NOT_READY)
    14824  {
    14825  vma_delete(this, *pContext);
    14826  *pContext = VMA_NULL;
    14827  }
    14828 
    14829  return res;
    14830 }
    14831 
    14832 VkResult VmaAllocator_T::DefragmentationEnd(
    14833  VmaDefragmentationContext context)
    14834 {
    14835  vma_delete(this, context);
    14836  return VK_SUCCESS;
    14837 }
    14838 
// Fills *pAllocationInfo with the current state of hAllocation.
// For allocations that can become lost, this also acts as a "touch": it
// advances the allocation's last-use frame index to the current frame with a
// lock-free compare-exchange loop, so the allocation is not lost this frame.
    14839 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
    14840 {
    14841  if(hAllocation->CanBecomeLost())
    14842  {
    14843  /*
    14844  Warning: This is a carefully designed algorithm.
    14845  Do not modify unless you really know what you're doing :)
    14846  */
    14847  const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
    14848  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
    14849  for(;;)
    14850  {
// Allocation already lost: report only size and user data.
    14851  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
    14852  {
    14853  pAllocationInfo->memoryType = UINT32_MAX;
    14854  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
    14855  pAllocationInfo->offset = 0;
    14856  pAllocationInfo->size = hAllocation->GetSize();
    14857  pAllocationInfo->pMappedData = VMA_NULL;
    14858  pAllocationInfo->pUserData = hAllocation->GetUserData();
    14859  return;
    14860  }
// Already touched in this frame: report live state.
    14861  else if(localLastUseFrameIndex == localCurrFrameIndex)
    14862  {
    14863  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
    14864  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
    14865  pAllocationInfo->offset = hAllocation->GetOffset();
    14866  pAllocationInfo->size = hAllocation->GetSize();
    14867  pAllocationInfo->pMappedData = VMA_NULL;
    14868  pAllocationInfo->pUserData = hAllocation->GetUserData();
    14869  return;
    14870  }
    14871  else // Last use time earlier than current time.
    14872  {
// NOTE(review): presumably CompareExchangeLastUseFrameIndex refreshes
// localLastUseFrameIndex on failure so the next iteration re-evaluates the
// newly observed value (which may be VMA_FRAME_INDEX_LOST) — confirm.
    14873  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
    14874  {
    14875  localLastUseFrameIndex = localCurrFrameIndex;
    14876  }
    14877  }
    14878  }
    14879  }
    14880  else
    14881  {
    14882 #if VMA_STATS_STRING_ENABLED
// Allocation cannot become lost: the frame index is advanced only when
// statistics are enabled, for bookkeeping.
    14883  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
    14884  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
    14885  for(;;)
    14886  {
    14887  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
    14888  if(localLastUseFrameIndex == localCurrFrameIndex)
    14889  {
    14890  break;
    14891  }
    14892  else // Last use time earlier than current time.
    14893  {
    14894  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
    14895  {
    14896  localLastUseFrameIndex = localCurrFrameIndex;
    14897  }
    14898  }
    14899  }
    14900 #endif
    14901 
    14902  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
    14903  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
    14904  pAllocationInfo->offset = hAllocation->GetOffset();
    14905  pAllocationInfo->size = hAllocation->GetSize();
    14906  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
    14907  pAllocationInfo->pUserData = hAllocation->GetUserData();
    14908  }
    14909 }
    14910 
// Returns false if hAllocation is lost. Otherwise marks the allocation as
// used in the current frame (lock-free compare-exchange loop, same scheme as
// GetAllocationInfo) and returns true.
    14911 bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
    14912 {
    14913  // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
    14914  if(hAllocation->CanBecomeLost())
    14915  {
    14916  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
    14917  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
    14918  for(;;)
    14919  {
    14920  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
    14921  {
    14922  return false;
    14923  }
    14924  else if(localLastUseFrameIndex == localCurrFrameIndex)
    14925  {
    14926  return true;
    14927  }
    14928  else // Last use time earlier than current time.
    14929  {
// NOTE(review): presumably CompareExchangeLastUseFrameIndex refreshes
// localLastUseFrameIndex on failure so the loop re-evaluates — confirm.
    14930  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
    14931  {
    14932  localLastUseFrameIndex = localCurrFrameIndex;
    14933  }
    14934  }
    14935  }
    14936  }
    14937  else
    14938  {
    14939 #if VMA_STATS_STRING_ENABLED
// Cannot become lost: always succeeds; the frame index is advanced only when
// statistics are enabled.
    14940  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
    14941  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
    14942  for(;;)
    14943  {
    14944  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
    14945  if(localLastUseFrameIndex == localCurrFrameIndex)
    14946  {
    14947  break;
    14948  }
    14949  else // Last use time earlier than current time.
    14950  {
    14951  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
    14952  {
    14953  localLastUseFrameIndex = localCurrFrameIndex;
    14954  }
    14955  }
    14956  }
    14957 #endif
    14958 
    14959  return true;
    14960  }
    14961 }
    14962 
    14963 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
    14964 {
    14965  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
    14966 
    14967  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
    14968 
    14969  if(newCreateInfo.maxBlockCount == 0)
    14970  {
    14971  newCreateInfo.maxBlockCount = SIZE_MAX;
    14972  }
    14973  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
    14974  {
    14975  return VK_ERROR_INITIALIZATION_FAILED;
    14976  }
    14977 
    14978  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
    14979 
    14980  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
    14981 
    14982  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
    14983  if(res != VK_SUCCESS)
    14984  {
    14985  vma_delete(this, *pPool);
    14986  *pPool = VMA_NULL;
    14987  return res;
    14988  }
    14989 
    14990  // Add to m_Pools.
    14991  {
    14992  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
    14993  (*pPool)->SetId(m_NextPoolId++);
    14994  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
    14995  }
    14996 
    14997  return VK_SUCCESS;
    14998 }
    14999 
    15000 void VmaAllocator_T::DestroyPool(VmaPool pool)
    15001 {
    15002  // Remove from m_Pools.
    15003  {
    15004  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
    15005  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
    15006  VMA_ASSERT(success && "Pool not found in Allocator.");
    15007  }
    15008 
    15009  vma_delete(this, pool);
    15010 }
    15011 
    15012 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
    15013 {
    15014  pool->m_BlockVector.GetPoolStats(pPoolStats);
    15015 }
    15016 
    15017 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
    15018 {
    15019  m_CurrentFrameIndex.store(frameIndex);
    15020 }
    15021 
    15022 void VmaAllocator_T::MakePoolAllocationsLost(
    15023  VmaPool hPool,
    15024  size_t* pLostAllocationCount)
    15025 {
    15026  hPool->m_BlockVector.MakePoolAllocationsLost(
    15027  m_CurrentFrameIndex.load(),
    15028  pLostAllocationCount);
    15029 }
    15030 
    15031 VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
    15032 {
    15033  return hPool->m_BlockVector.CheckCorruption();
    15034 }
    15035 
    15036 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
    15037 {
    15038  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
    15039 
    15040  // Process default pools.
    15041  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    15042  {
    15043  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
    15044  {
    15045  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
    15046  VMA_ASSERT(pBlockVector);
    15047  VkResult localRes = pBlockVector->CheckCorruption();
    15048  switch(localRes)
    15049  {
    15050  case VK_ERROR_FEATURE_NOT_PRESENT:
    15051  break;
    15052  case VK_SUCCESS:
    15053  finalRes = VK_SUCCESS;
    15054  break;
    15055  default:
    15056  return localRes;
    15057  }
    15058  }
    15059  }
    15060 
    15061  // Process custom pools.
    15062  {
    15063  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
    15064  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
    15065  {
    15066  if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
    15067  {
    15068  VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
    15069  switch(localRes)
    15070  {
    15071  case VK_ERROR_FEATURE_NOT_PRESENT:
    15072  break;
    15073  case VK_SUCCESS:
    15074  finalRes = VK_SUCCESS;
    15075  break;
    15076  default:
    15077  return localRes;
    15078  }
    15079  }
    15080  }
    15081  }
    15082 
    15083  return finalRes;
    15084 }
    15085 
    15086 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
    15087 {
    15088  *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST, false);
    15089  (*pAllocation)->InitLost();
    15090 }
    15091 
    15092 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
    15093 {
    15094  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
    15095 
    15096  VkResult res;
    15097  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    15098  {
    15099  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
    15100  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
    15101  {
    15102  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
    15103  if(res == VK_SUCCESS)
    15104  {
    15105  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
    15106  }
    15107  }
    15108  else
    15109  {
    15110  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
    15111  }
    15112  }
    15113  else
    15114  {
    15115  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
    15116  }
    15117 
    15118  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
    15119  {
    15120  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
    15121  }
    15122 
    15123  return res;
    15124 }
    15125 
    15126 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
    15127 {
    15128  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
    15129  {
    15130  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
    15131  }
    15132 
    15133  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
    15134 
    15135  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
    15136  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    15137  {
    15138  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
    15139  m_HeapSizeLimit[heapIndex] += size;
    15140  }
    15141 }
    15142 
    15143 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
    15144 {
    15145  if(hAllocation->CanBecomeLost())
    15146  {
    15147  return VK_ERROR_MEMORY_MAP_FAILED;
    15148  }
    15149 
    15150  switch(hAllocation->GetType())
    15151  {
    15152  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    15153  {
    15154  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
    15155  char *pBytes = VMA_NULL;
    15156  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
    15157  if(res == VK_SUCCESS)
    15158  {
    15159  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
    15160  hAllocation->BlockAllocMap();
    15161  }
    15162  return res;
    15163  }
    15164  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    15165  return hAllocation->DedicatedAllocMap(this, ppData);
    15166  default:
    15167  VMA_ASSERT(0);
    15168  return VK_ERROR_MEMORY_MAP_FAILED;
    15169  }
    15170 }
    15171 
    15172 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
    15173 {
    15174  switch(hAllocation->GetType())
    15175  {
    15176  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    15177  {
    15178  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
    15179  hAllocation->BlockAllocUnmap();
    15180  pBlock->Unmap(this, 1);
    15181  }
    15182  break;
    15183  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    15184  hAllocation->DedicatedAllocUnmap(this);
    15185  break;
    15186  default:
    15187  VMA_ASSERT(0);
    15188  }
    15189 }
    15190 
    15191 VkResult VmaAllocator_T::BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer)
    15192 {
    15193  VkResult res = VK_SUCCESS;
    15194  switch(hAllocation->GetType())
    15195  {
    15196  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    15197  res = GetVulkanFunctions().vkBindBufferMemory(
    15198  m_hDevice,
    15199  hBuffer,
    15200  hAllocation->GetMemory(),
    15201  0); //memoryOffset
    15202  break;
    15203  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    15204  {
    15205  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    15206  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
    15207  res = pBlock->BindBufferMemory(this, hAllocation, hBuffer);
    15208  break;
    15209  }
    15210  default:
    15211  VMA_ASSERT(0);
    15212  }
    15213  return res;
    15214 }
    15215 
    15216 VkResult VmaAllocator_T::BindImageMemory(VmaAllocation hAllocation, VkImage hImage)
    15217 {
    15218  VkResult res = VK_SUCCESS;
    15219  switch(hAllocation->GetType())
    15220  {
    15221  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    15222  res = GetVulkanFunctions().vkBindImageMemory(
    15223  m_hDevice,
    15224  hImage,
    15225  hAllocation->GetMemory(),
    15226  0); //memoryOffset
    15227  break;
    15228  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    15229  {
    15230  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    15231  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
    15232  res = pBlock->BindImageMemory(this, hAllocation, hImage);
    15233  break;
    15234  }
    15235  default:
    15236  VMA_ASSERT(0);
    15237  }
    15238  return res;
    15239 }
    15240 
// Flushes or invalidates (per `op`) the given byte range of the allocation's
// memory. The range is expanded to nonCoherentAtomSize boundaries as Vulkan
// requires, and clamped so it never exceeds the allocation/block.
// No-op for coherent memory types or when size == 0.
    15241 void VmaAllocator_T::FlushOrInvalidateAllocation(
    15242  VmaAllocation hAllocation,
    15243  VkDeviceSize offset, VkDeviceSize size,
    15244  VMA_CACHE_OPERATION op)
    15245 {
    15246  const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex();
    15247  if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
    15248  {
    15249  const VkDeviceSize allocationSize = hAllocation->GetSize();
    15250  VMA_ASSERT(offset <= allocationSize);
    15251 
    15252  const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
    15253 
    15254  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
    15255  memRange.memory = hAllocation->GetMemory();
    15256 
    15257  switch(hAllocation->GetType())
    15258  {
// Dedicated allocation: offsets are relative to the start of the
// VkDeviceMemory itself; align down/up and clamp to the allocation size.
    15259  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    15260  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
    15261  if(size == VK_WHOLE_SIZE)
    15262  {
    15263  memRange.size = allocationSize - memRange.offset;
    15264  }
    15265  else
    15266  {
    15267  VMA_ASSERT(offset + size <= allocationSize);
    15268  memRange.size = VMA_MIN(
    15269  VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize),
    15270  allocationSize - memRange.offset);
    15271  }
    15272  break;
    15273 
// Block sub-allocation: first compute the aligned range within the
// allocation, then translate by the allocation's offset inside the block
// and clamp to the block size.
    15274  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    15275  {
    15276  // 1. Still within this allocation.
    15277  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
    15278  if(size == VK_WHOLE_SIZE)
    15279  {
    15280  size = allocationSize - offset;
    15281  }
    15282  else
    15283  {
    15284  VMA_ASSERT(offset + size <= allocationSize);
    15285  }
    15286  memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize);
    15287 
    15288  // 2. Adjust to whole block.
    15289  const VkDeviceSize allocationOffset = hAllocation->GetOffset();
    15290  VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
    15291  const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();
    15292  memRange.offset += allocationOffset;
    15293  memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);
    15294 
    15295  break;
    15296  }
    15297 
    15298  default:
    15299  VMA_ASSERT(0);
    15300  }
    15301 
    15302  switch(op)
    15303  {
    15304  case VMA_CACHE_FLUSH:
    15305  (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
    15306  break;
    15307  case VMA_CACHE_INVALIDATE:
    15308  (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
    15309  break;
    15310  default:
    15311  VMA_ASSERT(0);
    15312  }
    15313  }
    15314  // else: Just ignore this call.
    15315 }
    15316 
    15317 void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
    15318 {
    15319  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
    15320 
    15321  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    15322  {
    15323  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    15324  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
    15325  VMA_ASSERT(pDedicatedAllocations);
    15326  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
    15327  VMA_ASSERT(success);
    15328  }
    15329 
    15330  VkDeviceMemory hMemory = allocation->GetMemory();
    15331 
    15332  /*
    15333  There is no need to call this, because Vulkan spec allows to skip vkUnmapMemory
    15334  before vkFreeMemory.
    15335 
    15336  if(allocation->GetMappedData() != VMA_NULL)
    15337  {
    15338  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
    15339  }
    15340  */
    15341 
    15342  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
    15343 
    15344  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
    15345 }
    15346 
    15347 void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
    15348 {
    15349  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
    15350  !hAllocation->CanBecomeLost() &&
    15351  (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    15352  {
    15353  void* pData = VMA_NULL;
    15354  VkResult res = Map(hAllocation, &pData);
    15355  if(res == VK_SUCCESS)
    15356  {
    15357  memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
    15358  FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
    15359  Unmap(hAllocation);
    15360  }
    15361  else
    15362  {
    15363  VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
    15364  }
    15365  }
    15366 }
    15367 
    15368 #if VMA_STATS_STRING_ENABLED
    15369 
// Writes up to three JSON sections into the (already open) stats object:
// "DedicatedAllocations", "DefaultPools", and "Pools".
// Each section is opened lazily and emitted only if it has any content.
void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
{
    // Section 1: dedicated allocations, grouped by memory type index.
    bool dedicatedAllocationsStarted = false;
    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    {
        VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocVector);
        if(pDedicatedAllocVector->empty() == false)
        {
            // Open the section on the first non-empty memory type.
            if(dedicatedAllocationsStarted == false)
            {
                dedicatedAllocationsStarted = true;
                json.WriteString("DedicatedAllocations");
                json.BeginObject();
            }

            json.BeginString("Type ");
            json.ContinueString(memTypeIndex);
            json.EndString();

            json.BeginArray();

            // One JSON object per dedicated allocation of this type.
            for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
            {
                json.BeginObject(true);
                const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
                hAlloc->PrintParameters(json);
                json.EndObject();
            }

            json.EndArray();
        }
    }
    if(dedicatedAllocationsStarted)
    {
        json.EndObject();
    }

    // Section 2: default (non-custom-pool) block vectors, one per memory type.
    {
        bool allocationsStarted = false;
        for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
        {
            if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
            {
                // Open the section on the first non-empty block vector.
                if(allocationsStarted == false)
                {
                    allocationsStarted = true;
                    json.WriteString("DefaultPools");
                    json.BeginObject();
                }

                json.BeginString("Type ");
                json.ContinueString(memTypeIndex);
                json.EndString();

                m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
            }
        }
        if(allocationsStarted)
        {
            json.EndObject();
        }
    }

    // Custom pools
    {
        VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
        const size_t poolCount = m_Pools.size();
        if(poolCount > 0)
        {
            json.WriteString("Pools");
            json.BeginObject();
            for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
            {
                // Pools are keyed by their numeric id.
                json.BeginString();
                json.ContinueString(m_Pools[poolIndex]->GetId());
                json.EndString();

                m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
            }
            json.EndObject();
        }
    }
}
    15455 
    15456 #endif // #if VMA_STATS_STRING_ENABLED
    15457 
    15459 // Public interface
    15460 
    15461 VkResult vmaCreateAllocator(
    15462  const VmaAllocatorCreateInfo* pCreateInfo,
    15463  VmaAllocator* pAllocator)
    15464 {
    15465  VMA_ASSERT(pCreateInfo && pAllocator);
    15466  VMA_DEBUG_LOG("vmaCreateAllocator");
    15467  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
    15468  return (*pAllocator)->Init(pCreateInfo);
    15469 }
    15470 
    15471 void vmaDestroyAllocator(
    15472  VmaAllocator allocator)
    15473 {
    15474  if(allocator != VK_NULL_HANDLE)
    15475  {
    15476  VMA_DEBUG_LOG("vmaDestroyAllocator");
    15477  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
    15478  vma_delete(&allocationCallbacks, allocator);
    15479  }
    15480 }
    15481 
    15483  VmaAllocator allocator,
    15484  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
    15485 {
    15486  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
    15487  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
    15488 }
    15489 
    15491  VmaAllocator allocator,
    15492  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
    15493 {
    15494  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
    15495  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
    15496 }
    15497 
    15499  VmaAllocator allocator,
    15500  uint32_t memoryTypeIndex,
    15501  VkMemoryPropertyFlags* pFlags)
    15502 {
    15503  VMA_ASSERT(allocator && pFlags);
    15504  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
    15505  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
    15506 }
    15507 
    15509  VmaAllocator allocator,
    15510  uint32_t frameIndex)
    15511 {
    15512  VMA_ASSERT(allocator);
    15513  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
    15514 
    15515  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    15516 
    15517  allocator->SetCurrentFrameIndex(frameIndex);
    15518 }
    15519 
// Public: fills *pStats with statistics aggregated over the whole allocator.
void vmaCalculateStats(
    VmaAllocator allocator,
    VmaStats* pStats)
{
    VMA_ASSERT(allocator && pStats);
    // May lock a global debug mutex, depending on build configuration.
    VMA_DEBUG_GLOBAL_MUTEX_LOCK
    allocator->CalculateStats(pStats);
}
    15528 
    15529 #if VMA_STATS_STRING_ENABLED
    15530 
// Public: serializes allocator statistics to a JSON string allocated with the
// allocator's CPU allocation callbacks. The caller releases the string with
// vmaFreeStatsString(). When detailedMap == VK_TRUE, the per-allocation map
// produced by PrintDetailedMap() is appended as well.
void vmaBuildStatsString(
    VmaAllocator allocator,
    char** ppStatsString,
    VkBool32 detailedMap)
{
    VMA_ASSERT(allocator && ppStatsString);
    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VmaStringBuilder sb(allocator);
    {
        VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
        json.BeginObject();

        VmaStats stats;
        allocator->CalculateStats(&stats);

        // Totals across all heaps and memory types.
        json.WriteString("Total");
        VmaPrintStatInfo(json, stats.total);

        // One object per memory heap, with its memory types nested inside.
        for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
        {
            json.BeginString("Heap ");
            json.ContinueString(heapIndex);
            json.EndString();
            json.BeginObject();

            json.WriteString("Size");
            json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);

            // Heap flags as a human-readable array.
            json.WriteString("Flags");
            json.BeginArray(true);
            if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
            {
                json.WriteString("DEVICE_LOCAL");
            }
            json.EndArray();

            // Per-heap stats only if the heap has at least one block.
            if(stats.memoryHeap[heapIndex].blockCount > 0)
            {
                json.WriteString("Stats");
                VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
            }

            // Every memory type belonging to this heap.
            for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
            {
                if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
                {
                    json.BeginString("Type ");
                    json.ContinueString(typeIndex);
                    json.EndString();

                    json.BeginObject();

                    // Memory type property flags as a human-readable array.
                    json.WriteString("Flags");
                    json.BeginArray(true);
                    VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
                    if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
                    {
                        json.WriteString("DEVICE_LOCAL");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
                    {
                        json.WriteString("HOST_VISIBLE");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
                    {
                        json.WriteString("HOST_COHERENT");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
                    {
                        json.WriteString("HOST_CACHED");
                    }
                    if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
                    {
                        json.WriteString("LAZILY_ALLOCATED");
                    }
                    json.EndArray();

                    // Per-type stats only if this type has at least one block.
                    if(stats.memoryType[typeIndex].blockCount > 0)
                    {
                        json.WriteString("Stats");
                        VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
                    }

                    json.EndObject();
                }
            }

            json.EndObject();
        }
        if(detailedMap == VK_TRUE)
        {
            allocator->PrintDetailedMap(json);
        }

        json.EndObject();
    }

    // Copy the builder's buffer into a NUL-terminated string owned by the caller.
    const size_t len = sb.GetLength();
    char* const pChars = vma_new_array(allocator, char, len + 1);
    if(len > 0)
    {
        memcpy(pChars, sb.GetData(), len);
    }
    pChars[len] = '\0';
    *ppStatsString = pChars;
}
    15638 
    15639 void vmaFreeStatsString(
    15640  VmaAllocator allocator,
    15641  char* pStatsString)
    15642 {
    15643  if(pStatsString != VMA_NULL)
    15644  {
    15645  VMA_ASSERT(allocator);
    15646  size_t len = strlen(pStatsString);
    15647  vma_delete_array(allocator, pStatsString, len + 1);
    15648  }
    15649 }
    15650 
    15651 #endif // #if VMA_STATS_STRING_ENABLED
    15652 
    15653 /*
    15654 This function is not protected by any mutex because it just reads immutable data.
    15655 */
    15656 VkResult vmaFindMemoryTypeIndex(
    15657  VmaAllocator allocator,
    15658  uint32_t memoryTypeBits,
    15659  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    15660  uint32_t* pMemoryTypeIndex)
    15661 {
    15662  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    15663  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    15664  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    15665 
    15666  if(pAllocationCreateInfo->memoryTypeBits != 0)
    15667  {
    15668  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
    15669  }
    15670 
    15671  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
    15672  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
    15673 
    15674  const bool mapped = (pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    15675  if(mapped)
    15676  {
    15677  preferredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    15678  }
    15679 
    15680  // Convert usage to requiredFlags and preferredFlags.
    15681  switch(pAllocationCreateInfo->usage)
    15682  {
    15684  break;
    15686  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    15687  {
    15688  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    15689  }
    15690  break;
    15692  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    15693  break;
    15695  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    15696  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    15697  {
    15698  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    15699  }
    15700  break;
    15702  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    15703  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
    15704  break;
    15705  default:
    15706  break;
    15707  }
    15708 
    15709  *pMemoryTypeIndex = UINT32_MAX;
    15710  uint32_t minCost = UINT32_MAX;
    15711  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
    15712  memTypeIndex < allocator->GetMemoryTypeCount();
    15713  ++memTypeIndex, memTypeBit <<= 1)
    15714  {
    15715  // This memory type is acceptable according to memoryTypeBits bitmask.
    15716  if((memTypeBit & memoryTypeBits) != 0)
    15717  {
    15718  const VkMemoryPropertyFlags currFlags =
    15719  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
    15720  // This memory type contains requiredFlags.
    15721  if((requiredFlags & ~currFlags) == 0)
    15722  {
    15723  // Calculate cost as number of bits from preferredFlags not present in this memory type.
    15724  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
    15725  // Remember memory type with lowest cost.
    15726  if(currCost < minCost)
    15727  {
    15728  *pMemoryTypeIndex = memTypeIndex;
    15729  if(currCost == 0)
    15730  {
    15731  return VK_SUCCESS;
    15732  }
    15733  minCost = currCost;
    15734  }
    15735  }
    15736  }
    15737  }
    15738  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
    15739 }
    15740 
    15742  VmaAllocator allocator,
    15743  const VkBufferCreateInfo* pBufferCreateInfo,
    15744  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    15745  uint32_t* pMemoryTypeIndex)
    15746 {
    15747  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    15748  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
    15749  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    15750  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    15751 
    15752  const VkDevice hDev = allocator->m_hDevice;
    15753  VkBuffer hBuffer = VK_NULL_HANDLE;
    15754  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
    15755  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
    15756  if(res == VK_SUCCESS)
    15757  {
    15758  VkMemoryRequirements memReq = {};
    15759  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
    15760  hDev, hBuffer, &memReq);
    15761 
    15762  res = vmaFindMemoryTypeIndex(
    15763  allocator,
    15764  memReq.memoryTypeBits,
    15765  pAllocationCreateInfo,
    15766  pMemoryTypeIndex);
    15767 
    15768  allocator->GetVulkanFunctions().vkDestroyBuffer(
    15769  hDev, hBuffer, allocator->GetAllocationCallbacks());
    15770  }
    15771  return res;
    15772 }
    15773 
    15775  VmaAllocator allocator,
    15776  const VkImageCreateInfo* pImageCreateInfo,
    15777  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    15778  uint32_t* pMemoryTypeIndex)
    15779 {
    15780  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    15781  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
    15782  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    15783  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    15784 
    15785  const VkDevice hDev = allocator->m_hDevice;
    15786  VkImage hImage = VK_NULL_HANDLE;
    15787  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
    15788  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
    15789  if(res == VK_SUCCESS)
    15790  {
    15791  VkMemoryRequirements memReq = {};
    15792  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
    15793  hDev, hImage, &memReq);
    15794 
    15795  res = vmaFindMemoryTypeIndex(
    15796  allocator,
    15797  memReq.memoryTypeBits,
    15798  pAllocationCreateInfo,
    15799  pMemoryTypeIndex);
    15800 
    15801  allocator->GetVulkanFunctions().vkDestroyImage(
    15802  hDev, hImage, allocator->GetAllocationCallbacks());
    15803  }
    15804  return res;
    15805 }
    15806 
    15807 VkResult vmaCreatePool(
    15808  VmaAllocator allocator,
    15809  const VmaPoolCreateInfo* pCreateInfo,
    15810  VmaPool* pPool)
    15811 {
    15812  VMA_ASSERT(allocator && pCreateInfo && pPool);
    15813 
    15814  VMA_DEBUG_LOG("vmaCreatePool");
    15815 
    15816  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    15817 
    15818  VkResult res = allocator->CreatePool(pCreateInfo, pPool);
    15819 
    15820 #if VMA_RECORDING_ENABLED
    15821  if(allocator->GetRecorder() != VMA_NULL)
    15822  {
    15823  allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
    15824  }
    15825 #endif
    15826 
    15827  return res;
    15828 }
    15829 
    15830 void vmaDestroyPool(
    15831  VmaAllocator allocator,
    15832  VmaPool pool)
    15833 {
    15834  VMA_ASSERT(allocator);
    15835 
    15836  if(pool == VK_NULL_HANDLE)
    15837  {
    15838  return;
    15839  }
    15840 
    15841  VMA_DEBUG_LOG("vmaDestroyPool");
    15842 
    15843  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    15844 
    15845 #if VMA_RECORDING_ENABLED
    15846  if(allocator->GetRecorder() != VMA_NULL)
    15847  {
    15848  allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
    15849  }
    15850 #endif
    15851 
    15852  allocator->DestroyPool(pool);
    15853 }
    15854 
// Public: retrieves statistics of a single custom pool into *pPoolStats.
void vmaGetPoolStats(
    VmaAllocator allocator,
    VmaPool pool,
    VmaPoolStats* pPoolStats)
{
    VMA_ASSERT(allocator && pool && pPoolStats);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    allocator->GetPoolStats(pool, pPoolStats);
}
    15866 
    15868  VmaAllocator allocator,
    15869  VmaPool pool,
    15870  size_t* pLostAllocationCount)
    15871 {
    15872  VMA_ASSERT(allocator && pool);
    15873 
    15874  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    15875 
    15876 #if VMA_RECORDING_ENABLED
    15877  if(allocator->GetRecorder() != VMA_NULL)
    15878  {
    15879  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
    15880  }
    15881 #endif
    15882 
    15883  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
    15884 }
    15885 
// Public: validates the corruption-detection margins of all allocations in the
// given pool; returns the allocator's verdict (e.g. an error if corruption is
// detected or the feature is unavailable for this pool's memory type).
VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
{
    VMA_ASSERT(allocator && pool);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VMA_DEBUG_LOG("vmaCheckPoolCorruption");

    return allocator->CheckPoolCorruption(pool);
}
    15896 
    15897 VkResult vmaAllocateMemory(
    15898  VmaAllocator allocator,
    15899  const VkMemoryRequirements* pVkMemoryRequirements,
    15900  const VmaAllocationCreateInfo* pCreateInfo,
    15901  VmaAllocation* pAllocation,
    15902  VmaAllocationInfo* pAllocationInfo)
    15903 {
    15904  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
    15905 
    15906  VMA_DEBUG_LOG("vmaAllocateMemory");
    15907 
    15908  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    15909 
    15910  VkResult result = allocator->AllocateMemory(
    15911  *pVkMemoryRequirements,
    15912  false, // requiresDedicatedAllocation
    15913  false, // prefersDedicatedAllocation
    15914  VK_NULL_HANDLE, // dedicatedBuffer
    15915  VK_NULL_HANDLE, // dedicatedImage
    15916  *pCreateInfo,
    15917  VMA_SUBALLOCATION_TYPE_UNKNOWN,
    15918  1, // allocationCount
    15919  pAllocation);
    15920 
    15921 #if VMA_RECORDING_ENABLED
    15922  if(allocator->GetRecorder() != VMA_NULL)
    15923  {
    15924  allocator->GetRecorder()->RecordAllocateMemory(
    15925  allocator->GetCurrentFrameIndex(),
    15926  *pVkMemoryRequirements,
    15927  *pCreateInfo,
    15928  *pAllocation);
    15929  }
    15930 #endif
    15931 
    15932  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
    15933  {
    15934  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    15935  }
    15936 
    15937  return result;
    15938 }
    15939 
    15940 VkResult vmaAllocateMemoryPages(
    15941  VmaAllocator allocator,
    15942  const VkMemoryRequirements* pVkMemoryRequirements,
    15943  const VmaAllocationCreateInfo* pCreateInfo,
    15944  size_t allocationCount,
    15945  VmaAllocation* pAllocations,
    15946  VmaAllocationInfo* pAllocationInfo)
    15947 {
    15948  if(allocationCount == 0)
    15949  {
    15950  return VK_SUCCESS;
    15951  }
    15952 
    15953  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocations);
    15954 
    15955  VMA_DEBUG_LOG("vmaAllocateMemoryPages");
    15956 
    15957  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    15958 
    15959  VkResult result = allocator->AllocateMemory(
    15960  *pVkMemoryRequirements,
    15961  false, // requiresDedicatedAllocation
    15962  false, // prefersDedicatedAllocation
    15963  VK_NULL_HANDLE, // dedicatedBuffer
    15964  VK_NULL_HANDLE, // dedicatedImage
    15965  *pCreateInfo,
    15966  VMA_SUBALLOCATION_TYPE_UNKNOWN,
    15967  allocationCount,
    15968  pAllocations);
    15969 
    15970 #if VMA_RECORDING_ENABLED
    15971  if(allocator->GetRecorder() != VMA_NULL)
    15972  {
    15973  allocator->GetRecorder()->RecordAllocateMemoryPages(
    15974  allocator->GetCurrentFrameIndex(),
    15975  *pVkMemoryRequirements,
    15976  *pCreateInfo,
    15977  (uint64_t)allocationCount,
    15978  pAllocations);
    15979  }
    15980 #endif
    15981 
    15982  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
    15983  {
    15984  for(size_t i = 0; i < allocationCount; ++i)
    15985  {
    15986  allocator->GetAllocationInfo(pAllocations[i], pAllocationInfo + i);
    15987  }
    15988  }
    15989 
    15990  return result;
    15991 }
    15992 
    15994  VmaAllocator allocator,
    15995  VkBuffer buffer,
    15996  const VmaAllocationCreateInfo* pCreateInfo,
    15997  VmaAllocation* pAllocation,
    15998  VmaAllocationInfo* pAllocationInfo)
    15999 {
    16000  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
    16001 
    16002  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
    16003 
    16004  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16005 
    16006  VkMemoryRequirements vkMemReq = {};
    16007  bool requiresDedicatedAllocation = false;
    16008  bool prefersDedicatedAllocation = false;
    16009  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
    16010  requiresDedicatedAllocation,
    16011  prefersDedicatedAllocation);
    16012 
    16013  VkResult result = allocator->AllocateMemory(
    16014  vkMemReq,
    16015  requiresDedicatedAllocation,
    16016  prefersDedicatedAllocation,
    16017  buffer, // dedicatedBuffer
    16018  VK_NULL_HANDLE, // dedicatedImage
    16019  *pCreateInfo,
    16020  VMA_SUBALLOCATION_TYPE_BUFFER,
    16021  1, // allocationCount
    16022  pAllocation);
    16023 
    16024 #if VMA_RECORDING_ENABLED
    16025  if(allocator->GetRecorder() != VMA_NULL)
    16026  {
    16027  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
    16028  allocator->GetCurrentFrameIndex(),
    16029  vkMemReq,
    16030  requiresDedicatedAllocation,
    16031  prefersDedicatedAllocation,
    16032  *pCreateInfo,
    16033  *pAllocation);
    16034  }
    16035 #endif
    16036 
    16037  if(pAllocationInfo && result == VK_SUCCESS)
    16038  {
    16039  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    16040  }
    16041 
    16042  return result;
    16043 }
    16044 
    16045 VkResult vmaAllocateMemoryForImage(
    16046  VmaAllocator allocator,
    16047  VkImage image,
    16048  const VmaAllocationCreateInfo* pCreateInfo,
    16049  VmaAllocation* pAllocation,
    16050  VmaAllocationInfo* pAllocationInfo)
    16051 {
    16052  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
    16053 
    16054  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
    16055 
    16056  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16057 
    16058  VkMemoryRequirements vkMemReq = {};
    16059  bool requiresDedicatedAllocation = false;
    16060  bool prefersDedicatedAllocation = false;
    16061  allocator->GetImageMemoryRequirements(image, vkMemReq,
    16062  requiresDedicatedAllocation, prefersDedicatedAllocation);
    16063 
    16064  VkResult result = allocator->AllocateMemory(
    16065  vkMemReq,
    16066  requiresDedicatedAllocation,
    16067  prefersDedicatedAllocation,
    16068  VK_NULL_HANDLE, // dedicatedBuffer
    16069  image, // dedicatedImage
    16070  *pCreateInfo,
    16071  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
    16072  1, // allocationCount
    16073  pAllocation);
    16074 
    16075 #if VMA_RECORDING_ENABLED
    16076  if(allocator->GetRecorder() != VMA_NULL)
    16077  {
    16078  allocator->GetRecorder()->RecordAllocateMemoryForImage(
    16079  allocator->GetCurrentFrameIndex(),
    16080  vkMemReq,
    16081  requiresDedicatedAllocation,
    16082  prefersDedicatedAllocation,
    16083  *pCreateInfo,
    16084  *pAllocation);
    16085  }
    16086 #endif
    16087 
    16088  if(pAllocationInfo && result == VK_SUCCESS)
    16089  {
    16090  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    16091  }
    16092 
    16093  return result;
    16094 }
    16095 
    16096 void vmaFreeMemory(
    16097  VmaAllocator allocator,
    16098  VmaAllocation allocation)
    16099 {
    16100  VMA_ASSERT(allocator);
    16101 
    16102  if(allocation == VK_NULL_HANDLE)
    16103  {
    16104  return;
    16105  }
    16106 
    16107  VMA_DEBUG_LOG("vmaFreeMemory");
    16108 
    16109  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16110 
    16111 #if VMA_RECORDING_ENABLED
    16112  if(allocator->GetRecorder() != VMA_NULL)
    16113  {
    16114  allocator->GetRecorder()->RecordFreeMemory(
    16115  allocator->GetCurrentFrameIndex(),
    16116  allocation);
    16117  }
    16118 #endif
    16119 
    16120  allocator->FreeMemory(
    16121  1, // allocationCount
    16122  &allocation);
    16123 }
    16124 
    16125 void vmaFreeMemoryPages(
    16126  VmaAllocator allocator,
    16127  size_t allocationCount,
    16128  VmaAllocation* pAllocations)
    16129 {
    16130  if(allocationCount == 0)
    16131  {
    16132  return;
    16133  }
    16134 
    16135  VMA_ASSERT(allocator);
    16136 
    16137  VMA_DEBUG_LOG("vmaFreeMemoryPages");
    16138 
    16139  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16140 
    16141 #if VMA_RECORDING_ENABLED
    16142  if(allocator->GetRecorder() != VMA_NULL)
    16143  {
    16144  allocator->GetRecorder()->RecordFreeMemoryPages(
    16145  allocator->GetCurrentFrameIndex(),
    16146  (uint64_t)allocationCount,
    16147  pAllocations);
    16148  }
    16149 #endif
    16150 
    16151  allocator->FreeMemory(allocationCount, pAllocations);
    16152 }
    16153 
    16154 VkResult vmaResizeAllocation(
    16155  VmaAllocator allocator,
    16156  VmaAllocation allocation,
    16157  VkDeviceSize newSize)
    16158 {
    16159  VMA_ASSERT(allocator && allocation);
    16160 
    16161  VMA_DEBUG_LOG("vmaResizeAllocation");
    16162 
    16163  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16164 
    16165 #if VMA_RECORDING_ENABLED
    16166  if(allocator->GetRecorder() != VMA_NULL)
    16167  {
    16168  allocator->GetRecorder()->RecordResizeAllocation(
    16169  allocator->GetCurrentFrameIndex(),
    16170  allocation,
    16171  newSize);
    16172  }
    16173 #endif
    16174 
    16175  return allocator->ResizeAllocation(allocation, newSize);
    16176 }
    16177 
    16179  VmaAllocator allocator,
    16180  VmaAllocation allocation,
    16181  VmaAllocationInfo* pAllocationInfo)
    16182 {
    16183  VMA_ASSERT(allocator && allocation && pAllocationInfo);
    16184 
    16185  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16186 
    16187 #if VMA_RECORDING_ENABLED
    16188  if(allocator->GetRecorder() != VMA_NULL)
    16189  {
    16190  allocator->GetRecorder()->RecordGetAllocationInfo(
    16191  allocator->GetCurrentFrameIndex(),
    16192  allocation);
    16193  }
    16194 #endif
    16195 
    16196  allocator->GetAllocationInfo(allocation, pAllocationInfo);
    16197 }
    16198 
    16199 VkBool32 vmaTouchAllocation(
    16200  VmaAllocator allocator,
    16201  VmaAllocation allocation)
    16202 {
    16203  VMA_ASSERT(allocator && allocation);
    16204 
    16205  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16206 
    16207 #if VMA_RECORDING_ENABLED
    16208  if(allocator->GetRecorder() != VMA_NULL)
    16209  {
    16210  allocator->GetRecorder()->RecordTouchAllocation(
    16211  allocator->GetCurrentFrameIndex(),
    16212  allocation);
    16213  }
    16214 #endif
    16215 
    16216  return allocator->TouchAllocation(allocation);
    16217 }
    16218 
    16220  VmaAllocator allocator,
    16221  VmaAllocation allocation,
    16222  void* pUserData)
    16223 {
    16224  VMA_ASSERT(allocator && allocation);
    16225 
    16226  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16227 
    16228  allocation->SetUserData(allocator, pUserData);
    16229 
    16230 #if VMA_RECORDING_ENABLED
    16231  if(allocator->GetRecorder() != VMA_NULL)
    16232  {
    16233  allocator->GetRecorder()->RecordSetAllocationUserData(
    16234  allocator->GetCurrentFrameIndex(),
    16235  allocation,
    16236  pUserData);
    16237  }
    16238 #endif
    16239 }
    16240 
    16242  VmaAllocator allocator,
    16243  VmaAllocation* pAllocation)
    16244 {
    16245  VMA_ASSERT(allocator && pAllocation);
    16246 
    16247  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
    16248 
    16249  allocator->CreateLostAllocation(pAllocation);
    16250 
    16251 #if VMA_RECORDING_ENABLED
    16252  if(allocator->GetRecorder() != VMA_NULL)
    16253  {
    16254  allocator->GetRecorder()->RecordCreateLostAllocation(
    16255  allocator->GetCurrentFrameIndex(),
    16256  *pAllocation);
    16257  }
    16258 #endif
    16259 }
    16260 
    16261 VkResult vmaMapMemory(
    16262  VmaAllocator allocator,
    16263  VmaAllocation allocation,
    16264  void** ppData)
    16265 {
    16266  VMA_ASSERT(allocator && allocation && ppData);
    16267 
    16268  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16269 
    16270  VkResult res = allocator->Map(allocation, ppData);
    16271 
    16272 #if VMA_RECORDING_ENABLED
    16273  if(allocator->GetRecorder() != VMA_NULL)
    16274  {
    16275  allocator->GetRecorder()->RecordMapMemory(
    16276  allocator->GetCurrentFrameIndex(),
    16277  allocation);
    16278  }
    16279 #endif
    16280 
    16281  return res;
    16282 }
    16283 
    16284 void vmaUnmapMemory(
    16285  VmaAllocator allocator,
    16286  VmaAllocation allocation)
    16287 {
    16288  VMA_ASSERT(allocator && allocation);
    16289 
    16290  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16291 
    16292 #if VMA_RECORDING_ENABLED
    16293  if(allocator->GetRecorder() != VMA_NULL)
    16294  {
    16295  allocator->GetRecorder()->RecordUnmapMemory(
    16296  allocator->GetCurrentFrameIndex(),
    16297  allocation);
    16298  }
    16299 #endif
    16300 
    16301  allocator->Unmap(allocation);
    16302 }
    16303 
    16304 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    16305 {
    16306  VMA_ASSERT(allocator && allocation);
    16307 
    16308  VMA_DEBUG_LOG("vmaFlushAllocation");
    16309 
    16310  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16311 
    16312  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
    16313 
    16314 #if VMA_RECORDING_ENABLED
    16315  if(allocator->GetRecorder() != VMA_NULL)
    16316  {
    16317  allocator->GetRecorder()->RecordFlushAllocation(
    16318  allocator->GetCurrentFrameIndex(),
    16319  allocation, offset, size);
    16320  }
    16321 #endif
    16322 }
    16323 
    16324 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    16325 {
    16326  VMA_ASSERT(allocator && allocation);
    16327 
    16328  VMA_DEBUG_LOG("vmaInvalidateAllocation");
    16329 
    16330  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16331 
    16332  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
    16333 
    16334 #if VMA_RECORDING_ENABLED
    16335  if(allocator->GetRecorder() != VMA_NULL)
    16336  {
    16337  allocator->GetRecorder()->RecordInvalidateAllocation(
    16338  allocator->GetCurrentFrameIndex(),
    16339  allocation, offset, size);
    16340  }
    16341 #endif
    16342 }
    16343 
    16344 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
    16345 {
    16346  VMA_ASSERT(allocator);
    16347 
    16348  VMA_DEBUG_LOG("vmaCheckCorruption");
    16349 
    16350  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16351 
    16352  return allocator->CheckCorruption(memoryTypeBits);
    16353 }
    16354 
    16355 VkResult vmaDefragment(
    16356  VmaAllocator allocator,
    16357  VmaAllocation* pAllocations,
    16358  size_t allocationCount,
    16359  VkBool32* pAllocationsChanged,
    16360  const VmaDefragmentationInfo *pDefragmentationInfo,
    16361  VmaDefragmentationStats* pDefragmentationStats)
    16362 {
    16363  // Deprecated interface, reimplemented using new one.
    16364 
    16365  VmaDefragmentationInfo2 info2 = {};
    16366  info2.allocationCount = (uint32_t)allocationCount;
    16367  info2.pAllocations = pAllocations;
    16368  info2.pAllocationsChanged = pAllocationsChanged;
    16369  if(pDefragmentationInfo != VMA_NULL)
    16370  {
    16371  info2.maxCpuAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
    16372  info2.maxCpuBytesToMove = pDefragmentationInfo->maxBytesToMove;
    16373  }
    16374  else
    16375  {
    16376  info2.maxCpuAllocationsToMove = UINT32_MAX;
    16377  info2.maxCpuBytesToMove = VK_WHOLE_SIZE;
    16378  }
    16379  // info2.flags, maxGpuAllocationsToMove, maxGpuBytesToMove, commandBuffer deliberately left zero.
    16380 
    16382  VkResult res = vmaDefragmentationBegin(allocator, &info2, pDefragmentationStats, &ctx);
    16383  if(res == VK_NOT_READY)
    16384  {
    16385  res = vmaDefragmentationEnd( allocator, ctx);
    16386  }
    16387  return res;
    16388 }
    16389 
    16390 VkResult vmaDefragmentationBegin(
    16391  VmaAllocator allocator,
    16392  const VmaDefragmentationInfo2* pInfo,
    16393  VmaDefragmentationStats* pStats,
    16394  VmaDefragmentationContext *pContext)
    16395 {
    16396  VMA_ASSERT(allocator && pInfo && pContext);
    16397 
    16398  // Degenerate case: Nothing to defragment.
    16399  if(pInfo->allocationCount == 0 && pInfo->poolCount == 0)
    16400  {
    16401  return VK_SUCCESS;
    16402  }
    16403 
    16404  VMA_ASSERT(pInfo->allocationCount == 0 || pInfo->pAllocations != VMA_NULL);
    16405  VMA_ASSERT(pInfo->poolCount == 0 || pInfo->pPools != VMA_NULL);
    16406  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->allocationCount, pInfo->pAllocations));
    16407  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->poolCount, pInfo->pPools));
    16408 
    16409  VMA_DEBUG_LOG("vmaDefragmentationBegin");
    16410 
    16411  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16412 
    16413  VkResult res = allocator->DefragmentationBegin(*pInfo, pStats, pContext);
    16414 
    16415 #if VMA_RECORDING_ENABLED
    16416  if(allocator->GetRecorder() != VMA_NULL)
    16417  {
    16418  allocator->GetRecorder()->RecordDefragmentationBegin(
    16419  allocator->GetCurrentFrameIndex(), *pInfo, *pContext);
    16420  }
    16421 #endif
    16422 
    16423  return res;
    16424 }
    16425 
    16426 VkResult vmaDefragmentationEnd(
    16427  VmaAllocator allocator,
    16428  VmaDefragmentationContext context)
    16429 {
    16430  VMA_ASSERT(allocator);
    16431 
    16432  VMA_DEBUG_LOG("vmaDefragmentationEnd");
    16433 
    16434  if(context != VK_NULL_HANDLE)
    16435  {
    16436  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16437 
    16438 #if VMA_RECORDING_ENABLED
    16439  if(allocator->GetRecorder() != VMA_NULL)
    16440  {
    16441  allocator->GetRecorder()->RecordDefragmentationEnd(
    16442  allocator->GetCurrentFrameIndex(), context);
    16443  }
    16444 #endif
    16445 
    16446  return allocator->DefragmentationEnd(context);
    16447  }
    16448  else
    16449  {
    16450  return VK_SUCCESS;
    16451  }
    16452 }
    16453 
    16454 VkResult vmaBindBufferMemory(
    16455  VmaAllocator allocator,
    16456  VmaAllocation allocation,
    16457  VkBuffer buffer)
    16458 {
    16459  VMA_ASSERT(allocator && allocation && buffer);
    16460 
    16461  VMA_DEBUG_LOG("vmaBindBufferMemory");
    16462 
    16463  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16464 
    16465  return allocator->BindBufferMemory(allocation, buffer);
    16466 }
    16467 
    16468 VkResult vmaBindImageMemory(
    16469  VmaAllocator allocator,
    16470  VmaAllocation allocation,
    16471  VkImage image)
    16472 {
    16473  VMA_ASSERT(allocator && allocation && image);
    16474 
    16475  VMA_DEBUG_LOG("vmaBindImageMemory");
    16476 
    16477  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16478 
    16479  return allocator->BindImageMemory(allocation, image);
    16480 }
    16481 
    16482 VkResult vmaCreateBuffer(
    16483  VmaAllocator allocator,
    16484  const VkBufferCreateInfo* pBufferCreateInfo,
    16485  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    16486  VkBuffer* pBuffer,
    16487  VmaAllocation* pAllocation,
    16488  VmaAllocationInfo* pAllocationInfo)
    16489 {
    16490  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
    16491 
    16492  if(pBufferCreateInfo->size == 0)
    16493  {
    16494  return VK_ERROR_VALIDATION_FAILED_EXT;
    16495  }
    16496 
    16497  VMA_DEBUG_LOG("vmaCreateBuffer");
    16498 
    16499  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16500 
    16501  *pBuffer = VK_NULL_HANDLE;
    16502  *pAllocation = VK_NULL_HANDLE;
    16503 
    16504  // 1. Create VkBuffer.
    16505  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
    16506  allocator->m_hDevice,
    16507  pBufferCreateInfo,
    16508  allocator->GetAllocationCallbacks(),
    16509  pBuffer);
    16510  if(res >= 0)
    16511  {
    16512  // 2. vkGetBufferMemoryRequirements.
    16513  VkMemoryRequirements vkMemReq = {};
    16514  bool requiresDedicatedAllocation = false;
    16515  bool prefersDedicatedAllocation = false;
    16516  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
    16517  requiresDedicatedAllocation, prefersDedicatedAllocation);
    16518 
    16519  // Make sure alignment requirements for specific buffer usages reported
    16520  // in Physical Device Properties are included in alignment reported by memory requirements.
    16521  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
    16522  {
    16523  VMA_ASSERT(vkMemReq.alignment %
    16524  allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
    16525  }
    16526  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
    16527  {
    16528  VMA_ASSERT(vkMemReq.alignment %
    16529  allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
    16530  }
    16531  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
    16532  {
    16533  VMA_ASSERT(vkMemReq.alignment %
    16534  allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
    16535  }
    16536 
    16537  // 3. Allocate memory using allocator.
    16538  res = allocator->AllocateMemory(
    16539  vkMemReq,
    16540  requiresDedicatedAllocation,
    16541  prefersDedicatedAllocation,
    16542  *pBuffer, // dedicatedBuffer
    16543  VK_NULL_HANDLE, // dedicatedImage
    16544  *pAllocationCreateInfo,
    16545  VMA_SUBALLOCATION_TYPE_BUFFER,
    16546  1, // allocationCount
    16547  pAllocation);
    16548 
    16549 #if VMA_RECORDING_ENABLED
    16550  if(allocator->GetRecorder() != VMA_NULL)
    16551  {
    16552  allocator->GetRecorder()->RecordCreateBuffer(
    16553  allocator->GetCurrentFrameIndex(),
    16554  *pBufferCreateInfo,
    16555  *pAllocationCreateInfo,
    16556  *pAllocation);
    16557  }
    16558 #endif
    16559 
    16560  if(res >= 0)
    16561  {
    16562  // 3. Bind buffer with memory.
    16563  res = allocator->BindBufferMemory(*pAllocation, *pBuffer);
    16564  if(res >= 0)
    16565  {
    16566  // All steps succeeded.
    16567  #if VMA_STATS_STRING_ENABLED
    16568  (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
    16569  #endif
    16570  if(pAllocationInfo != VMA_NULL)
    16571  {
    16572  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    16573  }
    16574 
    16575  return VK_SUCCESS;
    16576  }
    16577  allocator->FreeMemory(
    16578  1, // allocationCount
    16579  pAllocation);
    16580  *pAllocation = VK_NULL_HANDLE;
    16581  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
    16582  *pBuffer = VK_NULL_HANDLE;
    16583  return res;
    16584  }
    16585  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
    16586  *pBuffer = VK_NULL_HANDLE;
    16587  return res;
    16588  }
    16589  return res;
    16590 }
    16591 
    16592 void vmaDestroyBuffer(
    16593  VmaAllocator allocator,
    16594  VkBuffer buffer,
    16595  VmaAllocation allocation)
    16596 {
    16597  VMA_ASSERT(allocator);
    16598 
    16599  if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
    16600  {
    16601  return;
    16602  }
    16603 
    16604  VMA_DEBUG_LOG("vmaDestroyBuffer");
    16605 
    16606  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16607 
    16608 #if VMA_RECORDING_ENABLED
    16609  if(allocator->GetRecorder() != VMA_NULL)
    16610  {
    16611  allocator->GetRecorder()->RecordDestroyBuffer(
    16612  allocator->GetCurrentFrameIndex(),
    16613  allocation);
    16614  }
    16615 #endif
    16616 
    16617  if(buffer != VK_NULL_HANDLE)
    16618  {
    16619  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
    16620  }
    16621 
    16622  if(allocation != VK_NULL_HANDLE)
    16623  {
    16624  allocator->FreeMemory(
    16625  1, // allocationCount
    16626  &allocation);
    16627  }
    16628 }
    16629 
    16630 VkResult vmaCreateImage(
    16631  VmaAllocator allocator,
    16632  const VkImageCreateInfo* pImageCreateInfo,
    16633  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    16634  VkImage* pImage,
    16635  VmaAllocation* pAllocation,
    16636  VmaAllocationInfo* pAllocationInfo)
    16637 {
    16638  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
    16639 
    16640  if(pImageCreateInfo->extent.width == 0 ||
    16641  pImageCreateInfo->extent.height == 0 ||
    16642  pImageCreateInfo->extent.depth == 0 ||
    16643  pImageCreateInfo->mipLevels == 0 ||
    16644  pImageCreateInfo->arrayLayers == 0)
    16645  {
    16646  return VK_ERROR_VALIDATION_FAILED_EXT;
    16647  }
    16648 
    16649  VMA_DEBUG_LOG("vmaCreateImage");
    16650 
    16651  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16652 
    16653  *pImage = VK_NULL_HANDLE;
    16654  *pAllocation = VK_NULL_HANDLE;
    16655 
    16656  // 1. Create VkImage.
    16657  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
    16658  allocator->m_hDevice,
    16659  pImageCreateInfo,
    16660  allocator->GetAllocationCallbacks(),
    16661  pImage);
    16662  if(res >= 0)
    16663  {
    16664  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
    16665  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
    16666  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
    16667 
    16668  // 2. Allocate memory using allocator.
    16669  VkMemoryRequirements vkMemReq = {};
    16670  bool requiresDedicatedAllocation = false;
    16671  bool prefersDedicatedAllocation = false;
    16672  allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
    16673  requiresDedicatedAllocation, prefersDedicatedAllocation);
    16674 
    16675  res = allocator->AllocateMemory(
    16676  vkMemReq,
    16677  requiresDedicatedAllocation,
    16678  prefersDedicatedAllocation,
    16679  VK_NULL_HANDLE, // dedicatedBuffer
    16680  *pImage, // dedicatedImage
    16681  *pAllocationCreateInfo,
    16682  suballocType,
    16683  1, // allocationCount
    16684  pAllocation);
    16685 
    16686 #if VMA_RECORDING_ENABLED
    16687  if(allocator->GetRecorder() != VMA_NULL)
    16688  {
    16689  allocator->GetRecorder()->RecordCreateImage(
    16690  allocator->GetCurrentFrameIndex(),
    16691  *pImageCreateInfo,
    16692  *pAllocationCreateInfo,
    16693  *pAllocation);
    16694  }
    16695 #endif
    16696 
    16697  if(res >= 0)
    16698  {
    16699  // 3. Bind image with memory.
    16700  res = allocator->BindImageMemory(*pAllocation, *pImage);
    16701  if(res >= 0)
    16702  {
    16703  // All steps succeeded.
    16704  #if VMA_STATS_STRING_ENABLED
    16705  (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
    16706  #endif
    16707  if(pAllocationInfo != VMA_NULL)
    16708  {
    16709  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    16710  }
    16711 
    16712  return VK_SUCCESS;
    16713  }
    16714  allocator->FreeMemory(
    16715  1, // allocationCount
    16716  pAllocation);
    16717  *pAllocation = VK_NULL_HANDLE;
    16718  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
    16719  *pImage = VK_NULL_HANDLE;
    16720  return res;
    16721  }
    16722  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
    16723  *pImage = VK_NULL_HANDLE;
    16724  return res;
    16725  }
    16726  return res;
    16727 }
    16728 
    16729 void vmaDestroyImage(
    16730  VmaAllocator allocator,
    16731  VkImage image,
    16732  VmaAllocation allocation)
    16733 {
    16734  VMA_ASSERT(allocator);
    16735 
    16736  if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
    16737  {
    16738  return;
    16739  }
    16740 
    16741  VMA_DEBUG_LOG("vmaDestroyImage");
    16742 
    16743  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16744 
    16745 #if VMA_RECORDING_ENABLED
    16746  if(allocator->GetRecorder() != VMA_NULL)
    16747  {
    16748  allocator->GetRecorder()->RecordDestroyImage(
    16749  allocator->GetCurrentFrameIndex(),
    16750  allocation);
    16751  }
    16752 #endif
    16753 
    16754  if(image != VK_NULL_HANDLE)
    16755  {
    16756  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
    16757  }
    16758  if(allocation != VK_NULL_HANDLE)
    16759  {
    16760  allocator->FreeMemory(
    16761  1, // allocationCount
    16762  &allocation);
    16763  }
    16764 }
    16765 
    16766 #endif // #ifdef VMA_IMPLEMENTATION
    PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties
    Definition: vk_mem_alloc.h:1723
    +
    Set this flag if the allocation should have its own memory block.
    Definition: vk_mem_alloc.h:2026
    void vmaUnmapMemory(VmaAllocator allocator, VmaAllocation allocation)
    Unmaps memory represented by given allocation, mapped previously using vmaMapMemory().
    -
    VkPhysicalDevice physicalDevice
    Vulkan physical device.
    Definition: vk_mem_alloc.h:1785
    -
    uint32_t maxCpuAllocationsToMove
    Maximum number of allocations that can be moved to a different place using transfers on CPU side...
    Definition: vk_mem_alloc.h:2782
    +
    VkPhysicalDevice physicalDevice
    Vulkan physical device.
    Definition: vk_mem_alloc.h:1781
    +
    uint32_t maxCpuAllocationsToMove
    Maximum number of allocations that can be moved to a different place using transfers on CPU side...
    Definition: vk_mem_alloc.h:2823
    VkResult vmaDefragment(VmaAllocator allocator, VmaAllocation *pAllocations, size_t allocationCount, VkBool32 *pAllocationsChanged, const VmaDefragmentationInfo *pDefragmentationInfo, VmaDefragmentationStats *pDefragmentationStats)
    Deprecated. Compacts memory by moving allocations.
    void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    Invalidates memory of given allocation.
    Represents single memory allocation.
    -
    Definition: vk_mem_alloc.h:1759
    -
    size_t blockCount
    Number of VkDeviceMemory blocks allocated for this pool.
    Definition: vk_mem_alloc.h:2355
    -
    PFN_vkCreateBuffer vkCreateBuffer
    Definition: vk_mem_alloc.h:1739
    +
    Definition: vk_mem_alloc.h:1755
    +
    size_t blockCount
    Number of VkDeviceMemory blocks allocated for this pool.
    Definition: vk_mem_alloc.h:2351
    +
    PFN_vkCreateBuffer vkCreateBuffer
    Definition: vk_mem_alloc.h:1735
    void vmaFreeStatsString(VmaAllocator allocator, char *pStatsString)
    struct VmaStats VmaStats
    General statistics from current state of Allocator.
    -
    Definition: vk_mem_alloc.h:1987
    -
    Definition: vk_mem_alloc.h:2090
    -
    VmaDefragmentationFlags flags
    Reserved for future use. Should be 0.
    Definition: vk_mem_alloc.h:2735
    -
    PFN_vkMapMemory vkMapMemory
    Definition: vk_mem_alloc.h:1731
    -
    VkDeviceMemory deviceMemory
    Handle to Vulkan memory object.
    Definition: vk_mem_alloc.h:2455
    -
    VmaAllocatorCreateFlags flags
    Flags for created allocator. Use VmaAllocatorCreateFlagBits enum.
    Definition: vk_mem_alloc.h:1782
    -
    uint32_t maxAllocationsToMove
    Maximum number of allocations that can be moved to different place.
    Definition: vk_mem_alloc.h:2818
    -
    Use this flag if you always allocate only buffers and linear images or only optimal images out of thi...
    Definition: vk_mem_alloc.h:2244
    -
    #define VMA_RECORDING_ENABLED
    Definition: vk_mem_alloc.h:1626
    +
    Definition: vk_mem_alloc.h:1983
    +
    Definition: vk_mem_alloc.h:2086
    +
    VmaDefragmentationFlags flags
    Reserved for future use. Should be 0.
    Definition: vk_mem_alloc.h:2776
    +
    PFN_vkMapMemory vkMapMemory
    Definition: vk_mem_alloc.h:1727
    +
    VkDeviceMemory deviceMemory
    Handle to Vulkan memory object.
    Definition: vk_mem_alloc.h:2451
    +
    VmaAllocatorCreateFlags flags
    Flags for created allocator. Use VmaAllocatorCreateFlagBits enum.
    Definition: vk_mem_alloc.h:1778
    +
    uint32_t maxAllocationsToMove
    Maximum number of allocations that can be moved to different place.
    Definition: vk_mem_alloc.h:2859
    +
    Use this flag if you always allocate only buffers and linear images or only optimal images out of thi...
    Definition: vk_mem_alloc.h:2240
    +
    #define VMA_RECORDING_ENABLED
    Definition: vk_mem_alloc.h:1622
    void vmaMakePoolAllocationsLost(VmaAllocator allocator, VmaPool pool, size_t *pLostAllocationCount)
    Marks all allocations in given pool as lost if they are not used in current frame or VmaPoolCreateInf...
    -
    VkDeviceSize size
    Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
    Definition: vk_mem_alloc.h:2336
    -
    Definition: vk_mem_alloc.h:2067
    -
    uint32_t allocationCount
    Number of allocations in pAllocations array.
    Definition: vk_mem_alloc.h:2738
    -
    VkFlags VmaAllocatorCreateFlags
    Definition: vk_mem_alloc.h:1720
    -
    VkMemoryPropertyFlags preferredFlags
    Flags that preferably should be set in a memory type chosen for an allocation.
    Definition: vk_mem_alloc.h:2143
    -
    Definition: vk_mem_alloc.h:2014
    -
    const VkAllocationCallbacks * pAllocationCallbacks
    Custom CPU memory allocation callbacks. Optional.
    Definition: vk_mem_alloc.h:1794
    -
    Enables alternative, buddy allocation algorithm in this pool.
    Definition: vk_mem_alloc.h:2272
    +
    VkDeviceSize size
    Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
    Definition: vk_mem_alloc.h:2332
    +
    Definition: vk_mem_alloc.h:2063
    +
    uint32_t allocationCount
    Number of allocations in pAllocations array.
    Definition: vk_mem_alloc.h:2779
    +
    VkFlags VmaAllocatorCreateFlags
    Definition: vk_mem_alloc.h:1716
    +
    VkMemoryPropertyFlags preferredFlags
    Flags that preferably should be set in a memory type chosen for an allocation.
    Definition: vk_mem_alloc.h:2139
    +
    Definition: vk_mem_alloc.h:2010
    +
    const VkAllocationCallbacks * pAllocationCallbacks
    Custom CPU memory allocation callbacks. Optional.
    Definition: vk_mem_alloc.h:1790
    +
    Enables alternative, buddy allocation algorithm in this pool.
    Definition: vk_mem_alloc.h:2268
    void vmaCalculateStats(VmaAllocator allocator, VmaStats *pStats)
    Retrieves statistics from current state of the Allocator.
    -
    const VmaVulkanFunctions * pVulkanFunctions
    Pointers to Vulkan functions. Can be null if you leave define VMA_STATIC_VULKAN_FUNCTIONS 1...
    Definition: vk_mem_alloc.h:1848
    -
Description of an Allocator to be created.
    Definition: vk_mem_alloc.h:1779
    +
    const VmaVulkanFunctions * pVulkanFunctions
    Pointers to Vulkan functions. Can be null if you leave define VMA_STATIC_VULKAN_FUNCTIONS 1...
    Definition: vk_mem_alloc.h:1844
    +
Description of an Allocator to be created.
    Definition: vk_mem_alloc.h:1775
    void vmaDestroyAllocator(VmaAllocator allocator)
    Destroys allocator object.
    -
    VmaAllocationCreateFlagBits
    Flags to be passed as VmaAllocationCreateInfo::flags.
    Definition: vk_mem_alloc.h:2018
    +
    VmaAllocationCreateFlagBits
    Flags to be passed as VmaAllocationCreateInfo::flags.
    Definition: vk_mem_alloc.h:2014
    void vmaGetAllocationInfo(VmaAllocator allocator, VmaAllocation allocation, VmaAllocationInfo *pAllocationInfo)
    Returns current information about specified allocation and atomically marks it as used in current fra...
    -
    VkDeviceSize allocationSizeMax
    Definition: vk_mem_alloc.h:1920
    -
    PFN_vkBindImageMemory vkBindImageMemory
    Definition: vk_mem_alloc.h:1736
    -
    VmaPool * pPools
    Either null or pointer to array of pools to be defragmented.
    Definition: vk_mem_alloc.h:2772
    -
    VkDeviceSize unusedBytes
    Total number of bytes occupied by unused ranges.
    Definition: vk_mem_alloc.h:1919
    -
    Statistics returned by function vmaDefragment().
    Definition: vk_mem_alloc.h:2822
    +
    VkDeviceSize allocationSizeMax
    Definition: vk_mem_alloc.h:1916
    +
    PFN_vkBindImageMemory vkBindImageMemory
    Definition: vk_mem_alloc.h:1732
    +
    VmaPool * pPools
    Either null or pointer to array of pools to be defragmented.
    Definition: vk_mem_alloc.h:2813
    +
    VkDeviceSize unusedBytes
    Total number of bytes occupied by unused ranges.
    Definition: vk_mem_alloc.h:1915
    +
    Statistics returned by function vmaDefragment().
    Definition: vk_mem_alloc.h:2863
    void vmaFreeMemory(VmaAllocator allocator, VmaAllocation allocation)
    Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(), or vmaAllocateMemoryForImage().
    -
    uint32_t frameInUseCount
    Maximum number of additional frames that are in use at the same time as current frame.
    Definition: vk_mem_alloc.h:1811
    -
    VmaStatInfo total
    Definition: vk_mem_alloc.h:1929
    -
    uint32_t deviceMemoryBlocksFreed
    Number of empty VkDeviceMemory objects that have been released to the system.
    Definition: vk_mem_alloc.h:2830
    -
    VmaAllocationCreateFlags flags
    Use VmaAllocationCreateFlagBits enum.
    Definition: vk_mem_alloc.h:2127
    -
    VkDeviceSize maxBytesToMove
    Maximum total numbers of bytes that can be copied while moving allocations to different places...
    Definition: vk_mem_alloc.h:2813
    -
    PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements
    Definition: vk_mem_alloc.h:1737
    -
    void(VKAPI_PTR * PFN_vmaAllocateDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
    Callback function called after successful vkAllocateMemory.
    Definition: vk_mem_alloc.h:1662
    +
    uint32_t frameInUseCount
    Maximum number of additional frames that are in use at the same time as current frame.
    Definition: vk_mem_alloc.h:1807
    +
    VmaStatInfo total
    Definition: vk_mem_alloc.h:1925
    +
    uint32_t deviceMemoryBlocksFreed
    Number of empty VkDeviceMemory objects that have been released to the system.
    Definition: vk_mem_alloc.h:2871
    +
    VmaAllocationCreateFlags flags
    Use VmaAllocationCreateFlagBits enum.
    Definition: vk_mem_alloc.h:2123
    +
    VkDeviceSize maxBytesToMove
    Maximum total numbers of bytes that can be copied while moving allocations to different places...
    Definition: vk_mem_alloc.h:2854
    +
    PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements
    Definition: vk_mem_alloc.h:1733
    +
    void(VKAPI_PTR * PFN_vmaAllocateDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
    Callback function called after successful vkAllocateMemory.
    Definition: vk_mem_alloc.h:1658
Represents main object of this library, initialized by a call to vmaCreateAllocator().
    -
    VkDevice device
    Vulkan device.
    Definition: vk_mem_alloc.h:1788
    +
    VkDevice device
    Vulkan device.
    Definition: vk_mem_alloc.h:1784
    +
    void vmaFreeMemoryPages(VmaAllocator allocator, size_t allocationCount, VmaAllocation *pAllocations)
    Frees memory and destroys multiple allocations.
    VkResult vmaBindBufferMemory(VmaAllocator allocator, VmaAllocation allocation, VkBuffer buffer)
    Binds buffer to allocation.
    -
    Describes parameter of created VmaPool.
    Definition: vk_mem_alloc.h:2286
    -
    Definition: vk_mem_alloc.h:2280
    -
    PFN_vkCmdCopyBuffer vkCmdCopyBuffer
    Definition: vk_mem_alloc.h:1743
    -
    const VmaRecordSettings * pRecordSettings
    Parameters for recording of VMA calls. Can be null.
    Definition: vk_mem_alloc.h:1855
    -
    VkDeviceSize size
    Size of this allocation, in bytes.
    Definition: vk_mem_alloc.h:2465
    +
    Describes parameter of created VmaPool.
    Definition: vk_mem_alloc.h:2282
    +
    Definition: vk_mem_alloc.h:2276
    +
    PFN_vkCmdCopyBuffer vkCmdCopyBuffer
    Definition: vk_mem_alloc.h:1739
    +
    const VmaRecordSettings * pRecordSettings
    Parameters for recording of VMA calls. Can be null.
    Definition: vk_mem_alloc.h:1851
    +
    VkDeviceSize size
    Size of this allocation, in bytes.
    Definition: vk_mem_alloc.h:2461
    void vmaGetMemoryTypeProperties(VmaAllocator allocator, uint32_t memoryTypeIndex, VkMemoryPropertyFlags *pFlags)
    Given Memory Type Index, returns Property Flags of this memory type.
    -
    PFN_vkUnmapMemory vkUnmapMemory
    Definition: vk_mem_alloc.h:1732
    +
    PFN_vkUnmapMemory vkUnmapMemory
    Definition: vk_mem_alloc.h:1728
    VkResult vmaDefragmentationBegin(VmaAllocator allocator, const VmaDefragmentationInfo2 *pInfo, VmaDefragmentationStats *pStats, VmaDefragmentationContext *pContext)
    Begins defragmentation process.
    -
    Enables flush after recording every function call.
    Definition: vk_mem_alloc.h:1757
    -
    void * pUserData
    Custom general-purpose pointer that will be stored in VmaAllocation, can be read as VmaAllocationInfo...
    Definition: vk_mem_alloc.h:2164
    -
    size_t minBlockCount
    Minimum number of blocks to be always allocated in this pool, even if they stay empty.
    Definition: vk_mem_alloc.h:2306
    -
    size_t allocationCount
    Number of VmaAllocation objects created from this pool that were not destroyed or lost...
    Definition: vk_mem_alloc.h:2342
    +
    Enables flush after recording every function call.
    Definition: vk_mem_alloc.h:1753
    +
    void * pUserData
    Custom general-purpose pointer that will be stored in VmaAllocation, can be read as VmaAllocationInfo...
    Definition: vk_mem_alloc.h:2160
    +
    size_t minBlockCount
    Minimum number of blocks to be always allocated in this pool, even if they stay empty.
    Definition: vk_mem_alloc.h:2302
    +
    size_t allocationCount
    Number of VmaAllocation objects created from this pool that were not destroyed or lost...
    Definition: vk_mem_alloc.h:2338
    struct VmaVulkanFunctions VmaVulkanFunctions
    Pointers to some Vulkan functions - a subset used by the library.
    -
    Definition: vk_mem_alloc.h:1718
    -
    uint32_t memoryTypeIndex
    Vulkan memory type index to allocate this pool from.
    Definition: vk_mem_alloc.h:2289
    +
    Definition: vk_mem_alloc.h:1714
    +
    uint32_t memoryTypeIndex
    Vulkan memory type index to allocate this pool from.
    Definition: vk_mem_alloc.h:2285
    VkResult vmaFindMemoryTypeIndex(VmaAllocator allocator, uint32_t memoryTypeBits, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
    Helps to find memoryTypeIndex, given memoryTypeBits and VmaAllocationCreateInfo.
    -
    VkDeviceSize maxGpuBytesToMove
    Maximum total numbers of bytes that can be copied while moving allocations to different places using ...
    Definition: vk_mem_alloc.h:2787
    -
    VmaMemoryUsage
    Definition: vk_mem_alloc.h:1965
    +
    VkDeviceSize maxGpuBytesToMove
    Maximum total numbers of bytes that can be copied while moving allocations to different places using ...
    Definition: vk_mem_alloc.h:2828
    +
    VmaMemoryUsage
    Definition: vk_mem_alloc.h:1961
    struct VmaAllocationInfo VmaAllocationInfo
    Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
    -
    VmaAllocation * pAllocations
    Pointer to array of allocations that can be defragmented.
    Definition: vk_mem_alloc.h:2747
    +
    VmaAllocation * pAllocations
    Pointer to array of allocations that can be defragmented.
    Definition: vk_mem_alloc.h:2788
    void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    Flushes memory of given allocation.
    -
    Deprecated. Optional configuration parameters to be passed to function vmaDefragment().
    Definition: vk_mem_alloc.h:2808
    +
    Deprecated. Optional configuration parameters to be passed to function vmaDefragment().
    Definition: vk_mem_alloc.h:2849
    struct VmaPoolCreateInfo VmaPoolCreateInfo
    Describes parameter of created VmaPool.
    void vmaDestroyPool(VmaAllocator allocator, VmaPool pool)
    Destroys VmaPool object and frees Vulkan device memory.
    -
    VkDeviceSize bytesFreed
    Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects...
    Definition: vk_mem_alloc.h:2826
    -
    Definition: vk_mem_alloc.h:2004
    -
    uint32_t memoryTypeBits
    Bitmask containing one bit set for every memory type acceptable for this allocation.
    Definition: vk_mem_alloc.h:2151
    -
    PFN_vkBindBufferMemory vkBindBufferMemory
    Definition: vk_mem_alloc.h:1735
    +
    VkDeviceSize bytesFreed
    Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects...
    Definition: vk_mem_alloc.h:2867
    +
    Definition: vk_mem_alloc.h:2000
    +
    uint32_t memoryTypeBits
    Bitmask containing one bit set for every memory type acceptable for this allocation.
    Definition: vk_mem_alloc.h:2147
    +
    PFN_vkBindBufferMemory vkBindBufferMemory
    Definition: vk_mem_alloc.h:1731
    Represents custom memory pool.
    void vmaGetPoolStats(VmaAllocator allocator, VmaPool pool, VmaPoolStats *pPoolStats)
    Retrieves statistics of existing VmaPool object.
    struct VmaDefragmentationInfo VmaDefragmentationInfo
    Deprecated. Optional configuration parameters to be passed to function vmaDefragment().
    VkResult vmaDefragmentationEnd(VmaAllocator allocator, VmaDefragmentationContext context)
    Ends defragmentation process.
    -
    General statistics from current state of Allocator.
    Definition: vk_mem_alloc.h:1925
    -
    void(VKAPI_PTR * PFN_vmaFreeDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
    Callback function called before vkFreeMemory.
    Definition: vk_mem_alloc.h:1668
    -
    VkFlags VmaDefragmentationFlags
    Definition: vk_mem_alloc.h:2726
    +
    General statistics from current state of Allocator.
    Definition: vk_mem_alloc.h:1921
    +
    void(VKAPI_PTR * PFN_vmaFreeDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
    Callback function called before vkFreeMemory.
    Definition: vk_mem_alloc.h:1664
    +
    VkFlags VmaDefragmentationFlags
    Definition: vk_mem_alloc.h:2767
    void vmaSetAllocationUserData(VmaAllocator allocator, VmaAllocation allocation, void *pUserData)
    Sets pUserData in given allocation to new value.
    -
    Definition: vk_mem_alloc.h:2724
    - -
    VkBool32 * pAllocationsChanged
    Optional, output. Pointer to array that will be filled with information whether the allocation at cer...
    Definition: vk_mem_alloc.h:2753
    +
    Definition: vk_mem_alloc.h:2765
    + +
    VkBool32 * pAllocationsChanged
    Optional, output. Pointer to array that will be filled with information whether the allocation at cer...
    Definition: vk_mem_alloc.h:2794
    VkResult vmaCreatePool(VmaAllocator allocator, const VmaPoolCreateInfo *pCreateInfo, VmaPool *pPool)
    Allocates Vulkan device memory and creates VmaPool object.
    -
    VmaAllocatorCreateFlagBits
    Flags for created VmaAllocator.
    Definition: vk_mem_alloc.h:1689
    +
    VmaAllocatorCreateFlagBits
    Flags for created VmaAllocator.
    Definition: vk_mem_alloc.h:1685
    VkResult vmaBindImageMemory(VmaAllocator allocator, VmaAllocation allocation, VkImage image)
    Binds image to allocation.
    struct VmaStatInfo VmaStatInfo
    Calculated statistics of memory usage in entire allocator.
    -
    VkFlags VmaRecordFlags
    Definition: vk_mem_alloc.h:1761
    -
    Allocator and all objects created from it will not be synchronized internally, so you must guarantee ...
    Definition: vk_mem_alloc.h:1694
    -
    uint32_t allocationsMoved
    Number of allocations that have been moved to different places.
    Definition: vk_mem_alloc.h:2828
    +
    VkFlags VmaRecordFlags
    Definition: vk_mem_alloc.h:1757
    +
    Allocator and all objects created from it will not be synchronized internally, so you must guarantee ...
    Definition: vk_mem_alloc.h:1690
    +
    uint32_t allocationsMoved
    Number of allocations that have been moved to different places.
    Definition: vk_mem_alloc.h:2869
    void vmaCreateLostAllocation(VmaAllocator allocator, VmaAllocation *pAllocation)
    Creates new allocation that is in lost state from the beginning.
    -
    VkMemoryPropertyFlags requiredFlags
    Flags that must be set in a Memory Type chosen for an allocation.
    Definition: vk_mem_alloc.h:2138
    -
    VkDeviceSize unusedRangeSizeMax
    Size of the largest continuous free memory region available for new allocation.
    Definition: vk_mem_alloc.h:2352
    +
    VkMemoryPropertyFlags requiredFlags
    Flags that must be set in a Memory Type chosen for an allocation.
    Definition: vk_mem_alloc.h:2134
    +
    VkDeviceSize unusedRangeSizeMax
    Size of the largest continuous free memory region available for new allocation.
    Definition: vk_mem_alloc.h:2348
    void vmaBuildStatsString(VmaAllocator allocator, char **ppStatsString, VkBool32 detailedMap)
    Builds and returns statistics as string in JSON format.
    -
    PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties
    Definition: vk_mem_alloc.h:1728
    -
    Calculated statistics of memory usage in entire allocator.
    Definition: vk_mem_alloc.h:1908
    -
    VkDeviceSize blockSize
    Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes. Optional.
    Definition: vk_mem_alloc.h:2301
    -
    Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
    Definition: vk_mem_alloc.h:1681
    -
    Definition: vk_mem_alloc.h:2276
    +
    PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties
    Definition: vk_mem_alloc.h:1724
    +
    Calculated statistics of memory usage in entire allocator.
    Definition: vk_mem_alloc.h:1904
    +
    VkDeviceSize blockSize
    Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes. Optional.
    Definition: vk_mem_alloc.h:2297
    +
    Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
    Definition: vk_mem_alloc.h:1677
    +
    Definition: vk_mem_alloc.h:2272
    VkResult vmaCreateBuffer(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkBuffer *pBuffer, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
    -
    Definition: vk_mem_alloc.h:2074
    +
    Definition: vk_mem_alloc.h:2070
    Represents Opaque object that represents started defragmentation process.
    -
    VkDeviceSize unusedRangeSizeMin
    Definition: vk_mem_alloc.h:1921
    -
    PFN_vmaFreeDeviceMemoryFunction pfnFree
    Optional, can be null.
    Definition: vk_mem_alloc.h:1685
    -
    Definition: vk_mem_alloc.h:2101
    -
    VmaPoolCreateFlags flags
    Use combination of VmaPoolCreateFlagBits.
    Definition: vk_mem_alloc.h:2292
    -
    Definition: vk_mem_alloc.h:2013
    -
    PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges
    Definition: vk_mem_alloc.h:1734
    +
    VkDeviceSize unusedRangeSizeMin
    Definition: vk_mem_alloc.h:1917
    +
    PFN_vmaFreeDeviceMemoryFunction pfnFree
    Optional, can be null.
    Definition: vk_mem_alloc.h:1681
    +
    Definition: vk_mem_alloc.h:2097
    +
    VmaPoolCreateFlags flags
    Use combination of VmaPoolCreateFlagBits.
    Definition: vk_mem_alloc.h:2288
    +
    Definition: vk_mem_alloc.h:2009
    +
    PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges
    Definition: vk_mem_alloc.h:1730
    struct VmaPoolStats VmaPoolStats
    Describes parameter of existing VmaPool.
    VkResult vmaCreateImage(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkImage *pImage, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
    Function similar to vmaCreateBuffer().
    -
    VmaMemoryUsage usage
    Intended usage of memory.
    Definition: vk_mem_alloc.h:2133
    -
    Definition: vk_mem_alloc.h:2124
    +
    VmaMemoryUsage usage
    Intended usage of memory.
    Definition: vk_mem_alloc.h:2129
    +
    Definition: vk_mem_alloc.h:2120
    VkResult vmaFindMemoryTypeIndexForImageInfo(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
    Helps to find memoryTypeIndex, given VkImageCreateInfo and VmaAllocationCreateInfo.
    -
    uint32_t blockCount
    Number of VkDeviceMemory Vulkan memory blocks allocated.
    Definition: vk_mem_alloc.h:1911
    -
    PFN_vkFreeMemory vkFreeMemory
    Definition: vk_mem_alloc.h:1730
    -
    size_t maxBlockCount
    Maximum number of blocks that can be allocated in this pool. Optional.
    Definition: vk_mem_alloc.h:2314
    -
    const VmaDeviceMemoryCallbacks * pDeviceMemoryCallbacks
    Informative callbacks for vkAllocateMemory, vkFreeMemory. Optional.
    Definition: vk_mem_alloc.h:1797
    -
    size_t unusedRangeCount
    Number of continuous memory ranges in the pool not used by any VmaAllocation.
    Definition: vk_mem_alloc.h:2345
    -
    VkFlags VmaAllocationCreateFlags
    Definition: vk_mem_alloc.h:2122
    -
    VkDeviceSize maxCpuBytesToMove
    Maximum total numbers of bytes that can be copied while moving allocations to different places using ...
    Definition: vk_mem_alloc.h:2777
    -
    VmaPool pool
    Pool that this allocation should be created in.
    Definition: vk_mem_alloc.h:2157
    +
    uint32_t blockCount
    Number of VkDeviceMemory Vulkan memory blocks allocated.
    Definition: vk_mem_alloc.h:1907
    +
    PFN_vkFreeMemory vkFreeMemory
    Definition: vk_mem_alloc.h:1726
    +
    size_t maxBlockCount
    Maximum number of blocks that can be allocated in this pool. Optional.
    Definition: vk_mem_alloc.h:2310
    +
    const VmaDeviceMemoryCallbacks * pDeviceMemoryCallbacks
    Informative callbacks for vkAllocateMemory, vkFreeMemory. Optional.
    Definition: vk_mem_alloc.h:1793
    +
    size_t unusedRangeCount
    Number of continuous memory ranges in the pool not used by any VmaAllocation.
    Definition: vk_mem_alloc.h:2341
    +
    VkFlags VmaAllocationCreateFlags
    Definition: vk_mem_alloc.h:2118
    +
    VkDeviceSize maxCpuBytesToMove
    Maximum total numbers of bytes that can be copied while moving allocations to different places using ...
    Definition: vk_mem_alloc.h:2818
    +
    VmaPool pool
    Pool that this allocation should be created in.
    Definition: vk_mem_alloc.h:2153
    void vmaGetMemoryProperties(VmaAllocator allocator, const VkPhysicalDeviceMemoryProperties **ppPhysicalDeviceMemoryProperties)
    -
    const VkDeviceSize * pHeapSizeLimit
    Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out o...
    Definition: vk_mem_alloc.h:1836
    -
    VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]
    Definition: vk_mem_alloc.h:1927
    -
    Set this flag to use a memory that will be persistently mapped and retrieve pointer to it...
    Definition: vk_mem_alloc.h:2054
    -
    VkDeviceSize allocationSizeMin
    Definition: vk_mem_alloc.h:1920
    +
    const VkDeviceSize * pHeapSizeLimit
    Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out o...
    Definition: vk_mem_alloc.h:1832
    +
    VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]
    Definition: vk_mem_alloc.h:1923
    +
    Set this flag to use a memory that will be persistently mapped and retrieve pointer to it...
    Definition: vk_mem_alloc.h:2050
    +
    VkDeviceSize allocationSizeMin
    Definition: vk_mem_alloc.h:1916
    VkResult vmaFindMemoryTypeIndexForBufferInfo(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
    Helps to find memoryTypeIndex, given VkBufferCreateInfo and VmaAllocationCreateInfo.
    -
    PFN_vkCreateImage vkCreateImage
    Definition: vk_mem_alloc.h:1741
    -
    VmaRecordFlags flags
    Flags for recording. Use VmaRecordFlagBits enum.
    Definition: vk_mem_alloc.h:1767
    -
    VmaDefragmentationFlagBits
    Flags to be used in vmaDefragmentationBegin(). None at the moment. Reserved for future use...
    Definition: vk_mem_alloc.h:2723
    -
    VkCommandBuffer commandBuffer
    Optional. Command buffer where GPU copy commands will be posted.
    Definition: vk_mem_alloc.h:2801
    -
    PFN_vmaAllocateDeviceMemoryFunction pfnAllocate
    Optional, can be null.
    Definition: vk_mem_alloc.h:1683
    -
    PFN_vkDestroyBuffer vkDestroyBuffer
    Definition: vk_mem_alloc.h:1740
    +
    PFN_vkCreateImage vkCreateImage
    Definition: vk_mem_alloc.h:1737
    +
    VmaRecordFlags flags
    Flags for recording. Use VmaRecordFlagBits enum.
    Definition: vk_mem_alloc.h:1763
    +
    VmaDefragmentationFlagBits
    Flags to be used in vmaDefragmentationBegin(). None at the moment. Reserved for future use...
    Definition: vk_mem_alloc.h:2764
    +
    VkCommandBuffer commandBuffer
    Optional. Command buffer where GPU copy commands will be posted.
    Definition: vk_mem_alloc.h:2842
    +
    PFN_vmaAllocateDeviceMemoryFunction pfnAllocate
    Optional, can be null.
    Definition: vk_mem_alloc.h:1679
    +
    PFN_vkDestroyBuffer vkDestroyBuffer
    Definition: vk_mem_alloc.h:1736
    VkResult vmaMapMemory(VmaAllocator allocator, VmaAllocation allocation, void **ppData)
    Maps memory represented by given allocation and returns pointer to it.
    -
    uint32_t frameInUseCount
    Maximum number of additional frames that are in use at the same time as current frame.
    Definition: vk_mem_alloc.h:2328
    -
    PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges
    Definition: vk_mem_alloc.h:1733
    -
    Definition: vk_mem_alloc.h:2085
    +
    uint32_t frameInUseCount
    Maximum number of additional frames that are in use at the same time as current frame.
    Definition: vk_mem_alloc.h:2324
    +
    PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges
    Definition: vk_mem_alloc.h:1729
    +
    Definition: vk_mem_alloc.h:2081
    VkResult vmaAllocateMemoryForImage(VmaAllocator allocator, VkImage image, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
    Function similar to vmaAllocateMemoryForBuffer().
    struct VmaAllocatorCreateInfo VmaAllocatorCreateInfo
    Description of a Allocator to be created.
    -
    const char * pFilePath
    Path to the file that should be written by the recording.
    Definition: vk_mem_alloc.h:1775
    -
    void * pUserData
    Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vma...
    Definition: vk_mem_alloc.h:2479
    -
    VkDeviceSize preferredLargeHeapBlockSize
    Preferred size of a single VkDeviceMemory block to be allocated from large heaps > 1 GiB...
    Definition: vk_mem_alloc.h:1791
    -
    VkDeviceSize allocationSizeAvg
    Definition: vk_mem_alloc.h:1920
    -
    VkDeviceSize usedBytes
    Total number of bytes occupied by all allocations.
    Definition: vk_mem_alloc.h:1917
    +
    const char * pFilePath
    Path to the file that should be written by the recording.
    Definition: vk_mem_alloc.h:1771
    +
    void * pUserData
    Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vma...
    Definition: vk_mem_alloc.h:2475
    +
    VkDeviceSize preferredLargeHeapBlockSize
    Preferred size of a single VkDeviceMemory block to be allocated from large heaps > 1 GiB...
    Definition: vk_mem_alloc.h:1787
    +
    VkDeviceSize allocationSizeAvg
    Definition: vk_mem_alloc.h:1916
    +
    VkResult vmaAllocateMemoryPages(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, size_t allocationCount, VmaAllocation *pAllocations, VmaAllocationInfo *pAllocationInfo)
    General purpose memory allocation for multiple allocation objects at once.
    +
    VkDeviceSize usedBytes
    Total number of bytes occupied by all allocations.
    Definition: vk_mem_alloc.h:1913
    struct VmaDeviceMemoryCallbacks VmaDeviceMemoryCallbacks
    Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
    VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
    Checks magic number in margins around all allocations in given memory types (in both default and cust...
    -
    Describes parameter of existing VmaPool.
    Definition: vk_mem_alloc.h:2333
    -
    Parameters for defragmentation.
    Definition: vk_mem_alloc.h:2732
    +
    Describes parameter of existing VmaPool.
    Definition: vk_mem_alloc.h:2329
    +
    Parameters for defragmentation.
    Definition: vk_mem_alloc.h:2773
    VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
    Checks magic number in margins around all allocations in given memory pool in search for corruptions...
    -
    Definition: vk_mem_alloc.h:2094
    -
    VkDeviceSize offset
    Offset into deviceMemory object to the beginning of this allocation, in bytes. (deviceMemory, offset) pair is unique to this allocation.
    Definition: vk_mem_alloc.h:2460
    -
    Definition: vk_mem_alloc.h:2108
    -
    Definition: vk_mem_alloc.h:2120
    -
    VkDeviceSize bytesMoved
    Total number of bytes that have been copied while moving allocations to different places...
    Definition: vk_mem_alloc.h:2824
    -
    Pointers to some Vulkan functions - a subset used by the library.
    Definition: vk_mem_alloc.h:1726
    +
    Definition: vk_mem_alloc.h:2090
    +
    VkDeviceSize offset
    Offset into deviceMemory object to the beginning of this allocation, in bytes. (deviceMemory, offset) pair is unique to this allocation.
    Definition: vk_mem_alloc.h:2456
    +
    Definition: vk_mem_alloc.h:2104
    +
    Definition: vk_mem_alloc.h:2116
    +
    VkDeviceSize bytesMoved
    Total number of bytes that have been copied while moving allocations to different places...
    Definition: vk_mem_alloc.h:2865
    +
    Pointers to some Vulkan functions - a subset used by the library.
    Definition: vk_mem_alloc.h:1722
    VkResult vmaCreateAllocator(const VmaAllocatorCreateInfo *pCreateInfo, VmaAllocator *pAllocator)
    Creates Allocator object.
    -
    uint32_t unusedRangeCount
    Number of free ranges of memory between allocations.
    Definition: vk_mem_alloc.h:1915
    -
    Definition: vk_mem_alloc.h:1970
    -
    VkFlags VmaPoolCreateFlags
    Definition: vk_mem_alloc.h:2282
    +
    uint32_t unusedRangeCount
    Number of free ranges of memory between allocations.
    Definition: vk_mem_alloc.h:1911
    +
    Definition: vk_mem_alloc.h:1966
    +
    VkFlags VmaPoolCreateFlags
    Definition: vk_mem_alloc.h:2278
    void vmaGetPhysicalDeviceProperties(VmaAllocator allocator, const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
    -
    Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSetting...
    Definition: vk_mem_alloc.h:1764
    -
    uint32_t allocationCount
    Number of VmaAllocation allocation objects allocated.
    Definition: vk_mem_alloc.h:1913
    -
    PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements
    Definition: vk_mem_alloc.h:1738
    -
    PFN_vkDestroyImage vkDestroyImage
    Definition: vk_mem_alloc.h:1742
    -
    Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create new such b...
    Definition: vk_mem_alloc.h:2041
    -
    Definition: vk_mem_alloc.h:2115
    -
    Definition: vk_mem_alloc.h:1997
    -
    void * pMappedData
    Pointer to the beginning of this allocation as mapped data.
    Definition: vk_mem_alloc.h:2474
    +
    Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSetting...
    Definition: vk_mem_alloc.h:1760
    +
    uint32_t allocationCount
    Number of VmaAllocation allocation objects allocated.
    Definition: vk_mem_alloc.h:1909
    +
    PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements
    Definition: vk_mem_alloc.h:1734
    +
    PFN_vkDestroyImage vkDestroyImage
    Definition: vk_mem_alloc.h:1738
    +
    Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create new such b...
    Definition: vk_mem_alloc.h:2037
    +
    Definition: vk_mem_alloc.h:2111
    +
    Definition: vk_mem_alloc.h:1993
    +
    void * pMappedData
    Pointer to the beginning of this allocation as mapped data.
    Definition: vk_mem_alloc.h:2470
    void vmaDestroyImage(VmaAllocator allocator, VkImage image, VmaAllocation allocation)
    Destroys Vulkan image and frees allocated memory.
    -
    Enables usage of VK_KHR_dedicated_allocation extension.
    Definition: vk_mem_alloc.h:1716
    +
    Enables usage of VK_KHR_dedicated_allocation extension.
    Definition: vk_mem_alloc.h:1712
    struct VmaDefragmentationStats VmaDefragmentationStats
    Statistics returned by function vmaDefragment().
    -
    PFN_vkAllocateMemory vkAllocateMemory
    Definition: vk_mem_alloc.h:1729
    -
    Enables alternative, linear allocation algorithm in this pool.
    Definition: vk_mem_alloc.h:2261
    +
    PFN_vkAllocateMemory vkAllocateMemory
    Definition: vk_mem_alloc.h:1725
    +
    Enables alternative, linear allocation algorithm in this pool.
    Definition: vk_mem_alloc.h:2257
    VkResult vmaResizeAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize newSize)
    Tries to resize an allocation in place, if there is enough free memory after it.
    -
    Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
    Definition: vk_mem_alloc.h:2441
    +
    Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
    Definition: vk_mem_alloc.h:2437
    VkResult vmaAllocateMemory(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
    General purpose memory allocation.
    void vmaSetCurrentFrameIndex(VmaAllocator allocator, uint32_t frameIndex)
    Sets index of the current frame.
    struct VmaAllocationCreateInfo VmaAllocationCreateInfo
    VkResult vmaAllocateMemoryForBuffer(VmaAllocator allocator, VkBuffer buffer, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
    -
    Definition: vk_mem_alloc.h:2105
    -
    VmaPoolCreateFlagBits
    Flags to be passed as VmaPoolCreateInfo::flags.
    Definition: vk_mem_alloc.h:2226
    -
    VkDeviceSize unusedRangeSizeAvg
    Definition: vk_mem_alloc.h:1921
    +
    Definition: vk_mem_alloc.h:2101
    +
    VmaPoolCreateFlagBits
    Flags to be passed as VmaPoolCreateInfo::flags.
    Definition: vk_mem_alloc.h:2222
    +
    VkDeviceSize unusedRangeSizeAvg
    Definition: vk_mem_alloc.h:1917
    VkBool32 vmaTouchAllocation(VmaAllocator allocator, VmaAllocation allocation)
    Returns VK_TRUE if allocation is not lost and atomically marks it as used in current frame...
    - -
    VmaRecordFlagBits
    Flags to be used in VmaRecordSettings::flags.
    Definition: vk_mem_alloc.h:1751
    -
    VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]
    Definition: vk_mem_alloc.h:1928
    + +
    VmaRecordFlagBits
    Flags to be used in VmaRecordSettings::flags.
    Definition: vk_mem_alloc.h:1747
    +
    VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]
    Definition: vk_mem_alloc.h:1924
    void vmaDestroyBuffer(VmaAllocator allocator, VkBuffer buffer, VmaAllocation allocation)
    Destroys Vulkan buffer and frees allocated memory.
    -
    VkDeviceSize unusedSize
    Total number of bytes in the pool not used by any VmaAllocation.
    Definition: vk_mem_alloc.h:2339
    -
    VkDeviceSize unusedRangeSizeMax
    Definition: vk_mem_alloc.h:1921
    +
    VkDeviceSize unusedSize
    Total number of bytes in the pool not used by any VmaAllocation.
    Definition: vk_mem_alloc.h:2335
    +
    VkDeviceSize unusedRangeSizeMax
    Definition: vk_mem_alloc.h:1917
    struct VmaDefragmentationInfo2 VmaDefragmentationInfo2
    Parameters for defragmentation.
    -
    uint32_t maxGpuAllocationsToMove
    Maximum number of allocations that can be moved to a different place using transfers on GPU side...
    Definition: vk_mem_alloc.h:2792
    +
    uint32_t maxGpuAllocationsToMove
    Maximum number of allocations that can be moved to a different place using transfers on GPU side...
    Definition: vk_mem_alloc.h:2833
    struct VmaRecordSettings VmaRecordSettings
    Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSetting...
    -
    uint32_t memoryType
    Memory type index that this allocation was allocated from.
    Definition: vk_mem_alloc.h:2446
    -
    uint32_t poolCount
    Numer of pools in pPools array.
    Definition: vk_mem_alloc.h:2756
    +
    uint32_t memoryType
    Memory type index that this allocation was allocated from.
    Definition: vk_mem_alloc.h:2442
    +
    uint32_t poolCount
    Numer of pools in pPools array.
    Definition: vk_mem_alloc.h:2797