From 40fd505639d323db5e6ce84d8641881ef7763a0b Mon Sep 17 00:00:00 2001
From: Adam Sawicki
Date: Mon, 25 Sep 2017 16:44:48 +0200
Subject: [PATCH] Refactored some code from class VmaDeviceMemoryBlock to new
 class VmaBlockMetadata.

---
 bin/VulkanSample_Release_2015.exe       | Bin 100352 -> 99840 bytes
 docs/html/vk__mem__alloc_8h_source.html |   2 +-
 src/vk_mem_alloc.h                      | 796 +++++++++++++-----------
 3 files changed, 434 insertions(+), 364 deletions(-)

diff --git a/bin/VulkanSample_Release_2015.exe b/bin/VulkanSample_Release_2015.exe
index c67d1c011ac35f3c5061fa7de249c5556a1c7ee6..d0de1b80bc1604e0f88133b44abc3d75d047c66c 100644
GIT binary patch
delta 33005

diff --git a/docs/html/vk__mem__alloc_8h_source.html b/docs/html/vk__mem__alloc_8h_source.html
index 21bf07d..e6626c8 100644
--- a/docs/html/vk__mem__alloc_8h_source.html
+++ b/docs/html/vk__mem__alloc_8h_source.html
@@ -62,7 +62,7 @@
vk_mem_alloc.h
1 //
2 // Copyright (c) 2017 Advanced Micro Devices, Inc. All rights reserved.
3 //
4 // Permission is hereby granted, free of charge, to any person obtaining a copy
5 // of this software and associated documentation files (the "Software"), to deal
6 // in the Software without restriction, including without limitation the rights
7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 // copies of the Software, and to permit persons to whom the Software is
9 // furnished to do so, subject to the following conditions:
10 //
11 // The above copyright notice and this permission notice shall be included in
12 // all copies or substantial portions of the Software.
13 //
14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
20 // THE SOFTWARE.
21 //
22 
23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
25 
387 #include <vulkan/vulkan.h>
388 
390 
394 VK_DEFINE_HANDLE(VmaAllocator)
395 
396 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
398  VmaAllocator allocator,
399  uint32_t memoryType,
400  VkDeviceMemory memory,
401  VkDeviceSize size);
403 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
404  VmaAllocator allocator,
405  uint32_t memoryType,
406  VkDeviceMemory memory,
407  VkDeviceSize size);
408 
416 typedef struct VmaDeviceMemoryCallbacks {
422 
424 typedef enum VmaAllocatorFlagBits {
430 
433 typedef VkFlags VmaAllocatorFlags;
434 
439 typedef struct VmaVulkanFunctions {
440  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
441  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
442  PFN_vkAllocateMemory vkAllocateMemory;
443  PFN_vkFreeMemory vkFreeMemory;
444  PFN_vkMapMemory vkMapMemory;
445  PFN_vkUnmapMemory vkUnmapMemory;
446  PFN_vkBindBufferMemory vkBindBufferMemory;
447  PFN_vkBindImageMemory vkBindImageMemory;
448  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
449  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
450  PFN_vkCreateBuffer vkCreateBuffer;
451  PFN_vkDestroyBuffer vkDestroyBuffer;
452  PFN_vkCreateImage vkCreateImage;
453  PFN_vkDestroyImage vkDestroyImage;
455 
458 {
462 
463  VkPhysicalDevice physicalDevice;
465 
466  VkDevice device;
468 
471 
474 
475  const VkAllocationCallbacks* pAllocationCallbacks;
477 
492  uint32_t frameInUseCount;
510  const VkDeviceSize* pHeapSizeLimit;
524 
526 VkResult vmaCreateAllocator(
527  const VmaAllocatorCreateInfo* pCreateInfo,
528  VmaAllocator* pAllocator);
529 
532  VmaAllocator allocator);
533 
539  VmaAllocator allocator,
540  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
541 
547  VmaAllocator allocator,
548  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
549 
557  VmaAllocator allocator,
558  uint32_t memoryTypeIndex,
559  VkMemoryPropertyFlags* pFlags);
560 
570  VmaAllocator allocator,
571  uint32_t frameIndex);
572 
575 typedef struct VmaStatInfo
576 {
578  uint32_t blockCount;
580  uint32_t allocationCount;
584  VkDeviceSize usedBytes;
586  VkDeviceSize unusedBytes;
587  VkDeviceSize allocationSizeMin, allocationSizeAvg, allocationSizeMax;
588  VkDeviceSize unusedRangeSizeMin, unusedRangeSizeAvg, unusedRangeSizeMax;
589 } VmaStatInfo;
590 
592 typedef struct VmaStats
593 {
594  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
595  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
597 } VmaStats;
598 
600 void vmaCalculateStats(
601  VmaAllocator allocator,
602  VmaStats* pStats);
603 
604 #define VMA_STATS_STRING_ENABLED 1
605 
606 #if VMA_STATS_STRING_ENABLED
607 
609 
612  VmaAllocator allocator,
613  char** ppStatsString,
614  VkBool32 detailedMap);
615 
616 void vmaFreeStatsString(
617  VmaAllocator allocator,
618  char* pStatsString);
619 
620 #endif // #if VMA_STATS_STRING_ENABLED
621 
624 
629 VK_DEFINE_HANDLE(VmaPool)
630 
631 typedef enum VmaMemoryUsage
632 {
638 
641 
644 
648 
663 
702 
705 typedef VkFlags VmaAllocationCreateFlags;
706 
708 {
721  VkMemoryPropertyFlags requiredFlags;
727  VkMemoryPropertyFlags preferredFlags;
729  void* pUserData;
734  VmaPool pool;
736 
751 VkResult vmaFindMemoryTypeIndex(
752  VmaAllocator allocator,
753  uint32_t memoryTypeBits,
754  const VmaAllocationCreateInfo* pAllocationCreateInfo,
755  uint32_t* pMemoryTypeIndex);
756 
759 
764 typedef enum VmaPoolCreateFlagBits {
793 
796 typedef VkFlags VmaPoolCreateFlags;
797 
800 typedef struct VmaPoolCreateInfo {
803  uint32_t memoryTypeIndex;
811  VkDeviceSize blockSize;
838  uint32_t frameInUseCount;
840 
843 typedef struct VmaPoolStats {
846  VkDeviceSize size;
849  VkDeviceSize unusedSize;
862  VkDeviceSize unusedRangeSizeMax;
863 } VmaPoolStats;
864 
871 VkResult vmaCreatePool(
872  VmaAllocator allocator,
873  const VmaPoolCreateInfo* pCreateInfo,
874  VmaPool* pPool);
875 
878 void vmaDestroyPool(
879  VmaAllocator allocator,
880  VmaPool pool);
881 
888 void vmaGetPoolStats(
889  VmaAllocator allocator,
890  VmaPool pool,
891  VmaPoolStats* pPoolStats);
892 
900  VmaAllocator allocator,
901  VmaPool pool,
902  size_t* pLostAllocationCount);
903 
904 VK_DEFINE_HANDLE(VmaAllocation)
905 
906 
908 typedef struct VmaAllocationInfo {
913  uint32_t memoryType;
922  VkDeviceMemory deviceMemory;
927  VkDeviceSize offset;
932  VkDeviceSize size;
938  void* pMappedData;
943  void* pUserData;
945 
956 VkResult vmaAllocateMemory(
957  VmaAllocator allocator,
958  const VkMemoryRequirements* pVkMemoryRequirements,
959  const VmaAllocationCreateInfo* pCreateInfo,
960  VmaAllocation* pAllocation,
961  VmaAllocationInfo* pAllocationInfo);
962 
970  VmaAllocator allocator,
971  VkBuffer buffer,
972  const VmaAllocationCreateInfo* pCreateInfo,
973  VmaAllocation* pAllocation,
974  VmaAllocationInfo* pAllocationInfo);
975 
978  VmaAllocator allocator,
979  VkImage image,
980  const VmaAllocationCreateInfo* pCreateInfo,
981  VmaAllocation* pAllocation,
982  VmaAllocationInfo* pAllocationInfo);
983 
985 void vmaFreeMemory(
986  VmaAllocator allocator,
987  VmaAllocation allocation);
988 
991  VmaAllocator allocator,
992  VmaAllocation allocation,
993  VmaAllocationInfo* pAllocationInfo);
994 
997  VmaAllocator allocator,
998  VmaAllocation allocation,
999  void* pUserData);
1000 
1012  VmaAllocator allocator,
1013  VmaAllocation* pAllocation);
1014 
1023 VkResult vmaMapMemory(
1024  VmaAllocator allocator,
1025  VmaAllocation allocation,
1026  void** ppData);
1027 
1028 void vmaUnmapMemory(
1029  VmaAllocator allocator,
1030  VmaAllocation allocation);
1031 
1053 void vmaUnmapPersistentlyMappedMemory(VmaAllocator allocator);
1054 
1062 VkResult vmaMapPersistentlyMappedMemory(VmaAllocator allocator);
1063 
1065 typedef struct VmaDefragmentationInfo {
1070  VkDeviceSize maxBytesToMove;
1077 
1079 typedef struct VmaDefragmentationStats {
1081  VkDeviceSize bytesMoved;
1083  VkDeviceSize bytesFreed;
1089 
1160 VkResult vmaDefragment(
1161  VmaAllocator allocator,
1162  VmaAllocation* pAllocations,
1163  size_t allocationCount,
1164  VkBool32* pAllocationsChanged,
1165  const VmaDefragmentationInfo *pDefragmentationInfo,
1166  VmaDefragmentationStats* pDefragmentationStats);
1167 
1170 
1193 VkResult vmaCreateBuffer(
1194  VmaAllocator allocator,
1195  const VkBufferCreateInfo* pBufferCreateInfo,
1196  const VmaAllocationCreateInfo* pAllocationCreateInfo,
1197  VkBuffer* pBuffer,
1198  VmaAllocation* pAllocation,
1199  VmaAllocationInfo* pAllocationInfo);
1200 
1209 void vmaDestroyBuffer(
1210  VmaAllocator allocator,
1211  VkBuffer buffer,
1212  VmaAllocation allocation);
1213 
1215 VkResult vmaCreateImage(
1216  VmaAllocator allocator,
1217  const VkImageCreateInfo* pImageCreateInfo,
1218  const VmaAllocationCreateInfo* pAllocationCreateInfo,
1219  VkImage* pImage,
1220  VmaAllocation* pAllocation,
1221  VmaAllocationInfo* pAllocationInfo);
1222 
1231 void vmaDestroyImage(
1232  VmaAllocator allocator,
1233  VkImage image,
1234  VmaAllocation allocation);
1235 
1238 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
1239 
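A minimal usage sketch of the interface above (not part of the header; `physicalDevice` and `device` are assumed to be a valid VkPhysicalDevice and VkDevice):

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;
    VmaAllocator allocator;
    vmaCreateAllocator(&allocatorInfo, &allocator);

    VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufferInfo.size = 65536;
    bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;

    VmaAllocationCreateInfo allocInfo = {};
    allocInfo.requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;

    VkBuffer buffer;
    VmaAllocation allocation;
    vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr);
    // ... use the buffer ...
    vmaDestroyBuffer(allocator, buffer, allocation);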
1240 // For Visual Studio IntelliSense.
1241 #ifdef __INTELLISENSE__
1242 #define VMA_IMPLEMENTATION
1243 #endif
1244 
1245 #ifdef VMA_IMPLEMENTATION
1246 #undef VMA_IMPLEMENTATION
1247 
1248 #include <cstdint>
1249 #include <cstdlib>
1250 #include <cstring>
1251 
1252 /*******************************************************************************
1253 CONFIGURATION SECTION
1254 
1255 Define some of these macros before each #include of this header, or change them
1256 here if you need behavior other than the default for your environment.
1257 */
1258 
1259 /*
1260 Define this macro to 1 to make the library fetch pointers to Vulkan functions
1261 internally, like:
1262 
1263  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
1264 
1265 Define it to 0 if you are going to provide your own pointers to Vulkan functions via
1266 VmaAllocatorCreateInfo::pVulkanFunctions.
1267 */
1268 #ifndef VMA_STATIC_VULKAN_FUNCTIONS
1269 #define VMA_STATIC_VULKAN_FUNCTIONS 1
1270 #endif
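A minimal sketch of the second option, assuming the pointers come from statically linked entry points or were fetched via vkGetInstanceProcAddr/vkGetDeviceProcAddr:

    // Compiled with: #define VMA_STATIC_VULKAN_FUNCTIONS 0 before this #include.
    VmaVulkanFunctions vulkanFunctions = {};
    vulkanFunctions.vkGetPhysicalDeviceProperties = vkGetPhysicalDeviceProperties;
    vulkanFunctions.vkAllocateMemory = vkAllocateMemory;
    vulkanFunctions.vkFreeMemory = vkFreeMemory;
    // ... fill the remaining members of VmaVulkanFunctions the same way ...

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.pVulkanFunctions = &vulkanFunctions;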
1271 
1272 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
1273 //#define VMA_USE_STL_CONTAINERS 1
1274 
1275 /* Set this macro to 1 to make the library include and use STL containers:
1276 std::pair, std::vector, std::list, std::unordered_map.
1277 
1278 Set it to 0 or leave it undefined to make the library use its own implementation of
1279 the containers.
1280 */
1281 #if VMA_USE_STL_CONTAINERS
1282  #define VMA_USE_STL_VECTOR 1
1283  #define VMA_USE_STL_UNORDERED_MAP 1
1284  #define VMA_USE_STL_LIST 1
1285 #endif
1286 
1287 #if VMA_USE_STL_VECTOR
1288  #include <vector>
1289 #endif
1290 
1291 #if VMA_USE_STL_UNORDERED_MAP
1292  #include <unordered_map>
1293 #endif
1294 
1295 #if VMA_USE_STL_LIST
1296  #include <list>
1297 #endif
1298 
1299 /*
1300 The following headers are used in this CONFIGURATION section only, so feel free to
1301 remove them if not needed.
1302 */
1303 #include <cassert> // for assert
1304 #include <algorithm> // for min, max
1305 #include <mutex> // for std::mutex
1306 #include <atomic> // for std::atomic
1307 
1308 #if !defined(_WIN32)
1309  #include <malloc.h> // for aligned_alloc()
1310 #endif
1311 
1312 // Normal assert to check for programmer errors, especially in the Debug configuration.
1313 #ifndef VMA_ASSERT
1314  #ifdef _DEBUG
1315  #define VMA_ASSERT(expr) assert(expr)
1316  #else
1317  #define VMA_ASSERT(expr)
1318  #endif
1319 #endif
1320 
1321 // Assert that will be called very often, e.g. inside data structures such as operator[].
1322 // Making it non-empty can make the program slow.
1323 #ifndef VMA_HEAVY_ASSERT
1324  #ifdef _DEBUG
1325  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
1326  #else
1327  #define VMA_HEAVY_ASSERT(expr)
1328  #endif
1329 #endif
1330 
1331 #ifndef VMA_NULL
1332  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
1333  #define VMA_NULL nullptr
1334 #endif
1335 
1336 #ifndef VMA_ALIGN_OF
1337  #define VMA_ALIGN_OF(type) (__alignof(type))
1338 #endif
1339 
1340 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
1341  #if defined(_WIN32)
1342  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
1343  #else
1344  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
1345  #endif
1346 #endif
1347 
1348 #ifndef VMA_SYSTEM_FREE
1349  #if defined(_WIN32)
1350  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
1351  #else
1352  #define VMA_SYSTEM_FREE(ptr) free(ptr)
1353  #endif
1354 #endif
1355 
1356 #ifndef VMA_MIN
1357  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
1358 #endif
1359 
1360 #ifndef VMA_MAX
1361  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
1362 #endif
1363 
1364 #ifndef VMA_SWAP
1365  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
1366 #endif
1367 
1368 #ifndef VMA_SORT
1369  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
1370 #endif
1371 
1372 #ifndef VMA_DEBUG_LOG
1373  #define VMA_DEBUG_LOG(format, ...)
1374  /*
1375  #define VMA_DEBUG_LOG(format, ...) do { \
1376  printf(format, __VA_ARGS__); \
1377  printf("\n"); \
1378  } while(false)
1379  */
1380 #endif
1381 
1382 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
1383 #if VMA_STATS_STRING_ENABLED
1384  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
1385  {
1386  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
1387  }
1388  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
1389  {
1390  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
1391  }
1392  static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
1393  {
1394  snprintf(outStr, strLen, "%p", ptr);
1395  }
1396 #endif
1397 
1398 #ifndef VMA_MUTEX
1399  class VmaMutex
1400  {
1401  public:
1402  VmaMutex() { }
1403  ~VmaMutex() { }
1404  void Lock() { m_Mutex.lock(); }
1405  void Unlock() { m_Mutex.unlock(); }
1406  private:
1407  std::mutex m_Mutex;
1408  };
1409  #define VMA_MUTEX VmaMutex
1410 #endif
1411 
1412 /*
1413 If providing your own implementation, you need to implement a subset of std::atomic:
1414 
1415 - Constructor(uint32_t desired)
1416 - uint32_t load() const
1417 - void store(uint32_t desired)
1418 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
1419 */
1420 #ifndef VMA_ATOMIC_UINT32
1421  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
1422 #endif
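A sketch of a custom replacement: it only needs the four members listed above. This one simply forwards to std::atomic, purely for illustration:

    class MyAtomicUint32
    {
    public:
        MyAtomicUint32(uint32_t desired) : m_Value(desired) { }
        uint32_t load() const { return m_Value.load(); }
        void store(uint32_t desired) { m_Value.store(desired); }
        bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
        {
            return m_Value.compare_exchange_weak(expected, desired);
        }
    private:
        std::atomic<uint32_t> m_Value;
    };
    // #define VMA_ATOMIC_UINT32 MyAtomicUint32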
1423 
1424 #ifndef VMA_BEST_FIT
1425 
1437  #define VMA_BEST_FIT (1)
1438 #endif
1439 
1440 #ifndef VMA_DEBUG_ALWAYS_OWN_MEMORY
1441 
1445  #define VMA_DEBUG_ALWAYS_OWN_MEMORY (0)
1446 #endif
1447 
1448 #ifndef VMA_DEBUG_ALIGNMENT
1449 
1453  #define VMA_DEBUG_ALIGNMENT (1)
1454 #endif
1455 
1456 #ifndef VMA_DEBUG_MARGIN
1457 
1461  #define VMA_DEBUG_MARGIN (0)
1462 #endif
1463 
1464 #ifndef VMA_DEBUG_GLOBAL_MUTEX
1465 
1469  #define VMA_DEBUG_GLOBAL_MUTEX (0)
1470 #endif
1471 
1472 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
1473 
1477  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
1478 #endif
1479 
1480 #ifndef VMA_SMALL_HEAP_MAX_SIZE
1481  #define VMA_SMALL_HEAP_MAX_SIZE (512 * 1024 * 1024)
1483 #endif
1484 
1485 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
1486  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256 * 1024 * 1024)
1488 #endif
1489 
1490 #ifndef VMA_DEFAULT_SMALL_HEAP_BLOCK_SIZE
1491  #define VMA_DEFAULT_SMALL_HEAP_BLOCK_SIZE (64 * 1024 * 1024)
1493 #endif
1494 
1495 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
1496 
1497 /*******************************************************************************
1498 END OF CONFIGURATION
1499 */
1500 
1501 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
1502  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
1503 
1504 // Returns number of bits set to 1 in (v).
1505 static inline uint32_t CountBitsSet(uint32_t v)
1506 {
1507  uint32_t c = v - ((v >> 1) & 0x55555555);
1508  c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
1509  c = ((c >> 4) + c) & 0x0F0F0F0F;
1510  c = ((c >> 8) + c) & 0x00FF00FF;
1511  c = ((c >> 16) + c) & 0x0000FFFF;
1512  return c;
1513 }
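For example, CountBitsSet(0xB) returns 3, because 0xB is binary 1011. The function is the standard branch-free popcount that sums bits in progressively wider groups (pairs, nibbles, bytes, and so on).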
1514 
1515 // Aligns given value up to nearest multiple of align value. For example: VmaAlignUp(11, 8) = 16.
1516 // Use types like uint32_t, uint64_t as T.
1517 template <typename T>
1518 static inline T VmaAlignUp(T val, T align)
1519 {
1520  return (val + align - 1) / align * align;
1521 }
1522 
1523 // Division with mathematical rounding to the nearest integer.
1524 template <typename T>
1525 inline T VmaRoundDiv(T x, T y)
1526 {
1527  return (x + (y / (T)2)) / y;
1528 }
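For example, VmaRoundDiv(10, 4) returns 3: adding half of the divisor before the integer division, (10 + 2) / 4, rounds the exact quotient 2.5 up to the nearest integer, while VmaRoundDiv(9, 4) = (9 + 2) / 4 returns 2.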
1529 
1530 #ifndef VMA_SORT
1531 
1532 template<typename Iterator, typename Compare>
1533 Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
1534 {
1535  Iterator centerValue = end; --centerValue;
1536  Iterator insertIndex = beg;
1537  for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
1538  {
1539  if(cmp(*memTypeIndex, *centerValue))
1540  {
1541  if(insertIndex != memTypeIndex)
1542  {
1543  VMA_SWAP(*memTypeIndex, *insertIndex);
1544  }
1545  ++insertIndex;
1546  }
1547  }
1548  if(insertIndex != centerValue)
1549  {
1550  VMA_SWAP(*insertIndex, *centerValue);
1551  }
1552  return insertIndex;
1553 }
1554 
1555 template<typename Iterator, typename Compare>
1556 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
1557 {
1558  if(beg < end)
1559  {
1560  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
1561  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
1562  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
1563  }
1564 }
1565 
1566 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
1567 
1568 #endif // #ifndef VMA_SORT
1569 
1570 /*
1571 Returns true if two memory blocks occupy overlapping pages.
1572 ResourceA must be at a lower memory offset than ResourceB.
1573 
1574 The algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)",
1575 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
1576 */
1577 static inline bool VmaBlocksOnSamePage(
1578  VkDeviceSize resourceAOffset,
1579  VkDeviceSize resourceASize,
1580  VkDeviceSize resourceBOffset,
1581  VkDeviceSize pageSize)
1582 {
1583  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
1584  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
1585  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
1586  VkDeviceSize resourceBStart = resourceBOffset;
1587  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
1588  return resourceAEndPage == resourceBStartPage;
1589 }
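A worked example with pageSize = 4096 (bufferImageGranularity is always a power of two):

    // Resource A: offset 0, size 100 -> last byte 99 -> page 0 (99 & ~4095 == 0).
    VmaBlocksOnSamePage(0, 100, 4000, 4096); // B starts on page 0 too -> true
    VmaBlocksOnSamePage(0, 100, 4096, 4096); // B starts on page 1 -> false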
1590 
1591 enum VmaSuballocationType
1592 {
1593  VMA_SUBALLOCATION_TYPE_FREE = 0,
1594  VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
1595  VMA_SUBALLOCATION_TYPE_BUFFER = 2,
1596  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
1597  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
1598  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
1599  VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
1600 };
1601 
1602 /*
1603 Returns true if the given suballocation types could conflict and must respect
1604 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one of them is
1605 a buffer or a linear image and the other is an optimal image. If a type is
1606 unknown, the function behaves conservatively.
1607 */
1608 static inline bool VmaIsBufferImageGranularityConflict(
1609  VmaSuballocationType suballocType1,
1610  VmaSuballocationType suballocType2)
1611 {
1612  if(suballocType1 > suballocType2)
1613  {
1614  VMA_SWAP(suballocType1, suballocType2);
1615  }
1616 
1617  switch(suballocType1)
1618  {
1619  case VMA_SUBALLOCATION_TYPE_FREE:
1620  return false;
1621  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
1622  return true;
1623  case VMA_SUBALLOCATION_TYPE_BUFFER:
1624  return
1625  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
1626  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
1627  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
1628  return
1629  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
1630  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
1631  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
1632  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
1633  return
1634  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
1635  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
1636  return false;
1637  default:
1638  VMA_ASSERT(0);
1639  return true;
1640  }
1641 }
1642 
1643 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
1644 struct VmaMutexLock
1645 {
1646 public:
1647  VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
1648  m_pMutex(useMutex ? &mutex : VMA_NULL)
1649  {
1650  if(m_pMutex)
1651  {
1652  m_pMutex->Lock();
1653  }
1654  }
1655 
1656  ~VmaMutexLock()
1657  {
1658  if(m_pMutex)
1659  {
1660  m_pMutex->Unlock();
1661  }
1662  }
1663 
1664 private:
1665  VMA_MUTEX* m_pMutex;
1666 };
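A usage sketch: the lock covers the rest of the enclosing scope, and passing useMutex = false makes it a no-op, so callers can make synchronization optional:

    {
        VmaMutexLock lock(m_Mutex, useMutex); // locks only if useMutex is true
        // ... critical section ...
    } // unlocked automatically here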
1667 
1668 #if VMA_DEBUG_GLOBAL_MUTEX
1669  static VMA_MUTEX gDebugGlobalMutex;
1670  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
1671 #else
1672  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
1673 #endif
1674 
1675 // Minimum size of a free suballocation to register it in the free suballocation collection.
1676 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
1677 
1678 /*
1679 Performs a binary search and returns an iterator to the first element that is
1680 greater than or equal to (key), according to comparison (cmp).
1681 
1682 Cmp should return true if its first argument is less than its second argument.
1683 
1684 The returned iterator points to the found element if it is present in the
1685 collection, or to the place where a new element with value (key) should be inserted.
1686 */
1687 template <typename IterT, typename KeyT, typename CmpT>
1688 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpT cmp)
1689 {
1690  size_t down = 0, up = (end - beg);
1691  while(down < up)
1692  {
1693  const size_t mid = (down + up) / 2;
1694  if(cmp(*(beg+mid), key))
1695  {
1696  down = mid + 1;
1697  }
1698  else
1699  {
1700  up = mid;
1701  }
1702  }
1703  return beg + down;
1704 }
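A usage sketch on a sorted plain array, with a comparator that mirrors operator<:

    const int sorted[] = { 1, 3, 3, 7, 9 };
    const int* it = VmaBinaryFindFirstNotLess(
        sorted, sorted + 5, 5, [](int a, int b) { return a < b; });
    // it points to 7: the first element not less than 5, which is also
    // the position where a new element with value 5 would be inserted.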
1705 
1707 // Memory allocation
1708 
1709 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
1710 {
1711  if((pAllocationCallbacks != VMA_NULL) &&
1712  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
1713  {
1714  return (*pAllocationCallbacks->pfnAllocation)(
1715  pAllocationCallbacks->pUserData,
1716  size,
1717  alignment,
1718  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1719  }
1720  else
1721  {
1722  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
1723  }
1724 }
1725 
1726 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
1727 {
1728  if((pAllocationCallbacks != VMA_NULL) &&
1729  (pAllocationCallbacks->pfnFree != VMA_NULL))
1730  {
1731  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
1732  }
1733  else
1734  {
1735  VMA_SYSTEM_FREE(ptr);
1736  }
1737 }
1738 
1739 template<typename T>
1740 static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
1741 {
1742  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
1743 }
1744 
1745 template<typename T>
1746 static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
1747 {
1748  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
1749 }
1750 
1751 #define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
1752 
1753 #define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
1754 
1755 template<typename T>
1756 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
1757 {
1758  ptr->~T();
1759  VmaFree(pAllocationCallbacks, ptr);
1760 }
1761 
1762 template<typename T>
1763 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
1764 {
1765  if(ptr != VMA_NULL)
1766  {
1767  for(size_t i = count; i--; )
1768  {
1769  ptr[i].~T();
1770  }
1771  VmaFree(pAllocationCallbacks, ptr);
1772  }
1773 }
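A usage sketch of the macros above; MyClass stands for any default-constructible type:

    MyClass* obj = vma_new(pAllocationCallbacks, MyClass); // allocate + construct
    // ...
    vma_delete(pAllocationCallbacks, obj);                 // destruct + free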
1774 
1775 // STL-compatible allocator.
1776 template<typename T>
1777 class VmaStlAllocator
1778 {
1779 public:
1780  const VkAllocationCallbacks* const m_pCallbacks;
1781  typedef T value_type;
1782 
1783  VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
1784  template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }
1785 
1786  T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
1787  void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
1788 
1789  template<typename U>
1790  bool operator==(const VmaStlAllocator<U>& rhs) const
1791  {
1792  return m_pCallbacks == rhs.m_pCallbacks;
1793  }
1794  template<typename U>
1795  bool operator!=(const VmaStlAllocator<U>& rhs) const
1796  {
1797  return m_pCallbacks != rhs.m_pCallbacks;
1798  }
1799 
1800  VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
1801 };
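A usage sketch: the allocator plugs into standard containers so that their storage is routed through the same VkAllocationCallbacks:

    VmaStlAllocator<int> stlAllocator(pAllocationCallbacks);
    std::vector<int, VmaStlAllocator<int> > vec(stlAllocator);
    vec.push_back(42); // element storage goes through the callbacks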
1802 
1803 #if VMA_USE_STL_VECTOR
1804 
1805 #define VmaVector std::vector
1806 
1807 template<typename T, typename allocatorT>
1808 static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
1809 {
1810  vec.insert(vec.begin() + index, item);
1811 }
1812 
1813 template<typename T, typename allocatorT>
1814 static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
1815 {
1816  vec.erase(vec.begin() + index);
1817 }
1818 
1819 #else // #if VMA_USE_STL_VECTOR
1820 
1821 /* Class with an interface compatible with a subset of std::vector.
1822 T must be POD, because constructors and destructors are not called and memcpy
1823 is used for these objects. */
1824 template<typename T, typename AllocatorT>
1825 class VmaVector
1826 {
1827 public:
1828  typedef T value_type;
1829 
1830  VmaVector(const AllocatorT& allocator) :
1831  m_Allocator(allocator),
1832  m_pArray(VMA_NULL),
1833  m_Count(0),
1834  m_Capacity(0)
1835  {
1836  }
1837 
1838  VmaVector(size_t count, const AllocatorT& allocator) :
1839  m_Allocator(allocator),
1840  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
1841  m_Count(count),
1842  m_Capacity(count)
1843  {
1844  }
1845 
1846  VmaVector(const VmaVector<T, AllocatorT>& src) :
1847  m_Allocator(src.m_Allocator),
1848  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
1849  m_Count(src.m_Count),
1850  m_Capacity(src.m_Count)
1851  {
1852  if(m_Count != 0)
1853  {
1854  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
1855  }
1856  }
1857 
1858  ~VmaVector()
1859  {
1860  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
1861  }
1862 
1863  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
1864  {
1865  if(&rhs != this)
1866  {
1867  resize(rhs.m_Count);
1868  if(m_Count != 0)
1869  {
1870  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
1871  }
1872  }
1873  return *this;
1874  }
1875 
1876  bool empty() const { return m_Count == 0; }
1877  size_t size() const { return m_Count; }
1878  T* data() { return m_pArray; }
1879  const T* data() const { return m_pArray; }
1880 
1881  T& operator[](size_t index)
1882  {
1883  VMA_HEAVY_ASSERT(index < m_Count);
1884  return m_pArray[index];
1885  }
1886  const T& operator[](size_t index) const
1887  {
1888  VMA_HEAVY_ASSERT(index < m_Count);
1889  return m_pArray[index];
1890  }
1891 
1892  T& front()
1893  {
1894  VMA_HEAVY_ASSERT(m_Count > 0);
1895  return m_pArray[0];
1896  }
1897  const T& front() const
1898  {
1899  VMA_HEAVY_ASSERT(m_Count > 0);
1900  return m_pArray[0];
1901  }
1902  T& back()
1903  {
1904  VMA_HEAVY_ASSERT(m_Count > 0);
1905  return m_pArray[m_Count - 1];
1906  }
1907  const T& back() const
1908  {
1909  VMA_HEAVY_ASSERT(m_Count > 0);
1910  return m_pArray[m_Count - 1];
1911  }
1912 
1913  void reserve(size_t newCapacity, bool freeMemory = false)
1914  {
1915  newCapacity = VMA_MAX(newCapacity, m_Count);
1916 
1917  if((newCapacity < m_Capacity) && !freeMemory)
1918  {
1919  newCapacity = m_Capacity;
1920  }
1921 
1922  if(newCapacity != m_Capacity)
1923  {
1924  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
1925  if(m_Count != 0)
1926  {
1927  memcpy(newArray, m_pArray, m_Count * sizeof(T));
1928  }
1929  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
1930  m_Capacity = newCapacity;
1931  m_pArray = newArray;
1932  }
1933  }
1934 
1935  void resize(size_t newCount, bool freeMemory = false)
1936  {
1937  size_t newCapacity = m_Capacity;
1938  if(newCount > m_Capacity)
1939  {
1940  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
1941  }
1942  else if(freeMemory)
1943  {
1944  newCapacity = newCount;
1945  }
1946 
1947  if(newCapacity != m_Capacity)
1948  {
1949  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
1950  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
1951  if(elementsToCopy != 0)
1952  {
1953  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
1954  }
1955  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
1956  m_Capacity = newCapacity;
1957  m_pArray = newArray;
1958  }
1959 
1960  m_Count = newCount;
1961  }
1962 
1963  void clear(bool freeMemory = false)
1964  {
1965  resize(0, freeMemory);
1966  }
1967 
1968  void insert(size_t index, const T& src)
1969  {
1970  VMA_HEAVY_ASSERT(index <= m_Count);
1971  const size_t oldCount = size();
1972  resize(oldCount + 1);
1973  if(index < oldCount)
1974  {
1975  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
1976  }
1977  m_pArray[index] = src;
1978  }
1979 
1980  void remove(size_t index)
1981  {
1982  VMA_HEAVY_ASSERT(index < m_Count);
1983  const size_t oldCount = size();
1984  if(index < oldCount - 1)
1985  {
1986  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
1987  }
1988  resize(oldCount - 1);
1989  }
1990 
1991  void push_back(const T& src)
1992  {
1993  const size_t newIndex = size();
1994  resize(newIndex + 1);
1995  m_pArray[newIndex] = src;
1996  }
1997 
1998  void pop_back()
1999  {
2000  VMA_HEAVY_ASSERT(m_Count > 0);
2001  resize(size() - 1);
2002  }
2003 
2004  void push_front(const T& src)
2005  {
2006  insert(0, src);
2007  }
2008 
2009  void pop_front()
2010  {
2011  VMA_HEAVY_ASSERT(m_Count > 0);
2012  remove(0);
2013  }
2014 
2015  typedef T* iterator;
2016 
2017  iterator begin() { return m_pArray; }
2018  iterator end() { return m_pArray + m_Count; }
2019 
2020 private:
2021  AllocatorT m_Allocator;
2022  T* m_pArray;
2023  size_t m_Count;
2024  size_t m_Capacity;
2025 };
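
// Example (illustrative): basic VmaVector usage. Remember that elements are
// moved with memcpy, so T must be POD. The function name is hypothetical.
#if 0
static void VmaExample_Vector(const VkAllocationCallbacks* pCallbacks)
{
    const VmaStlAllocator<uint32_t> alloc(pCallbacks);
    VmaVector< uint32_t, VmaStlAllocator<uint32_t> > v(alloc);
    v.push_back(1); // v = {1}
    v.insert(0, 2); // v = {2, 1}
    v.remove(1);    // v = {2}
    v.resize(4);    // Appended elements stay uninitialized.
}
#endif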
2026 
2027 template<typename T, typename allocatorT>
2028 static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
2029 {
2030  vec.insert(index, item);
2031 }
2032 
2033 template<typename T, typename allocatorT>
2034 static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
2035 {
2036  vec.remove(index);
2037 }
2038 
2039 #endif // #if VMA_USE_STL_VECTOR
2040 
2041 template<typename CmpLess, typename VectorT>
2042 size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
2043 {
2044  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
2045  vector.data(),
2046  vector.data() + vector.size(),
2047  value,
2048  CmpLess()) - vector.data();
2049  VmaVectorInsert(vector, indexToInsert, value);
2050  return indexToInsert;
2051 }
2052 
2053 template<typename CmpLess, typename VectorT>
2054 bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
2055 {
2056  CmpLess comparator;
2057  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
2058  vector.begin(),
2059  vector.end(),
2060  value,
2061  comparator);
2062  if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
2063  {
2064  size_t indexToRemove = it - vector.begin();
2065  VmaVectorRemove(vector, indexToRemove);
2066  return true;
2067  }
2068  return false;
2069 }
2070 
2071 template<typename CmpLess, typename VectorT>
2072 size_t VmaVectorFindSorted(const VectorT& vector, const typename VectorT::value_type& value)
2073 {
2074  CmpLess comparator;
2075  const typename VectorT::value_type* it = VmaBinaryFindFirstNotLess(
2076  vector.data(),
2077  vector.data() + vector.size(),
2078  value,
2079  comparator);
2080  if((it != vector.data() + vector.size()) && !comparator(*it, value) && !comparator(value, *it))
2081  {
2082  return size_t(it - vector.data());
2083  }
2084  else
2085  {
2086  return vector.size();
2087  }
2088 }
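
// Example (illustrative): keeping a VmaVector sorted with the helpers above.
// The comparator and function names are hypothetical.
#if 0
struct VmaExampleIntLess
{
    bool operator()(int lhs, int rhs) const { return lhs < rhs; }
};
static void VmaExample_SortedVector(const VkAllocationCallbacks* pCallbacks)
{
    const VmaStlAllocator<int> alloc(pCallbacks);
    VmaVector< int, VmaStlAllocator<int> > v(alloc);
    VmaVectorInsertSorted<VmaExampleIntLess>(v, 5); // v = {5}
    VmaVectorInsertSorted<VmaExampleIntLess>(v, 3); // v = {3, 5}
    const bool removed = VmaVectorRemoveSorted<VmaExampleIntLess>(v, 5); // true, v = {3}
}
#endif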
2089 
2090 ////////////////////////////////////////////////////////////////////////////////
2091 // class VmaPoolAllocator
2092 
2093 /*
2094 Allocator for objects of type T, using a list of arrays (pools) to speed up
2095 allocation. The number of elements that can be allocated is not bounded,
2096 because the allocator can create multiple blocks.
2097 */
2098 template<typename T>
2099 class VmaPoolAllocator
2100 {
2101 public:
2102  VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
2103  ~VmaPoolAllocator();
2104  void Clear();
2105  T* Alloc();
2106  void Free(T* ptr);
2107 
2108 private:
2109  union Item
2110  {
2111  uint32_t NextFreeIndex;
2112  T Value;
2113  };
2114 
2115  struct ItemBlock
2116  {
2117  Item* pItems;
2118  uint32_t FirstFreeIndex;
2119  };
2120 
2121  const VkAllocationCallbacks* m_pAllocationCallbacks;
2122  size_t m_ItemsPerBlock;
2123  VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;
2124 
2125  ItemBlock& CreateNewBlock();
2126 };
2127 
2128 template<typename T>
2129 VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
2130  m_pAllocationCallbacks(pAllocationCallbacks),
2131  m_ItemsPerBlock(itemsPerBlock),
2132  m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
2133 {
2134  VMA_ASSERT(itemsPerBlock > 0);
2135 }
2136 
2137 template<typename T>
2138 VmaPoolAllocator<T>::~VmaPoolAllocator()
2139 {
2140  Clear();
2141 }
2142 
2143 template<typename T>
2144 void VmaPoolAllocator<T>::Clear()
2145 {
2146  for(size_t i = m_ItemBlocks.size(); i--; )
2147  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
2148  m_ItemBlocks.clear();
2149 }
2150 
2151 template<typename T>
2152 T* VmaPoolAllocator<T>::Alloc()
2153 {
2154  for(size_t i = m_ItemBlocks.size(); i--; )
2155  {
2156  ItemBlock& block = m_ItemBlocks[i];
2157  // This block has some free items: Use first one.
2158  if(block.FirstFreeIndex != UINT32_MAX)
2159  {
2160  Item* const pItem = &block.pItems[block.FirstFreeIndex];
2161  block.FirstFreeIndex = pItem->NextFreeIndex;
2162  return &pItem->Value;
2163  }
2164  }
2165 
2166  // No block has a free item: create a new one and use it.
2167  ItemBlock& newBlock = CreateNewBlock();
2168  Item* const pItem = &newBlock.pItems[0];
2169  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
2170  return &pItem->Value;
2171 }
2172 
2173 template<typename T>
2174 void VmaPoolAllocator<T>::Free(T* ptr)
2175 {
2176  // Search all memory blocks to find ptr.
2177  for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
2178  {
2179  ItemBlock& block = m_ItemBlocks[i];
2180 
2181  // Reinterpret ptr as a pointer to the enclosing union Item (same address), copied via memcpy.
2182  Item* pItemPtr;
2183  memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
2184 
2185  // Check if pItemPtr is in address range of this block.
2186  if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
2187  {
2188  const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
2189  pItemPtr->NextFreeIndex = block.FirstFreeIndex;
2190  block.FirstFreeIndex = index;
2191  return;
2192  }
2193  }
2194  VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
2195 }
2196 
2197 template<typename T>
2198 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
2199 {
2200  ItemBlock newBlock = {
2201  vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };
2202 
2203  m_ItemBlocks.push_back(newBlock);
2204 
2205  // Set up the singly-linked list of all free items in this block (newBlock.pItems aliases the array now owned by m_ItemBlocks.back()).
2206  for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
2207  newBlock.pItems[i].NextFreeIndex = i + 1;
2208  newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
2209  return m_ItemBlocks.back();
2210 }
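
// Example (illustrative): VmaPoolAllocator hands out raw storage from its
// blocks - constructors and destructors of T are NOT run by Alloc/Free. The
// function name is hypothetical.
#if 0
static void VmaExample_PoolAllocator(const VkAllocationCallbacks* pCallbacks)
{
    VmaPoolAllocator<uint64_t> pool(pCallbacks, 32); // 32 items per block.
    uint64_t* const item = pool.Alloc(); // No constructor is called.
    *item = 123;
    pool.Free(item); // No destructor is called; the slot returns to the free list.
}
#endif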
2211 
2212 ////////////////////////////////////////////////////////////////////////////////
2213 // class VmaRawList, VmaList
2214 
2215 #if VMA_USE_STL_LIST
2216 
2217 #define VmaList std::list
2218 
2219 #else // #if VMA_USE_STL_LIST
2220 
2221 template<typename T>
2222 struct VmaListItem
2223 {
2224  VmaListItem* pPrev;
2225  VmaListItem* pNext;
2226  T Value;
2227 };
2228 
2229 // Doubly linked list.
2230 template<typename T>
2231 class VmaRawList
2232 {
2233 public:
2234  typedef VmaListItem<T> ItemType;
2235 
2236  VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
2237  ~VmaRawList();
2238  void Clear();
2239 
2240  size_t GetCount() const { return m_Count; }
2241  bool IsEmpty() const { return m_Count == 0; }
2242 
2243  ItemType* Front() { return m_pFront; }
2244  const ItemType* Front() const { return m_pFront; }
2245  ItemType* Back() { return m_pBack; }
2246  const ItemType* Back() const { return m_pBack; }
2247 
2248  ItemType* PushBack();
2249  ItemType* PushFront();
2250  ItemType* PushBack(const T& value);
2251  ItemType* PushFront(const T& value);
2252  void PopBack();
2253  void PopFront();
2254 
2255  // Item can be null - it means PushBack.
2256  ItemType* InsertBefore(ItemType* pItem);
2257  // Item can be null - it means PushFront.
2258  ItemType* InsertAfter(ItemType* pItem);
2259 
2260  ItemType* InsertBefore(ItemType* pItem, const T& value);
2261  ItemType* InsertAfter(ItemType* pItem, const T& value);
2262 
2263  void Remove(ItemType* pItem);
2264 
2265 private:
2266  const VkAllocationCallbacks* const m_pAllocationCallbacks;
2267  VmaPoolAllocator<ItemType> m_ItemAllocator;
2268  ItemType* m_pFront;
2269  ItemType* m_pBack;
2270  size_t m_Count;
2271 
2272  // Declared not defined, to block copy constructor and assignment operator.
2273  VmaRawList(const VmaRawList<T>& src);
2274  VmaRawList<T>& operator=(const VmaRawList<T>& rhs);
2275 };
2276 
2277 template<typename T>
2278 VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
2279  m_pAllocationCallbacks(pAllocationCallbacks),
2280  m_ItemAllocator(pAllocationCallbacks, 128),
2281  m_pFront(VMA_NULL),
2282  m_pBack(VMA_NULL),
2283  m_Count(0)
2284 {
2285 }
2286 
2287 template<typename T>
2288 VmaRawList<T>::~VmaRawList()
2289 {
2290  // Intentionally not calling Clear, because that would waste time returning
2291  // all items to m_ItemAllocator as free.
2292 }
2293 
2294 template<typename T>
2295 void VmaRawList<T>::Clear()
2296 {
2297  if(IsEmpty() == false)
2298  {
2299  ItemType* pItem = m_pBack;
2300  while(pItem != VMA_NULL)
2301  {
2302  ItemType* const pPrevItem = pItem->pPrev;
2303  m_ItemAllocator.Free(pItem);
2304  pItem = pPrevItem;
2305  }
2306  m_pFront = VMA_NULL;
2307  m_pBack = VMA_NULL;
2308  m_Count = 0;
2309  }
2310 }
2311 
2312 template<typename T>
2313 VmaListItem<T>* VmaRawList<T>::PushBack()
2314 {
2315  ItemType* const pNewItem = m_ItemAllocator.Alloc();
2316  pNewItem->pNext = VMA_NULL;
2317  if(IsEmpty())
2318  {
2319  pNewItem->pPrev = VMA_NULL;
2320  m_pFront = pNewItem;
2321  m_pBack = pNewItem;
2322  m_Count = 1;
2323  }
2324  else
2325  {
2326  pNewItem->pPrev = m_pBack;
2327  m_pBack->pNext = pNewItem;
2328  m_pBack = pNewItem;
2329  ++m_Count;
2330  }
2331  return pNewItem;
2332 }
2333 
2334 template<typename T>
2335 VmaListItem<T>* VmaRawList<T>::PushFront()
2336 {
2337  ItemType* const pNewItem = m_ItemAllocator.Alloc();
2338  pNewItem->pPrev = VMA_NULL;
2339  if(IsEmpty())
2340  {
2341  pNewItem->pNext = VMA_NULL;
2342  m_pFront = pNewItem;
2343  m_pBack = pNewItem;
2344  m_Count = 1;
2345  }
2346  else
2347  {
2348  pNewItem->pNext = m_pFront;
2349  m_pFront->pPrev = pNewItem;
2350  m_pFront = pNewItem;
2351  ++m_Count;
2352  }
2353  return pNewItem;
2354 }
2355 
2356 template<typename T>
2357 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
2358 {
2359  ItemType* const pNewItem = PushBack();
2360  pNewItem->Value = value;
2361  return pNewItem;
2362 }
2363 
2364 template<typename T>
2365 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
2366 {
2367  ItemType* const pNewItem = PushFront();
2368  pNewItem->Value = value;
2369  return pNewItem;
2370 }
2371 
2372 template<typename T>
2373 void VmaRawList<T>::PopBack()
2374 {
2375  VMA_HEAVY_ASSERT(m_Count > 0);
2376  ItemType* const pBackItem = m_pBack;
2377  ItemType* const pPrevItem = pBackItem->pPrev;
2378  if(pPrevItem != VMA_NULL)
2379  {
2380  pPrevItem->pNext = VMA_NULL;
2381  }
2382  m_pBack = pPrevItem;
2383  m_ItemAllocator.Free(pBackItem);
2384  --m_Count;
2385 }
2386 
2387 template<typename T>
2388 void VmaRawList<T>::PopFront()
2389 {
2390  VMA_HEAVY_ASSERT(m_Count > 0);
2391  ItemType* const pFrontItem = m_pFront;
2392  ItemType* const pNextItem = pFrontItem->pNext;
2393  if(pNextItem != VMA_NULL)
2394  {
2395  pNextItem->pPrev = VMA_NULL;
2396  }
2397  m_pFront = pNextItem;
2398  m_ItemAllocator.Free(pFrontItem);
2399  --m_Count;
2400 }
2401 
2402 template<typename T>
2403 void VmaRawList<T>::Remove(ItemType* pItem)
2404 {
2405  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
2406  VMA_HEAVY_ASSERT(m_Count > 0);
2407 
2408  if(pItem->pPrev != VMA_NULL)
2409  {
2410  pItem->pPrev->pNext = pItem->pNext;
2411  }
2412  else
2413  {
2414  VMA_HEAVY_ASSERT(m_pFront == pItem);
2415  m_pFront = pItem->pNext;
2416  }
2417 
2418  if(pItem->pNext != VMA_NULL)
2419  {
2420  pItem->pNext->pPrev = pItem->pPrev;
2421  }
2422  else
2423  {
2424  VMA_HEAVY_ASSERT(m_pBack == pItem);
2425  m_pBack = pItem->pPrev;
2426  }
2427 
2428  m_ItemAllocator.Free(pItem);
2429  --m_Count;
2430 }
2431 
2432 template<typename T>
2433 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
2434 {
2435  if(pItem != VMA_NULL)
2436  {
2437  ItemType* const prevItem = pItem->pPrev;
2438  ItemType* const newItem = m_ItemAllocator.Alloc();
2439  newItem->pPrev = prevItem;
2440  newItem->pNext = pItem;
2441  pItem->pPrev = newItem;
2442  if(prevItem != VMA_NULL)
2443  {
2444  prevItem->pNext = newItem;
2445  }
2446  else
2447  {
2448  VMA_HEAVY_ASSERT(m_pFront == pItem);
2449  m_pFront = newItem;
2450  }
2451  ++m_Count;
2452  return newItem;
2453  }
2454  else
2455  return PushBack();
2456 }
2457 
2458 template<typename T>
2459 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
2460 {
2461  if(pItem != VMA_NULL)
2462  {
2463  ItemType* const nextItem = pItem->pNext;
2464  ItemType* const newItem = m_ItemAllocator.Alloc();
2465  newItem->pNext = nextItem;
2466  newItem->pPrev = pItem;
2467  pItem->pNext = newItem;
2468  if(nextItem != VMA_NULL)
2469  {
2470  nextItem->pPrev = newItem;
2471  }
2472  else
2473  {
2474  VMA_HEAVY_ASSERT(m_pBack == pItem);
2475  m_pBack = newItem;
2476  }
2477  ++m_Count;
2478  return newItem;
2479  }
2480  else
2481  return PushFront();
2482 }
2483 
2484 template<typename T>
2485 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
2486 {
2487  ItemType* const newItem = InsertBefore(pItem);
2488  newItem->Value = value;
2489  return newItem;
2490 }
2491 
2492 template<typename T>
2493 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
2494 {
2495  ItemType* const newItem = InsertAfter(pItem);
2496  newItem->Value = value;
2497  return newItem;
2498 }
2499 
2500 template<typename T, typename AllocatorT>
2501 class VmaList
2502 {
2503 public:
2504  class iterator
2505  {
2506  public:
2507  iterator() :
2508  m_pList(VMA_NULL),
2509  m_pItem(VMA_NULL)
2510  {
2511  }
2512 
2513  T& operator*() const
2514  {
2515  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
2516  return m_pItem->Value;
2517  }
2518  T* operator->() const
2519  {
2520  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
2521  return &m_pItem->Value;
2522  }
2523 
2524  iterator& operator++()
2525  {
2526  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
2527  m_pItem = m_pItem->pNext;
2528  return *this;
2529  }
2530  iterator& operator--()
2531  {
2532  if(m_pItem != VMA_NULL)
2533  {
2534  m_pItem = m_pItem->pPrev;
2535  }
2536  else
2537  {
2538  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
2539  m_pItem = m_pList->Back();
2540  }
2541  return *this;
2542  }
2543 
2544  iterator operator++(int)
2545  {
2546  iterator result = *this;
2547  ++*this;
2548  return result;
2549  }
2550  iterator operator--(int)
2551  {
2552  iterator result = *this;
2553  --*this;
2554  return result;
2555  }
2556 
2557  bool operator==(const iterator& rhs) const
2558  {
2559  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
2560  return m_pItem == rhs.m_pItem;
2561  }
2562  bool operator!=(const iterator& rhs) const
2563  {
2564  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
2565  return m_pItem != rhs.m_pItem;
2566  }
2567 
2568  private:
2569  VmaRawList<T>* m_pList;
2570  VmaListItem<T>* m_pItem;
2571 
2572  iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
2573  m_pList(pList),
2574  m_pItem(pItem)
2575  {
2576  }
2577 
2578  friend class VmaList<T, AllocatorT>;
2579  };
2580 
2581  class const_iterator
2582  {
2583  public:
2584  const_iterator() :
2585  m_pList(VMA_NULL),
2586  m_pItem(VMA_NULL)
2587  {
2588  }
2589 
2590  const_iterator(const iterator& src) :
2591  m_pList(src.m_pList),
2592  m_pItem(src.m_pItem)
2593  {
2594  }
2595 
2596  const T& operator*() const
2597  {
2598  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
2599  return m_pItem->Value;
2600  }
2601  const T* operator->() const
2602  {
2603  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
2604  return &m_pItem->Value;
2605  }
2606 
2607  const_iterator& operator++()
2608  {
2609  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
2610  m_pItem = m_pItem->pNext;
2611  return *this;
2612  }
2613  const_iterator& operator--()
2614  {
2615  if(m_pItem != VMA_NULL)
2616  {
2617  m_pItem = m_pItem->pPrev;
2618  }
2619  else
2620  {
2621  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
2622  m_pItem = m_pList->Back();
2623  }
2624  return *this;
2625  }
2626 
2627  const_iterator operator++(int)
2628  {
2629  const_iterator result = *this;
2630  ++*this;
2631  return result;
2632  }
2633  const_iterator operator--(int)
2634  {
2635  const_iterator result = *this;
2636  --*this;
2637  return result;
2638  }
2639 
2640  bool operator==(const const_iterator& rhs) const
2641  {
2642  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
2643  return m_pItem == rhs.m_pItem;
2644  }
2645  bool operator!=(const const_iterator& rhs) const
2646  {
2647  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
2648  return m_pItem != rhs.m_pItem;
2649  }
2650 
2651  private:
2652  const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
2653  m_pList(pList),
2654  m_pItem(pItem)
2655  {
2656  }
2657 
2658  const VmaRawList<T>* m_pList;
2659  const VmaListItem<T>* m_pItem;
2660 
2661  friend class VmaList<T, AllocatorT>;
2662  };
2663 
2664  VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }
2665 
2666  bool empty() const { return m_RawList.IsEmpty(); }
2667  size_t size() const { return m_RawList.GetCount(); }
2668 
2669  iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
2670  iterator end() { return iterator(&m_RawList, VMA_NULL); }
2671 
2672  const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
2673  const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
2674 
2675  void clear() { m_RawList.Clear(); }
2676  void push_back(const T& value) { m_RawList.PushBack(value); }
2677  void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
2678  iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
2679 
2680 private:
2681  VmaRawList<T> m_RawList;
2682 };
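
// Example (illustrative): iterating a VmaList. The function name is
// hypothetical.
#if 0
static void VmaExample_List(const VkAllocationCallbacks* pCallbacks)
{
    const VmaStlAllocator<int> alloc(pCallbacks);
    VmaList< int, VmaStlAllocator<int> > lst(alloc);
    lst.push_back(10);
    lst.push_back(20);
    for(VmaList< int, VmaStlAllocator<int> >::iterator it = lst.begin();
        it != lst.end();
        ++it)
    {
        // *it visits 10, then 20.
    }
}
#endif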
2683 
2684 #endif // #if VMA_USE_STL_LIST
2685 
2686 ////////////////////////////////////////////////////////////////////////////////
2687 // class VmaMap
2688 
2689 // Unused in this version.
2690 #if 0
2691 
2692 #if VMA_USE_STL_UNORDERED_MAP
2693 
2694 #define VmaPair std::pair
2695 
2696 #define VMA_MAP_TYPE(KeyT, ValueT) \
2697  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
2698 
2699 #else // #if VMA_USE_STL_UNORDERED_MAP
2700 
2701 template<typename T1, typename T2>
2702 struct VmaPair
2703 {
2704  T1 first;
2705  T2 second;
2706 
2707  VmaPair() : first(), second() { }
2708  VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
2709 };
2710 
2711 /* Class compatible with a subset of the interface of std::unordered_map.
2712 KeyT, ValueT must be POD because they will be stored in VmaVector.
2713 */
2714 template<typename KeyT, typename ValueT>
2715 class VmaMap
2716 {
2717 public:
2718  typedef VmaPair<KeyT, ValueT> PairType;
2719  typedef PairType* iterator;
2720 
2721  VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }
2722 
2723  iterator begin() { return m_Vector.begin(); }
2724  iterator end() { return m_Vector.end(); }
2725 
2726  void insert(const PairType& pair);
2727  iterator find(const KeyT& key);
2728  void erase(iterator it);
2729 
2730 private:
2731  VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
2732 };
2733 
2734 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
2735 
2736 template<typename FirstT, typename SecondT>
2737 struct VmaPairFirstLess
2738 {
2739  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
2740  {
2741  return lhs.first < rhs.first;
2742  }
2743  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
2744  {
2745  return lhs.first < rhsFirst;
2746  }
2747 };
2748 
2749 template<typename KeyT, typename ValueT>
2750 void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
2751 {
2752  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
2753  m_Vector.data(),
2754  m_Vector.data() + m_Vector.size(),
2755  pair,
2756  VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
2757  VmaVectorInsert(m_Vector, indexToInsert, pair);
2758 }
2759 
2760 template<typename KeyT, typename ValueT>
2761 VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
2762 {
2763  PairType* it = VmaBinaryFindFirstNotLess(
2764  m_Vector.data(),
2765  m_Vector.data() + m_Vector.size(),
2766  key,
2767  VmaPairFirstLess<KeyT, ValueT>());
2768  if((it != m_Vector.end()) && (it->first == key))
2769  {
2770  return it;
2771  }
2772  else
2773  {
2774  return m_Vector.end();
2775  }
2776 }
2777 
2778 template<typename KeyT, typename ValueT>
2779 void VmaMap<KeyT, ValueT>::erase(iterator it)
2780 {
2781  VmaVectorRemove(m_Vector, it - m_Vector.begin());
2782 }
2783 
2784 #endif // #if VMA_USE_STL_UNORDERED_MAP
2785 
2786 #endif // #if 0
2787 
2788 ////////////////////////////////////////////////////////////////////////////////
2789 
2790 class VmaDeviceMemoryBlock;
2791 
2792 enum VMA_BLOCK_VECTOR_TYPE
2793 {
2794  VMA_BLOCK_VECTOR_TYPE_UNMAPPED,
2795  VMA_BLOCK_VECTOR_TYPE_MAPPED,
2796  VMA_BLOCK_VECTOR_TYPE_COUNT
2797 };
2798 
2799 static VMA_BLOCK_VECTOR_TYPE VmaAllocationCreateFlagsToBlockVectorType(VmaAllocationCreateFlags flags)
2800 {
2801  return (flags & VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT) != 0 ?
2802  VMA_BLOCK_VECTOR_TYPE_MAPPED :
2803  VMA_BLOCK_VECTOR_TYPE_UNMAPPED;
2804 }
2805 
2806 struct VmaAllocation_T
2807 {
2808 public:
2809  enum ALLOCATION_TYPE
2810  {
2811  ALLOCATION_TYPE_NONE,
2812  ALLOCATION_TYPE_BLOCK,
2813  ALLOCATION_TYPE_OWN,
2814  };
2815 
2816  VmaAllocation_T(uint32_t currentFrameIndex) :
2817  m_Alignment(1),
2818  m_Size(0),
2819  m_pUserData(VMA_NULL),
2820  m_Type(ALLOCATION_TYPE_NONE),
2821  m_SuballocationType(VMA_SUBALLOCATION_TYPE_UNKNOWN),
2822  m_LastUseFrameIndex(currentFrameIndex)
2823  {
2824  }
2825 
2826  void InitBlockAllocation(
2827  VmaPool hPool,
2828  VmaDeviceMemoryBlock* block,
2829  VkDeviceSize offset,
2830  VkDeviceSize alignment,
2831  VkDeviceSize size,
2832  VmaSuballocationType suballocationType,
2833  void* pUserData,
2834  bool canBecomeLost)
2835  {
2836  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
2837  VMA_ASSERT(block != VMA_NULL);
2838  m_Type = ALLOCATION_TYPE_BLOCK;
2839  m_Alignment = alignment;
2840  m_Size = size;
2841  m_pUserData = pUserData;
2842  m_SuballocationType = suballocationType;
2843  m_BlockAllocation.m_hPool = hPool;
2844  m_BlockAllocation.m_Block = block;
2845  m_BlockAllocation.m_Offset = offset;
2846  m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
2847  }
2848 
2849  void InitLost()
2850  {
2851  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
2852  VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
2853  m_Type = ALLOCATION_TYPE_BLOCK;
2854  m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
2855  m_BlockAllocation.m_Block = VMA_NULL;
2856  m_BlockAllocation.m_Offset = 0;
2857  m_BlockAllocation.m_CanBecomeLost = true;
2858  }
2859 
2860  void ChangeBlockAllocation(
2861  VmaDeviceMemoryBlock* block,
2862  VkDeviceSize offset)
2863  {
2864  VMA_ASSERT(block != VMA_NULL);
2865  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
2866  m_BlockAllocation.m_Block = block;
2867  m_BlockAllocation.m_Offset = offset;
2868  }
2869 
2870  void InitOwnAllocation(
2871  uint32_t memoryTypeIndex,
2872  VkDeviceMemory hMemory,
2873  VmaSuballocationType suballocationType,
2874  bool persistentMap,
2875  void* pMappedData,
2876  VkDeviceSize size,
2877  void* pUserData)
2878  {
2879  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
2880  VMA_ASSERT(hMemory != VK_NULL_HANDLE);
2881  m_Type = ALLOCATION_TYPE_OWN;
2882  m_Alignment = 0;
2883  m_Size = size;
2884  m_pUserData = pUserData;
2885  m_SuballocationType = suballocationType;
2886  m_OwnAllocation.m_MemoryTypeIndex = memoryTypeIndex;
2887  m_OwnAllocation.m_hMemory = hMemory;
2888  m_OwnAllocation.m_PersistentMap = persistentMap;
2889  m_OwnAllocation.m_pMappedData = pMappedData;
2890  }
2891 
2892  ALLOCATION_TYPE GetType() const { return m_Type; }
2893  VkDeviceSize GetAlignment() const { return m_Alignment; }
2894  VkDeviceSize GetSize() const { return m_Size; }
2895  void* GetUserData() const { return m_pUserData; }
2896  void SetUserData(void* pUserData) { m_pUserData = pUserData; }
2897  VmaSuballocationType GetSuballocationType() const { return m_SuballocationType; }
2898 
2899  VmaDeviceMemoryBlock* GetBlock() const
2900  {
2901  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
2902  return m_BlockAllocation.m_Block;
2903  }
2904  VkDeviceSize GetOffset() const;
2905  VkDeviceMemory GetMemory() const;
2906  uint32_t GetMemoryTypeIndex() const;
2907  VMA_BLOCK_VECTOR_TYPE GetBlockVectorType() const;
2908  void* GetMappedData() const;
2909  bool CanBecomeLost() const;
2910  VmaPool GetPool() const;
2911 
2912  VkResult OwnAllocMapPersistentlyMappedMemory(VmaAllocator hAllocator);
2913  void OwnAllocUnmapPersistentlyMappedMemory(VmaAllocator hAllocator);
2914 
2915  uint32_t GetLastUseFrameIndex() const
2916  {
2917  return m_LastUseFrameIndex.load();
2918  }
2919  bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
2920  {
2921  return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
2922  }
2923  /*
2924  - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
2925  makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
2926  - Else, returns false.
2927 
2928  If hAllocation is already lost, this function asserts - you should not call it then.
2929  If hAllocation was not created with CAN_BECOME_LOST_BIT, this function asserts.
2930  */
2931  bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
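 // Worked example (illustrative): with frameInUseCount = 2 and
 // LastUseFrameIndex = 10, MakeLost succeeds starting at
 // currentFrameIndex = 13, because 10 + 2 < 13.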
2932 
2933  void OwnAllocCalcStatsInfo(VmaStatInfo& outInfo)
2934  {
2935  VMA_ASSERT(m_Type == ALLOCATION_TYPE_OWN);
2936  outInfo.blockCount = 1;
2937  outInfo.allocationCount = 1;
2938  outInfo.unusedRangeCount = 0;
2939  outInfo.usedBytes = m_Size;
2940  outInfo.unusedBytes = 0;
2941  outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
2942  outInfo.unusedRangeSizeMin = UINT64_MAX;
2943  outInfo.unusedRangeSizeMax = 0;
2944  }
2945 
2946 private:
2947  VkDeviceSize m_Alignment;
2948  VkDeviceSize m_Size;
2949  void* m_pUserData;
2950  ALLOCATION_TYPE m_Type;
2951  VmaSuballocationType m_SuballocationType;
2952  VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
2953 
2954  // Allocation out of VmaDeviceMemoryBlock.
2955  struct BlockAllocation
2956  {
2957  VmaPool m_hPool; // Null if belongs to general memory.
2958  VmaDeviceMemoryBlock* m_Block;
2959  VkDeviceSize m_Offset;
2960  bool m_CanBecomeLost;
2961  };
2962 
2963  // Allocation for an object that has its own private VkDeviceMemory.
2964  struct OwnAllocation
2965  {
2966  uint32_t m_MemoryTypeIndex;
2967  VkDeviceMemory m_hMemory;
2968  bool m_PersistentMap;
2969  void* m_pMappedData;
2970  };
2971 
2972  union
2973  {
2974  // Allocation out of VmaDeviceMemoryBlock.
2975  BlockAllocation m_BlockAllocation;
2976  // Allocation for an object that has its own private VkDeviceMemory.
2977  OwnAllocation m_OwnAllocation;
2978  };
2979 };
2980 
2981 /*
2982 Represents a region of VmaDeviceMemoryBlock that is either assigned and returned
2983 as an allocated memory block, or free.
2984 */
2985 struct VmaSuballocation
2986 {
2987  VkDeviceSize offset;
2988  VkDeviceSize size;
2989  VmaAllocation hAllocation;
2990  VmaSuballocationType type;
2991 };
2992 
2993 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
2994 
2995 // Cost of making one additional allocation lost, expressed in bytes.
2996 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
2997 
2998 /*
2999 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
3000 
3001 If canMakeOtherLost was false:
3002 - item points to a FREE suballocation.
3003 - itemsToMakeLostCount is 0.
3004 
3005 If canMakeOtherLost was true:
3006 - item points to the first of a sequence of suballocations, which are either FREE
3007  or point to VmaAllocations that can become lost.
3008 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
3009  the requested allocation to succeed.
3010 */
3011 struct VmaAllocationRequest
3012 {
3013  VkDeviceSize offset;
3014  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
3015  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
3016  VmaSuballocationList::iterator item;
3017  size_t itemsToMakeLostCount;
3018 
3019  VkDeviceSize CalcCost() const
3020  {
3021  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
3022  }
3023 };
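
// Worked example (illustrative): sumItemSize = 262144 (256 KiB) and
// itemsToMakeLostCount = 2 give
// CalcCost() = 262144 + 2 * 1048576 = 2359296 bytes.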
3024 
3025 /*
3026 Represents a single block of device memory (VkDeviceMemory) with all the
3027 data about its regions (a.k.a. suballocations, VmaAllocation), assigned and free.
3028 
3029 Thread-safety: This class must be externally synchronized.
3030 */
3031 class VmaDeviceMemoryBlock
3032 {
3033 public:
3034  uint32_t m_MemoryTypeIndex;
3035  VMA_BLOCK_VECTOR_TYPE m_BlockVectorType;
3036  VkDeviceMemory m_hMemory;
3037  VkDeviceSize m_Size;
3038  bool m_PersistentMap;
3039  void* m_pMappedData;
3040  uint32_t m_FreeCount;
3041  VkDeviceSize m_SumFreeSize;
3042  VmaSuballocationList m_Suballocations;
3043  // Suballocations that are free and have size greater than certain threshold.
3044  // Sorted by size, ascending.
3045  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
3046 
3047  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
3048 
3049  ~VmaDeviceMemoryBlock()
3050  {
3051  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
3052  }
3053 
3054  // Always call after construction.
3055  void Init(
3056  uint32_t newMemoryTypeIndex,
3057  VMA_BLOCK_VECTOR_TYPE newBlockVectorType,
3058  VkDeviceMemory newMemory,
3059  VkDeviceSize newSize,
3060  bool persistentMap,
3061  void* pMappedData);
3062  // Always call before destruction.
3063  void Destroy(VmaAllocator allocator);
3064 
3065  // Validates all data structures inside this object. If not valid, returns false.
3066  bool Validate() const;
3067 
3068  VkDeviceSize GetUnusedRangeSizeMax() const;
3069 
3070  // Tries to find a place for suballocation with given parameters inside this block.
3071  // If succeeded, fills pAllocationRequest and returns true.
3072  // If failed, returns false.
3073  bool CreateAllocationRequest(
3074  uint32_t currentFrameIndex,
3075  uint32_t frameInUseCount,
3076  VkDeviceSize bufferImageGranularity,
3077  VkDeviceSize allocSize,
3078  VkDeviceSize allocAlignment,
3079  VmaSuballocationType allocType,
3080  bool canMakeOtherLost,
3081  VmaAllocationRequest* pAllocationRequest);
3082 
3083  bool MakeRequestedAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount, VmaAllocationRequest* pAllocationRequest);
3084 
3085  uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
3086 
3087  // Returns true if this block is empty - contains only a single free suballocation.
3088  bool IsEmpty() const;
3089 
3090  // Makes actual allocation based on request. Request must already be checked
3091  // and valid.
3092  void Alloc(
3093  const VmaAllocationRequest& request,
3094  VmaSuballocationType type,
3095  VkDeviceSize allocSize,
3096  VmaAllocation hAllocation);
3097 
3098  // Frees suballocation assigned to given memory region.
3099  void Free(const VmaAllocation allocation);
3100 
3101 #if VMA_STATS_STRING_ENABLED
3102  void PrintDetailedMap(class VmaJsonWriter& json) const;
3103 #endif
3104 
3105 private:
3106  // Checks if requested suballocation with given parameters can be placed in given suballocItem.
3107  // If yes, fills pOffset and returns true. If no, returns false.
3108  bool CheckAllocation(
3109  uint32_t currentFrameIndex,
3110  uint32_t frameInUseCount,
3111  VkDeviceSize bufferImageGranularity,
3112  VkDeviceSize allocSize,
3113  VkDeviceSize allocAlignment,
3114  VmaSuballocationType allocType,
3115  VmaSuballocationList::const_iterator suballocItem,
3116  bool canMakeOtherLost,
3117  VkDeviceSize* pOffset,
3118  size_t* itemsToMakeLostCount,
3119  VkDeviceSize* pSumFreeSize,
3120  VkDeviceSize* pSumItemSize) const;
3121 
3122  // Given a free suballocation, merges it with the following one, which must also be free.
3123  void MergeFreeWithNext(VmaSuballocationList::iterator item);
3124  // Releases given suballocation, making it free.
3125  // Merges it with adjacent free suballocations if applicable.
3126  // Returns iterator to new free suballocation at this place.
3127  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
3128  // Given a free suballocation, inserts it into the sorted list
3129  // m_FreeSuballocationsBySize, if it's suitable.
3130  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
3131  // Given a free suballocation, removes it from the sorted list
3132  // m_FreeSuballocationsBySize, if it's suitable.
3133  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
3134 
3135  bool ValidateFreeSuballocationList() const;
3136 };
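
// Example (illustrative sketch of the internal flow; the function name and
// all parameter values are hypothetical, assumed valid):
#if 0
static void VmaExample_BlockAlloc(
    VmaDeviceMemoryBlock* pBlock,
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    VmaAllocation hAllocation)
{
    VmaAllocationRequest request;
    if(pBlock->CreateAllocationRequest(
        currentFrameIndex,
        frameInUseCount,
        bufferImageGranularity,
        allocSize,
        allocAlignment,
        VMA_SUBALLOCATION_TYPE_UNKNOWN,
        false, // canMakeOtherLost
        &request))
    {
        pBlock->Alloc(request, VMA_SUBALLOCATION_TYPE_UNKNOWN, allocSize, hAllocation);
    }
}
#endif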
3137 
3138 struct VmaPointerLess
3139 {
3140  bool operator()(const void* lhs, const void* rhs) const
3141  {
3142  return lhs < rhs;
3143  }
3144 };
3145 
3146 class VmaDefragmentator;
3147 
3148 /*
3149 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
3150 Vulkan memory type.
3151 
3152 Synchronized internally with a mutex.
3153 */
3154 struct VmaBlockVector
3155 {
3156  VmaBlockVector(
3157  VmaAllocator hAllocator,
3158  uint32_t memoryTypeIndex,
3159  VMA_BLOCK_VECTOR_TYPE blockVectorType,
3160  VkDeviceSize preferredBlockSize,
3161  size_t minBlockCount,
3162  size_t maxBlockCount,
3163  VkDeviceSize bufferImageGranularity,
3164  uint32_t frameInUseCount,
3165  bool isCustomPool);
3166  ~VmaBlockVector();
3167 
3168  VkResult CreateMinBlocks();
3169 
3170  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
3171  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
3172  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
3173  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
3174  VMA_BLOCK_VECTOR_TYPE GetBlockVectorType() const { return m_BlockVectorType; }
3175 
3176  void GetPoolStats(VmaPoolStats* pStats);
3177 
3178  bool IsEmpty() const { return m_Blocks.empty(); }
3179 
3180  VkResult Allocate(
3181  VmaPool hCurrentPool,
3182  uint32_t currentFrameIndex,
3183  const VkMemoryRequirements& vkMemReq,
3184  const VmaAllocationCreateInfo& createInfo,
3185  VmaSuballocationType suballocType,
3186  VmaAllocation* pAllocation);
3187 
3188  void Free(
3189  VmaAllocation hAllocation);
3190 
3191  // Adds statistics of this BlockVector to pStats.
3192  void AddStats(VmaStats* pStats);
3193 
3194 #if VMA_STATS_STRING_ENABLED
3195  void PrintDetailedMap(class VmaJsonWriter& json);
3196 #endif
3197 
3198  void UnmapPersistentlyMappedMemory();
3199  VkResult MapPersistentlyMappedMemory();
3200 
3201  void MakePoolAllocationsLost(
3202  uint32_t currentFrameIndex,
3203  size_t* pLostAllocationCount);
3204 
3205  VmaDefragmentator* EnsureDefragmentator(
3206  VmaAllocator hAllocator,
3207  uint32_t currentFrameIndex);
3208 
3209  VkResult Defragment(
3210  VmaDefragmentationStats* pDefragmentationStats,
3211  VkDeviceSize& maxBytesToMove,
3212  uint32_t& maxAllocationsToMove);
3213 
3214  void DestroyDefragmentator();
3215 
3216 private:
3217  friend class VmaDefragmentator;
3218 
3219  const VmaAllocator m_hAllocator;
3220  const uint32_t m_MemoryTypeIndex;
3221  const VMA_BLOCK_VECTOR_TYPE m_BlockVectorType;
3222  const VkDeviceSize m_PreferredBlockSize;
3223  const size_t m_MinBlockCount;
3224  const size_t m_MaxBlockCount;
3225  const VkDeviceSize m_BufferImageGranularity;
3226  const uint32_t m_FrameInUseCount;
3227  const bool m_IsCustomPool;
3228  VMA_MUTEX m_Mutex;
3229  // Incrementally sorted by sumFreeSize, ascending.
3230  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
3231  /* There can be at most one block that is completely empty - a
3232  hysteresis to avoid the pessimistic case of alternating creation and
3233  destruction of a VkDeviceMemory. */
3234  bool m_HasEmptyBlock;
3235  VmaDefragmentator* m_pDefragmentator;
3236 
3237  // Finds and removes given block from vector.
3238  void Remove(VmaDeviceMemoryBlock* pBlock);
3239 
3240  // Performs single step in sorting m_Blocks. They may not be fully sorted
3241  // after this call.
3242  void IncrementallySortBlocks();
3243 
3244  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
3245 };
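
// Example (illustrative sketch of the internal call; the function name is
// hypothetical and all inputs are assumed valid):
#if 0
static VkResult VmaExample_BlockVectorAllocate(
    VmaBlockVector* pBlockVector,
    uint32_t currentFrameIndex,
    const VkMemoryRequirements& vkMemReq,
    const VmaAllocationCreateInfo& createInfo,
    VmaAllocation* pAllocation)
{
    return pBlockVector->Allocate(
        VK_NULL_HANDLE, // hCurrentPool - null when allocating from the default pools.
        currentFrameIndex,
        vkMemReq,
        createInfo,
        VMA_SUBALLOCATION_TYPE_UNKNOWN,
        pAllocation);
}
#endif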
3246 
3247 struct VmaPool_T
3248 {
3249 public:
3250  VmaBlockVector m_BlockVector;
3251 
3252  // Takes ownership.
3253  VmaPool_T(
3254  VmaAllocator hAllocator,
3255  const VmaPoolCreateInfo& createInfo);
3256  ~VmaPool_T();
3257 
3258  VmaBlockVector& GetBlockVector() { return m_BlockVector; }
3259 
3260 #if VMA_STATS_STRING_ENABLED
3261  //void PrintDetailedMap(class VmaStringBuilder& sb);
3262 #endif
3263 };
3264 
3265 class VmaDefragmentator
3266 {
3267  const VmaAllocator m_hAllocator;
3268  VmaBlockVector* const m_pBlockVector;
3269  uint32_t m_CurrentFrameIndex;
3270  VMA_BLOCK_VECTOR_TYPE m_BlockVectorType;
3271  VkDeviceSize m_BytesMoved;
3272  uint32_t m_AllocationsMoved;
3273 
3274  struct AllocationInfo
3275  {
3276  VmaAllocation m_hAllocation;
3277  VkBool32* m_pChanged;
3278 
3279  AllocationInfo() :
3280  m_hAllocation(VK_NULL_HANDLE),
3281  m_pChanged(VMA_NULL)
3282  {
3283  }
3284  };
3285 
3286  struct AllocationInfoSizeGreater
3287  {
3288  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
3289  {
3290  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
3291  }
3292  };
3293 
3294  // Used between AddAllocation and Defragment.
3295  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
3296 
3297  struct BlockInfo
3298  {
3299  VmaDeviceMemoryBlock* m_pBlock;
3300  bool m_HasNonMovableAllocations;
3301  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
3302 
3303  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
3304  m_pBlock(VMA_NULL),
3305  m_HasNonMovableAllocations(true),
3306  m_Allocations(pAllocationCallbacks),
3307  m_pMappedDataForDefragmentation(VMA_NULL)
3308  {
3309  }
3310 
3311  void CalcHasNonMovableAllocations()
3312  {
3313  const size_t blockAllocCount =
3314  m_pBlock->m_Suballocations.size() - m_pBlock->m_FreeCount;
3315  const size_t defragmentAllocCount = m_Allocations.size();
3316  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
3317  }
3318 
3319  void SortAllocationsBySizeDescecnding()
3320  {
3321  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
3322  }
3323 
3324  VkResult EnsureMapping(VmaAllocator hAllocator, void** ppMappedData);
3325  void Unmap(VmaAllocator hAllocator);
3326 
3327  private:
3328  // Not null if mapped for defragmentation only, not persistently mapped.
3329  void* m_pMappedDataForDefragmentation;
3330  };
3331 
3332  struct BlockPointerLess
3333  {
3334  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
3335  {
3336  return pLhsBlockInfo->m_pBlock < pRhsBlock;
3337  }
3338  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
3339  {
3340  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
3341  }
3342  };
3343 
3344  // 1. Blocks with some non-movable allocations go first.
3345  // 2. Blocks with smaller sumFreeSize go first.
3346  struct BlockInfoCompareMoveDestination
3347  {
3348  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
3349  {
3350  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
3351  {
3352  return true;
3353  }
3354  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
3355  {
3356  return false;
3357  }
3358  if(pLhsBlockInfo->m_pBlock->m_SumFreeSize < pRhsBlockInfo->m_pBlock->m_SumFreeSize)
3359  {
3360  return true;
3361  }
3362  return false;
3363  }
3364  };
3365 
3366  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
3367  BlockInfoVector m_Blocks;
3368 
3369  VkResult DefragmentRound(
3370  VkDeviceSize maxBytesToMove,
3371  uint32_t maxAllocationsToMove);
3372 
3373  static bool MoveMakesSense(
3374  size_t dstBlockIndex, VkDeviceSize dstOffset,
3375  size_t srcBlockIndex, VkDeviceSize srcOffset);
3376 
3377 public:
3378  VmaDefragmentator(
3379  VmaAllocator hAllocator,
3380  VmaBlockVector* pBlockVector,
3381  uint32_t currentFrameIndex);
3382 
3383  ~VmaDefragmentator();
3384 
3385  VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
3386  uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
3387 
3388  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
3389 
3390  VkResult Defragment(
3391  VkDeviceSize maxBytesToMove,
3392  uint32_t maxAllocationsToMove);
3393 };
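
// Example (illustrative sketch of the internal flow; the function name is
// hypothetical and all handles are assumed valid):
#if 0
static void VmaExample_Defragment(
    VmaAllocator hAllocator,
    VmaBlockVector* pBlockVector,
    uint32_t currentFrameIndex,
    VmaAllocation hAlloc)
{
    VmaDefragmentator* const pDefrag =
        pBlockVector->EnsureDefragmentator(hAllocator, currentFrameIndex);
    VkBool32 changed = VK_FALSE;
    pDefrag->AddAllocation(hAlloc, &changed);
    const VkResult res = pDefrag->Defragment(VK_WHOLE_SIZE, UINT32_MAX);
    // GetBytesMoved() / GetAllocationsMoved() now report what was done.
    pBlockVector->DestroyDefragmentator();
    (void)res;
}
#endif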
3394 
3395 // Main allocator object.
3396 struct VmaAllocator_T
3397 {
3398  bool m_UseMutex;
3399  VkDevice m_hDevice;
3400  bool m_AllocationCallbacksSpecified;
3401  VkAllocationCallbacks m_AllocationCallbacks;
3402  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
3403  // Non-zero when we are inside UnmapPersistentlyMappedMemory...MapPersistentlyMappedMemory.
3404  // Counter to allow nested calls to these functions.
3405  uint32_t m_UnmapPersistentlyMappedMemoryCounter;
3406 
3407  // Number of bytes free out of limit, or VK_WHOLE_SIZE if no limit for that heap.
3408  VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
3409  VMA_MUTEX m_HeapSizeLimitMutex;
3410 
3411  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
3412  VkPhysicalDeviceMemoryProperties m_MemProps;
3413 
3414  // Default pools.
3415  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES][VMA_BLOCK_VECTOR_TYPE_COUNT];
3416 
3417  // Each vector is sorted by memory (handle value).
3418  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
3419  AllocationVectorType* m_pOwnAllocations[VK_MAX_MEMORY_TYPES][VMA_BLOCK_VECTOR_TYPE_COUNT];
3420  VMA_MUTEX m_OwnAllocationsMutex[VK_MAX_MEMORY_TYPES];
3421 
3422  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
3423  ~VmaAllocator_T();
3424 
3425  const VkAllocationCallbacks* GetAllocationCallbacks() const
3426  {
3427  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
3428  }
3429  const VmaVulkanFunctions& GetVulkanFunctions() const
3430  {
3431  return m_VulkanFunctions;
3432  }
3433 
3434  VkDeviceSize GetBufferImageGranularity() const
3435  {
3436  return VMA_MAX(
3437  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
3438  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
3439  }
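 // Worked example (illustrative): if the device reports
 // limits.bufferImageGranularity = 1024 and VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
 // keeps its default of 1, this returns 1024.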
3440 
3441  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
3442  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
3443 
3444  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
3445  {
3446  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
3447  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
3448  }
3449 
3450  // Main allocation function.
3451  VkResult AllocateMemory(
3452  const VkMemoryRequirements& vkMemReq,
3453  const VmaAllocationCreateInfo& createInfo,
3454  VmaSuballocationType suballocType,
3455  VmaAllocation* pAllocation);
3456 
3457  // Main deallocation function.
3458  void FreeMemory(const VmaAllocation allocation);
3459 
3460  void CalculateStats(VmaStats* pStats);
3461 
3462 #if VMA_STATS_STRING_ENABLED
3463  void PrintDetailedMap(class VmaJsonWriter& json);
3464 #endif
3465 
3466  void UnmapPersistentlyMappedMemory();
3467  VkResult MapPersistentlyMappedMemory();
3468 
3469  VkResult Defragment(
3470  VmaAllocation* pAllocations,
3471  size_t allocationCount,
3472  VkBool32* pAllocationsChanged,
3473  const VmaDefragmentationInfo* pDefragmentationInfo,
3474  VmaDefragmentationStats* pDefragmentationStats);
3475 
3476  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
3477 
3478  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
3479  void DestroyPool(VmaPool pool);
3480  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
3481 
3482  void SetCurrentFrameIndex(uint32_t frameIndex);
3483 
3484  void MakePoolAllocationsLost(
3485  VmaPool hPool,
3486  size_t* pLostAllocationCount);
3487 
3488  void CreateLostAllocation(VmaAllocation* pAllocation);
3489 
3490  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
3491  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
3492 
3493 private:
3494  VkDeviceSize m_PreferredLargeHeapBlockSize;
3495  VkDeviceSize m_PreferredSmallHeapBlockSize;
3496 
3497  VkPhysicalDevice m_PhysicalDevice;
3498  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
3499 
3500  VMA_MUTEX m_PoolsMutex;
3501  // Protected by m_PoolsMutex. Sorted by pointer value.
3502  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
3503 
3504  VmaVulkanFunctions m_VulkanFunctions;
3505 
3506  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
3507 
3508  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
3509 
3510  VkResult AllocateMemoryOfType(
3511  const VkMemoryRequirements& vkMemReq,
3512  const VmaAllocationCreateInfo& createInfo,
3513  uint32_t memTypeIndex,
3514  VmaSuballocationType suballocType,
3515  VmaAllocation* pAllocation);
3516 
3517  // Allocates and registers new VkDeviceMemory specifically for a single allocation.
3518  VkResult AllocateOwnMemory(
3519  VkDeviceSize size,
3520  VmaSuballocationType suballocType,
3521  uint32_t memTypeIndex,
3522  bool map,
3523  void* pUserData,
3524  VmaAllocation* pAllocation);
3525 
3526  // Frees given allocation that was made as Own Memory (has its own private VkDeviceMemory).
3527  void FreeOwnMemory(VmaAllocation allocation);
3528 };
3529 
3530 ////////////////////////////////////////////////////////////////////////////////
3531 // Memory allocation #2 after VmaAllocator_T definition
3532 
3533 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
3534 {
3535  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
3536 }
3537 
3538 static void VmaFree(VmaAllocator hAllocator, void* ptr)
3539 {
3540  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
3541 }
3542 
3543 template<typename T>
3544 static T* VmaAllocate(VmaAllocator hAllocator)
3545 {
3546  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
3547 }
3548 
3549 template<typename T>
3550 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
3551 {
3552  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
3553 }
3554 
3555 template<typename T>
3556 static void vma_delete(VmaAllocator hAllocator, T* ptr)
3557 {
3558  if(ptr != VMA_NULL)
3559  {
3560  ptr->~T();
3561  VmaFree(hAllocator, ptr);
3562  }
3563 }
3564 
3565 template<typename T>
3566 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
3567 {
3568  if(ptr != VMA_NULL)
3569  {
3570  for(size_t i = count; i--; )
3571  ptr[i].~T();
3572  VmaFree(hAllocator, ptr);
3573  }
3574 }
3575 
3576 ////////////////////////////////////////////////////////////////////////////////
3577 // VmaStringBuilder
3578 
3579 #if VMA_STATS_STRING_ENABLED
3580 
3581 class VmaStringBuilder
3582 {
3583 public:
3584  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
3585  size_t GetLength() const { return m_Data.size(); }
3586  const char* GetData() const { return m_Data.data(); }
3587 
3588  void Add(char ch) { m_Data.push_back(ch); }
3589  void Add(const char* pStr);
3590  void AddNewLine() { Add('\n'); }
3591  void AddNumber(uint32_t num);
3592  void AddNumber(uint64_t num);
3593  void AddPointer(const void* ptr);
3594 
3595 private:
3596  VmaVector< char, VmaStlAllocator<char> > m_Data;
3597 };
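
// Example (illustrative): building a string. The function name is
// hypothetical; note that GetData() is not null-terminated, so always pair it
// with GetLength().
#if 0
static void VmaExample_StringBuilder(VmaAllocator hAllocator)
{
    VmaStringBuilder sb(hAllocator);
    sb.Add("Heap count: ");
    sb.AddNumber(2u); // uint32_t overload.
    sb.AddNewLine();
}
#endif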
3598 
3599 void VmaStringBuilder::Add(const char* pStr)
3600 {
3601  const size_t strLen = strlen(pStr);
3602  if(strLen > 0)
3603  {
3604  const size_t oldCount = m_Data.size();
3605  m_Data.resize(oldCount + strLen);
3606  memcpy(m_Data.data() + oldCount, pStr, strLen);
3607  }
3608 }
3609 
3610 void VmaStringBuilder::AddNumber(uint32_t num)
3611 {
3612  char buf[11];
3613  VmaUint32ToStr(buf, sizeof(buf), num);
3614  Add(buf);
3615 }
3616 
3617 void VmaStringBuilder::AddNumber(uint64_t num)
3618 {
3619  char buf[21];
3620  VmaUint64ToStr(buf, sizeof(buf), num);
3621  Add(buf);
3622 }
3623 
3624 void VmaStringBuilder::AddPointer(const void* ptr)
3625 {
3626  char buf[21];
3627  VmaPtrToStr(buf, sizeof(buf), ptr);
3628  Add(buf);
3629 }
3630 
3631 #endif // #if VMA_STATS_STRING_ENABLED
3632 
3633 ////////////////////////////////////////////////////////////////////////////////
3634 // VmaJsonWriter
3635 
3636 #if VMA_STATS_STRING_ENABLED
3637 
3638 class VmaJsonWriter
3639 {
3640 public:
3641  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
3642  ~VmaJsonWriter();
3643 
3644  void BeginObject(bool singleLine = false);
3645  void EndObject();
3646 
3647  void BeginArray(bool singleLine = false);
3648  void EndArray();
3649 
3650  void WriteString(const char* pStr);
3651  void BeginString(const char* pStr = VMA_NULL);
3652  void ContinueString(const char* pStr);
3653  void ContinueString(uint32_t n);
3654  void ContinueString(uint64_t n);
3655  void EndString(const char* pStr = VMA_NULL);
3656 
3657  void WriteNumber(uint32_t n);
3658  void WriteNumber(uint64_t n);
3659  void WriteBool(bool b);
3660  void WriteNull();
3661 
3662 private:
3663  static const char* const INDENT;
3664 
3665  enum COLLECTION_TYPE
3666  {
3667  COLLECTION_TYPE_OBJECT,
3668  COLLECTION_TYPE_ARRAY,
3669  };
3670  struct StackItem
3671  {
3672  COLLECTION_TYPE type;
3673  uint32_t valueCount;
3674  bool singleLineMode;
3675  };
3676 
3677  VmaStringBuilder& m_SB;
3678  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
3679  bool m_InsideString;
3680 
3681  void BeginValue(bool isString);
3682  void WriteIndent(bool oneLess = false);
3683 };
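
// Example (illustrative): inside an object, every value must be preceded by a
// string key - BeginValue asserts this. The function name is hypothetical.
#if 0
static void VmaExample_JsonWriter(const VkAllocationCallbacks* pCallbacks, VmaStringBuilder& sb)
{
    VmaJsonWriter json(pCallbacks, sb);
    json.BeginObject();
    json.WriteString("Heaps"); // Key.
    json.WriteNumber(2u);      // Value.
    json.EndObject();          // sb now holds: {\n "Heaps": 2\n}
}
#endif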
3684 
3685 const char* const VmaJsonWriter::INDENT = " ";
3686 
3687 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
3688  m_SB(sb),
3689  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
3690  m_InsideString(false)
3691 {
3692 }
3693 
3694 VmaJsonWriter::~VmaJsonWriter()
3695 {
3696  VMA_ASSERT(!m_InsideString);
3697  VMA_ASSERT(m_Stack.empty());
3698 }
3699 
3700 void VmaJsonWriter::BeginObject(bool singleLine)
3701 {
3702  VMA_ASSERT(!m_InsideString);
3703 
3704  BeginValue(false);
3705  m_SB.Add('{');
3706 
3707  StackItem item;
3708  item.type = COLLECTION_TYPE_OBJECT;
3709  item.valueCount = 0;
3710  item.singleLineMode = singleLine;
3711  m_Stack.push_back(item);
3712 }
3713 
3714 void VmaJsonWriter::EndObject()
3715 {
3716  VMA_ASSERT(!m_InsideString);
3717 
3718  WriteIndent(true);
3719  m_SB.Add('}');
3720 
3721  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
3722  m_Stack.pop_back();
3723 }
3724 
3725 void VmaJsonWriter::BeginArray(bool singleLine)
3726 {
3727  VMA_ASSERT(!m_InsideString);
3728 
3729  BeginValue(false);
3730  m_SB.Add('[');
3731 
3732  StackItem item;
3733  item.type = COLLECTION_TYPE_ARRAY;
3734  item.valueCount = 0;
3735  item.singleLineMode = singleLine;
3736  m_Stack.push_back(item);
3737 }
3738 
3739 void VmaJsonWriter::EndArray()
3740 {
3741  VMA_ASSERT(!m_InsideString);
3742 
3743  WriteIndent(true);
3744  m_SB.Add(']');
3745 
3746  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
3747  m_Stack.pop_back();
3748 }
3749 
3750 void VmaJsonWriter::WriteString(const char* pStr)
3751 {
3752  BeginString(pStr);
3753  EndString();
3754 }
3755 
3756 void VmaJsonWriter::BeginString(const char* pStr)
3757 {
3758  VMA_ASSERT(!m_InsideString);
3759 
3760  BeginValue(true);
3761  m_SB.Add('"');
3762  m_InsideString = true;
3763  if(pStr != VMA_NULL && pStr[0] != '\0')
3764  {
3765  ContinueString(pStr);
3766  }
3767 }
3768 
3769 void VmaJsonWriter::ContinueString(const char* pStr)
3770 {
3771  VMA_ASSERT(m_InsideString);
3772 
3773  const size_t strLen = strlen(pStr);
3774  for(size_t i = 0; i < strLen; ++i)
3775  {
3776  char ch = pStr[i];
3777  if(ch == '\'')
3778  {
3779  m_SB.Add("\\\\");
3780  }
3781  else if(ch == '"')
3782  {
3783  m_SB.Add("\\\"");
3784  }
3785  else if(ch >= 32)
3786  {
3787  m_SB.Add(ch);
3788  }
3789  else switch(ch)
3790  {
3791  case '\n':
3792  m_SB.Add("\\n");
3793  break;
3794  case '\r':
3795  m_SB.Add("\\r");
3796  break;
3797  case '\t':
3798  m_SB.Add("\\t");
3799  break;
3800  default:
3801  VMA_ASSERT(0 && "Character not currently supported.");
3802  break;
3803  }
3804  }
3805 }
3806 
3807 void VmaJsonWriter::ContinueString(uint32_t n)
3808 {
3809  VMA_ASSERT(m_InsideString);
3810  m_SB.AddNumber(n);
3811 }
3812 
3813 void VmaJsonWriter::ContinueString(uint64_t n)
3814 {
3815  VMA_ASSERT(m_InsideString);
3816  m_SB.AddNumber(n);
3817 }
3818 
3819 void VmaJsonWriter::EndString(const char* pStr)
3820 {
3821  VMA_ASSERT(m_InsideString);
3822  if(pStr != VMA_NULL && pStr[0] != '\0')
3823  {
3824  ContinueString(pStr);
3825  }
3826  m_SB.Add('"');
3827  m_InsideString = false;
3828 }
3829 
3830 void VmaJsonWriter::WriteNumber(uint32_t n)
3831 {
3832  VMA_ASSERT(!m_InsideString);
3833  BeginValue(false);
3834  m_SB.AddNumber(n);
3835 }
3836 
3837 void VmaJsonWriter::WriteNumber(uint64_t n)
3838 {
3839  VMA_ASSERT(!m_InsideString);
3840  BeginValue(false);
3841  m_SB.AddNumber(n);
3842 }
3843 
3844 void VmaJsonWriter::WriteBool(bool b)
3845 {
3846  VMA_ASSERT(!m_InsideString);
3847  BeginValue(false);
3848  m_SB.Add(b ? "true" : "false");
3849 }
3850 
3851 void VmaJsonWriter::WriteNull()
3852 {
3853  VMA_ASSERT(!m_InsideString);
3854  BeginValue(false);
3855  m_SB.Add("null");
3856 }
3857 
3858 void VmaJsonWriter::BeginValue(bool isString)
3859 {
3860  if(!m_Stack.empty())
3861  {
3862  StackItem& currItem = m_Stack.back();
3863  if(currItem.type == COLLECTION_TYPE_OBJECT &&
3864  currItem.valueCount % 2 == 0)
3865  {
3866  VMA_ASSERT(isString);
3867  }
3868 
3869  if(currItem.type == COLLECTION_TYPE_OBJECT &&
3870  currItem.valueCount % 2 != 0)
3871  {
3872  m_SB.Add(": ");
3873  }
3874  else if(currItem.valueCount > 0)
3875  {
3876  m_SB.Add(", ");
3877  WriteIndent();
3878  }
3879  else
3880  {
3881  WriteIndent();
3882  }
3883  ++currItem.valueCount;
3884  }
3885 }
3886 
3887 void VmaJsonWriter::WriteIndent(bool oneLess)
3888 {
3889  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
3890  {
3891  m_SB.AddNewLine();
3892 
3893  size_t count = m_Stack.size();
3894  if(count > 0 && oneLess)
3895  {
3896  --count;
3897  }
3898  for(size_t i = 0; i < count; ++i)
3899  {
3900  m_SB.Add(INDENT);
3901  }
3902  }
3903 }
3904 
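// Example (illustrative sketch, not part of the library - the names
// allocationCallbacks and sb stand for objects constructed by the caller):
// typical use of VmaJsonWriter. Inside an object, keys and values must
// strictly alternate - BeginValue() asserts that every value at an even
// position is a string (the key).
//
//   VmaJsonWriter json(allocationCallbacks, sb);
//   json.BeginObject();
//   json.WriteString("Count"); // key - must be a string
//   json.WriteNumber(42u);     // value
//   json.EndObject();          // sb now holds the object, indented per INDENT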
3905 #endif // #if VMA_STATS_STRING_ENABLED
3906 
3907 ////////////////////////////////////////////////////////////////////////////////
3908 
3909 VkDeviceSize VmaAllocation_T::GetOffset() const
3910 {
3911  switch(m_Type)
3912  {
3913  case ALLOCATION_TYPE_BLOCK:
3914  return m_BlockAllocation.m_Offset;
3915  case ALLOCATION_TYPE_OWN:
3916  return 0;
3917  default:
3918  VMA_ASSERT(0);
3919  return 0;
3920  }
3921 }
3922 
3923 VkDeviceMemory VmaAllocation_T::GetMemory() const
3924 {
3925  switch(m_Type)
3926  {
3927  case ALLOCATION_TYPE_BLOCK:
3928  return m_BlockAllocation.m_Block->m_hMemory;
3929  case ALLOCATION_TYPE_OWN:
3930  return m_OwnAllocation.m_hMemory;
3931  default:
3932  VMA_ASSERT(0);
3933  return VK_NULL_HANDLE;
3934  }
3935 }
3936 
3937 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
3938 {
3939  switch(m_Type)
3940  {
3941  case ALLOCATION_TYPE_BLOCK:
3942  return m_BlockAllocation.m_Block->m_MemoryTypeIndex;
3943  case ALLOCATION_TYPE_OWN:
3944  return m_OwnAllocation.m_MemoryTypeIndex;
3945  default:
3946  VMA_ASSERT(0);
3947  return UINT32_MAX;
3948  }
3949 }
3950 
3951 VMA_BLOCK_VECTOR_TYPE VmaAllocation_T::GetBlockVectorType() const
3952 {
3953  switch(m_Type)
3954  {
3955  case ALLOCATION_TYPE_BLOCK:
3956  return m_BlockAllocation.m_Block->m_BlockVectorType;
3957  case ALLOCATION_TYPE_OWN:
3958  return (m_OwnAllocation.m_PersistentMap ? VMA_BLOCK_VECTOR_TYPE_MAPPED : VMA_BLOCK_VECTOR_TYPE_UNMAPPED);
3959  default:
3960  VMA_ASSERT(0);
3961  return VMA_BLOCK_VECTOR_TYPE_COUNT;
3962  }
3963 }
3964 
3965 void* VmaAllocation_T::GetMappedData() const
3966 {
3967  switch(m_Type)
3968  {
3969  case ALLOCATION_TYPE_BLOCK:
3970  if(m_BlockAllocation.m_Block->m_pMappedData != VMA_NULL)
3971  {
3972  return (char*)m_BlockAllocation.m_Block->m_pMappedData + m_BlockAllocation.m_Offset;
3973  }
3974  else
3975  {
3976  return VMA_NULL;
3977  }
3978  break;
3979  case ALLOCATION_TYPE_OWN:
3980  return m_OwnAllocation.m_pMappedData;
3981  default:
3982  VMA_ASSERT(0);
3983  return VMA_NULL;
3984  }
3985 }
3986 
3987 bool VmaAllocation_T::CanBecomeLost() const
3988 {
3989  switch(m_Type)
3990  {
3991  case ALLOCATION_TYPE_BLOCK:
3992  return m_BlockAllocation.m_CanBecomeLost;
3993  case ALLOCATION_TYPE_OWN:
3994  return false;
3995  default:
3996  VMA_ASSERT(0);
3997  return false;
3998  }
3999 }
4000 
4001 VmaPool VmaAllocation_T::GetPool() const
4002 {
4003  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
4004  return m_BlockAllocation.m_hPool;
4005 }
4006 
4007 VkResult VmaAllocation_T::OwnAllocMapPersistentlyMappedMemory(VmaAllocator hAllocator)
4008 {
4009  VMA_ASSERT(m_Type == ALLOCATION_TYPE_OWN);
4010  if(m_OwnAllocation.m_PersistentMap)
4011  {
4012  return (*hAllocator->GetVulkanFunctions().vkMapMemory)(
4013  hAllocator->m_hDevice,
4014  m_OwnAllocation.m_hMemory,
4015  0,
4016  VK_WHOLE_SIZE,
4017  0,
4018  &m_OwnAllocation.m_pMappedData);
4019  }
4020  return VK_SUCCESS;
4021 }
4022 void VmaAllocation_T::OwnAllocUnmapPersistentlyMappedMemory(VmaAllocator hAllocator)
4023 {
4024  VMA_ASSERT(m_Type == ALLOCATION_TYPE_OWN);
4025  if(m_OwnAllocation.m_pMappedData)
4026  {
4027  VMA_ASSERT(m_OwnAllocation.m_PersistentMap);
4028  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_OwnAllocation.m_hMemory);
4029  m_OwnAllocation.m_pMappedData = VMA_NULL;
4030  }
4031 }
4032 
4033 
4034 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
4035 {
4036  VMA_ASSERT(CanBecomeLost());
4037 
4038  /*
4039  Warning: This is a carefully designed algorithm.
4040  Do not modify unless you really know what you're doing :)
4041  */
4042  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
4043  for(;;)
4044  {
4045  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
4046  {
4047  VMA_ASSERT(0);
4048  return false;
4049  }
4050  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
4051  {
4052  return false;
4053  }
4054  else // Last use time earlier than current time.
4055  {
4056  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
4057  {
4058  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
4059  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
4060  return true;
4061  }
4062  }
4063  }
4064 }
4065 
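// Example (illustrative): with currentFrameIndex = 10 and frameInUseCount = 2,
// an allocation whose LastUseFrameIndex is <= 7 can be made lost; one with
// LastUseFrameIndex = 8 cannot, because 8 + 2 >= 10 means frames 8-10 are
// still considered potentially in use by the GPU. The compare-exchange loop
// above simply retries if another thread bumps LastUseFrameIndex between the
// load and the exchange.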
4066 #if VMA_STATS_STRING_ENABLED
4067 
4068 // Correspond to the values of enum VmaSuballocationType.
4069 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
4070  "FREE",
4071  "UNKNOWN",
4072  "BUFFER",
4073  "IMAGE_UNKNOWN",
4074  "IMAGE_LINEAR",
4075  "IMAGE_OPTIMAL",
4076 };
4077 
4078 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
4079 {
4080  json.BeginObject();
4081 
4082  json.WriteString("Blocks");
4083  json.WriteNumber(stat.blockCount);
4084 
4085  json.WriteString("Allocations");
4086  json.WriteNumber(stat.allocationCount);
4087 
4088  json.WriteString("UnusedRanges");
4089  json.WriteNumber(stat.unusedRangeCount);
4090 
4091  json.WriteString("UsedBytes");
4092  json.WriteNumber(stat.usedBytes);
4093 
4094  json.WriteString("UnusedBytes");
4095  json.WriteNumber(stat.unusedBytes);
4096 
4097  if(stat.allocationCount > 1)
4098  {
4099  json.WriteString("AllocationSize");
4100  json.BeginObject(true);
4101  json.WriteString("Min");
4102  json.WriteNumber(stat.allocationSizeMin);
4103  json.WriteString("Avg");
4104  json.WriteNumber(stat.allocationSizeAvg);
4105  json.WriteString("Max");
4106  json.WriteNumber(stat.allocationSizeMax);
4107  json.EndObject();
4108  }
4109 
4110  if(stat.unusedRangeCount > 1)
4111  {
4112  json.WriteString("UnusedRangeSize");
4113  json.BeginObject(true);
4114  json.WriteString("Min");
4115  json.WriteNumber(stat.unusedRangeSizeMin);
4116  json.WriteString("Avg");
4117  json.WriteNumber(stat.unusedRangeSizeAvg);
4118  json.WriteString("Max");
4119  json.WriteNumber(stat.unusedRangeSizeMax);
4120  json.EndObject();
4121  }
4122 
4123  json.EndObject();
4124 }
4125 
4126 #endif // #if VMA_STATS_STRING_ENABLED
4127 
4128 struct VmaSuballocationItemSizeLess
4129 {
4130  bool operator()(
4131  const VmaSuballocationList::iterator lhs,
4132  const VmaSuballocationList::iterator rhs) const
4133  {
4134  return lhs->size < rhs->size;
4135  }
4136  bool operator()(
4137  const VmaSuballocationList::iterator lhs,
4138  VkDeviceSize rhsSize) const
4139  {
4140  return lhs->size < rhsSize;
4141  }
4142 };
4143 
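// Example (illustrative): the second operator() overload lets this comparator
// be used directly against a plain VkDeviceSize key, e.g. to binary-search
// m_FreeSuballocationsBySize for the first free range of at least 256 bytes:
//
//   VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
//       m_FreeSuballocationsBySize.data(),
//       m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
//       (VkDeviceSize)256,
//       VmaSuballocationItemSizeLess());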
4144 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
4145  m_MemoryTypeIndex(UINT32_MAX),
4146  m_BlockVectorType(VMA_BLOCK_VECTOR_TYPE_COUNT),
4147  m_hMemory(VK_NULL_HANDLE),
4148  m_Size(0),
4149  m_PersistentMap(false),
4150  m_pMappedData(VMA_NULL),
4151  m_FreeCount(0),
4152  m_SumFreeSize(0),
4153  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
4154  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
4155 {
4156 }
4157 
4158 void VmaDeviceMemoryBlock::Init(
4159  uint32_t newMemoryTypeIndex,
4160  VMA_BLOCK_VECTOR_TYPE newBlockVectorType,
4161  VkDeviceMemory newMemory,
4162  VkDeviceSize newSize,
4163  bool persistentMap,
4164  void* pMappedData)
4165 {
4166  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
4167 
4168  m_MemoryTypeIndex = newMemoryTypeIndex;
4169  m_BlockVectorType = newBlockVectorType;
4170  m_hMemory = newMemory;
4171  m_Size = newSize;
4172  m_PersistentMap = persistentMap;
4173  m_pMappedData = pMappedData;
4174  m_FreeCount = 1;
4175  m_SumFreeSize = newSize;
4176 
4177  m_Suballocations.clear();
4178  m_FreeSuballocationsBySize.clear();
4179 
4180  VmaSuballocation suballoc = {};
4181  suballoc.offset = 0;
4182  suballoc.size = newSize;
4183  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
4184  suballoc.hAllocation = VK_NULL_HANDLE;
4185 
4186  m_Suballocations.push_back(suballoc);
4187  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
4188  --suballocItem;
4189  m_FreeSuballocationsBySize.push_back(suballocItem);
4190 }
4191 
4192 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
4193 {
4194  // This is the most important assert in the entire library.
4195  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
4196  VMA_ASSERT(IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
4197 
4198  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
4199  if(m_pMappedData != VMA_NULL)
4200  {
4201  (allocator->GetVulkanFunctions().vkUnmapMemory)(allocator->m_hDevice, m_hMemory);
4202  m_pMappedData = VMA_NULL;
4203  }
4204 
4205  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_Size, m_hMemory);
4206  m_hMemory = VK_NULL_HANDLE;
4207 }
4208 
4209 bool VmaDeviceMemoryBlock::Validate() const
4210 {
4211  if((m_hMemory == VK_NULL_HANDLE) ||
4212  (m_Size == 0) ||
4213  m_Suballocations.empty())
4214  {
4215  return false;
4216  }
4217 
4218  // Expected offset of the current suballocation, as calculated from the previous ones.
4219  VkDeviceSize calculatedOffset = 0;
4220  // Expected number of free suballocations as calculated from traversing their list.
4221  uint32_t calculatedFreeCount = 0;
4222  // Expected sum size of free suballocations as calculated from traversing their list.
4223  VkDeviceSize calculatedSumFreeSize = 0;
4224  // Expected number of free suballocations that should be registered in
4225  // m_FreeSuballocationsBySize calculated from traversing their list.
4226  size_t freeSuballocationsToRegister = 0;
4227  // True if the previously visited suballocation was free.
4228  bool prevFree = false;
4229 
4230  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
4231  suballocItem != m_Suballocations.cend();
4232  ++suballocItem)
4233  {
4234  const VmaSuballocation& subAlloc = *suballocItem;
4235 
4236  // Actual offset of this suballocation doesn't match the expected one.
4237  if(subAlloc.offset != calculatedOffset)
4238  {
4239  return false;
4240  }
4241 
4242  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
4243  // Two adjacent free suballocations are invalid. They should be merged.
4244  if(prevFree && currFree)
4245  {
4246  return false;
4247  }
4248  prevFree = currFree;
4249 
4250  if(currFree != (subAlloc.hAllocation == VK_NULL_HANDLE))
4251  {
4252  return false;
4253  }
4254 
4255  if(currFree)
4256  {
4257  calculatedSumFreeSize += subAlloc.size;
4258  ++calculatedFreeCount;
4259  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
4260  {
4261  ++freeSuballocationsToRegister;
4262  }
4263  }
4264 
4265  calculatedOffset += subAlloc.size;
4266  }
4267 
4268  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
4269  // match the expected count.
4270  if(m_FreeSuballocationsBySize.size() != freeSuballocationsToRegister)
4271  {
4272  return false;
4273  }
4274 
4275  VkDeviceSize lastSize = 0;
4276  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
4277  {
4278  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
4279 
4280  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
4281  if(suballocItem->type != VMA_SUBALLOCATION_TYPE_FREE)
4282  {
4283  return false;
4284  }
4285  // They must be sorted by size ascending.
4286  if(suballocItem->size < lastSize)
4287  {
4288  return false;
4289  }
4290 
4291  lastSize = suballocItem->size;
4292  }
4293 
4294  // Check if totals match the calculated values.
4295  return
4296  (calculatedOffset == m_Size) &&
4297  (calculatedSumFreeSize == m_SumFreeSize) &&
4298  (calculatedFreeCount == m_FreeCount);
4299 }
4300 
4301 VkDeviceSize VmaDeviceMemoryBlock::GetUnusedRangeSizeMax() const
4302 {
4303  if(!m_FreeSuballocationsBySize.empty())
4304  {
4305  return m_FreeSuballocationsBySize.back()->size;
4306  }
4307  else
4308  {
4309  return 0;
4310  }
4311 }
4312 
4313 /*
4314 How many suitable free suballocations to analyze before choosing the best one.
4315 - Set to 1 to use First-Fit algorithm - the first suitable free suballocation
4316  will be chosen.
4317 - Set to UINT32_MAX to use Best-Fit/Worst-Fit algorithm - all suitable free
4318  suballocations will be analyzed and the best one will be chosen.
4319 - Any other value is also acceptable.
4320 */
4321 //static const uint32_t MAX_SUITABLE_SUBALLOCATIONS_TO_CHECK = 8;
4322 
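// Example (illustrative): given free suballocations of sizes [16, 64, 256] and
// a request for 60 bytes, the VMA_BEST_FIT path below binary-searches the
// size-sorted vector and tries 64 first (the smallest that fits), while the
// else-branch iterates from the biggest suballocation and tries 256 first
// (Worst-Fit). First-Fit corresponds to accepting the first candidate that
// passes CheckAllocation.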
4323 bool VmaDeviceMemoryBlock::CreateAllocationRequest(
4324  uint32_t currentFrameIndex,
4325  uint32_t frameInUseCount,
4326  VkDeviceSize bufferImageGranularity,
4327  VkDeviceSize allocSize,
4328  VkDeviceSize allocAlignment,
4329  VmaSuballocationType allocType,
4330  bool canMakeOtherLost,
4331  VmaAllocationRequest* pAllocationRequest)
4332 {
4333  VMA_ASSERT(allocSize > 0);
4334  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
4335  VMA_ASSERT(pAllocationRequest != VMA_NULL);
4336  VMA_HEAVY_ASSERT(Validate());
4337 
4338  // There is not enough total free space in this block to fulfill the request: Early return.
4339  if(canMakeOtherLost == false && m_SumFreeSize < allocSize)
4340  {
4341  return false;
4342  }
4343 
4344  // New algorithm, efficiently searching m_FreeSuballocationsBySize.
4345  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
4346  if(freeSuballocCount > 0)
4347  {
4348  if(VMA_BEST_FIT)
4349  {
4350  // Find first free suballocation with size not less than allocSize.
4351  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
4352  m_FreeSuballocationsBySize.data(),
4353  m_FreeSuballocationsBySize.data() + freeSuballocCount,
4354  allocSize,
4355  VmaSuballocationItemSizeLess());
4356  size_t index = it - m_FreeSuballocationsBySize.data();
4357  for(; index < freeSuballocCount; ++index)
4358  {
4359  if(CheckAllocation(
4360  currentFrameIndex,
4361  frameInUseCount,
4362  bufferImageGranularity,
4363  allocSize,
4364  allocAlignment,
4365  allocType,
4366  m_FreeSuballocationsBySize[index],
4367  false, // canMakeOtherLost
4368  &pAllocationRequest->offset,
4369  &pAllocationRequest->itemsToMakeLostCount,
4370  &pAllocationRequest->sumFreeSize,
4371  &pAllocationRequest->sumItemSize))
4372  {
4373  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
4374  return true;
4375  }
4376  }
4377  }
4378  else
4379  {
4380  // Search starting from the biggest suballocations.
4381  for(size_t index = freeSuballocCount; index--; )
4382  {
4383  if(CheckAllocation(
4384  currentFrameIndex,
4385  frameInUseCount,
4386  bufferImageGranularity,
4387  allocSize,
4388  allocAlignment,
4389  allocType,
4390  m_FreeSuballocationsBySize[index],
4391  false, // canMakeOtherLost
4392  &pAllocationRequest->offset,
4393  &pAllocationRequest->itemsToMakeLostCount,
4394  &pAllocationRequest->sumFreeSize,
4395  &pAllocationRequest->sumItemSize))
4396  {
4397  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
4398  return true;
4399  }
4400  }
4401  }
4402  }
4403 
4404  if(canMakeOtherLost)
4405  {
4406  // Brute-force algorithm. TODO: Come up with something better.
4407 
4408  pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
4409  pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;
4410 
4411  VmaAllocationRequest tmpAllocRequest = {};
4412  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
4413  suballocIt != m_Suballocations.end();
4414  ++suballocIt)
4415  {
4416  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
4417  suballocIt->hAllocation->CanBecomeLost())
4418  {
4419  if(CheckAllocation(
4420  currentFrameIndex,
4421  frameInUseCount,
4422  bufferImageGranularity,
4423  allocSize,
4424  allocAlignment,
4425  allocType,
4426  suballocIt,
4427  canMakeOtherLost,
4428  &tmpAllocRequest.offset,
4429  &tmpAllocRequest.itemsToMakeLostCount,
4430  &tmpAllocRequest.sumFreeSize,
4431  &tmpAllocRequest.sumItemSize))
4432  {
4433  tmpAllocRequest.item = suballocIt;
4434 
4435  if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost())
4436  {
4437  *pAllocationRequest = tmpAllocRequest;
4438  }
4439  }
4440  }
4441  }
4442 
4443  if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
4444  {
4445  return true;
4446  }
4447  }
4448 
4449  return false;
4450 }
4451 
4452 bool VmaDeviceMemoryBlock::MakeRequestedAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount, VmaAllocationRequest* pAllocationRequest)
4453 {
4454  while(pAllocationRequest->itemsToMakeLostCount > 0)
4455  {
4456  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
4457  {
4458  ++pAllocationRequest->item;
4459  }
4460  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
4461  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
4462  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
4463  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
4464  {
4465  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
4466  --pAllocationRequest->itemsToMakeLostCount;
4467  }
4468  else
4469  {
4470  return false;
4471  }
4472  }
4473 
4474  VMA_HEAVY_ASSERT(Validate());
4475  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
4476  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
4477 
4478  return true;
4479 }
4480 
4481 uint32_t VmaDeviceMemoryBlock::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
4482 {
4483  uint32_t lostAllocationCount = 0;
4484  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
4485  it != m_Suballocations.end();
4486  ++it)
4487  {
4488  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
4489  it->hAllocation->CanBecomeLost() &&
4490  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
4491  {
4492  it = FreeSuballocation(it);
4493  ++lostAllocationCount;
4494  }
4495  }
4496  return lostAllocationCount;
4497 }
4498 
4499 bool VmaDeviceMemoryBlock::CheckAllocation(
4500  uint32_t currentFrameIndex,
4501  uint32_t frameInUseCount,
4502  VkDeviceSize bufferImageGranularity,
4503  VkDeviceSize allocSize,
4504  VkDeviceSize allocAlignment,
4505  VmaSuballocationType allocType,
4506  VmaSuballocationList::const_iterator suballocItem,
4507  bool canMakeOtherLost,
4508  VkDeviceSize* pOffset,
4509  size_t* itemsToMakeLostCount,
4510  VkDeviceSize* pSumFreeSize,
4511  VkDeviceSize* pSumItemSize) const
4512 {
4513  VMA_ASSERT(allocSize > 0);
4514  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
4515  VMA_ASSERT(suballocItem != m_Suballocations.cend());
4516  VMA_ASSERT(pOffset != VMA_NULL);
4517 
4518  *itemsToMakeLostCount = 0;
4519  *pSumFreeSize = 0;
4520  *pSumItemSize = 0;
4521 
4522  if(canMakeOtherLost)
4523  {
4524  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
4525  {
4526  *pSumFreeSize = suballocItem->size;
4527  }
4528  else
4529  {
4530  if(suballocItem->hAllocation->CanBecomeLost() &&
4531  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
4532  {
4533  ++*itemsToMakeLostCount;
4534  *pSumItemSize = suballocItem->size;
4535  }
4536  else
4537  {
4538  return false;
4539  }
4540  }
4541 
4542  // Remaining size is too small for this request: Early return.
4543  if(m_Size - suballocItem->offset < allocSize)
4544  {
4545  return false;
4546  }
4547 
4548  // Start from offset equal to beginning of this suballocation.
4549  *pOffset = suballocItem->offset;
4550 
4551  // Apply VMA_DEBUG_MARGIN at the beginning.
4552  if((VMA_DEBUG_MARGIN > 0) && suballocItem != m_Suballocations.cbegin())
4553  {
4554  *pOffset += VMA_DEBUG_MARGIN;
4555  }
4556 
4557  // Apply alignment.
4558  const VkDeviceSize alignment = VMA_MAX(allocAlignment, static_cast<VkDeviceSize>(VMA_DEBUG_ALIGNMENT));
4559  *pOffset = VmaAlignUp(*pOffset, alignment);
4560 
4561  // Check previous suballocations for BufferImageGranularity conflicts.
4562  // Make bigger alignment if necessary.
4563  if(bufferImageGranularity > 1)
4564  {
4565  bool bufferImageGranularityConflict = false;
4566  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
4567  while(prevSuballocItem != m_Suballocations.cbegin())
4568  {
4569  --prevSuballocItem;
4570  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
4571  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
4572  {
4573  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
4574  {
4575  bufferImageGranularityConflict = true;
4576  break;
4577  }
4578  }
4579  else
4580  // Already on previous page.
4581  break;
4582  }
4583  if(bufferImageGranularityConflict)
4584  {
4585  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
4586  }
4587  }
4588 
4589  // Now that we have final *pOffset, check if we are past suballocItem.
4590  // If yes, return false - this function should be called again with another suballocItem as the starting point.
4591  if(*pOffset >= suballocItem->offset + suballocItem->size)
4592  {
4593  return false;
4594  }
4595 
4596  // Calculate padding at the beginning based on current offset.
4597  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
4598 
4599  // Calculate required margin at the end if this is not last suballocation.
4600  VmaSuballocationList::const_iterator next = suballocItem;
4601  ++next;
4602  const VkDeviceSize requiredEndMargin =
4603  (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;
4604 
4605  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
4606  // Another early return check.
4607  if(suballocItem->offset + totalSize > m_Size)
4608  {
4609  return false;
4610  }
4611 
4612  // Advance lastSuballocItem until desired size is reached.
4613  // Update itemsToMakeLostCount.
4614  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
4615  if(totalSize > suballocItem->size)
4616  {
4617  VkDeviceSize remainingSize = totalSize - suballocItem->size;
4618  while(remainingSize > 0)
4619  {
4620  ++lastSuballocItem;
4621  if(lastSuballocItem == m_Suballocations.cend())
4622  {
4623  return false;
4624  }
4625  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
4626  {
4627  *pSumFreeSize += lastSuballocItem->size;
4628  }
4629  else
4630  {
4631  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
4632  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
4633  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
4634  {
4635  ++*itemsToMakeLostCount;
4636  *pSumItemSize += lastSuballocItem->size;
4637  }
4638  else
4639  {
4640  return false;
4641  }
4642  }
4643  remainingSize = (lastSuballocItem->size < remainingSize) ?
4644  remainingSize - lastSuballocItem->size : 0;
4645  }
4646  }
4647 
4648  // Check next suballocations for BufferImageGranularity conflicts.
4649  // If conflict exists, we must mark more allocations lost or fail.
4650  if(bufferImageGranularity > 1)
4651  {
4652  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
4653  ++nextSuballocItem;
4654  while(nextSuballocItem != m_Suballocations.cend())
4655  {
4656  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
4657  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
4658  {
4659  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
4660  {
4661  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
4662  if(nextSuballoc.hAllocation->CanBecomeLost() &&
4663  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
4664  {
4665  ++*itemsToMakeLostCount;
4666  }
4667  else
4668  {
4669  return false;
4670  }
4671  }
4672  }
4673  else
4674  {
4675  // Already on next page.
4676  break;
4677  }
4678  ++nextSuballocItem;
4679  }
4680  }
4681  }
4682  else
4683  {
4684  const VmaSuballocation& suballoc = *suballocItem;
4685  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
4686 
4687  *pSumFreeSize = suballoc.size;
4688 
4689  // Size of this suballocation is too small for this request: Early return.
4690  if(suballoc.size < allocSize)
4691  {
4692  return false;
4693  }
4694 
4695  // Start from offset equal to beginning of this suballocation.
4696  *pOffset = suballoc.offset;
4697 
4698  // Apply VMA_DEBUG_MARGIN at the beginning.
4699  if((VMA_DEBUG_MARGIN > 0) && suballocItem != m_Suballocations.cbegin())
4700  {
4701  *pOffset += VMA_DEBUG_MARGIN;
4702  }
4703 
4704  // Apply alignment.
4705  const VkDeviceSize alignment = VMA_MAX(allocAlignment, static_cast<VkDeviceSize>(VMA_DEBUG_ALIGNMENT));
4706  *pOffset = VmaAlignUp(*pOffset, alignment);
4707 
4708  // Check previous suballocations for BufferImageGranularity conflicts.
4709  // Make bigger alignment if necessary.
4710  if(bufferImageGranularity > 1)
4711  {
4712  bool bufferImageGranularityConflict = false;
4713  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
4714  while(prevSuballocItem != m_Suballocations.cbegin())
4715  {
4716  --prevSuballocItem;
4717  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
4718  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
4719  {
4720  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
4721  {
4722  bufferImageGranularityConflict = true;
4723  break;
4724  }
4725  }
4726  else
4727  // Already on previous page.
4728  break;
4729  }
4730  if(bufferImageGranularityConflict)
4731  {
4732  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
4733  }
4734  }
4735 
4736  // Calculate padding at the beginning based on current offset.
4737  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
4738 
4739  // Calculate required margin at the end if this is not last suballocation.
4740  VmaSuballocationList::const_iterator next = suballocItem;
4741  ++next;
4742  const VkDeviceSize requiredEndMargin =
4743  (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;
4744 
4745  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
4746  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
4747  {
4748  return false;
4749  }
4750 
4751  // Check next suballocations for BufferImageGranularity conflicts.
4752  // If conflict exists, allocation cannot be made here.
4753  if(bufferImageGranularity > 1)
4754  {
4755  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
4756  ++nextSuballocItem;
4757  while(nextSuballocItem != m_Suballocations.cend())
4758  {
4759  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
4760  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
4761  {
4762  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
4763  {
4764  return false;
4765  }
4766  }
4767  else
4768  {
4769  // Already on next page.
4770  break;
4771  }
4772  ++nextSuballocItem;
4773  }
4774  }
4775  }
4776 
4777  // All tests passed: Success. pOffset is already filled.
4778  return true;
4779 }
4780 
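// Example (illustrative, assuming VMA_DEBUG_MARGIN = 4 and VMA_DEBUG_ALIGNMENT
// <= 16): for a non-first free suballocation at offset = 100 with size = 80,
// a request of allocSize = 60 and allocAlignment = 16 yields
// *pOffset = VmaAlignUp(100 + 4, 16) = 112 and paddingBegin = 12. The request
// succeeds because paddingBegin + allocSize + end margin = 12 + 60 + 4 = 76,
// which still fits within the 80-byte suballocation.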
4781 bool VmaDeviceMemoryBlock::IsEmpty() const
4782 {
4783  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
4784 }
4785 
4786 void VmaDeviceMemoryBlock::Alloc(
4787  const VmaAllocationRequest& request,
4788  VmaSuballocationType type,
4789  VkDeviceSize allocSize,
4790  VmaAllocation hAllocation)
4791 {
4792  VMA_ASSERT(request.item != m_Suballocations.end());
4793  VmaSuballocation& suballoc = *request.item;
4794  // Given suballocation is a free block.
4795  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
4796  // Given offset is inside this suballocation.
4797  VMA_ASSERT(request.offset >= suballoc.offset);
4798  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
4799  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
4800  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
4801 
4802  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
4803  // it to become used.
4804  UnregisterFreeSuballocation(request.item);
4805 
4806  suballoc.offset = request.offset;
4807  suballoc.size = allocSize;
4808  suballoc.type = type;
4809  suballoc.hAllocation = hAllocation;
4810 
4811  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
4812  if(paddingEnd)
4813  {
4814  VmaSuballocation paddingSuballoc = {};
4815  paddingSuballoc.offset = request.offset + allocSize;
4816  paddingSuballoc.size = paddingEnd;
4817  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
4818  VmaSuballocationList::iterator next = request.item;
4819  ++next;
4820  const VmaSuballocationList::iterator paddingEndItem =
4821  m_Suballocations.insert(next, paddingSuballoc);
4822  RegisterFreeSuballocation(paddingEndItem);
4823  }
4824 
4825  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
4826  if(paddingBegin)
4827  {
4828  VmaSuballocation paddingSuballoc = {};
4829  paddingSuballoc.offset = request.offset - paddingBegin;
4830  paddingSuballoc.size = paddingBegin;
4831  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
4832  const VmaSuballocationList::iterator paddingBeginItem =
4833  m_Suballocations.insert(request.item, paddingSuballoc);
4834  RegisterFreeSuballocation(paddingBeginItem);
4835  }
4836 
4837  // Update totals.
4838  m_FreeCount = m_FreeCount - 1;
4839  if(paddingBegin > 0)
4840  {
4841  ++m_FreeCount;
4842  }
4843  if(paddingEnd > 0)
4844  {
4845  ++m_FreeCount;
4846  }
4847  m_SumFreeSize -= allocSize;
4848 }
4849 
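// Example (illustrative): allocating 64 bytes at request.offset = 16 from a
// single free suballocation covering [0 .. 100):
//
//   before: |---------------- FREE 100 ----------------|
//   after:  |-FREE 16-|------ USED 64 ------|-FREE 20--|
//
// m_FreeCount goes 1 -> 2 (the consumed range is subtracted, the two paddings
// are added) and m_SumFreeSize decreases by exactly allocSize = 64.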
4850 VmaSuballocationList::iterator VmaDeviceMemoryBlock::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
4851 {
4852  // Change this suballocation to be marked as free.
4853  VmaSuballocation& suballoc = *suballocItem;
4854  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
4855  suballoc.hAllocation = VK_NULL_HANDLE;
4856 
4857  // Update totals.
4858  ++m_FreeCount;
4859  m_SumFreeSize += suballoc.size;
4860 
4861  // Merge with previous and/or next suballocation if it's also free.
4862  bool mergeWithNext = false;
4863  bool mergeWithPrev = false;
4864 
4865  VmaSuballocationList::iterator nextItem = suballocItem;
4866  ++nextItem;
4867  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
4868  {
4869  mergeWithNext = true;
4870  }
4871 
4872  VmaSuballocationList::iterator prevItem = suballocItem;
4873  if(suballocItem != m_Suballocations.begin())
4874  {
4875  --prevItem;
4876  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
4877  {
4878  mergeWithPrev = true;
4879  }
4880  }
4881 
4882  if(mergeWithNext)
4883  {
4884  UnregisterFreeSuballocation(nextItem);
4885  MergeFreeWithNext(suballocItem);
4886  }
4887 
4888  if(mergeWithPrev)
4889  {
4890  UnregisterFreeSuballocation(prevItem);
4891  MergeFreeWithNext(prevItem);
4892  RegisterFreeSuballocation(prevItem);
4893  return prevItem;
4894  }
4895  else
4896  {
4897  RegisterFreeSuballocation(suballocItem);
4898  return suballocItem;
4899  }
4900 }
4901 
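// Example (illustrative): freeing the middle allocation when both neighbors
// are already free:
//
//   before: |-FREE 16-|------ USED 64 ------|-FREE 20--|
//   after:  |---------------- FREE 100 ----------------|
//
// FreeSuballocation first merges with the next range, then with the previous
// one, and re-registers only the single surviving item in
// m_FreeSuballocationsBySize.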
4902 void VmaDeviceMemoryBlock::Free(const VmaAllocation allocation)
4903 {
4904  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
4905  suballocItem != m_Suballocations.end();
4906  ++suballocItem)
4907  {
4908  VmaSuballocation& suballoc = *suballocItem;
4909  if(suballoc.hAllocation == allocation)
4910  {
4911  FreeSuballocation(suballocItem);
4912  VMA_HEAVY_ASSERT(Validate());
4913  return;
4914  }
4915  }
4916  VMA_ASSERT(0 && "Not found!");
4917 }
4918 
4919 #if VMA_STATS_STRING_ENABLED
4920 
4921 void VmaDeviceMemoryBlock::PrintDetailedMap(class VmaJsonWriter& json) const
4922 {
4923  json.BeginObject();
4924 
4925  json.WriteString("TotalBytes");
4926  json.WriteNumber(m_Size);
4927 
4928  json.WriteString("UnusedBytes");
4929  json.WriteNumber(m_SumFreeSize);
4930 
4931  json.WriteString("Allocations");
4932  json.WriteNumber(m_Suballocations.size() - m_FreeCount);
4933 
4934  json.WriteString("UnusedRanges");
4935  json.WriteNumber(m_FreeCount);
4936 
4937  json.WriteString("Suballocations");
4938  json.BeginArray();
4939  size_t i = 0;
4940  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
4941  suballocItem != m_Suballocations.cend();
4942  ++suballocItem, ++i)
4943  {
4944  json.BeginObject(true);
4945 
4946  json.WriteString("Type");
4947  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[suballocItem->type]);
4948 
4949  json.WriteString("Size");
4950  json.WriteNumber(suballocItem->size);
4951 
4952  json.WriteString("Offset");
4953  json.WriteNumber(suballocItem->offset);
4954 
4955  json.EndObject();
4956  }
4957  json.EndArray();
4958 
4959  json.EndObject();
4960 }
4961 
4962 #endif // #if VMA_STATS_STRING_ENABLED
4963 
4964 void VmaDeviceMemoryBlock::MergeFreeWithNext(VmaSuballocationList::iterator item)
4965 {
4966  VMA_ASSERT(item != m_Suballocations.end());
4967  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
4968 
4969  VmaSuballocationList::iterator nextItem = item;
4970  ++nextItem;
4971  VMA_ASSERT(nextItem != m_Suballocations.end());
4972  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
4973 
4974  item->size += nextItem->size;
4975  --m_FreeCount;
4976  m_Suballocations.erase(nextItem);
4977 }
4978 
4979 void VmaDeviceMemoryBlock::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
4980 {
4981  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
4982  VMA_ASSERT(item->size > 0);
4983 
4984  // You may want to enable this validation at the beginning or at the end of
4985  // this function, depending on what you want to check.
4986  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
4987 
4988  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
4989  {
4990  if(m_FreeSuballocationsBySize.empty())
4991  {
4992  m_FreeSuballocationsBySize.push_back(item);
4993  }
4994  else
4995  {
4996  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
4997  }
4998  }
4999 
5000  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
5001 }
5002 
5003 
5004 void VmaDeviceMemoryBlock::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
5005 {
5006  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
5007  VMA_ASSERT(item->size > 0);
5008 
5009  // You may want to enable this validation at the beginning or at the end of
5010  // this function, depending on what you want to check.
5011  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
5012 
5013  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
5014  {
5015  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
5016  m_FreeSuballocationsBySize.data(),
5017  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
5018  item,
5019  VmaSuballocationItemSizeLess());
5020  for(size_t index = it - m_FreeSuballocationsBySize.data();
5021  index < m_FreeSuballocationsBySize.size();
5022  ++index)
5023  {
5024  if(m_FreeSuballocationsBySize[index] == item)
5025  {
5026  VmaVectorRemove(m_FreeSuballocationsBySize, index);
5027  return;
5028  }
5029  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
5030  }
5031  VMA_ASSERT(0 && "Not found.");
5032  }
5033 
5034  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
5035 }
5036 
5037 bool VmaDeviceMemoryBlock::ValidateFreeSuballocationList() const
5038 {
5039  VkDeviceSize lastSize = 0;
5040  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
5041  {
5042  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
5043 
5044  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
5045  {
5046  VMA_ASSERT(0);
5047  return false;
5048  }
5049  if(it->size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
5050  {
5051  VMA_ASSERT(0);
5052  return false;
5053  }
5054  if(it->size < lastSize)
5055  {
5056  VMA_ASSERT(0);
5057  return false;
5058  }
5059 
5060  lastSize = it->size;
5061  }
5062  return true;
5063 }
5064 
5065 static void InitStatInfo(VmaStatInfo& outInfo)
5066 {
5067  memset(&outInfo, 0, sizeof(outInfo));
5068  outInfo.allocationSizeMin = UINT64_MAX;
5069  outInfo.unusedRangeSizeMin = UINT64_MAX;
5070 }
5071 
5072 static void CalcAllocationStatInfo(VmaStatInfo& outInfo, const VmaDeviceMemoryBlock& block)
5073 {
5074  outInfo.blockCount = 1;
5075 
5076  const uint32_t rangeCount = (uint32_t)block.m_Suballocations.size();
5077  outInfo.allocationCount = rangeCount - block.m_FreeCount;
5078  outInfo.unusedRangeCount = block.m_FreeCount;
5079 
5080  outInfo.unusedBytes = block.m_SumFreeSize;
5081  outInfo.usedBytes = block.m_Size - outInfo.unusedBytes;
5082 
5083  outInfo.allocationSizeMin = UINT64_MAX;
5084  outInfo.allocationSizeMax = 0;
5085  outInfo.unusedRangeSizeMin = UINT64_MAX;
5086  outInfo.unusedRangeSizeMax = 0;
5087 
5088  for(VmaSuballocationList::const_iterator suballocItem = block.m_Suballocations.cbegin();
5089  suballocItem != block.m_Suballocations.cend();
5090  ++suballocItem)
5091  {
5092  const VmaSuballocation& suballoc = *suballocItem;
5093  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
5094  {
5095  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
5096  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
5097  }
5098  else
5099  {
5100  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
5101  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
5102  }
5103  }
5104 }
5105 
5106 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
5107 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
5108 {
5109  inoutInfo.blockCount += srcInfo.blockCount;
5110  inoutInfo.allocationCount += srcInfo.allocationCount;
5111  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
5112  inoutInfo.usedBytes += srcInfo.usedBytes;
5113  inoutInfo.unusedBytes += srcInfo.unusedBytes;
5114  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
5115  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
5116  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
5117  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
5118 }
5119 
5120 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
5121 {
5122  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
5123  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
5124  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
5125  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
5126 }
5127 
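// Example (illustrative, assuming VmaRoundDiv is the (x + y/2) / y helper
// defined earlier in this file): with usedBytes = 300 and allocationCount = 7,
// allocationSizeAvg = VmaRoundDiv(300, 7) = (300 + 3) / 7 = 43 - integer
// division rounded to nearest instead of truncated to 42.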
5128 VmaPool_T::VmaPool_T(
5129  VmaAllocator hAllocator,
5130  const VmaPoolCreateInfo& createInfo) :
5131  m_BlockVector(
5132  hAllocator,
5133  createInfo.memoryTypeIndex,
5134  (createInfo.flags & VMA_POOL_CREATE_PERSISTENT_MAP_BIT) != 0 ?
5135  VMA_BLOCK_VECTOR_TYPE_MAPPED : VMA_BLOCK_VECTOR_TYPE_UNMAPPED,
5136  createInfo.blockSize,
5137  createInfo.minBlockCount,
5138  createInfo.maxBlockCount,
5139  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
5140  createInfo.frameInUseCount,
5141  true) // isCustomPool
5142 {
5143 }
5144 
5145 VmaPool_T::~VmaPool_T()
5146 {
5147 }
5148 
5149 #if VMA_STATS_STRING_ENABLED
5150 
5151 #endif // #if VMA_STATS_STRING_ENABLED
5152 
5153 VmaBlockVector::VmaBlockVector(
5154  VmaAllocator hAllocator,
5155  uint32_t memoryTypeIndex,
5156  VMA_BLOCK_VECTOR_TYPE blockVectorType,
5157  VkDeviceSize preferredBlockSize,
5158  size_t minBlockCount,
5159  size_t maxBlockCount,
5160  VkDeviceSize bufferImageGranularity,
5161  uint32_t frameInUseCount,
5162  bool isCustomPool) :
5163  m_hAllocator(hAllocator),
5164  m_MemoryTypeIndex(memoryTypeIndex),
5165  m_BlockVectorType(blockVectorType),
5166  m_PreferredBlockSize(preferredBlockSize),
5167  m_MinBlockCount(minBlockCount),
5168  m_MaxBlockCount(maxBlockCount),
5169  m_BufferImageGranularity(bufferImageGranularity),
5170  m_FrameInUseCount(frameInUseCount),
5171  m_IsCustomPool(isCustomPool),
5172  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
5173  m_HasEmptyBlock(false),
5174  m_pDefragmentator(VMA_NULL)
5175 {
5176 }
5177 
5178 VmaBlockVector::~VmaBlockVector()
5179 {
5180  VMA_ASSERT(m_pDefragmentator == VMA_NULL);
5181 
5182  for(size_t i = m_Blocks.size(); i--; )
5183  {
5184  m_Blocks[i]->Destroy(m_hAllocator);
5185  vma_delete(m_hAllocator, m_Blocks[i]);
5186  }
5187 }
5188 
5189 VkResult VmaBlockVector::CreateMinBlocks()
5190 {
5191  for(size_t i = 0; i < m_MinBlockCount; ++i)
5192  {
5193  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
5194  if(res != VK_SUCCESS)
5195  {
5196  return res;
5197  }
5198  }
5199  return VK_SUCCESS;
5200 }
5201 
5202 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
5203 {
5204  pStats->size = 0;
5205  pStats->unusedSize = 0;
5206  pStats->allocationCount = 0;
5207  pStats->unusedRangeCount = 0;
5208  pStats->unusedRangeSizeMax = 0;
5209 
5210  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5211 
5212  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
5213  {
5214  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
5215  VMA_ASSERT(pBlock);
5216  VMA_HEAVY_ASSERT(pBlock->Validate());
5217 
5218  const uint32_t rangeCount = (uint32_t)pBlock->m_Suballocations.size();
5219 
5220  pStats->size += pBlock->m_Size;
5221  pStats->unusedSize += pBlock->m_SumFreeSize;
5222  pStats->allocationCount += rangeCount - pBlock->m_FreeCount;
5223  pStats->unusedRangeCount += pBlock->m_FreeCount;
5224  pStats->unusedRangeSizeMax = VMA_MAX(pStats->unusedRangeSizeMax, pBlock->GetUnusedRangeSizeMax());
5225  }
5226 }
5227 
5228 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
5229 
5230 VkResult VmaBlockVector::Allocate(
5231  VmaPool hCurrentPool,
5232  uint32_t currentFrameIndex,
5233  const VkMemoryRequirements& vkMemReq,
5234  const VmaAllocationCreateInfo& createInfo,
5235  VmaSuballocationType suballocType,
5236  VmaAllocation* pAllocation)
5237 {
5238  // Validate flags.
5239  if(((createInfo.flags & VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT) != 0) !=
5240  (m_BlockVectorType == VMA_BLOCK_VECTOR_TYPE_MAPPED))
5241  {
5242  VMA_ASSERT(0 && "Usage of VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT must match VMA_POOL_CREATE_PERSISTENT_MAP_BIT.");
5243  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
5244  }
5245 
5246  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5247 
5248  // 1. Search existing allocations. Try to allocate without making other allocations lost.
5249  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
5250  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
5251  {
5252  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
5253  VMA_ASSERT(pCurrBlock);
5254  VmaAllocationRequest currRequest = {};
5255  if(pCurrBlock->CreateAllocationRequest(
5256  currentFrameIndex,
5257  m_FrameInUseCount,
5258  m_BufferImageGranularity,
5259  vkMemReq.size,
5260  vkMemReq.alignment,
5261  suballocType,
5262  false, // canMakeOtherLost
5263  &currRequest))
5264  {
5265  // Allocate from pCurrBlock.
5266  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
5267 
5268  // We no longer have an empty block.
5269  if(pCurrBlock->IsEmpty())
5270  {
5271  m_HasEmptyBlock = false;
5272  }
5273 
5274  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex);
5275  pCurrBlock->Alloc(currRequest, suballocType, vkMemReq.size, *pAllocation);
5276  (*pAllocation)->InitBlockAllocation(
5277  hCurrentPool,
5278  pCurrBlock,
5279  currRequest.offset,
5280  vkMemReq.alignment,
5281  vkMemReq.size,
5282  suballocType,
5283  createInfo.pUserData,
5284  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
5285  VMA_HEAVY_ASSERT(pCurrBlock->Validate());
5286  VMA_DEBUG_LOG(" Returned from existing allocation #%u", (uint32_t)blockIndex);
5287  return VK_SUCCESS;
5288  }
5289  }
5290 
5291  const bool canCreateNewBlock =
5292  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
5293  (m_Blocks.size() < m_MaxBlockCount);
5294 
5295  // 2. Try to create new block.
5296  if(canCreateNewBlock)
5297  {
5298  // 2.1. Start with full preferredBlockSize.
5299  VkDeviceSize blockSize = m_PreferredBlockSize;
5300  size_t newBlockIndex = 0;
5301  VkResult res = CreateBlock(blockSize, &newBlockIndex);
5302  // Allocating blocks of other sizes is allowed only in default pools.
5303  // In custom pools block size is fixed.
5304  if(res < 0 && m_IsCustomPool == false)
5305  {
5306  // 2.2. Try half the size.
5307  blockSize /= 2;
5308  if(blockSize >= vkMemReq.size)
5309  {
5310  res = CreateBlock(blockSize, &newBlockIndex);
5311  if(res < 0)
5312  {
5313  // 2.3. Try quarter the size.
5314  blockSize /= 2;
5315  if(blockSize >= vkMemReq.size)
5316  {
5317  res = CreateBlock(blockSize, &newBlockIndex);
5318  }
5319  }
5320  }
5321  }
5322  if(res == VK_SUCCESS)
5323  {
5324  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
5325  VMA_ASSERT(pBlock->m_Size >= vkMemReq.size);
5326 
5327  // Allocate from pBlock. Because it is empty, allocRequest can be trivially filled.
5328  VmaAllocationRequest allocRequest = {};
5329  allocRequest.item = pBlock->m_Suballocations.begin();
5330  allocRequest.offset = 0;
5331  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex);
5332  pBlock->Alloc(allocRequest, suballocType, vkMemReq.size, *pAllocation);
5333  (*pAllocation)->InitBlockAllocation(
5334  hCurrentPool,
5335  pBlock,
5336  allocRequest.offset,
5337  vkMemReq.alignment,
5338  vkMemReq.size,
5339  suballocType,
5340  createInfo.pUserData,
5341  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
5342  VMA_HEAVY_ASSERT(pBlock->Validate());
5343  VMA_DEBUG_LOG(" Created new block Size=%llu", blockSize);
5344 
5345  return VK_SUCCESS;
5346  }
5347  }
5348 
5349  const bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
5350 
5351  // 3. Try to allocate from existing blocks with making other allocations lost.
5352  if(canMakeOtherLost)
5353  {
5354  uint32_t tryIndex = 0;
5355  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
5356  {
5357  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
5358  VmaAllocationRequest bestRequest = {};
5359  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
5360 
5361  // 1. Search existing allocations.
5362  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
5363  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
5364  {
5365  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
5366  VMA_ASSERT(pCurrBlock);
5367  VmaAllocationRequest currRequest = {};
5368  if(pCurrBlock->CreateAllocationRequest(
5369  currentFrameIndex,
5370  m_FrameInUseCount,
5371  m_BufferImageGranularity,
5372  vkMemReq.size,
5373  vkMemReq.alignment,
5374  suballocType,
5375  canMakeOtherLost,
5376  &currRequest))
5377  {
5378  const VkDeviceSize currRequestCost = currRequest.CalcCost();
5379  if(pBestRequestBlock == VMA_NULL ||
5380  currRequestCost < bestRequestCost)
5381  {
5382  pBestRequestBlock = pCurrBlock;
5383  bestRequest = currRequest;
5384  bestRequestCost = currRequestCost;
5385 
5386  if(bestRequestCost == 0)
5387  {
5388  break;
5389  }
5390  }
5391  }
5392  }
5393 
5394  if(pBestRequestBlock != VMA_NULL)
5395  {
5396  if(pBestRequestBlock->MakeRequestedAllocationsLost(
5397  currentFrameIndex,
5398  m_FrameInUseCount,
5399  &bestRequest))
5400  {
5401  // We no longer have an empty block.
5402  if(pBestRequestBlock->IsEmpty())
5403  {
5404  m_HasEmptyBlock = false;
5405  }
5406  // Allocate from pBestRequestBlock.
5407  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex);
5408  pBestRequestBlock->Alloc(bestRequest, suballocType, vkMemReq.size, *pAllocation);
5409  (*pAllocation)->InitBlockAllocation(
5410  hCurrentPool,
5411  pBestRequestBlock,
5412  bestRequest.offset,
5413  vkMemReq.alignment,
5414  vkMemReq.size,
5415  suballocType,
5416  createInfo.pUserData,
5417  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
5418  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
5419  VMA_DEBUG_LOG(" Returned from existing block");
5420  return VK_SUCCESS;
5421  }
5422  // else: Some allocations must have been touched while we were here. Next try.
5423  }
5424  else
5425  {
5426  // Could not find place in any of the blocks - break outer loop.
5427  break;
5428  }
5429  }
5430  /* Maximum number of tries exceeded - a very unlikely event when many other
5431  threads are simultaneously touching allocations, making it impossible to mark
5432  them lost while we try to allocate. */
5433  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
5434  {
5435  return VK_ERROR_TOO_MANY_OBJECTS;
5436  }
5437  }
5438 
5439  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
5440 }
5441 
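// Example (illustrative): the three stages of Allocate() for a default pool
// with m_PreferredBlockSize = 256 MiB and a 100 MiB request:
//   1. try every existing block without making anything lost;
//   2. create a new block, retrying at 128 MiB if the full 256 MiB
//      vkAllocateMemory fails (the 64 MiB quarter-size retry is skipped here
//      by the blockSize >= vkMemReq.size check, since 64 MiB < 100 MiB);
//   3. if VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT was set, pick the
//      block with the cheapest request by CalcCost() and evict the lost-able
//      allocations standing in the way.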
5442 void VmaBlockVector::Free(
5443  VmaAllocation hAllocation)
5444 {
5445  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
5446 
5447  // Scope for lock.
5448  {
5449  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5450 
5451  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
5452 
5453  pBlock->Free(hAllocation);
5454  VMA_HEAVY_ASSERT(pBlock->Validate());
5455 
5456  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
5457 
5458  // pBlock became empty after this deallocation.
5459  if(pBlock->IsEmpty())
5460  {
5461  // We already have an empty block - we don't want two, so delete this one.
5462  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
5463  {
5464  pBlockToDelete = pBlock;
5465  Remove(pBlock);
5466  }
5467  // We now have our first empty block.
5468  else
5469  {
5470  m_HasEmptyBlock = true;
5471  }
5472  }
5473  // Must be called last, because it may reorder m_Blocks and invalidate indices into it!
5474  IncrementallySortBlocks();
5475  }
5476 
5477  // Destruction of a free block. Deferred until this point, outside of the mutex
5478  // lock, for performance reasons.
5479  if(pBlockToDelete != VMA_NULL)
5480  {
5481  VMA_DEBUG_LOG(" Deleted empty block");
5482  pBlockToDelete->Destroy(m_hAllocator);
5483  vma_delete(m_hAllocator, pBlockToDelete);
5484  }
5485 }
5486 
5487 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
5488 {
5489  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
5490  {
5491  if(m_Blocks[blockIndex] == pBlock)
5492  {
5493  VmaVectorRemove(m_Blocks, blockIndex);
5494  return;
5495  }
5496  }
5497  VMA_ASSERT(0);
5498 }
5499 
5500 void VmaBlockVector::IncrementallySortBlocks()
5501 {
5502  // Bubble sort only until first swap.
5503  for(size_t i = 1; i < m_Blocks.size(); ++i)
5504  {
5505  if(m_Blocks[i - 1]->m_SumFreeSize > m_Blocks[i]->m_SumFreeSize)
5506  {
5507  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
5508  return;
5509  }
5510  }
5511 }
5512 
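// Example (illustrative): with per-block m_SumFreeSize = [10, 50, 20, 90],
// a single call swaps the first out-of-order pair and returns immediately,
// giving [10, 20, 50, 90]. Calling this after every Free() keeps m_Blocks
// nearly sorted by free space at O(n) worst case per call instead of paying
// for a full sort.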
5513 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
5514 {
5515  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
5516  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
5517  allocInfo.allocationSize = blockSize;
5518  VkDeviceMemory mem = VK_NULL_HANDLE;
5519  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
5520  if(res < 0)
5521  {
5522  return res;
5523  }
5524 
5525  // New VkDeviceMemory successfully created.
5526 
5527  // Map memory if needed.
5528  void* pMappedData = VMA_NULL;
5529  const bool persistentMap = (m_BlockVectorType == VMA_BLOCK_VECTOR_TYPE_MAPPED);
5530  if(persistentMap && m_hAllocator->m_UnmapPersistentlyMappedMemoryCounter == 0)
5531  {
5532  res = (*m_hAllocator->GetVulkanFunctions().vkMapMemory)(
5533  m_hAllocator->m_hDevice,
5534  mem,
5535  0,
5536  VK_WHOLE_SIZE,
5537  0,
5538  &pMappedData);
5539  if(res < 0)
5540  {
5541  VMA_DEBUG_LOG(" vkMapMemory FAILED");
5542  m_hAllocator->FreeVulkanMemory(m_MemoryTypeIndex, blockSize, mem);
5543  return res;
5544  }
5545  }
5546 
5547  // Create a new block object for it.
5548  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
5549  pBlock->Init(
5550  m_MemoryTypeIndex,
5551  (VMA_BLOCK_VECTOR_TYPE)m_BlockVectorType,
5552  mem,
5553  allocInfo.allocationSize,
5554  persistentMap,
5555  pMappedData);
5556 
5557  m_Blocks.push_back(pBlock);
5558  if(pNewBlockIndex != VMA_NULL)
5559  {
5560  *pNewBlockIndex = m_Blocks.size() - 1;
5561  }
5562 
5563  return VK_SUCCESS;
5564 }
5565 
5566 #if VMA_STATS_STRING_ENABLED
5567 
5568 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
5569 {
5570  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5571 
5572  json.BeginObject();
5573 
5574  if(m_IsCustomPool)
5575  {
5576  json.WriteString("MemoryTypeIndex");
5577  json.WriteNumber(m_MemoryTypeIndex);
5578 
5579  if(m_BlockVectorType == VMA_BLOCK_VECTOR_TYPE_MAPPED)
5580  {
5581  json.WriteString("Mapped");
5582  json.WriteBool(true);
5583  }
5584 
5585  json.WriteString("BlockSize");
5586  json.WriteNumber(m_PreferredBlockSize);
5587 
5588  json.WriteString("BlockCount");
5589  json.BeginObject(true);
5590  if(m_MinBlockCount > 0)
5591  {
5592  json.WriteString("Min");
5593  json.WriteNumber(m_MinBlockCount);
5594  }
5595  if(m_MaxBlockCount < SIZE_MAX)
5596  {
5597  json.WriteString("Max");
5598  json.WriteNumber(m_MaxBlockCount);
5599  }
5600  json.WriteString("Cur");
5601  json.WriteNumber(m_Blocks.size());
5602  json.EndObject();
5603 
5604  if(m_FrameInUseCount > 0)
5605  {
5606  json.WriteString("FrameInUseCount");
5607  json.WriteNumber(m_FrameInUseCount);
5608  }
5609  }
5610  else
5611  {
5612  json.WriteString("PreferredBlockSize");
5613  json.WriteNumber(m_PreferredBlockSize);
5614  }
5615 
5616  json.WriteString("Blocks");
5617  json.BeginArray();
5618  for(size_t i = 0; i < m_Blocks.size(); ++i)
5619  {
5620  m_Blocks[i]->PrintDetailedMap(json);
5621  }
5622  json.EndArray();
5623 
5624  json.EndObject();
5625 }
5626 
5627 #endif // #if VMA_STATS_STRING_ENABLED
5628 
5629 void VmaBlockVector::UnmapPersistentlyMappedMemory()
5630 {
5631  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5632 
5633  for(size_t i = m_Blocks.size(); i--; )
5634  {
5635  VmaDeviceMemoryBlock* pBlock = m_Blocks[i];
5636  if(pBlock->m_pMappedData != VMA_NULL)
5637  {
5638  VMA_ASSERT(pBlock->m_PersistentMap);
5639  (m_hAllocator->GetVulkanFunctions().vkUnmapMemory)(m_hAllocator->m_hDevice, pBlock->m_hMemory);
5640  pBlock->m_pMappedData = VMA_NULL;
5641  }
5642  }
5643 }
5644 
5645 VkResult VmaBlockVector::MapPersistentlyMappedMemory()
5646 {
5647  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5648 
5649  VkResult finalResult = VK_SUCCESS;
5650  for(size_t i = 0, count = m_Blocks.size(); i < count; ++i)
5651  {
5652  VmaDeviceMemoryBlock* pBlock = m_Blocks[i];
5653  if(pBlock->m_PersistentMap)
5654  {
5655  VMA_ASSERT(pBlock->m_pMappedData == VMA_NULL);
5656  VkResult localResult = (*m_hAllocator->GetVulkanFunctions().vkMapMemory)(
5657  m_hAllocator->m_hDevice,
5658  pBlock->m_hMemory,
5659  0,
5660  VK_WHOLE_SIZE,
5661  0,
5662  &pBlock->m_pMappedData);
5663  if(localResult != VK_SUCCESS)
5664  {
5665  finalResult = localResult;
5666  }
5667  }
5668  }
5669  return finalResult;
5670 }
5671 
5672 VmaDefragmentator* VmaBlockVector::EnsureDefragmentator(
5673  VmaAllocator hAllocator,
5674  uint32_t currentFrameIndex)
5675 {
5676  if(m_pDefragmentator == VMA_NULL)
5677  {
5678  m_pDefragmentator = vma_new(m_hAllocator, VmaDefragmentator)(
5679  hAllocator,
5680  this,
5681  currentFrameIndex);
5682  }
5683 
5684  return m_pDefragmentator;
5685 }
5686 
5687 VkResult VmaBlockVector::Defragment(
5688  VmaDefragmentationStats* pDefragmentationStats,
5689  VkDeviceSize& maxBytesToMove,
5690  uint32_t& maxAllocationsToMove)
5691 {
5692  if(m_pDefragmentator == VMA_NULL)
5693  {
5694  return VK_SUCCESS;
5695  }
5696 
5697  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5698 
5699  // Defragment.
5700  VkResult result = m_pDefragmentator->Defragment(maxBytesToMove, maxAllocationsToMove);
5701 
5702  // Accumulate statistics.
5703  if(pDefragmentationStats != VMA_NULL)
5704  {
5705  const VkDeviceSize bytesMoved = m_pDefragmentator->GetBytesMoved();
5706  const uint32_t allocationsMoved = m_pDefragmentator->GetAllocationsMoved();
5707  pDefragmentationStats->bytesMoved += bytesMoved;
5708  pDefragmentationStats->allocationsMoved += allocationsMoved;
5709  VMA_ASSERT(bytesMoved <= maxBytesToMove);
5710  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
5711  maxBytesToMove -= bytesMoved;
5712  maxAllocationsToMove -= allocationsMoved;
5713  }
5714 
5715  // Free empty blocks.
5716  m_HasEmptyBlock = false;
5717  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
5718  {
5719  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
5720  if(pBlock->IsEmpty())
5721  {
5722  if(m_Blocks.size() > m_MinBlockCount)
5723  {
5724  if(pDefragmentationStats != VMA_NULL)
5725  {
5726  ++pDefragmentationStats->deviceMemoryBlocksFreed;
5727  pDefragmentationStats->bytesFreed += pBlock->m_Size;
5728  }
5729 
5730  VmaVectorRemove(m_Blocks, blockIndex);
5731  pBlock->Destroy(m_hAllocator);
5732  vma_delete(m_hAllocator, pBlock);
5733  }
5734  else
5735  {
5736  m_HasEmptyBlock = true;
5737  }
5738  }
5739  }
5740 
5741  return result;
5742 }
5743 
5744 void VmaBlockVector::DestroyDefragmentator()
5745 {
5746  if(m_pDefragmentator != VMA_NULL)
5747  {
5748  vma_delete(m_hAllocator, m_pDefragmentator);
5749  m_pDefragmentator = VMA_NULL;
5750  }
5751 }
5752 
5753 void VmaBlockVector::MakePoolAllocationsLost(
5754  uint32_t currentFrameIndex,
5755  size_t* pLostAllocationCount)
5756 {
5757  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5758 
5759  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
5760  {
5761  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
5762  VMA_ASSERT(pBlock);
5763  pBlock->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
5764  }
5765 }
5766 
5767 void VmaBlockVector::AddStats(VmaStats* pStats)
5768 {
5769  const uint32_t memTypeIndex = m_MemoryTypeIndex;
5770  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
5771 
5772  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5773 
5774  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
5775  {
5776  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
5777  VMA_ASSERT(pBlock);
5778  VMA_HEAVY_ASSERT(pBlock->Validate());
5779  VmaStatInfo allocationStatInfo;
5780  CalcAllocationStatInfo(allocationStatInfo, *pBlock);
5781  VmaAddStatInfo(pStats->total, allocationStatInfo);
5782  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
5783  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
5784  }
5785 }
5786 
5787 ////////////////////////////////////////////////////////////////////////////////
5788 // VmaDefragmentator members definition
5789 
5790 VmaDefragmentator::VmaDefragmentator(
5791  VmaAllocator hAllocator,
5792  VmaBlockVector* pBlockVector,
5793  uint32_t currentFrameIndex) :
5794  m_hAllocator(hAllocator),
5795  m_pBlockVector(pBlockVector),
5796  m_CurrentFrameIndex(currentFrameIndex),
5797  m_BytesMoved(0),
5798  m_AllocationsMoved(0),
5799  m_Allocations(VmaStlAllocator<AllocationInfo>(hAllocator->GetAllocationCallbacks())),
5800  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
5801 {
5802 }
5803 
5804 VmaDefragmentator::~VmaDefragmentator()
5805 {
5806  for(size_t i = m_Blocks.size(); i--; )
5807  {
5808  vma_delete(m_hAllocator, m_Blocks[i]);
5809  }
5810 }
5811 
5812 void VmaDefragmentator::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
5813 {
5814  AllocationInfo allocInfo;
5815  allocInfo.m_hAllocation = hAlloc;
5816  allocInfo.m_pChanged = pChanged;
5817  m_Allocations.push_back(allocInfo);
5818 }
5819 
5820 VkResult VmaDefragmentator::BlockInfo::EnsureMapping(VmaAllocator hAllocator, void** ppMappedData)
5821 {
5822  // It has already been mapped for defragmentation.
5823  if(m_pMappedDataForDefragmentation)
5824  {
5825  *ppMappedData = m_pMappedDataForDefragmentation;
5826  return VK_SUCCESS;
5827  }
5828 
5829  // It is persistently mapped.
5830  if(m_pBlock->m_PersistentMap)
5831  {
5832  VMA_ASSERT(m_pBlock->m_pMappedData != VMA_NULL);
5833  *ppMappedData = m_pBlock->m_pMappedData;
5834  return VK_SUCCESS;
5835  }
5836 
5837  // Map on first usage.
5838  VkResult res = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
5839  hAllocator->m_hDevice,
5840  m_pBlock->m_hMemory,
5841  0,
5842  VK_WHOLE_SIZE,
5843  0,
5844  &m_pMappedDataForDefragmentation);
5845  *ppMappedData = m_pMappedDataForDefragmentation;
5846  return res;
5847 }
5848 
5849 void VmaDefragmentator::BlockInfo::Unmap(VmaAllocator hAllocator)
5850 {
5851  if(m_pMappedDataForDefragmentation != VMA_NULL)
5852  {
5853  (hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_pBlock->m_hMemory);
5854  }
5855 }
5856 
5857 VkResult VmaDefragmentator::DefragmentRound(
5858  VkDeviceSize maxBytesToMove,
5859  uint32_t maxAllocationsToMove)
5860 {
5861  if(m_Blocks.empty())
5862  {
5863  return VK_SUCCESS;
5864  }
5865 
5866  size_t srcBlockIndex = m_Blocks.size() - 1;
5867  size_t srcAllocIndex = SIZE_MAX;
5868  for(;;)
5869  {
5870  // 1. Find next allocation to move.
5871  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
5872  // 1.2. Then start from last to first m_Allocations - they are sorted from largest to smallest.
5873  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
5874  {
5875  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
5876  {
5877  // Finished: no more allocations to process.
5878  if(srcBlockIndex == 0)
5879  {
5880  return VK_SUCCESS;
5881  }
5882  else
5883  {
5884  --srcBlockIndex;
5885  srcAllocIndex = SIZE_MAX;
5886  }
5887  }
5888  else
5889  {
5890  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
5891  }
5892  }
5893 
5894  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
5895  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
5896 
5897  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
5898  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
5899  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
5900  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
5901 
5902  // 2. Try to find new place for this allocation in preceding or current block.
5903  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
5904  {
5905  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
5906  VmaAllocationRequest dstAllocRequest;
5907  if(pDstBlockInfo->m_pBlock->CreateAllocationRequest(
5908  m_CurrentFrameIndex,
5909  m_pBlockVector->GetFrameInUseCount(),
5910  m_pBlockVector->GetBufferImageGranularity(),
5911  size,
5912  alignment,
5913  suballocType,
5914  false, // canMakeOtherLost
5915  &dstAllocRequest) &&
5916  MoveMakesSense(
5917  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
5918  {
5919  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
5920 
5921  // Reached limit on number of allocations or bytes to move.
5922  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
5923  (m_BytesMoved + size > maxBytesToMove))
5924  {
5925  return VK_INCOMPLETE;
5926  }
5927 
5928  void* pDstMappedData = VMA_NULL;
5929  VkResult res = pDstBlockInfo->EnsureMapping(m_hAllocator, &pDstMappedData);
5930  if(res != VK_SUCCESS)
5931  {
5932  return res;
5933  }
5934 
5935  void* pSrcMappedData = VMA_NULL;
5936  res = pSrcBlockInfo->EnsureMapping(m_hAllocator, &pSrcMappedData);
5937  if(res != VK_SUCCESS)
5938  {
5939  return res;
5940  }
5941 
5942  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
5943  memcpy(
5944  reinterpret_cast<char*>(pDstMappedData) + dstAllocRequest.offset,
5945  reinterpret_cast<char*>(pSrcMappedData) + srcOffset,
5946  static_cast<size_t>(size));
5947 
5948  pDstBlockInfo->m_pBlock->Alloc(dstAllocRequest, suballocType, size, allocInfo.m_hAllocation);
5949  pSrcBlockInfo->m_pBlock->Free(allocInfo.m_hAllocation);
5950 
5951  allocInfo.m_hAllocation->ChangeBlockAllocation(pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
5952 
5953  if(allocInfo.m_pChanged != VMA_NULL)
5954  {
5955  *allocInfo.m_pChanged = VK_TRUE;
5956  }
5957 
5958  ++m_AllocationsMoved;
5959  m_BytesMoved += size;
5960 
5961  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
5962 
5963  break;
5964  }
5965  }
5966 
5967  // If not processed, this allocInfo remains in pSrcBlockInfo->m_Allocations for the next round.
5968 
5969  if(srcAllocIndex > 0)
5970  {
5971  --srcAllocIndex;
5972  }
5973  else
5974  {
5975  if(srcBlockIndex > 0)
5976  {
5977  --srcBlockIndex;
5978  srcAllocIndex = SIZE_MAX;
5979  }
5980  else
5981  {
5982  return VK_SUCCESS;
5983  }
5984  }
5985  }
5986 }
5987 
5988 VkResult VmaDefragmentator::Defragment(
5989  VkDeviceSize maxBytesToMove,
5990  uint32_t maxAllocationsToMove)
5991 {
5992  if(m_Allocations.empty())
5993  {
5994  return VK_SUCCESS;
5995  }
5996 
5997  // Create block info for each block.
5998  const size_t blockCount = m_pBlockVector->m_Blocks.size();
5999  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
6000  {
6001  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
6002  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
6003  m_Blocks.push_back(pBlockInfo);
6004  }
6005 
6006  // Sort them by m_pBlock pointer value.
6007  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
6008 
6009  // Move allocation infos from m_Allocations to the appropriate m_Blocks[i].m_Allocations.
6010  for(size_t allocIndex = 0, allocCount = m_Allocations.size(); allocIndex < allocCount; ++allocIndex)
6011  {
6012  AllocationInfo& allocInfo = m_Allocations[allocIndex];
6013  // Now that we are inside VmaBlockVector::m_Mutex, we can make a final check that this allocation was not lost.
6014  if(allocInfo.m_hAllocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
6015  {
6016  VmaDeviceMemoryBlock* pBlock = allocInfo.m_hAllocation->GetBlock();
6017  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
6018  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
6019  {
6020  (*it)->m_Allocations.push_back(allocInfo);
6021  }
6022  else
6023  {
6024  VMA_ASSERT(0);
6025  }
6026  }
6027  }
6028  m_Allocations.clear();
6029 
6030  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
6031  {
6032  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
6033  pBlockInfo->CalcHasNonMovableAllocations();
6034  pBlockInfo->SortAllocationsBySizeDescecnding();
6035  }
6036 
6037  // Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
6038  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
6039 
6040  // Execute defragmentation rounds (the main part).
6041  VkResult result = VK_SUCCESS;
6042  for(size_t round = 0; (round < 2) && (result == VK_SUCCESS); ++round)
6043  {
6044  result = DefragmentRound(maxBytesToMove, maxAllocationsToMove);
6045  }
6046 
6047  // Unmap blocks that were mapped for defragmentation.
6048  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
6049  {
6050  m_Blocks[blockIndex]->Unmap(m_hAllocator);
6051  }
6052 
6053  return result;
6054 }
6055 
6056 bool VmaDefragmentator::MoveMakesSense(
6057  size_t dstBlockIndex, VkDeviceSize dstOffset,
6058  size_t srcBlockIndex, VkDeviceSize srcOffset)
6059 {
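 // Effectively a lexicographic comparison: a move is worthwhile only if it
 // lands in an earlier block, or at a lower offset within the same block,
 // so data migrates toward the front of the block list.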
6060  if(dstBlockIndex < srcBlockIndex)
6061  {
6062  return true;
6063  }
6064  if(dstBlockIndex > srcBlockIndex)
6065  {
6066  return false;
6067  }
6068  if(dstOffset < srcOffset)
6069  {
6070  return true;
6071  }
6072  return false;
6073 }
6074 
6075 ////////////////////////////////////////////////////////////////////////////////
6076 // VmaAllocator_T
6077 
6078 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
6079  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
6080  m_PhysicalDevice(pCreateInfo->physicalDevice),
6081  m_hDevice(pCreateInfo->device),
6082  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
6083  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
6084  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
6085  m_UnmapPersistentlyMappedMemoryCounter(0),
6086  m_PreferredLargeHeapBlockSize(0),
6087  m_PreferredSmallHeapBlockSize(0),
6088  m_CurrentFrameIndex(0),
6089  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks()))
6090 {
6091  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
6092 
6093  memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks));
6094  memset(&m_MemProps, 0, sizeof(m_MemProps));
6095  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
6096 
6097  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
6098  memset(&m_pOwnAllocations, 0, sizeof(m_pOwnAllocations));
6099 
6100  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
6101  {
6102  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
6103  }
6104 
6105  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
6106  {
6107  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
6108  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
6109  }
6110 
6111  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
6112 
6113  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
6114  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
6115 
6116  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
6117  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
6118  m_PreferredSmallHeapBlockSize = (pCreateInfo->preferredSmallHeapBlockSize != 0) ?
6119  pCreateInfo->preferredSmallHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_SMALL_HEAP_BLOCK_SIZE);
6120 
6121  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
6122  {
6123  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
6124  {
6125  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
6126  if(limit != VK_WHOLE_SIZE)
6127  {
6128  m_HeapSizeLimit[heapIndex] = limit;
6129  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
6130  {
6131  m_MemProps.memoryHeaps[heapIndex].size = limit;
6132  }
6133  }
6134  }
6135  }
6136 
6137  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
6138  {
6139  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
6140 
6141  for(size_t blockVectorTypeIndex = 0; blockVectorTypeIndex < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorTypeIndex)
6142  {
6143  m_pBlockVectors[memTypeIndex][blockVectorTypeIndex] = vma_new(this, VmaBlockVector)(
6144  this,
6145  memTypeIndex,
6146  static_cast<VMA_BLOCK_VECTOR_TYPE>(blockVectorTypeIndex),
6147  preferredBlockSize,
6148  0,
6149  SIZE_MAX,
6150  GetBufferImageGranularity(),
6151  pCreateInfo->frameInUseCount,
6152  false); // isCustomPool
6153  // No need to call m_pBlockVectors[memTypeIndex][blockVectorTypeIndex]->CreateMinBlocks here,
6154  // because minBlockCount is 0.
6155  m_pOwnAllocations[memTypeIndex][blockVectorTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
6156  }
6157  }
6158 }
6159 
6160 VmaAllocator_T::~VmaAllocator_T()
6161 {
6162  VMA_ASSERT(m_Pools.empty());
6163 
6164  for(size_t i = GetMemoryTypeCount(); i--; )
6165  {
6166  for(size_t j = VMA_BLOCK_VECTOR_TYPE_COUNT; j--; )
6167  {
6168  vma_delete(this, m_pOwnAllocations[i][j]);
6169  vma_delete(this, m_pBlockVectors[i][j]);
6170  }
6171  }
6172 }
6173 
6174 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
6175 {
6176 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
6177  m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
6178  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
6179  m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
6180  m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
6181  m_VulkanFunctions.vkMapMemory = &vkMapMemory;
6182  m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
6183  m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
6184  m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
6185  m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
6186  m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
6187  m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
6188  m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
6189  m_VulkanFunctions.vkCreateImage = &vkCreateImage;
6190  m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
6191 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
6192 
6193  if(pVulkanFunctions != VMA_NULL)
6194  {
6195  m_VulkanFunctions = *pVulkanFunctions;
6196  }
6197 
6198  // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
6199  // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
6200  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
6201  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
6202  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
6203  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
6204  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
6205  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
6206  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
6207  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
6208  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
6209  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
6210  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
6211  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
6212  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
6213  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
6214 }
6215 
6216 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
6217 {
6218  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
6219  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
6220  return (heapSize <= VMA_SMALL_HEAP_MAX_SIZE) ?
6221  m_PreferredSmallHeapBlockSize : m_PreferredLargeHeapBlockSize;
6222 }
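// Illustration (values assume the default VMA_SMALL_HEAP_MAX_SIZE and
// default block-size constants): a 256 MB heap counts as "small" and gets
// m_PreferredSmallHeapBlockSize, while an 8 GB heap gets
// m_PreferredLargeHeapBlockSize.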
6223 
6224 VkResult VmaAllocator_T::AllocateMemoryOfType(
6225  const VkMemoryRequirements& vkMemReq,
6226  const VmaAllocationCreateInfo& createInfo,
6227  uint32_t memTypeIndex,
6228  VmaSuballocationType suballocType,
6229  VmaAllocation* pAllocation)
6230 {
6231  VMA_ASSERT(pAllocation != VMA_NULL);
6232  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, vkMemReq.size);
6233 
6234  uint32_t blockVectorType = VmaAllocationCreateFlagsToBlockVectorType(createInfo.flags);
6235  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex][blockVectorType];
6236  VMA_ASSERT(blockVector);
6237 
6238  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
6239  // Heuristics: Allocate own memory if requested size is greater than half of preferred block size.
6240  const bool ownMemory =
6241  (createInfo.flags & VMA_ALLOCATION_CREATE_OWN_MEMORY_BIT) != 0 ||
6242  VMA_DEBUG_ALWAYS_OWN_MEMORY ||
6243  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
6244  vkMemReq.size > preferredBlockSize / 2);
6245 
6246  if(ownMemory)
6247  {
6248  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
6249  {
6250  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
6251  }
6252  else
6253  {
6254  return AllocateOwnMemory(
6255  vkMemReq.size,
6256  suballocType,
6257  memTypeIndex,
6258  (createInfo.flags & VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT) != 0,
6259  createInfo.pUserData,
6260  pAllocation);
6261  }
6262  }
6263  else
6264  {
6265  VkResult res = blockVector->Allocate(
6266  VK_NULL_HANDLE, // hCurrentPool
6267  m_CurrentFrameIndex.load(),
6268  vkMemReq,
6269  createInfo,
6270  suballocType,
6271  pAllocation);
6272  if(res == VK_SUCCESS)
6273  {
6274  return res;
6275  }
6276 
6277  // Block allocation failed. Try own memory.
6278  res = AllocateOwnMemory(
6279  vkMemReq.size,
6280  suballocType,
6281  memTypeIndex,
6282  (createInfo.flags & VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT) != 0,
6283  createInfo.pUserData,
6284  pAllocation);
6285  if(res == VK_SUCCESS)
6286  {
6287  // Succeeded: AllocateOwnMemory function already filled *pAllocation, nothing more to do here.
6288  VMA_DEBUG_LOG(" Allocated as OwnMemory");
6289  return VK_SUCCESS;
6290  }
6291  else
6292  {
6293  // Everything failed: Return error code.
6294  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
6295  return res;
6296  }
6297  }
6298 }
6299 
6300 VkResult VmaAllocator_T::AllocateOwnMemory(
6301  VkDeviceSize size,
6302  VmaSuballocationType suballocType,
6303  uint32_t memTypeIndex,
6304  bool map,
6305  void* pUserData,
6306  VmaAllocation* pAllocation)
6307 {
6308  VMA_ASSERT(pAllocation);
6309 
6310  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
6311  allocInfo.memoryTypeIndex = memTypeIndex;
6312  allocInfo.allocationSize = size;
6313 
6314  // Allocate VkDeviceMemory.
6315  VkDeviceMemory hMemory = VK_NULL_HANDLE;
6316  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
6317  if(res < 0)
6318  {
6319  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
6320  return res;
6321  }
6322 
6323  void* pMappedData = VMA_NULL;
6324  if(map)
6325  {
6326  if(m_UnmapPersistentlyMappedMemoryCounter == 0)
6327  {
6328  res = vkMapMemory(m_hDevice, hMemory, 0, VK_WHOLE_SIZE, 0, &pMappedData);
6329  if(res < 0)
6330  {
6331  VMA_DEBUG_LOG(" vkMapMemory FAILED");
6332  FreeVulkanMemory(memTypeIndex, size, hMemory);
6333  return res;
6334  }
6335  }
6336  }
6337 
6338  *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load());
6339  (*pAllocation)->InitOwnAllocation(memTypeIndex, hMemory, suballocType, map, pMappedData, size, pUserData);
6340 
6341  // Register it in m_pOwnAllocations.
6342  {
6343  VmaMutexLock lock(m_OwnAllocationsMutex[memTypeIndex], m_UseMutex);
6344  AllocationVectorType* pOwnAllocations = m_pOwnAllocations[memTypeIndex][map ? VMA_BLOCK_VECTOR_TYPE_MAPPED : VMA_BLOCK_VECTOR_TYPE_UNMAPPED];
6345  VMA_ASSERT(pOwnAllocations);
6346  VmaVectorInsertSorted<VmaPointerLess>(*pOwnAllocations, *pAllocation);
6347  }
6348 
6349  VMA_DEBUG_LOG(" Allocated OwnMemory MemoryTypeIndex=#%u", memTypeIndex);
6350 
6351  return VK_SUCCESS;
6352 }
6353 
6354 VkResult VmaAllocator_T::AllocateMemory(
6355  const VkMemoryRequirements& vkMemReq,
6356  const VmaAllocationCreateInfo& createInfo,
6357  VmaSuballocationType suballocType,
6358  VmaAllocation* pAllocation)
6359 {
6360  if((createInfo.flags & VMA_ALLOCATION_CREATE_OWN_MEMORY_BIT) != 0 &&
6361  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
6362  {
6363  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_OWN_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
6364  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
6365  }
6366  if((createInfo.pool != VK_NULL_HANDLE) &&
6367  ((createInfo.flags & (VMA_ALLOCATION_CREATE_OWN_MEMORY_BIT)) != 0))
6368  {
6369  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_OWN_MEMORY_BIT when pool != null is invalid.");
6370  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
6371  }
6372 
6373  if(createInfo.pool != VK_NULL_HANDLE)
6374  {
6375  return createInfo.pool->m_BlockVector.Allocate(
6376  createInfo.pool,
6377  m_CurrentFrameIndex.load(),
6378  vkMemReq,
6379  createInfo,
6380  suballocType,
6381  pAllocation);
6382  }
6383  else
6384  {
6385  // Bit mask of Vulkan memory types acceptable for this allocation.
6386  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
6387  uint32_t memTypeIndex = UINT32_MAX;
6388  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
6389  if(res == VK_SUCCESS)
6390  {
6391  res = AllocateMemoryOfType(vkMemReq, createInfo, memTypeIndex, suballocType, pAllocation);
6392  // Succeeded on first try.
6393  if(res == VK_SUCCESS)
6394  {
6395  return res;
6396  }
6397  // Allocation from this memory type failed. Try other compatible memory types.
6398  else
6399  {
6400  for(;;)
6401  {
6402  // Remove old memTypeIndex from list of possibilities.
6403  memoryTypeBits &= ~(1u << memTypeIndex);
6404  // Find alternative memTypeIndex.
6405  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
6406  if(res == VK_SUCCESS)
6407  {
6408  res = AllocateMemoryOfType(vkMemReq, createInfo, memTypeIndex, suballocType, pAllocation);
6409  // Allocation from this alternative memory type succeeded.
6410  if(res == VK_SUCCESS)
6411  {
6412  return res;
6413  }
6414  // else: Allocation from this memory type failed. Try next one - next loop iteration.
6415  }
6416  // No other matching memory type index could be found.
6417  else
6418  {
6419  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
6420  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
6421  }
6422  }
6423  }
6424  }
6425  // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
6426  else
6427  return res;
6428  }
6429 }
6430 
6431 void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
6432 {
6433  VMA_ASSERT(allocation);
6434 
6435  if(allocation->CanBecomeLost() == false ||
6436  allocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
6437  {
6438  switch(allocation->GetType())
6439  {
6440  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
6441  {
6442  VmaBlockVector* pBlockVector = VMA_NULL;
6443  VmaPool hPool = allocation->GetPool();
6444  if(hPool != VK_NULL_HANDLE)
6445  {
6446  pBlockVector = &hPool->m_BlockVector;
6447  }
6448  else
6449  {
6450  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
6451  const VMA_BLOCK_VECTOR_TYPE blockVectorType = allocation->GetBlockVectorType();
6452  pBlockVector = m_pBlockVectors[memTypeIndex][blockVectorType];
6453  }
6454  pBlockVector->Free(allocation);
6455  }
6456  break;
6457  case VmaAllocation_T::ALLOCATION_TYPE_OWN:
6458  FreeOwnMemory(allocation);
6459  break;
6460  default:
6461  VMA_ASSERT(0);
6462  }
6463  }
6464 
6465  vma_delete(this, allocation);
6466 }
6467 
6468 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
6469 {
6470  // Initialize.
6471  InitStatInfo(pStats->total);
6472  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
6473  InitStatInfo(pStats->memoryType[i]);
6474  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
6475  InitStatInfo(pStats->memoryHeap[i]);
6476 
6477  // Process default pools.
6478  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
6479  {
6480  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
6481  for(uint32_t blockVectorType = 0; blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorType)
6482  {
6483  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex][blockVectorType];
6484  VMA_ASSERT(pBlockVector);
6485  pBlockVector->AddStats(pStats);
6486  }
6487  }
6488 
6489  // Process custom pools.
6490  {
6491  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
6492  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
6493  {
6494  m_Pools[poolIndex]->GetBlockVector().AddStats(pStats);
6495  }
6496  }
6497 
6498  // Process own allocations.
6499  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
6500  {
6501  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
6502  VmaMutexLock ownAllocationsLock(m_OwnAllocationsMutex[memTypeIndex], m_UseMutex);
6503  for(uint32_t blockVectorType = 0; blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorType)
6504  {
6505  AllocationVectorType* const pOwnAllocVector = m_pOwnAllocations[memTypeIndex][blockVectorType];
6506  VMA_ASSERT(pOwnAllocVector);
6507  for(size_t allocIndex = 0, allocCount = pOwnAllocVector->size(); allocIndex < allocCount; ++allocIndex)
6508  {
6509  VmaStatInfo allocationStatInfo;
6510  (*pOwnAllocVector)[allocIndex]->OwnAllocCalcStatsInfo(allocationStatInfo);
6511  VmaAddStatInfo(pStats->total, allocationStatInfo);
6512  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
6513  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
6514  }
6515  }
6516  }
6517 
6518  // Postprocess.
6519  VmaPostprocessCalcStatInfo(pStats->total);
6520  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
6521  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
6522  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
6523  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
6524 }
6525 
6526 static const uint32_t VMA_VENDOR_ID_AMD = 4098; // PCI vendor ID 0x1002.
6527 
6528 void VmaAllocator_T::UnmapPersistentlyMappedMemory()
6529 {
6530  if(m_UnmapPersistentlyMappedMemoryCounter++ == 0)
6531  {
6532  if(m_PhysicalDeviceProperties.vendorID == VMA_VENDOR_ID_AMD)
6533  {
6534  for(uint32_t memTypeIndex = m_MemProps.memoryTypeCount; memTypeIndex--; )
6535  {
6536  const VkMemoryPropertyFlags memFlags = m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
6537  if((memFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0 &&
6538  (memFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
6539  {
6540  // Process OwnAllocations.
6541  {
6542  VmaMutexLock lock(m_OwnAllocationsMutex[memTypeIndex], m_UseMutex);
6543  AllocationVectorType* pOwnAllocationsVector = m_pOwnAllocations[memTypeIndex][VMA_BLOCK_VECTOR_TYPE_MAPPED];
6544  for(size_t ownAllocIndex = pOwnAllocationsVector->size(); ownAllocIndex--; )
6545  {
6546  VmaAllocation hAlloc = (*pOwnAllocationsVector)[ownAllocIndex];
6547  hAlloc->OwnAllocUnmapPersistentlyMappedMemory(this);
6548  }
6549  }
6550 
6551  // Process normal Allocations.
6552  {
6553  VmaBlockVector* pBlockVector = m_pBlockVectors[memTypeIndex][VMA_BLOCK_VECTOR_TYPE_MAPPED];
6554  pBlockVector->UnmapPersistentlyMappedMemory();
6555  }
6556  }
6557  }
6558 
6559  // Process custom pools.
6560  {
6561  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
6562  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
6563  {
6564  m_Pools[poolIndex]->GetBlockVector().UnmapPersistentlyMappedMemory();
6565  }
6566  }
6567  }
6568  }
6569 }
6570 
6571 VkResult VmaAllocator_T::MapPersistentlyMappedMemory()
6572 {
6573  VMA_ASSERT(m_UnmapPersistentlyMappedMemoryCounter > 0);
6574  if(--m_UnmapPersistentlyMappedMemoryCounter == 0)
6575  {
6576  VkResult finalResult = VK_SUCCESS;
6577  if(m_PhysicalDeviceProperties.vendorID == VMA_VENDOR_ID_AMD)
6578  {
6579  // Process custom pools.
6580  {
6581  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
6582  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
6583  {
6584  m_Pools[poolIndex]->GetBlockVector().MapPersistentlyMappedMemory();
6585  }
6586  }
6587 
6588  for(uint32_t memTypeIndex = 0; memTypeIndex < m_MemProps.memoryTypeCount; ++memTypeIndex)
6589  {
6590  const VkMemoryPropertyFlags memFlags = m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
6591  if((memFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0 &&
6592  (memFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
6593  {
6594  // Process OwnAllocations.
6595  {
6596  VmaMutexLock lock(m_OwnAllocationsMutex[memTypeIndex], m_UseMutex);
6597  AllocationVectorType* pAllocationsVector = m_pOwnAllocations[memTypeIndex][VMA_BLOCK_VECTOR_TYPE_MAPPED];
6598  for(size_t ownAllocIndex = 0, ownAllocCount = pAllocationsVector->size(); ownAllocIndex < ownAllocCount; ++ownAllocIndex)
6599  {
6600  VmaAllocation hAlloc = (*pAllocationsVector)[ownAllocIndex];
6601  hAlloc->OwnAllocMapPersistentlyMappedMemory(this);
6602  }
6603  }
6604 
6605  // Process normal Allocations.
6606  {
6607  VmaBlockVector* pBlockVector = m_pBlockVectors[memTypeIndex][VMA_BLOCK_VECTOR_TYPE_MAPPED];
6608  VkResult localResult = pBlockVector->MapPersistentlyMappedMemory();
6609  if(localResult != VK_SUCCESS)
6610  {
6611  finalResult = localResult;
6612  }
6613  }
6614  }
6615  }
6616  }
6617  return finalResult;
6618  }
6619  else
6620  return VK_SUCCESS;
6621 }
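// Usage sketch (illustrative; assumes the public wrappers of this pair,
// vmaUnmapPersistentlyMappedMemory()/vmaMapPersistentlyMappedMemory()):
// on AMD, DEVICE_LOCAL | HOST_VISIBLE memory may be invalidated by display
// mode changes, so an application brackets such events:
//
//   vmaUnmapPersistentlyMappedMemory(allocator);
//   // ... handle alt-tab / mode change ...
//   vmaMapPersistentlyMappedMemory(allocator);
//
// The counter makes the pair re-entrant: only the outermost unmap/map
// actually touches memory.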
6622 
6623 VkResult VmaAllocator_T::Defragment(
6624  VmaAllocation* pAllocations,
6625  size_t allocationCount,
6626  VkBool32* pAllocationsChanged,
6627  const VmaDefragmentationInfo* pDefragmentationInfo,
6628  VmaDefragmentationStats* pDefragmentationStats)
6629 {
6630  if(pAllocationsChanged != VMA_NULL)
6631  {
6632  memset(pAllocationsChanged, 0, allocationCount * sizeof(VkBool32));
6633  }
6634  if(pDefragmentationStats != VMA_NULL)
6635  {
6636  memset(pDefragmentationStats, 0, sizeof(*pDefragmentationStats));
6637  }
6638 
6639  if(m_UnmapPersistentlyMappedMemoryCounter > 0)
6640  {
6641  VMA_DEBUG_LOG("ERROR: Cannot defragment when inside vmaUnmapPersistentlyMappedMemory.");
6642  return VK_ERROR_MEMORY_MAP_FAILED;
6643  }
6644 
6645  const uint32_t currentFrameIndex = m_CurrentFrameIndex.load();
6646 
6647  VmaMutexLock poolsLock(m_PoolsMutex, m_UseMutex);
6648 
6649  const size_t poolCount = m_Pools.size();
6650 
6651  // Dispatch pAllocations among defragmentators. Create them in BlockVectors when necessary.
6652  for(size_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
6653  {
6654  VmaAllocation hAlloc = pAllocations[allocIndex];
6655  VMA_ASSERT(hAlloc);
6656  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
6657  // OwnAlloc cannot be defragmented.
6658  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
6659  // Only HOST_VISIBLE memory types can be defragmented.
6660  ((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0) &&
6661  // Lost allocation cannot be defragmented.
6662  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
6663  {
6664  VmaBlockVector* pAllocBlockVector = VMA_NULL;
6665 
6666  const VmaPool hAllocPool = hAlloc->GetPool();
6667  // This allocation belongs to a custom pool.
6668  if(hAllocPool != VK_NULL_HANDLE)
6669  {
6670  pAllocBlockVector = &hAllocPool->GetBlockVector();
6671  }
6672  // This allocation belongs to the general pool.
6673  else
6674  {
6675  pAllocBlockVector = m_pBlockVectors[memTypeIndex][hAlloc->GetBlockVectorType()];
6676  }
6677 
6678  VmaDefragmentator* const pDefragmentator = pAllocBlockVector->EnsureDefragmentator(this, currentFrameIndex);
6679 
6680  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
6681  &pAllocationsChanged[allocIndex] : VMA_NULL;
6682  pDefragmentator->AddAllocation(hAlloc, pChanged);
6683  }
6684  }
6685 
6686  VkResult result = VK_SUCCESS;
6687 
6688  // ======== Main processing.
6689 
6690  VkDeviceSize maxBytesToMove = UINT64_MAX; // 64-bit "no limit"; SIZE_MAX would truncate on 32-bit targets.
6691  uint32_t maxAllocationsToMove = UINT32_MAX;
6692  if(pDefragmentationInfo != VMA_NULL)
6693  {
6694  maxBytesToMove = pDefragmentationInfo->maxBytesToMove;
6695  maxAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
6696  }
6697 
6698  // Process standard memory.
6699  for(uint32_t memTypeIndex = 0;
6700  (memTypeIndex < GetMemoryTypeCount()) && (result == VK_SUCCESS);
6701  ++memTypeIndex)
6702  {
6703  // Only HOST_VISIBLE memory types can be defragmented.
6704  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
6705  {
6706  for(uint32_t blockVectorType = 0;
6707  (blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT) && (result == VK_SUCCESS);
6708  ++blockVectorType)
6709  {
6710  result = m_pBlockVectors[memTypeIndex][blockVectorType]->Defragment(
6711  pDefragmentationStats,
6712  maxBytesToMove,
6713  maxAllocationsToMove);
6714  }
6715  }
6716  }
6717 
6718  // Process custom pools.
6719  for(size_t poolIndex = 0; (poolIndex < poolCount) && (result == VK_SUCCESS); ++poolIndex)
6720  {
6721  result = m_Pools[poolIndex]->GetBlockVector().Defragment(
6722  pDefragmentationStats,
6723  maxBytesToMove,
6724  maxAllocationsToMove);
6725  }
6726 
6727  // ======== Destroy defragmentators.
6728 
6729  // Process custom pools.
6730  for(size_t poolIndex = poolCount; poolIndex--; )
6731  {
6732  m_Pools[poolIndex]->GetBlockVector().DestroyDefragmentator();
6733  }
6734 
6735  // Process standard memory.
6736  for(uint32_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
6737  {
6738  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
6739  {
6740  for(size_t blockVectorType = VMA_BLOCK_VECTOR_TYPE_COUNT; blockVectorType--; )
6741  {
6742  m_pBlockVectors[memTypeIndex][blockVectorType]->DestroyDefragmentator();
6743  }
6744  }
6745  }
6746 
6747  return result;
6748 }
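// Caller-side sketch (illustrative; resource handling is the application's
// responsibility): after defragmentation, every allocation whose
// pAllocationsChanged entry is VK_TRUE has a new VkDeviceMemory/offset, so
// buffers or images bound to it must be destroyed, recreated, and rebound by
// the caller; only the memory contents were moved.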
6749 
6750 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
6751 {
6752  if(hAllocation->CanBecomeLost())
6753  {
6754  /*
6755  Warning: This is a carefully designed algorithm.
6756  Do not modify unless you really know what you're doing :)
6757  */
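 // In short: the loop "touches" the allocation by publishing the current
 // frame index as its last-use frame via compare-exchange, retrying when
 // another thread races the update, and reports the allocation as lost once
 // LastUseFrameIndex reads VMA_FRAME_INDEX_LOST.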
6758  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
6759  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
6760  for(;;)
6761  {
6762  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
6763  {
6764  pAllocationInfo->memoryType = UINT32_MAX;
6765  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
6766  pAllocationInfo->offset = 0;
6767  pAllocationInfo->size = hAllocation->GetSize();
6768  pAllocationInfo->pMappedData = VMA_NULL;
6769  pAllocationInfo->pUserData = hAllocation->GetUserData();
6770  return;
6771  }
6772  else if(localLastUseFrameIndex == localCurrFrameIndex)
6773  {
6774  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
6775  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
6776  pAllocationInfo->offset = hAllocation->GetOffset();
6777  pAllocationInfo->size = hAllocation->GetSize();
6778  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
6779  pAllocationInfo->pUserData = hAllocation->GetUserData();
6780  return;
6781  }
6782  else // Last use time earlier than current time.
6783  {
6784  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
6785  {
6786  localLastUseFrameIndex = localCurrFrameIndex;
6787  }
6788  }
6789  }
6790  }
6791  // This allocation cannot become lost, so its fields can be read directly, without the atomic compare-exchange on LastUseFrameIndex used above.
6792  else
6793  {
6794  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
6795  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
6796  pAllocationInfo->offset = hAllocation->GetOffset();
6797  pAllocationInfo->size = hAllocation->GetSize();
6798  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
6799  pAllocationInfo->pUserData = hAllocation->GetUserData();
6800  }
6801 }
6802 
6803 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
6804 {
6805  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u", pCreateInfo->memoryTypeIndex);
6806 
6807  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
6808 
6809  if(newCreateInfo.maxBlockCount == 0)
6810  {
6811  newCreateInfo.maxBlockCount = SIZE_MAX;
6812  }
6813  if(newCreateInfo.blockSize == 0)
6814  {
6815  newCreateInfo.blockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
6816  }
6817 
6818  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo);
6819 
6820  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
6821  if(res != VK_SUCCESS)
6822  {
6823  vma_delete(this, *pPool);
6824  *pPool = VMA_NULL;
6825  return res;
6826  }
6827 
6828  // Add to m_Pools.
6829  {
6830  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
6831  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
6832  }
6833 
6834  return VK_SUCCESS;
6835 }
6836 
6837 void VmaAllocator_T::DestroyPool(VmaPool pool)
6838 {
6839  // Remove from m_Pools.
6840  {
6841  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
6842  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
6843  VMA_ASSERT(success && "Pool not found in Allocator.");
6844  }
6845 
6846  vma_delete(this, pool);
6847 }
6848 
6849 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
6850 {
6851  pool->m_BlockVector.GetPoolStats(pPoolStats);
6852 }
6853 
6854 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
6855 {
6856  m_CurrentFrameIndex.store(frameIndex);
6857 }
6858 
6859 void VmaAllocator_T::MakePoolAllocationsLost(
6860  VmaPool hPool,
6861  size_t* pLostAllocationCount)
6862 {
6863  hPool->m_BlockVector.MakePoolAllocationsLost(
6864  m_CurrentFrameIndex.load(),
6865  pLostAllocationCount);
6866 }
6867 
6868 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
6869 {
6870  *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST);
6871  (*pAllocation)->InitLost();
6872 }
6873 
6874 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
6875 {
6876  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
6877 
6878  VkResult res;
6879  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
6880  {
6881  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
6882  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
6883  {
6884  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
6885  if(res == VK_SUCCESS)
6886  {
6887  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
6888  }
6889  }
6890  else
6891  {
6892  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
6893  }
6894  }
6895  else
6896  {
6897  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
6898  }
6899 
6900  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
6901  {
6902  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
6903  }
6904 
6905  return res;
6906 }
6907 
6908 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
6909 {
6910  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
6911  {
6912  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
6913  }
6914 
6915  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
6916 
6917  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
6918  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
6919  {
6920  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
6921  m_HeapSizeLimit[heapIndex] += size;
6922  }
6923 }
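// AllocateVulkanMemory/FreeVulkanMemory above together maintain the soft heap
// budget configured via VmaAllocatorCreateInfo::pHeapSizeLimit: allocation
// subtracts from m_HeapSizeLimit under m_HeapSizeLimitMutex and fails with
// VK_ERROR_OUT_OF_DEVICE_MEMORY when the remaining budget is too small;
// freeing adds the size back.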
6924 
6925 void VmaAllocator_T::FreeOwnMemory(VmaAllocation allocation)
6926 {
6927  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_OWN);
6928 
6929  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
6930  {
6931  VmaMutexLock lock(m_OwnAllocationsMutex[memTypeIndex], m_UseMutex);
6932  AllocationVectorType* const pOwnAllocations = m_pOwnAllocations[memTypeIndex][allocation->GetBlockVectorType()];
6933  VMA_ASSERT(pOwnAllocations);
6934  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pOwnAllocations, allocation);
6935  VMA_ASSERT(success);
6936  }
6937 
6938  VkDeviceMemory hMemory = allocation->GetMemory();
6939 
6940  if(allocation->GetMappedData() != VMA_NULL)
6941  {
6942  vkUnmapMemory(m_hDevice, hMemory);
6943  }
6944 
6945  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
6946 
6947  VMA_DEBUG_LOG(" Freed OwnMemory MemoryTypeIndex=%u", memTypeIndex);
6948 }
6949 
6950 #if VMA_STATS_STRING_ENABLED
6951 
6952 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
6953 {
6954  bool ownAllocationsStarted = false;
6955  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
6956  {
6957  VmaMutexLock ownAllocationsLock(m_OwnAllocationsMutex[memTypeIndex], m_UseMutex);
6958  for(uint32_t blockVectorType = 0; blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorType)
6959  {
6960  AllocationVectorType* const pOwnAllocVector = m_pOwnAllocations[memTypeIndex][blockVectorType];
6961  VMA_ASSERT(pOwnAllocVector);
6962  if(pOwnAllocVector->empty() == false)
6963  {
6964  if(ownAllocationsStarted == false)
6965  {
6966  ownAllocationsStarted = true;
6967  json.WriteString("OwnAllocations");
6968  json.BeginObject();
6969  }
6970 
6971  json.BeginString("Type ");
6972  json.ContinueString(memTypeIndex);
6973  if(blockVectorType == VMA_BLOCK_VECTOR_TYPE_MAPPED)
6974  {
6975  json.ContinueString(" Mapped");
6976  }
6977  json.EndString();
6978 
6979  json.BeginArray();
6980 
6981  for(size_t i = 0; i < pOwnAllocVector->size(); ++i)
6982  {
6983  const VmaAllocation hAlloc = (*pOwnAllocVector)[i];
6984  json.BeginObject(true);
6985 
6986  json.WriteString("Size");
6987  json.WriteNumber(hAlloc->GetSize());
6988 
6989  json.WriteString("Type");
6990  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[hAlloc->GetSuballocationType()]);
6991 
6992  json.EndObject();
6993  }
6994 
6995  json.EndArray();
6996  }
6997  }
6998  }
6999  if(ownAllocationsStarted)
7000  {
7001  json.EndObject();
7002  }
7003 
7004  {
7005  bool allocationsStarted = false;
7006  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
7007  {
7008  for(uint32_t blockVectorType = 0; blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorType)
7009  {
7010  if(m_pBlockVectors[memTypeIndex][blockVectorType]->IsEmpty() == false)
7011  {
7012  if(allocationsStarted == false)
7013  {
7014  allocationsStarted = true;
7015  json.WriteString("DefaultPools");
7016  json.BeginObject();
7017  }
7018 
7019  json.BeginString("Type ");
7020  json.ContinueString(memTypeIndex);
7021  if(blockVectorType == VMA_BLOCK_VECTOR_TYPE_MAPPED)
7022  {
7023  json.ContinueString(" Mapped");
7024  }
7025  json.EndString();
7026 
7027  m_pBlockVectors[memTypeIndex][blockVectorType]->PrintDetailedMap(json);
7028  }
7029  }
7030  }
7031  if(allocationsStarted)
7032  {
7033  json.EndObject();
7034  }
7035  }
7036 
7037  {
7038  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
7039  const size_t poolCount = m_Pools.size();
7040  if(poolCount > 0)
7041  {
7042  json.WriteString("Pools");
7043  json.BeginArray();
7044  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
7045  {
7046  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
7047  }
7048  json.EndArray();
7049  }
7050  }
7051 }
7052 
7053 #endif // #if VMA_STATS_STRING_ENABLED
7054 
7055 static VkResult AllocateMemoryForImage(
7056  VmaAllocator allocator,
7057  VkImage image,
7058  const VmaAllocationCreateInfo* pAllocationCreateInfo,
7059  VmaSuballocationType suballocType,
7060  VmaAllocation* pAllocation)
7061 {
7062  VMA_ASSERT(allocator && (image != VK_NULL_HANDLE) && pAllocationCreateInfo && pAllocation);
7063 
7064  VkMemoryRequirements vkMemReq = {};
7065  (*allocator->GetVulkanFunctions().vkGetImageMemoryRequirements)(allocator->m_hDevice, image, &vkMemReq);
7066 
7067  return allocator->AllocateMemory(
7068  vkMemReq,
7069  *pAllocationCreateInfo,
7070  suballocType,
7071  pAllocation);
7072 }
7073 
7074 ////////////////////////////////////////////////////////////////////////////////
7075 // Public interface
7076 
7077 VkResult vmaCreateAllocator(
7078  const VmaAllocatorCreateInfo* pCreateInfo,
7079  VmaAllocator* pAllocator)
7080 {
7081  VMA_ASSERT(pCreateInfo && pAllocator);
7082  VMA_DEBUG_LOG("vmaCreateAllocator");
7083  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
7084  return VK_SUCCESS;
7085 }
7086 
7087 void vmaDestroyAllocator(
7088  VmaAllocator allocator)
7089 {
7090  if(allocator != VK_NULL_HANDLE)
7091  {
7092  VMA_DEBUG_LOG("vmaDestroyAllocator");
7093  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
7094  vma_delete(&allocationCallbacks, allocator);
7095  }
7096 }
7097 
7098 void vmaGetPhysicalDeviceProperties(
7099  VmaAllocator allocator,
7100  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
7101 {
7102  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
7103  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
7104 }
7105 
7106 void vmaGetMemoryProperties(
7107  VmaAllocator allocator,
7108  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
7109 {
7110  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
7111  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
7112 }
7113 
7114 void vmaGetMemoryTypeProperties(
7115  VmaAllocator allocator,
7116  uint32_t memoryTypeIndex,
7117  VkMemoryPropertyFlags* pFlags)
7118 {
7119  VMA_ASSERT(allocator && pFlags);
7120  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
7121  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
7122 }
7123 
7124 void vmaSetCurrentFrameIndex(
7125  VmaAllocator allocator,
7126  uint32_t frameIndex)
7127 {
7128  VMA_ASSERT(allocator);
7129  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
7130 
7131  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7132 
7133  allocator->SetCurrentFrameIndex(frameIndex);
7134 }
7135 
7136 void vmaCalculateStats(
7137  VmaAllocator allocator,
7138  VmaStats* pStats)
7139 {
7140  VMA_ASSERT(allocator && pStats);
7141  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7142  allocator->CalculateStats(pStats);
7143 }
7144 
7145 #if VMA_STATS_STRING_ENABLED
7146 
7147 void vmaBuildStatsString(
7148  VmaAllocator allocator,
7149  char** ppStatsString,
7150  VkBool32 detailedMap)
7151 {
7152  VMA_ASSERT(allocator && ppStatsString);
7153  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7154 
7155  VmaStringBuilder sb(allocator);
7156  {
7157  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
7158  json.BeginObject();
7159 
7160  VmaStats stats;
7161  allocator->CalculateStats(&stats);
7162 
7163  json.WriteString("Total");
7164  VmaPrintStatInfo(json, stats.total);
7165 
7166  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
7167  {
7168  json.BeginString("Heap ");
7169  json.ContinueString(heapIndex);
7170  json.EndString();
7171  json.BeginObject();
7172 
7173  json.WriteString("Size");
7174  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
7175 
7176  json.WriteString("Flags");
7177  json.BeginArray(true);
7178  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
7179  {
7180  json.WriteString("DEVICE_LOCAL");
7181  }
7182  json.EndArray();
7183 
7184  if(stats.memoryHeap[heapIndex].blockCount > 0)
7185  {
7186  json.WriteString("Stats");
7187  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
7188  }
7189 
7190  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
7191  {
7192  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
7193  {
7194  json.BeginString("Type ");
7195  json.ContinueString(typeIndex);
7196  json.EndString();
7197 
7198  json.BeginObject();
7199 
7200  json.WriteString("Flags");
7201  json.BeginArray(true);
7202  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
7203  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
7204  {
7205  json.WriteString("DEVICE_LOCAL");
7206  }
7207  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
7208  {
7209  json.WriteString("HOST_VISIBLE");
7210  }
7211  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
7212  {
7213  json.WriteString("HOST_COHERENT");
7214  }
7215  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
7216  {
7217  json.WriteString("HOST_CACHED");
7218  }
7219  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
7220  {
7221  json.WriteString("LAZILY_ALLOCATED");
7222  }
7223  json.EndArray();
7224 
7225  if(stats.memoryType[typeIndex].blockCount > 0)
7226  {
7227  json.WriteString("Stats");
7228  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
7229  }
7230 
7231  json.EndObject();
7232  }
7233  }
7234 
7235  json.EndObject();
7236  }
7237  if(detailedMap == VK_TRUE)
7238  {
7239  allocator->PrintDetailedMap(json);
7240  }
7241 
7242  json.EndObject();
7243  }
7244 
7245  const size_t len = sb.GetLength();
7246  char* const pChars = vma_new_array(allocator, char, len + 1);
7247  if(len > 0)
7248  {
7249  memcpy(pChars, sb.GetData(), len);
7250  }
7251  pChars[len] = '\0';
7252  *ppStatsString = pChars;
7253 }
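// Usage sketch (illustrative):
//
//   char* statsString = VMA_NULL;
//   vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE: include detailed map
//   printf("%s\n", statsString);
//   vmaFreeStatsString(allocator, statsString);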
7254 
7255 void vmaFreeStatsString(
7256  VmaAllocator allocator,
7257  char* pStatsString)
7258 {
7259  if(pStatsString != VMA_NULL)
7260  {
7261  VMA_ASSERT(allocator);
7262  size_t len = strlen(pStatsString);
7263  vma_delete_array(allocator, pStatsString, len + 1);
7264  }
7265 }
7266 
7267 #endif // #if VMA_STATS_STRING_ENABLED
7268 
7271 VkResult vmaFindMemoryTypeIndex(
7272  VmaAllocator allocator,
7273  uint32_t memoryTypeBits,
7274  const VmaAllocationCreateInfo* pAllocationCreateInfo,
7275  uint32_t* pMemoryTypeIndex)
7276 {
7277  VMA_ASSERT(allocator != VK_NULL_HANDLE);
7278  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
7279  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
7280 
7281  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
7282  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
7283  if(preferredFlags == 0)
7284  {
7285  preferredFlags = requiredFlags;
7286  }
7287  // preferredFlags, if not 0, must be a superset of requiredFlags.
7288  VMA_ASSERT((requiredFlags & ~preferredFlags) == 0);
7289 
7290  // Convert usage to requiredFlags and preferredFlags.
7291  switch(pAllocationCreateInfo->usage)
7292  {
7293  case VMA_MEMORY_USAGE_UNKNOWN:
7294  break;
7295  case VMA_MEMORY_USAGE_GPU_ONLY:
7296  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
7297  break;
7298  case VMA_MEMORY_USAGE_CPU_ONLY:
7299  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
7300  break;
7301  case VMA_MEMORY_USAGE_CPU_TO_GPU:
7302  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
7303  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
7304  break;
7305  case VMA_MEMORY_USAGE_GPU_TO_CPU:
7306  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
7307  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
7308  break;
7309  default:
7310  break;
7311  }
7312 
7313  if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT) != 0)
7314  {
7315  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
7316  }
7317 
7318  *pMemoryTypeIndex = UINT32_MAX;
7319  uint32_t minCost = UINT32_MAX;
7320  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
7321  memTypeIndex < allocator->GetMemoryTypeCount();
7322  ++memTypeIndex, memTypeBit <<= 1)
7323  {
7324  // This memory type is acceptable according to memoryTypeBits bitmask.
7325  if((memTypeBit & memoryTypeBits) != 0)
7326  {
7327  const VkMemoryPropertyFlags currFlags =
7328  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
7329  // This memory type contains requiredFlags.
7330  if((requiredFlags & ~currFlags) == 0)
7331  {
7332  // Calculate cost as number of bits from preferredFlags not present in this memory type.
7333  uint32_t currCost = CountBitsSet(preferredFlags & ~currFlags);
7334  // Remember memory type with lowest cost.
7335  if(currCost < minCost)
7336  {
7337  *pMemoryTypeIndex = memTypeIndex;
7338  if(currCost == 0)
7339  {
7340  return VK_SUCCESS;
7341  }
7342  minCost = currCost;
7343  }
7344  }
7345  }
7346  }
7347  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
7348 }
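// Usage sketch (illustrative; memReq is assumed to come from
// vkGetBufferMemoryRequirements or similar):
//
//   VmaAllocationCreateInfo allocCreateInfo = {};
//   allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY; // e.g. a staging buffer
//   uint32_t memTypeIndex = UINT32_MAX;
//   VkResult res = vmaFindMemoryTypeIndex(
//       allocator, memReq.memoryTypeBits, &allocCreateInfo, &memTypeIndex);
//   // VK_SUCCESS: memTypeIndex is HOST_VISIBLE | HOST_COHERENT with the fewest
//   // missing preferred flags; otherwise VK_ERROR_FEATURE_NOT_PRESENT.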
7349 
7350 VkResult vmaCreatePool(
7351  VmaAllocator allocator,
7352  const VmaPoolCreateInfo* pCreateInfo,
7353  VmaPool* pPool)
7354 {
7355  VMA_ASSERT(allocator && pCreateInfo && pPool);
7356 
7357  VMA_DEBUG_LOG("vmaCreatePool");
7358 
7359  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7360 
7361  return allocator->CreatePool(pCreateInfo, pPool);
7362 }
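// Usage sketch (illustrative values):
//
//   VmaPoolCreateInfo poolInfo = {};
//   poolInfo.memoryTypeIndex = memTypeIndex;  // e.g. from vmaFindMemoryTypeIndex
//   poolInfo.blockSize = 16ull * 1024 * 1024; // 0 would pick the preferred size
//   poolInfo.maxBlockCount = 8;               // 0 means unlimited (SIZE_MAX)
//   VmaPool pool;
//   VkResult res = vmaCreatePool(allocator, &poolInfo, &pool);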
7363 
7364 void vmaDestroyPool(
7365  VmaAllocator allocator,
7366  VmaPool pool)
7367 {
7368  VMA_ASSERT(allocator && pool);
7369 
7370  VMA_DEBUG_LOG("vmaDestroyPool");
7371 
7372  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7373 
7374  allocator->DestroyPool(pool);
7375 }
7376 
7377 void vmaGetPoolStats(
7378  VmaAllocator allocator,
7379  VmaPool pool,
7380  VmaPoolStats* pPoolStats)
7381 {
7382  VMA_ASSERT(allocator && pool && pPoolStats);
7383 
7384  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7385 
7386  allocator->GetPoolStats(pool, pPoolStats);
7387 }
7388 
7389 void vmaMakePoolAllocationsLost(
7390  VmaAllocator allocator,
7391  VmaPool pool,
7392  size_t* pLostAllocationCount)
7393 {
7394  VMA_ASSERT(allocator && pool);
7395 
7396  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7397 
7398  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
7399 }
7400 
7401 VkResult vmaAllocateMemory(
7402  VmaAllocator allocator,
7403  const VkMemoryRequirements* pVkMemoryRequirements,
7404  const VmaAllocationCreateInfo* pCreateInfo,
7405  VmaAllocation* pAllocation,
7406  VmaAllocationInfo* pAllocationInfo)
7407 {
7408  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
7409 
7410  VMA_DEBUG_LOG("vmaAllocateMemory");
7411 
7412  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7413 
7414  VkResult result = allocator->AllocateMemory(
7415  *pVkMemoryRequirements,
7416  *pCreateInfo,
7417  VMA_SUBALLOCATION_TYPE_UNKNOWN,
7418  pAllocation);
7419 
7420  if(pAllocationInfo && result == VK_SUCCESS)
7421  {
7422  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
7423  }
7424 
7425  return result;
7426 }
7427 
7428 VkResult vmaAllocateMemoryForBuffer(
7429  VmaAllocator allocator,
7430  VkBuffer buffer,
7431  const VmaAllocationCreateInfo* pCreateInfo,
7432  VmaAllocation* pAllocation,
7433  VmaAllocationInfo* pAllocationInfo)
7434 {
7435  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
7436 
7437  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
7438 
7439  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7440 
7441  VkMemoryRequirements vkMemReq = {};
7442  (*allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements)(allocator->m_hDevice, buffer, &vkMemReq);
7443 
7444  VkResult result = allocator->AllocateMemory(
7445  vkMemReq,
7446  *pCreateInfo,
7447  VMA_SUBALLOCATION_TYPE_BUFFER,
7448  pAllocation);
7449 
7450  if(pAllocationInfo && result == VK_SUCCESS)
7451  {
7452  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
7453  }
7454 
7455  return result;
7456 }
7457 
7458 VkResult vmaAllocateMemoryForImage(
7459  VmaAllocator allocator,
7460  VkImage image,
7461  const VmaAllocationCreateInfo* pCreateInfo,
7462  VmaAllocation* pAllocation,
7463  VmaAllocationInfo* pAllocationInfo)
7464 {
7465  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
7466 
7467  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
7468 
7469  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7470 
7471  VkResult result = AllocateMemoryForImage(
7472  allocator,
7473  image,
7474  pCreateInfo,
7475  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
7476  pAllocation);
7477 
7478  if(pAllocationInfo && result == VK_SUCCESS)
7479  {
7480  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
7481  }
7482 
7483  return result;
7484 }
7485 
7486 void vmaFreeMemory(
7487  VmaAllocator allocator,
7488  VmaAllocation allocation)
7489 {
7490  VMA_ASSERT(allocator && allocation);
7491 
7492  VMA_DEBUG_LOG("vmaFreeMemory");
7493 
7494  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7495 
7496  allocator->FreeMemory(allocation);
7497 }
7498 
7499 void vmaGetAllocationInfo(
7500  VmaAllocator allocator,
7501  VmaAllocation allocation,
7502  VmaAllocationInfo* pAllocationInfo)
7503 {
7504  VMA_ASSERT(allocator && allocation && pAllocationInfo);
7505 
7506  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7507 
7508  allocator->GetAllocationInfo(allocation, pAllocationInfo);
7509 }
7510 
7511 void vmaSetAllocationUserData(
7512  VmaAllocator allocator,
7513  VmaAllocation allocation,
7514  void* pUserData)
7515 {
7516  VMA_ASSERT(allocator && allocation);
7517 
7518  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7519 
7520  allocation->SetUserData(pUserData);
7521 }
7522 
7523 void vmaCreateLostAllocation(
7524  VmaAllocator allocator,
7525  VmaAllocation* pAllocation)
7526 {
7527  VMA_ASSERT(allocator && pAllocation);
7528 
7529  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7530 
7531  allocator->CreateLostAllocation(pAllocation);
7532 }
7533 
7534 VkResult vmaMapMemory(
7535  VmaAllocator allocator,
7536  VmaAllocation allocation,
7537  void** ppData)
7538 {
7539  VMA_ASSERT(allocator && allocation && ppData);
7540 
7541  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7542 
7543  return vkMapMemory(allocator->m_hDevice, allocation->GetMemory(),
7544  allocation->GetOffset(), allocation->GetSize(), 0, ppData);
7545 }
7546 
7547 void vmaUnmapMemory(
7548  VmaAllocator allocator,
7549  VmaAllocation allocation)
7550 {
7551  VMA_ASSERT(allocator && allocation);
7552 
7553  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7554 
7555  vkUnmapMemory(allocator->m_hDevice, allocation->GetMemory());
7556 }
7557 
7558 void vmaUnmapPersistentlyMappedMemory(VmaAllocator allocator)
7559 {
7560  VMA_ASSERT(allocator);
7561 
7562  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7563 
7564  allocator->UnmapPersistentlyMappedMemory();
7565 }
7566 
7567 VkResult vmaMapPersistentlyMappedMemory(VmaAllocator allocator)
7568 {
7569  VMA_ASSERT(allocator);
7570 
7571  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7572 
7573  return allocator->MapPersistentlyMappedMemory();
7574 }
7575 
7576 VkResult vmaDefragment(
7577  VmaAllocator allocator,
7578  VmaAllocation* pAllocations,
7579  size_t allocationCount,
7580  VkBool32* pAllocationsChanged,
7581  const VmaDefragmentationInfo *pDefragmentationInfo,
7582  VmaDefragmentationStats* pDefragmentationStats)
7583 {
7584  VMA_ASSERT(allocator && pAllocations);
7585 
7586  VMA_DEBUG_LOG("vmaDefragment");
7587 
7588  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7589 
7590  return allocator->Defragment(pAllocations, allocationCount, pAllocationsChanged, pDefragmentationInfo, pDefragmentationStats);
7591 }
7592 
7593 VkResult vmaCreateBuffer(
7594  VmaAllocator allocator,
7595  const VkBufferCreateInfo* pBufferCreateInfo,
7596  const VmaAllocationCreateInfo* pAllocationCreateInfo,
7597  VkBuffer* pBuffer,
7598  VmaAllocation* pAllocation,
7599  VmaAllocationInfo* pAllocationInfo)
7600 {
7601  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
7602 
7603  VMA_DEBUG_LOG("vmaCreateBuffer");
7604 
7605  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7606 
7607  *pBuffer = VK_NULL_HANDLE;
7608  *pAllocation = VK_NULL_HANDLE;
7609 
7610  // 1. Create VkBuffer.
7611  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
7612  allocator->m_hDevice,
7613  pBufferCreateInfo,
7614  allocator->GetAllocationCallbacks(),
7615  pBuffer);
7616  if(res >= 0)
7617  {
7618  // 2. vkGetBufferMemoryRequirements.
7619  VkMemoryRequirements vkMemReq = {};
7620  (*allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements)(allocator->m_hDevice, *pBuffer, &vkMemReq);
7621 
7622  // 3. Allocate memory using allocator.
7623  res = allocator->AllocateMemory(
7624  vkMemReq,
7625  *pAllocationCreateInfo,
7626  VMA_SUBALLOCATION_TYPE_BUFFER,
7627  pAllocation);
7628  if(res >= 0)
7629  {
7630  // 4. Bind buffer with memory.
7631  res = (*allocator->GetVulkanFunctions().vkBindBufferMemory)(
7632  allocator->m_hDevice,
7633  *pBuffer,
7634  (*pAllocation)->GetMemory(),
7635  (*pAllocation)->GetOffset());
7636  if(res >= 0)
7637  {
7638  // All steps succeeded.
7639  if(pAllocationInfo != VMA_NULL)
7640  {
7641  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
7642  }
7643  return VK_SUCCESS;
7644  }
7645  allocator->FreeMemory(*pAllocation);
7646  *pAllocation = VK_NULL_HANDLE;
7647  return res;
7648  }
7649  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
7650  *pBuffer = VK_NULL_HANDLE;
7651  return res;
7652  }
7653  return res;
7654 }
7655 
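// Illustrative sketch, not part of this patch: the intended create/use/destroy
// pattern for the function above, assuming a valid VmaAllocator.
static VkResult ExampleCreateVertexBuffer(VmaAllocator allocator, VkBuffer* pBuffer, VmaAllocation* pAllocation)
{
    VkBufferCreateInfo bufInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufInfo.size = 65536;
    bufInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
    VmaAllocationCreateInfo allocInfo = {};
    allocInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
    // On failure the function cleans up after itself: both outputs stay VK_NULL_HANDLE.
    return vmaCreateBuffer(allocator, &bufInfo, &allocInfo, pBuffer, pAllocation, VMA_NULL);
    // Later, one call releases both the buffer and its memory:
    // vmaDestroyBuffer(allocator, *pBuffer, *pAllocation);
}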
7656 void vmaDestroyBuffer(
7657  VmaAllocator allocator,
7658  VkBuffer buffer,
7659  VmaAllocation allocation)
7660 {
7661  if(buffer != VK_NULL_HANDLE)
7662  {
7663  VMA_ASSERT(allocator);
7664 
7665  VMA_DEBUG_LOG("vmaDestroyBuffer");
7666 
7667  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7668 
7669  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
7670 
7671  allocator->FreeMemory(allocation);
7672  }
7673 }
7674 
7675 VkResult vmaCreateImage(
7676  VmaAllocator allocator,
7677  const VkImageCreateInfo* pImageCreateInfo,
7678  const VmaAllocationCreateInfo* pAllocationCreateInfo,
7679  VkImage* pImage,
7680  VmaAllocation* pAllocation,
7681  VmaAllocationInfo* pAllocationInfo)
7682 {
7683  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
7684 
7685  VMA_DEBUG_LOG("vmaCreateImage");
7686 
7687  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7688 
7689  *pImage = VK_NULL_HANDLE;
7690  *pAllocation = VK_NULL_HANDLE;
7691 
7692  // 1. Create VkImage.
7693  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
7694  allocator->m_hDevice,
7695  pImageCreateInfo,
7696  allocator->GetAllocationCallbacks(),
7697  pImage);
7698  if(res >= 0)
7699  {
7700  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
7701  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
7702  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
7703 
7704  // 2. Allocate memory using allocator.
7705  res = AllocateMemoryForImage(allocator, *pImage, pAllocationCreateInfo, suballocType, pAllocation);
7706  if(res >= 0)
7707  {
7708  // 3. Bind image with memory.
7709  res = (*allocator->GetVulkanFunctions().vkBindImageMemory)(
7710  allocator->m_hDevice,
7711  *pImage,
7712  (*pAllocation)->GetMemory(),
7713  (*pAllocation)->GetOffset());
7714  if(res >= 0)
7715  {
7716  // All steps succeeded.
7717  if(pAllocationInfo != VMA_NULL)
7718  {
7719  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
7720  }
7721  return VK_SUCCESS;
7722  }
7723  allocator->FreeMemory(*pAllocation);
7724  *pAllocation = VK_NULL_HANDLE;
7725  return res;
7726  }
7727  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
7728  *pImage = VK_NULL_HANDLE;
7729  return res;
7730  }
7731  return res;
7732 }
7733 
7734 void vmaDestroyImage(
7735  VmaAllocator allocator,
7736  VkImage image,
7737  VmaAllocation allocation)
7738 {
7739  if(image != VK_NULL_HANDLE)
7740  {
7741  VMA_ASSERT(allocator);
7742 
7743  VMA_DEBUG_LOG("vmaDestroyImage");
7744 
7745  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7746 
7747  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
7748 
7749  allocator->FreeMemory(allocation);
7750  }
7751 }
7752 
7753 #endif // #ifdef VMA_IMPLEMENTATION
1 //
2 // Copyright (c) 2017 Advanced Micro Devices, Inc. All rights reserved.
3 //
4 // Permission is hereby granted, free of charge, to any person obtaining a copy
5 // of this software and associated documentation files (the "Software"), to deal
6 // in the Software without restriction, including without limitation the rights
7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 // copies of the Software, and to permit persons to whom the Software is
9 // furnished to do so, subject to the following conditions:
10 //
11 // The above copyright notice and this permission notice shall be included in
12 // all copies or substantial portions of the Software.
13 //
14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
20 // THE SOFTWARE.
21 //
22 
23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
25 
387 #include <vulkan/vulkan.h>
388 
390 
394 VK_DEFINE_HANDLE(VmaAllocator)
395 
396 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
398  VmaAllocator allocator,
399  uint32_t memoryType,
400  VkDeviceMemory memory,
401  VkDeviceSize size);
403 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
404  VmaAllocator allocator,
405  uint32_t memoryType,
406  VkDeviceMemory memory,
407  VkDeviceSize size);
408 
416 typedef struct VmaDeviceMemoryCallbacks {
418  PFN_vmaAllocateDeviceMemoryFunction pfnAllocate;
420  PFN_vmaFreeDeviceMemoryFunction pfnFree;
421 } VmaDeviceMemoryCallbacks;
422 
424 typedef enum VmaAllocatorFlagBits {
426  VMA_ALLOCATOR_EXTERNALLY_SYNCHRONIZED_BIT = 0x00000001,
428  VMA_ALLOCATOR_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
429 } VmaAllocatorFlagBits;
430 
433 typedef VkFlags VmaAllocatorFlags;
434 
439 typedef struct VmaVulkanFunctions {
440  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
441  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
442  PFN_vkAllocateMemory vkAllocateMemory;
443  PFN_vkFreeMemory vkFreeMemory;
444  PFN_vkMapMemory vkMapMemory;
445  PFN_vkUnmapMemory vkUnmapMemory;
446  PFN_vkBindBufferMemory vkBindBufferMemory;
447  PFN_vkBindImageMemory vkBindImageMemory;
448  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
449  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
450  PFN_vkCreateBuffer vkCreateBuffer;
451  PFN_vkDestroyBuffer vkDestroyBuffer;
452  PFN_vkCreateImage vkCreateImage;
453  PFN_vkDestroyImage vkDestroyImage;
454 } VmaVulkanFunctions;
455 
457 typedef struct VmaAllocatorCreateInfo
458 {
461  VmaAllocatorFlags flags;
462 
463  VkPhysicalDevice physicalDevice;
465 
466  VkDevice device;
468 
470  VkDeviceSize preferredLargeHeapBlockSize;
471 
473  VkDeviceSize preferredSmallHeapBlockSize;
474 
475  const VkAllocationCallbacks* pAllocationCallbacks;
476  const VmaDeviceMemoryCallbacks* pDeviceMemoryCallbacks;
477 
492  uint32_t frameInUseCount;
510  const VkDeviceSize* pHeapSizeLimit;
522  const VmaVulkanFunctions* pVulkanFunctions;
523 } VmaAllocatorCreateInfo;
524 
526 VkResult vmaCreateAllocator(
527  const VmaAllocatorCreateInfo* pCreateInfo,
528  VmaAllocator* pAllocator);
529 
531 void vmaDestroyAllocator(
532  VmaAllocator allocator);
533 
538 void vmaGetPhysicalDeviceProperties(
539  VmaAllocator allocator,
540  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
541 
546 void vmaGetMemoryProperties(
547  VmaAllocator allocator,
548  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
549 
556 void vmaGetMemoryTypeProperties(
557  VmaAllocator allocator,
558  uint32_t memoryTypeIndex,
559  VkMemoryPropertyFlags* pFlags);
560 
569 void vmaSetCurrentFrameIndex(
570  VmaAllocator allocator,
571  uint32_t frameIndex);
572 
575 typedef struct VmaStatInfo
576 {
578  uint32_t blockCount;
580  uint32_t allocationCount;
584  VkDeviceSize usedBytes;
586  VkDeviceSize unusedBytes;
587  VkDeviceSize allocationSizeMin, allocationSizeAvg, allocationSizeMax;
588  VkDeviceSize unusedRangeSizeMin, unusedRangeSizeAvg, unusedRangeSizeMax;
589 } VmaStatInfo;
590 
592 typedef struct VmaStats
593 {
594  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
595  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
597 } VmaStats;
598 
600 void vmaCalculateStats(
601  VmaAllocator allocator,
602  VmaStats* pStats);
603 
604 #define VMA_STATS_STRING_ENABLED 1
605 
606 #if VMA_STATS_STRING_ENABLED
607 
609 
611 void vmaBuildStatsString(
612  VmaAllocator allocator,
613  char** ppStatsString,
614  VkBool32 detailedMap);
615 
616 void vmaFreeStatsString(
617  VmaAllocator allocator,
618  char* pStatsString);
619 
620 #endif // #if VMA_STATS_STRING_ENABLED
621 
624 
629 VK_DEFINE_HANDLE(VmaPool)
630 
631 typedef enum VmaMemoryUsage
632 {
637  VMA_MEMORY_USAGE_UNKNOWN = 0,
638 
640  VMA_MEMORY_USAGE_GPU_ONLY = 1,
641 
643  VMA_MEMORY_USAGE_CPU_ONLY = 2,
644 
647  VMA_MEMORY_USAGE_CPU_TO_GPU = 3,
648 
662  VMA_MEMORY_USAGE_GPU_TO_CPU = 4,
663 
664  VMA_MEMORY_USAGE_MAX_ENUM = 0x7FFFFFFF
665 } VmaMemoryUsage;
666 
667 typedef enum VmaAllocationCreateFlagBits {
675  VMA_ALLOCATION_CREATE_OWN_MEMORY_BIT = 0x00000001,
681  VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT = 0x00000002,
687  VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT = 0x00000004,
695  VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT = 0x00000008,
699  VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT = 0x00000010,
700  VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
701 } VmaAllocationCreateFlagBits;
702 
705 typedef VkFlags VmaAllocationCreateFlags;
706 
707 typedef struct VmaAllocationCreateInfo
708 {
711  VmaAllocationCreateFlags flags;
715  VmaMemoryUsage usage;
721  VkMemoryPropertyFlags requiredFlags;
727  VkMemoryPropertyFlags preferredFlags;
729  void* pUserData;
734  VmaPool pool;
735 } VmaAllocationCreateInfo;
736 
751 VkResult vmaFindMemoryTypeIndex(
752  VmaAllocator allocator,
753  uint32_t memoryTypeBits,
754  const VmaAllocationCreateInfo* pAllocationCreateInfo,
755  uint32_t* pMemoryTypeIndex);
756 
759 
764 typedef enum VmaPoolCreateFlagBits {
789  VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT = 0x00000002,
790  VMA_POOL_CREATE_PERSISTENT_MAP_BIT = 0x00000004,
791  VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
792 } VmaPoolCreateFlagBits;
793 
796 typedef VkFlags VmaPoolCreateFlags;
797 
800 typedef struct VmaPoolCreateInfo {
803  uint32_t memoryTypeIndex;
806  VmaPoolCreateFlags flags;
811  VkDeviceSize blockSize;
816  size_t minBlockCount;
819  size_t maxBlockCount;
838  uint32_t frameInUseCount;
839 } VmaPoolCreateInfo;
840 
843 typedef struct VmaPoolStats {
846  VkDeviceSize size;
849  VkDeviceSize unusedSize;
852  size_t allocationCount;
855  size_t unusedRangeCount;
862  VkDeviceSize unusedRangeSizeMax;
863 } VmaPoolStats;
864 
871 VkResult vmaCreatePool(
872  VmaAllocator allocator,
873  const VmaPoolCreateInfo* pCreateInfo,
874  VmaPool* pPool);
875 
878 void vmaDestroyPool(
879  VmaAllocator allocator,
880  VmaPool pool);
881 
888 void vmaGetPoolStats(
889  VmaAllocator allocator,
890  VmaPool pool,
891  VmaPoolStats* pPoolStats);
892 
899 void vmaMakePoolAllocationsLost(
900  VmaAllocator allocator,
901  VmaPool pool,
902  size_t* pLostAllocationCount);
903 
904 VK_DEFINE_HANDLE(VmaAllocation)
905 
906 
908 typedef struct VmaAllocationInfo {
913  uint32_t memoryType;
922  VkDeviceMemory deviceMemory;
927  VkDeviceSize offset;
932  VkDeviceSize size;
938  void* pMappedData;
943  void* pUserData;
944 } VmaAllocationInfo;
945 
956 VkResult vmaAllocateMemory(
957  VmaAllocator allocator,
958  const VkMemoryRequirements* pVkMemoryRequirements,
959  const VmaAllocationCreateInfo* pCreateInfo,
960  VmaAllocation* pAllocation,
961  VmaAllocationInfo* pAllocationInfo);
962 
969 VkResult vmaAllocateMemoryForBuffer(
970  VmaAllocator allocator,
971  VkBuffer buffer,
972  const VmaAllocationCreateInfo* pCreateInfo,
973  VmaAllocation* pAllocation,
974  VmaAllocationInfo* pAllocationInfo);
975 
977 VkResult vmaAllocateMemoryForImage(
978  VmaAllocator allocator,
979  VkImage image,
980  const VmaAllocationCreateInfo* pCreateInfo,
981  VmaAllocation* pAllocation,
982  VmaAllocationInfo* pAllocationInfo);
983 
985 void vmaFreeMemory(
986  VmaAllocator allocator,
987  VmaAllocation allocation);
988 
990 void vmaGetAllocationInfo(
991  VmaAllocator allocator,
992  VmaAllocation allocation,
993  VmaAllocationInfo* pAllocationInfo);
994 
996 void vmaSetAllocationUserData(
997  VmaAllocator allocator,
998  VmaAllocation allocation,
999  void* pUserData);
1000 
1011 void vmaCreateLostAllocation(
1012  VmaAllocator allocator,
1013  VmaAllocation* pAllocation);
1014 
1023 VkResult vmaMapMemory(
1024  VmaAllocator allocator,
1025  VmaAllocation allocation,
1026  void** ppData);
1027 
1028 void vmaUnmapMemory(
1029  VmaAllocator allocator,
1030  VmaAllocation allocation);
1031 
1053 void vmaUnmapPersistentlyMappedMemory(VmaAllocator allocator);
1054 
1062 VkResult vmaMapPersistentlyMappedMemory(VmaAllocator allocator);
1063 
1065 typedef struct VmaDefragmentationInfo {
1070  VkDeviceSize maxBytesToMove;
1074  uint32_t maxAllocationsToMove;
1076 } VmaDefragmentationInfo;
1077 
1079 typedef struct VmaDefragmentationStats {
1081  VkDeviceSize bytesMoved;
1083  VkDeviceSize bytesFreed;
1085  uint32_t allocationsMoved;
1087  uint32_t deviceMemoryBlocksFreed;
1088 } VmaDefragmentationStats;
1089 
1160 VkResult vmaDefragment(
1161  VmaAllocator allocator,
1162  VmaAllocation* pAllocations,
1163  size_t allocationCount,
1164  VkBool32* pAllocationsChanged,
1165  const VmaDefragmentationInfo *pDefragmentationInfo,
1166  VmaDefragmentationStats* pDefragmentationStats);
1167 
1170 
1193 VkResult vmaCreateBuffer(
1194  VmaAllocator allocator,
1195  const VkBufferCreateInfo* pBufferCreateInfo,
1196  const VmaAllocationCreateInfo* pAllocationCreateInfo,
1197  VkBuffer* pBuffer,
1198  VmaAllocation* pAllocation,
1199  VmaAllocationInfo* pAllocationInfo);
1200 
1209 void vmaDestroyBuffer(
1210  VmaAllocator allocator,
1211  VkBuffer buffer,
1212  VmaAllocation allocation);
1213 
1215 VkResult vmaCreateImage(
1216  VmaAllocator allocator,
1217  const VkImageCreateInfo* pImageCreateInfo,
1218  const VmaAllocationCreateInfo* pAllocationCreateInfo,
1219  VkImage* pImage,
1220  VmaAllocation* pAllocation,
1221  VmaAllocationInfo* pAllocationInfo);
1222 
1231 void vmaDestroyImage(
1232  VmaAllocator allocator,
1233  VkImage image,
1234  VmaAllocation allocation);
1235 
1238 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
1239 
1240 // For Visual Studio IntelliSense.
1241 #ifdef __INTELLISENSE__
1242 #define VMA_IMPLEMENTATION
1243 #endif
1244 
1245 #ifdef VMA_IMPLEMENTATION
1246 #undef VMA_IMPLEMENTATION
1247 
1248 #include <cstdint>
1249 #include <cstdlib>
1250 #include <cstring>
1251 
1252 /*******************************************************************************
1253 CONFIGURATION SECTION
1254 
1255 Define some of these macros before each #include of this header, or change them
1256 here, if you need behavior other than the default for your environment.
1257 */
1258 
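// Illustrative sketch, not part of this patch: a client translation unit
// overriding configuration macros before including the header, as described
// above. MY_ENGINE_ASSERT is a hypothetical client-side macro.
//
//   #define VMA_ASSERT(expr) MY_ENGINE_ASSERT(expr)
//   #define VMA_DEBUG_MARGIN 16
//   #define VMA_IMPLEMENTATION
//   #include "vk_mem_alloc.h"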
1259 /*
1260 Define this macro to 1 to make the library fetch pointers to Vulkan functions
1261 internally, like:
1262 
1263  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
1264 
1265 Define to 0 if you are going to provide your own pointers to Vulkan functions via
1266 VmaAllocatorCreateInfo::pVulkanFunctions.
1267 */
1268 #ifndef VMA_STATIC_VULKAN_FUNCTIONS
1269 #define VMA_STATIC_VULKAN_FUNCTIONS 1
1270 #endif
1271 
1272 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
1273 //#define VMA_USE_STL_CONTAINERS 1
1274 
1275 /* Set this macro to 1 to make the library include and use the STL containers:
1276 std::pair, std::vector, std::list, std::unordered_map.
1277 
1278 Set it to 0 or leave it undefined to make the library use its own implementations
1279 of the containers.
1280 */
1281 #if VMA_USE_STL_CONTAINERS
1282  #define VMA_USE_STL_VECTOR 1
1283  #define VMA_USE_STL_UNORDERED_MAP 1
1284  #define VMA_USE_STL_LIST 1
1285 #endif
1286 
1287 #if VMA_USE_STL_VECTOR
1288  #include <vector>
1289 #endif
1290 
1291 #if VMA_USE_STL_UNORDERED_MAP
1292  #include <unordered_map>
1293 #endif
1294 
1295 #if VMA_USE_STL_LIST
1296  #include <list>
1297 #endif
1298 
1299 /*
1300 The following headers are used only in this CONFIGURATION section, so feel free
1301 to remove them if they are not needed.
1302 */
1303 #include <cassert> // for assert
1304 #include <algorithm> // for min, max
1305 #include <mutex> // for std::mutex
1306 #include <atomic> // for std::atomic
1307 
1308 #if !defined(_WIN32)
1309  #include <malloc.h> // for aligned_alloc()
1310 #endif
1311 
1312 // Normal assert to check for programmer's errors, especially in Debug configuration.
1313 #ifndef VMA_ASSERT
1314  #ifdef _DEBUG
1315  #define VMA_ASSERT(expr) assert(expr)
1316  #else
1317  #define VMA_ASSERT(expr)
1318  #endif
1319 #endif
1320 
1321 // Assert that will be called very often, like inside data structures e.g. operator[].
1322 // Making it non-empty can make the program slow.
1323 #ifndef VMA_HEAVY_ASSERT
1324  #ifdef _DEBUG
1325  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
1326  #else
1327  #define VMA_HEAVY_ASSERT(expr)
1328  #endif
1329 #endif
1330 
1331 #ifndef VMA_NULL
1332  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
1333  #define VMA_NULL nullptr
1334 #endif
1335 
1336 #ifndef VMA_ALIGN_OF
1337  #define VMA_ALIGN_OF(type) (__alignof(type))
1338 #endif
1339 
1340 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
1341  #if defined(_WIN32)
1342  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
1343  #else
1344  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
1345  #endif
1346 #endif
1347 
1348 #ifndef VMA_SYSTEM_FREE
1349  #if defined(_WIN32)
1350  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
1351  #else
1352  #define VMA_SYSTEM_FREE(ptr) free(ptr)
1353  #endif
1354 #endif
1355 
1356 #ifndef VMA_MIN
1357  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
1358 #endif
1359 
1360 #ifndef VMA_MAX
1361  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
1362 #endif
1363 
1364 #ifndef VMA_SWAP
1365  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
1366 #endif
1367 
1368 #ifndef VMA_SORT
1369  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
1370 #endif
1371 
1372 #ifndef VMA_DEBUG_LOG
1373  #define VMA_DEBUG_LOG(format, ...)
1374  /*
1375  #define VMA_DEBUG_LOG(format, ...) do { \
1376  printf(format, __VA_ARGS__); \
1377  printf("\n"); \
1378  } while(false)
1379  */
1380 #endif
1381 
1382 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
1383 #if VMA_STATS_STRING_ENABLED
1384  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
1385  {
1386  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
1387  }
1388  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
1389  {
1390  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
1391  }
1392  static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
1393  {
1394  snprintf(outStr, strLen, "%p", ptr);
1395  }
1396 #endif
1397 
1398 #ifndef VMA_MUTEX
1399  class VmaMutex
1400  {
1401  public:
1402  VmaMutex() { }
1403  ~VmaMutex() { }
1404  void Lock() { m_Mutex.lock(); }
1405  void Unlock() { m_Mutex.unlock(); }
1406  private:
1407  std::mutex m_Mutex;
1408  };
1409  #define VMA_MUTEX VmaMutex
1410 #endif
1411 
1412 /*
1413 If providing your own implementation, you need to implement a subset of std::atomic:
1414 
1415 - Constructor(uint32_t desired)
1416 - uint32_t load() const
1417 - void store(uint32_t desired)
1418 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
1419 */
1420 #ifndef VMA_ATOMIC_UINT32
1421  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
1422 #endif
1423 
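// Illustrative sketch, not part of this patch: a replacement type implementing
// exactly the subset listed in the comment above. Shown without real
// synchronization for brevity; an actual replacement must be thread-safe.
//
//   class MyAtomicUint32
//   {
//   public:
//       MyAtomicUint32(uint32_t desired) : m_Value(desired) { }
//       uint32_t load() const { return m_Value; }
//       void store(uint32_t desired) { m_Value = desired; }
//       bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
//       {
//           if(m_Value == expected) { m_Value = desired; return true; }
//           expected = m_Value;
//           return false;
//       }
//   private:
//       uint32_t m_Value;
//   };
//   #define VMA_ATOMIC_UINT32 MyAtomicUint32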
1424 #ifndef VMA_BEST_FIT
1425 
1437  #define VMA_BEST_FIT (1)
1438 #endif
1439 
1440 #ifndef VMA_DEBUG_ALWAYS_OWN_MEMORY
1441 
1445  #define VMA_DEBUG_ALWAYS_OWN_MEMORY (0)
1446 #endif
1447 
1448 #ifndef VMA_DEBUG_ALIGNMENT
1449 
1453  #define VMA_DEBUG_ALIGNMENT (1)
1454 #endif
1455 
1456 #ifndef VMA_DEBUG_MARGIN
1457 
1461  #define VMA_DEBUG_MARGIN (0)
1462 #endif
1463 
1464 #ifndef VMA_DEBUG_GLOBAL_MUTEX
1465 
1469  #define VMA_DEBUG_GLOBAL_MUTEX (0)
1470 #endif
1471 
1472 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
1473 
1477  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
1478 #endif
1479 
1480 #ifndef VMA_SMALL_HEAP_MAX_SIZE
1481  #define VMA_SMALL_HEAP_MAX_SIZE (512 * 1024 * 1024)
1483 #endif
1484 
1485 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
1486  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256 * 1024 * 1024)
1488 #endif
1489 
1490 #ifndef VMA_DEFAULT_SMALL_HEAP_BLOCK_SIZE
1491  #define VMA_DEFAULT_SMALL_HEAP_BLOCK_SIZE (64 * 1024 * 1024)
1493 #endif
1494 
1495 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
1496 
1497 /*******************************************************************************
1498 END OF CONFIGURATION
1499 */
1500 
1501 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
1502  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
1503 
1504 // Returns number of bits set to 1 in (v).
1505 static inline uint32_t CountBitsSet(uint32_t v)
1506 {
1507  uint32_t c = v - ((v >> 1) & 0x55555555);
1508  c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
1509  c = ((c >> 4) + c) & 0x0F0F0F0F;
1510  c = ((c >> 8) + c) & 0x00FF00FF;
1511  c = ((c >> 16) + c) & 0x0000FFFF;
1512  return c;
1513 }
1514 
1515 // Aligns given value up to the nearest multiple of align. For example: VmaAlignUp(11, 8) = 16.
1516 // Use types like uint32_t, uint64_t as T.
1517 template <typename T>
1518 static inline T VmaAlignUp(T val, T align)
1519 {
1520  return (val + align - 1) / align * align;
1521 }
1522 
1523 // Division with mathematical rounding to nearest number.
1524 template <typename T>
1525 inline T VmaRoundDiv(T x, T y)
1526 {
1527  return (x + (y / (T)2)) / y;
1528 }
1529 
1530 #ifndef VMA_SORT
1531 
1532 template<typename Iterator, typename Compare>
1533 Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
1534 {
1535  Iterator centerValue = end; --centerValue;
1536  Iterator insertIndex = beg;
1537  for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
1538  {
1539  if(cmp(*memTypeIndex, *centerValue))
1540  {
1541  if(insertIndex != memTypeIndex)
1542  {
1543  VMA_SWAP(*memTypeIndex, *insertIndex);
1544  }
1545  ++insertIndex;
1546  }
1547  }
1548  if(insertIndex != centerValue)
1549  {
1550  VMA_SWAP(*insertIndex, *centerValue);
1551  }
1552  return insertIndex;
1553 }
1554 
1555 template<typename Iterator, typename Compare>
1556 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
1557 {
1558  if(beg < end)
1559  {
1560  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
1561  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
1562  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
1563  }
1564 }
1565 
1566 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
1567 
1568 #endif // #ifndef VMA_SORT
1569 
1570 /*
1571 Returns true if two memory blocks occupy overlapping pages.
1572 ResourceA must be at a lower memory offset than ResourceB.
1573 
1574 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
1575 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
1576 */
1577 static inline bool VmaBlocksOnSamePage(
1578  VkDeviceSize resourceAOffset,
1579  VkDeviceSize resourceASize,
1580  VkDeviceSize resourceBOffset,
1581  VkDeviceSize pageSize)
1582 {
1583  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
1584  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
1585  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
1586  VkDeviceSize resourceBStart = resourceBOffset;
1587  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
1588  return resourceAEndPage == resourceBStartPage;
1589 }
1590 
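// Illustrative worked example, not part of this patch, with pageSize = 4096
// (a power of two, as the masking above assumes):
// - Resource A at offset 0, size 100: its last byte 99 lies on page 0.
// - Resource B at offset 4000: page 0 as well, so the function returns true
//   and bufferImageGranularity may have to be respected between them.
// - Resource B at offset 4096 instead: page 1, so the function returns false.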
1591 enum VmaSuballocationType
1592 {
1593  VMA_SUBALLOCATION_TYPE_FREE = 0,
1594  VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
1595  VMA_SUBALLOCATION_TYPE_BUFFER = 2,
1596  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
1597  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
1598  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
1599  VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
1600 };
1601 
1602 /*
1603 Returns true if the given suballocation types could conflict and must respect
1604 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one of them is
1605 a buffer or a linear image and the other is an optimal image. If a type is
1606 unknown, the function behaves conservatively.
1607 */
1608 static inline bool VmaIsBufferImageGranularityConflict(
1609  VmaSuballocationType suballocType1,
1610  VmaSuballocationType suballocType2)
1611 {
1612  if(suballocType1 > suballocType2)
1613  {
1614  VMA_SWAP(suballocType1, suballocType2);
1615  }
1616 
1617  switch(suballocType1)
1618  {
1619  case VMA_SUBALLOCATION_TYPE_FREE:
1620  return false;
1621  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
1622  return true;
1623  case VMA_SUBALLOCATION_TYPE_BUFFER:
1624  return
1625  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
1626  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
1627  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
1628  return
1629  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
1630  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
1631  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
1632  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
1633  return
1634  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
1635  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
1636  return false;
1637  default:
1638  VMA_ASSERT(0);
1639  return true;
1640  }
1641 }
1642 
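// For example, VMA_SUBALLOCATION_TYPE_BUFFER next to
// VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL conflicts (returns true), while two
// buffers, or a buffer next to a linear image, never do.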
1643 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
1644 struct VmaMutexLock
1645 {
1646 public:
1647  VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
1648  m_pMutex(useMutex ? &mutex : VMA_NULL)
1649  {
1650  if(m_pMutex)
1651  {
1652  m_pMutex->Lock();
1653  }
1654  }
1655 
1656  ~VmaMutexLock()
1657  {
1658  if(m_pMutex)
1659  {
1660  m_pMutex->Unlock();
1661  }
1662  }
1663 
1664 private:
1665  VMA_MUTEX* m_pMutex;
1666 };
1667 
1668 #if VMA_DEBUG_GLOBAL_MUTEX
1669  static VMA_MUTEX gDebugGlobalMutex;
1670  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
1671 #else
1672  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
1673 #endif
1674 
1675 // Minimum size of a free suballocation to register it in the free suballocation collection.
1676 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
1677 
1678 /*
1679 Performs binary search and returns an iterator to the first element that is
1680 greater than or equal to (key), according to comparison (cmp).
1681 
1682 Cmp should return true if its first argument is less than its second argument.
1683 
1684 The returned iterator points to the found element if it is present in the
1685 collection, or to the place where a new element with value (key) should be inserted.
1686 */
1687 template <typename IterT, typename KeyT, typename CmpT>
1688 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpT cmp)
1689 {
1690  size_t down = 0, up = (end - beg);
1691  while(down < up)
1692  {
1693  const size_t mid = (down + up) / 2;
1694  if(cmp(*(beg+mid), key))
1695  {
1696  down = mid + 1;
1697  }
1698  else
1699  {
1700  up = mid;
1701  }
1702  }
1703  return beg + down;
1704 }
1705 
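// Illustrative sketch, not part of this patch: lower-bound search over a sorted
// plain array with the helper above.
static const VkDeviceSize* ExampleLowerBound()
{
    static const VkDeviceSize sizes[] = { 16, 32, 64, 256 };
    // Returns a pointer to 256 - the first element not less than 100.
    // A key of 64 would yield a pointer to 64 itself.
    return VmaBinaryFindFirstNotLess(
        sizes, sizes + 4, (VkDeviceSize)100,
        [](VkDeviceSize lhs, VkDeviceSize rhs) { return lhs < rhs; });
}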
1707 // Memory allocation
1708 
1709 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
1710 {
1711  if((pAllocationCallbacks != VMA_NULL) &&
1712  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
1713  {
1714  return (*pAllocationCallbacks->pfnAllocation)(
1715  pAllocationCallbacks->pUserData,
1716  size,
1717  alignment,
1718  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1719  }
1720  else
1721  {
1722  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
1723  }
1724 }
1725 
1726 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
1727 {
1728  if((pAllocationCallbacks != VMA_NULL) &&
1729  (pAllocationCallbacks->pfnFree != VMA_NULL))
1730  {
1731  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
1732  }
1733  else
1734  {
1735  VMA_SYSTEM_FREE(ptr);
1736  }
1737 }
1738 
1739 template<typename T>
1740 static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
1741 {
1742  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
1743 }
1744 
1745 template<typename T>
1746 static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
1747 {
1748  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
1749 }
1750 
1751 #define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
1752 
1753 #define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
1754 
1755 template<typename T>
1756 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
1757 {
1758  ptr->~T();
1759  VmaFree(pAllocationCallbacks, ptr);
1760 }
1761 
1762 template<typename T>
1763 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
1764 {
1765  if(ptr != VMA_NULL)
1766  {
1767  for(size_t i = count; i--; )
1768  {
1769  ptr[i].~T();
1770  }
1771  VmaFree(pAllocationCallbacks, ptr);
1772  }
1773 }
1774 
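// Illustrative sketch, not part of this patch: pairing vma_new with vma_delete,
// assuming the default VmaMutex defined above (i.e. VMA_MUTEX was not
// overridden). pCallbacks may be VMA_NULL, in which case the VMA_SYSTEM_*
// fallbacks are used.
static void ExampleVmaNewDelete(const VkAllocationCallbacks* pCallbacks)
{
    VmaMutex* pMutex = vma_new(pCallbacks, VmaMutex)(); // allocate + placement-construct
    pMutex->Lock();
    pMutex->Unlock();
    vma_delete(pCallbacks, pMutex); // destruct + free through the same callbacks
}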
1775 // STL-compatible allocator.
1776 template<typename T>
1777 class VmaStlAllocator
1778 {
1779 public:
1780  const VkAllocationCallbacks* const m_pCallbacks;
1781  typedef T value_type;
1782 
1783  VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
1784  template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }
1785 
1786  T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
1787  void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
1788 
1789  template<typename U>
1790  bool operator==(const VmaStlAllocator<U>& rhs) const
1791  {
1792  return m_pCallbacks == rhs.m_pCallbacks;
1793  }
1794  template<typename U>
1795  bool operator!=(const VmaStlAllocator<U>& rhs) const
1796  {
1797  return m_pCallbacks != rhs.m_pCallbacks;
1798  }
1799 
1800  VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
1801 };
1802 
1803 #if VMA_USE_STL_VECTOR
1804 
1805 #define VmaVector std::vector
1806 
1807 template<typename T, typename allocatorT>
1808 static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
1809 {
1810  vec.insert(vec.begin() + index, item);
1811 }
1812 
1813 template<typename T, typename allocatorT>
1814 static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
1815 {
1816  vec.erase(vec.begin() + index);
1817 }
1818 
1819 #else // #if VMA_USE_STL_VECTOR
1820 
1821 /* Class with an interface compatible with a subset of std::vector.
1822 T must be POD because constructors and destructors are not called and memcpy is
1823 used for these objects. */
1824 template<typename T, typename AllocatorT>
1825 class VmaVector
1826 {
1827 public:
1828  typedef T value_type;
1829 
1830  VmaVector(const AllocatorT& allocator) :
1831  m_Allocator(allocator),
1832  m_pArray(VMA_NULL),
1833  m_Count(0),
1834  m_Capacity(0)
1835  {
1836  }
1837 
1838  VmaVector(size_t count, const AllocatorT& allocator) :
1839  m_Allocator(allocator),
1840  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
1841  m_Count(count),
1842  m_Capacity(count)
1843  {
1844  }
1845 
1846  VmaVector(const VmaVector<T, AllocatorT>& src) :
1847  m_Allocator(src.m_Allocator),
1848  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
1849  m_Count(src.m_Count),
1850  m_Capacity(src.m_Count)
1851  {
1852  if(m_Count != 0)
1853  {
1854  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
1855  }
1856  }
1857 
1858  ~VmaVector()
1859  {
1860  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
1861  }
1862 
1863  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
1864  {
1865  if(&rhs != this)
1866  {
1867  resize(rhs.m_Count);
1868  if(m_Count != 0)
1869  {
1870  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
1871  }
1872  }
1873  return *this;
1874  }
1875 
1876  bool empty() const { return m_Count == 0; }
1877  size_t size() const { return m_Count; }
1878  T* data() { return m_pArray; }
1879  const T* data() const { return m_pArray; }
1880 
1881  T& operator[](size_t index)
1882  {
1883  VMA_HEAVY_ASSERT(index < m_Count);
1884  return m_pArray[index];
1885  }
1886  const T& operator[](size_t index) const
1887  {
1888  VMA_HEAVY_ASSERT(index < m_Count);
1889  return m_pArray[index];
1890  }
1891 
1892  T& front()
1893  {
1894  VMA_HEAVY_ASSERT(m_Count > 0);
1895  return m_pArray[0];
1896  }
1897  const T& front() const
1898  {
1899  VMA_HEAVY_ASSERT(m_Count > 0);
1900  return m_pArray[0];
1901  }
1902  T& back()
1903  {
1904  VMA_HEAVY_ASSERT(m_Count > 0);
1905  return m_pArray[m_Count - 1];
1906  }
1907  const T& back() const
1908  {
1909  VMA_HEAVY_ASSERT(m_Count > 0);
1910  return m_pArray[m_Count - 1];
1911  }
1912 
1913  void reserve(size_t newCapacity, bool freeMemory = false)
1914  {
1915  newCapacity = VMA_MAX(newCapacity, m_Count);
1916 
1917  if((newCapacity < m_Capacity) && !freeMemory)
1918  {
1919  newCapacity = m_Capacity;
1920  }
1921 
1922  if(newCapacity != m_Capacity)
1923  {
1924  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
1925  if(m_Count != 0)
1926  {
1927  memcpy(newArray, m_pArray, m_Count * sizeof(T));
1928  }
1929  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
1930  m_Capacity = newCapacity;
1931  m_pArray = newArray;
1932  }
1933  }
1934 
1935  void resize(size_t newCount, bool freeMemory = false)
1936  {
1937  size_t newCapacity = m_Capacity;
1938  if(newCount > m_Capacity)
1939  {
1940  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
1941  }
1942  else if(freeMemory)
1943  {
1944  newCapacity = newCount;
1945  }
1946 
1947  if(newCapacity != m_Capacity)
1948  {
1949  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
1950  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
1951  if(elementsToCopy != 0)
1952  {
1953  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
1954  }
1955  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
1956  m_Capacity = newCapacity;
1957  m_pArray = newArray;
1958  }
1959 
1960  m_Count = newCount;
1961  }
1962 
1963  void clear(bool freeMemory = false)
1964  {
1965  resize(0, freeMemory);
1966  }
1967 
1968  void insert(size_t index, const T& src)
1969  {
1970  VMA_HEAVY_ASSERT(index <= m_Count);
1971  const size_t oldCount = size();
1972  resize(oldCount + 1);
1973  if(index < oldCount)
1974  {
1975  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
1976  }
1977  m_pArray[index] = src;
1978  }
1979 
1980  void remove(size_t index)
1981  {
1982  VMA_HEAVY_ASSERT(index < m_Count);
1983  const size_t oldCount = size();
1984  if(index < oldCount - 1)
1985  {
1986  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
1987  }
1988  resize(oldCount - 1);
1989  }
1990 
1991  void push_back(const T& src)
1992  {
1993  const size_t newIndex = size();
1994  resize(newIndex + 1);
1995  m_pArray[newIndex] = src;
1996  }
1997 
1998  void pop_back()
1999  {
2000  VMA_HEAVY_ASSERT(m_Count > 0);
2001  resize(size() - 1);
2002  }
2003 
2004  void push_front(const T& src)
2005  {
2006  insert(0, src);
2007  }
2008 
2009  void pop_front()
2010  {
2011  VMA_HEAVY_ASSERT(m_Count > 0);
2012  remove(0);
2013  }
2014 
2015  typedef T* iterator;
2016 
2017  iterator begin() { return m_pArray; }
2018  iterator end() { return m_pArray + m_Count; }
2019 
2020 private:
2021  AllocatorT m_Allocator;
2022  T* m_pArray;
2023  size_t m_Count;
2024  size_t m_Capacity;
2025 };
2026 
2027 template<typename T, typename allocatorT>
2028 static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
2029 {
2030  vec.insert(index, item);
2031 }
2032 
2033 template<typename T, typename allocatorT>
2034 static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
2035 {
2036  vec.remove(index);
2037 }
2038 
2039 #endif // #if VMA_USE_STL_VECTOR
2040 
2041 template<typename CmpLess, typename VectorT>
2042 size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
2043 {
2044  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
2045  vector.data(),
2046  vector.data() + vector.size(),
2047  value,
2048  CmpLess()) - vector.data();
2049  VmaVectorInsert(vector, indexToInsert, value);
2050  return indexToInsert;
2051 }
2052 
2053 template<typename CmpLess, typename VectorT>
2054 bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
2055 {
2056  CmpLess comparator;
2057  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
2058  vector.begin(),
2059  vector.end(),
2060  value,
2061  comparator);
2062  if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
2063  {
2064  size_t indexToRemove = it - vector.begin();
2065  VmaVectorRemove(vector, indexToRemove);
2066  return true;
2067  }
2068  return false;
2069 }
2070 
2071 template<typename CmpLess, typename VectorT>
2072 size_t VmaVectorFindSorted(const VectorT& vector, const typename VectorT::value_type& value)
2073 {
2074  CmpLess comparator;
2075  const typename VectorT::value_type* it = VmaBinaryFindFirstNotLess(
2076  vector.data(),
2077  vector.data() + vector.size(),
2078  value,
2079  comparator);
2080  if((it != vector.data() + vector.size()) && !comparator(*it, value) && !comparator(value, *it))
2081  {
2082  return it - vector.data();
2083  }
2084  else
2085  {
2086  return vector.size();
2087  }
2088 }
2089 
2091 // class VmaPoolAllocator
2092 
2093 /*
2094 Allocator for objects of type T using a list of arrays (pools) to speed up
2095 allocation. Number of elements that can be allocated is not bounded because
2096 allocator can create multiple blocks.
2097 */
2098 template<typename T>
2099 class VmaPoolAllocator
2100 {
2101 public:
2102  VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
2103  ~VmaPoolAllocator();
2104  void Clear();
2105  T* Alloc();
2106  void Free(T* ptr);
2107 
2108 private:
2109  union Item
2110  {
2111  uint32_t NextFreeIndex;
2112  T Value;
2113  };
2114 
2115  struct ItemBlock
2116  {
2117  Item* pItems;
2118  uint32_t FirstFreeIndex;
2119  };
2120 
2121  const VkAllocationCallbacks* m_pAllocationCallbacks;
2122  size_t m_ItemsPerBlock;
2123  VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;
2124 
2125  ItemBlock& CreateNewBlock();
2126 };
2127 
2128 template<typename T>
2129 VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
2130  m_pAllocationCallbacks(pAllocationCallbacks),
2131  m_ItemsPerBlock(itemsPerBlock),
2132  m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
2133 {
2134  VMA_ASSERT(itemsPerBlock > 0);
2135 }
2136 
2137 template<typename T>
2138 VmaPoolAllocator<T>::~VmaPoolAllocator()
2139 {
2140  Clear();
2141 }
2142 
2143 template<typename T>
2144 void VmaPoolAllocator<T>::Clear()
2145 {
2146  for(size_t i = m_ItemBlocks.size(); i--; )
2147  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
2148  m_ItemBlocks.clear();
2149 }
2150 
2151 template<typename T>
2152 T* VmaPoolAllocator<T>::Alloc()
2153 {
2154  for(size_t i = m_ItemBlocks.size(); i--; )
2155  {
2156  ItemBlock& block = m_ItemBlocks[i];
2157  // This block has some free items: Use first one.
2158  if(block.FirstFreeIndex != UINT32_MAX)
2159  {
2160  Item* const pItem = &block.pItems[block.FirstFreeIndex];
2161  block.FirstFreeIndex = pItem->NextFreeIndex;
2162  return &pItem->Value;
2163  }
2164  }
2165 
2166  // No block has free item: Create new one and use it.
2167  ItemBlock& newBlock = CreateNewBlock();
2168  Item* const pItem = &newBlock.pItems[0];
2169  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
2170  return &pItem->Value;
2171 }
2172 
2173 template<typename T>
2174 void VmaPoolAllocator<T>::Free(T* ptr)
2175 {
2176  // Search all memory blocks to find ptr.
2177  for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
2178  {
2179  ItemBlock& block = m_ItemBlocks[i];
2180 
2181  // Casting to union.
2182  Item* pItemPtr;
2183  memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
2184 
2185  // Check if pItemPtr is in address range of this block.
2186  if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
2187  {
2188  const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
2189  pItemPtr->NextFreeIndex = block.FirstFreeIndex;
2190  block.FirstFreeIndex = index;
2191  return;
2192  }
2193  }
2194  VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
2195 }
2196 
2197 template<typename T>
2198 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
2199 {
2200  ItemBlock newBlock = {
2201  vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };
2202 
2203  m_ItemBlocks.push_back(newBlock);
2204 
2205  // Setup singly-linked list of all free items in this block.
2206  for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
2207  newBlock.pItems[i].NextFreeIndex = i + 1;
2208  newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
2209  return m_ItemBlocks.back();
2210 }
2211 
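// Illustrative sketch, not part of this patch: the pool allocator above serving
// fixed-type items from its internal blocks.
static void ExamplePoolAllocator(const VkAllocationCallbacks* pCallbacks)
{
    VmaPoolAllocator<uint32_t> pool(pCallbacks, 32); // 32 items per block
    uint32_t* pA = pool.Alloc(); // first Alloc creates the first block
    uint32_t* pB = pool.Alloc();
    pool.Free(pA); // pA's slot becomes the head of its block's free list
    uint32_t* pC = pool.Alloc(); // reuses pA's slot - no new block needed
    pool.Free(pB);
    pool.Free(pC);
} // ~VmaPoolAllocator calls Clear() and releases the blocks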
2213 // class VmaRawList, VmaList
2214 
2215 #if VMA_USE_STL_LIST
2216 
2217 #define VmaList std::list
2218 
2219 #else // #if VMA_USE_STL_LIST
2220 
2221 template<typename T>
2222 struct VmaListItem
2223 {
2224  VmaListItem* pPrev;
2225  VmaListItem* pNext;
2226  T Value;
2227 };
2228 
2229 // Doubly linked list.
2230 template<typename T>
2231 class VmaRawList
2232 {
2233 public:
2234  typedef VmaListItem<T> ItemType;
2235 
2236  VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
2237  ~VmaRawList();
2238  void Clear();
2239 
2240  size_t GetCount() const { return m_Count; }
2241  bool IsEmpty() const { return m_Count == 0; }
2242 
2243  ItemType* Front() { return m_pFront; }
2244  const ItemType* Front() const { return m_pFront; }
2245  ItemType* Back() { return m_pBack; }
2246  const ItemType* Back() const { return m_pBack; }
2247 
2248  ItemType* PushBack();
2249  ItemType* PushFront();
2250  ItemType* PushBack(const T& value);
2251  ItemType* PushFront(const T& value);
2252  void PopBack();
2253  void PopFront();
2254 
2255  // Item can be null - it means PushBack.
2256  ItemType* InsertBefore(ItemType* pItem);
2257  // Item can be null - it means PushFront.
2258  ItemType* InsertAfter(ItemType* pItem);
2259 
2260  ItemType* InsertBefore(ItemType* pItem, const T& value);
2261  ItemType* InsertAfter(ItemType* pItem, const T& value);
2262 
2263  void Remove(ItemType* pItem);
2264 
2265 private:
2266  const VkAllocationCallbacks* const m_pAllocationCallbacks;
2267  VmaPoolAllocator<ItemType> m_ItemAllocator;
2268  ItemType* m_pFront;
2269  ItemType* m_pBack;
2270  size_t m_Count;
2271 
2272  // Declared not defined, to block copy constructor and assignment operator.
2273  VmaRawList(const VmaRawList<T>& src);
2274  VmaRawList<T>& operator=(const VmaRawList<T>& rhs);
2275 };
2276 
2277 template<typename T>
2278 VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
2279  m_pAllocationCallbacks(pAllocationCallbacks),
2280  m_ItemAllocator(pAllocationCallbacks, 128),
2281  m_pFront(VMA_NULL),
2282  m_pBack(VMA_NULL),
2283  m_Count(0)
2284 {
2285 }
2286 
2287 template<typename T>
2288 VmaRawList<T>::~VmaRawList()
2289 {
2290  // Intentionally not calling Clear, because that would spend unnecessary
2291  // computation returning all items to m_ItemAllocator as free.
2292 }
2293 
2294 template<typename T>
2295 void VmaRawList<T>::Clear()
2296 {
2297  if(IsEmpty() == false)
2298  {
2299  ItemType* pItem = m_pBack;
2300  while(pItem != VMA_NULL)
2301  {
2302  ItemType* const pPrevItem = pItem->pPrev;
2303  m_ItemAllocator.Free(pItem);
2304  pItem = pPrevItem;
2305  }
2306  m_pFront = VMA_NULL;
2307  m_pBack = VMA_NULL;
2308  m_Count = 0;
2309  }
2310 }
2311 
2312 template<typename T>
2313 VmaListItem<T>* VmaRawList<T>::PushBack()
2314 {
2315  ItemType* const pNewItem = m_ItemAllocator.Alloc();
2316  pNewItem->pNext = VMA_NULL;
2317  if(IsEmpty())
2318  {
2319  pNewItem->pPrev = VMA_NULL;
2320  m_pFront = pNewItem;
2321  m_pBack = pNewItem;
2322  m_Count = 1;
2323  }
2324  else
2325  {
2326  pNewItem->pPrev = m_pBack;
2327  m_pBack->pNext = pNewItem;
2328  m_pBack = pNewItem;
2329  ++m_Count;
2330  }
2331  return pNewItem;
2332 }
2333 
2334 template<typename T>
2335 VmaListItem<T>* VmaRawList<T>::PushFront()
2336 {
2337  ItemType* const pNewItem = m_ItemAllocator.Alloc();
2338  pNewItem->pPrev = VMA_NULL;
2339  if(IsEmpty())
2340  {
2341  pNewItem->pNext = VMA_NULL;
2342  m_pFront = pNewItem;
2343  m_pBack = pNewItem;
2344  m_Count = 1;
2345  }
2346  else
2347  {
2348  pNewItem->pNext = m_pFront;
2349  m_pFront->pPrev = pNewItem;
2350  m_pFront = pNewItem;
2351  ++m_Count;
2352  }
2353  return pNewItem;
2354 }
2355 
2356 template<typename T>
2357 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
2358 {
2359  ItemType* const pNewItem = PushBack();
2360  pNewItem->Value = value;
2361  return pNewItem;
2362 }
2363 
2364 template<typename T>
2365 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
2366 {
2367  ItemType* const pNewItem = PushFront();
2368  pNewItem->Value = value;
2369  return pNewItem;
2370 }
2371 
2372 template<typename T>
2373 void VmaRawList<T>::PopBack()
2374 {
2375  VMA_HEAVY_ASSERT(m_Count > 0);
2376  ItemType* const pBackItem = m_pBack;
2377  ItemType* const pPrevItem = pBackItem->pPrev;
2378  if(pPrevItem != VMA_NULL)
2379  {
2380  pPrevItem->pNext = VMA_NULL;
2381  }
2382  m_pBack = pPrevItem;
2383  m_ItemAllocator.Free(pBackItem);
2384  --m_Count;
2385 }
2386 
2387 template<typename T>
2388 void VmaRawList<T>::PopFront()
2389 {
2390  VMA_HEAVY_ASSERT(m_Count > 0);
2391  ItemType* const pFrontItem = m_pFront;
2392  ItemType* const pNextItem = pFrontItem->pNext;
2393  if(pNextItem != VMA_NULL)
2394  {
2395  pNextItem->pPrev = VMA_NULL;
2396  }
2397  m_pFront = pNextItem;
2398  m_ItemAllocator.Free(pFrontItem);
2399  --m_Count;
2400 }
2401 
2402 template<typename T>
2403 void VmaRawList<T>::Remove(ItemType* pItem)
2404 {
2405  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
2406  VMA_HEAVY_ASSERT(m_Count > 0);
2407 
2408  if(pItem->pPrev != VMA_NULL)
2409  {
2410  pItem->pPrev->pNext = pItem->pNext;
2411  }
2412  else
2413  {
2414  VMA_HEAVY_ASSERT(m_pFront == pItem);
2415  m_pFront = pItem->pNext;
2416  }
2417 
2418  if(pItem->pNext != VMA_NULL)
2419  {
2420  pItem->pNext->pPrev = pItem->pPrev;
2421  }
2422  else
2423  {
2424  VMA_HEAVY_ASSERT(m_pBack == pItem);
2425  m_pBack = pItem->pPrev;
2426  }
2427 
2428  m_ItemAllocator.Free(pItem);
2429  --m_Count;
2430 }
2431 
2432 template<typename T>
2433 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
2434 {
2435  if(pItem != VMA_NULL)
2436  {
2437  ItemType* const prevItem = pItem->pPrev;
2438  ItemType* const newItem = m_ItemAllocator.Alloc();
2439  newItem->pPrev = prevItem;
2440  newItem->pNext = pItem;
2441  pItem->pPrev = newItem;
2442  if(prevItem != VMA_NULL)
2443  {
2444  prevItem->pNext = newItem;
2445  }
2446  else
2447  {
2448  VMA_HEAVY_ASSERT(m_pFront == pItem);
2449  m_pFront = newItem;
2450  }
2451  ++m_Count;
2452  return newItem;
2453  }
2454  else
2455  return PushBack();
2456 }
2457 
2458 template<typename T>
2459 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
2460 {
2461  if(pItem != VMA_NULL)
2462  {
2463  ItemType* const nextItem = pItem->pNext;
2464  ItemType* const newItem = m_ItemAllocator.Alloc();
2465  newItem->pNext = nextItem;
2466  newItem->pPrev = pItem;
2467  pItem->pNext = newItem;
2468  if(nextItem != VMA_NULL)
2469  {
2470  nextItem->pPrev = newItem;
2471  }
2472  else
2473  {
2474  VMA_HEAVY_ASSERT(m_pBack == pItem);
2475  m_pBack = newItem;
2476  }
2477  ++m_Count;
2478  return newItem;
2479  }
2480  else
2481  return PushFront();
2482 }
2483 
2484 template<typename T>
2485 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
2486 {
2487  ItemType* const newItem = InsertBefore(pItem);
2488  newItem->Value = value;
2489  return newItem;
2490 }
2491 
2492 template<typename T>
2493 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
2494 {
2495  ItemType* const newItem = InsertAfter(pItem);
2496  newItem->Value = value;
2497  return newItem;
2498 }
2499 
2500 template<typename T, typename AllocatorT>
2501 class VmaList
2502 {
2503 public:
2504  class iterator
2505  {
2506  public:
2507  iterator() :
2508  m_pList(VMA_NULL),
2509  m_pItem(VMA_NULL)
2510  {
2511  }
2512 
2513  T& operator*() const
2514  {
2515  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
2516  return m_pItem->Value;
2517  }
2518  T* operator->() const
2519  {
2520  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
2521  return &m_pItem->Value;
2522  }
2523 
2524  iterator& operator++()
2525  {
2526  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
2527  m_pItem = m_pItem->pNext;
2528  return *this;
2529  }
2530  iterator& operator--()
2531  {
2532  if(m_pItem != VMA_NULL)
2533  {
2534  m_pItem = m_pItem->pPrev;
2535  }
2536  else
2537  {
2538  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
2539  m_pItem = m_pList->Back();
2540  }
2541  return *this;
2542  }
2543 
2544  iterator operator++(int)
2545  {
2546  iterator result = *this;
2547  ++*this;
2548  return result;
2549  }
2550  iterator operator--(int)
2551  {
2552  iterator result = *this;
2553  --*this;
2554  return result;
2555  }
2556 
2557  bool operator==(const iterator& rhs) const
2558  {
2559  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
2560  return m_pItem == rhs.m_pItem;
2561  }
2562  bool operator!=(const iterator& rhs) const
2563  {
2564  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
2565  return m_pItem != rhs.m_pItem;
2566  }
2567 
2568  private:
2569  VmaRawList<T>* m_pList;
2570  VmaListItem<T>* m_pItem;
2571 
2572  iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
2573  m_pList(pList),
2574  m_pItem(pItem)
2575  {
2576  }
2577 
2578  friend class VmaList<T, AllocatorT>;
2579  };
2580 
2581  class const_iterator
2582  {
2583  public:
2584  const_iterator() :
2585  m_pList(VMA_NULL),
2586  m_pItem(VMA_NULL)
2587  {
2588  }
2589 
2590  const_iterator(const iterator& src) :
2591  m_pList(src.m_pList),
2592  m_pItem(src.m_pItem)
2593  {
2594  }
2595 
2596  const T& operator*() const
2597  {
2598  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
2599  return m_pItem->Value;
2600  }
2601  const T* operator->() const
2602  {
2603  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
2604  return &m_pItem->Value;
2605  }
2606 
2607  const_iterator& operator++()
2608  {
2609  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
2610  m_pItem = m_pItem->pNext;
2611  return *this;
2612  }
2613  const_iterator& operator--()
2614  {
2615  if(m_pItem != VMA_NULL)
2616  {
2617  m_pItem = m_pItem->pPrev;
2618  }
2619  else
2620  {
2621  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
2622  m_pItem = m_pList->Back();
2623  }
2624  return *this;
2625  }
2626 
2627  const_iterator operator++(int)
2628  {
2629  const_iterator result = *this;
2630  ++*this;
2631  return result;
2632  }
2633  const_iterator operator--(int)
2634  {
2635  const_iterator result = *this;
2636  --*this;
2637  return result;
2638  }
2639 
2640  bool operator==(const const_iterator& rhs) const
2641  {
2642  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
2643  return m_pItem == rhs.m_pItem;
2644  }
2645  bool operator!=(const const_iterator& rhs) const
2646  {
2647  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
2648  return m_pItem != rhs.m_pItem;
2649  }
2650 
2651  private:
2652  const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
2653  m_pList(pList),
2654  m_pItem(pItem)
2655  {
2656  }
2657 
2658  const VmaRawList<T>* m_pList;
2659  const VmaListItem<T>* m_pItem;
2660 
2661  friend class VmaList<T, AllocatorT>;
2662  };
2663 
2664  VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }
2665 
2666  bool empty() const { return m_RawList.IsEmpty(); }
2667  size_t size() const { return m_RawList.GetCount(); }
2668 
2669  iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
2670  iterator end() { return iterator(&m_RawList, VMA_NULL); }
2671 
2672  const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
2673  const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
2674 
2675  void clear() { m_RawList.Clear(); }
2676  void push_back(const T& value) { m_RawList.PushBack(value); }
2677  void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
2678  iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
2679 
2680 private:
2681  VmaRawList<T> m_RawList;
2682 };
2683 
2684 #endif // #if VMA_USE_STL_LIST
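
// Illustrative sketch (not part of the original file): typical usage of the
// VmaList subset defined above. Assumes a valid `const VkAllocationCallbacks*`,
// from which VmaStlAllocator is constructible (as done elsewhere in this file).
#if 0
static void VmaListUsageExample(const VkAllocationCallbacks* pCallbacks)
{
    VmaStlAllocator<int> alloc(pCallbacks);
    VmaList< int, VmaStlAllocator<int> > list(alloc);
    list.push_back(1);
    list.push_back(2);
    // Only the subset declared above exists: begin/end, insert, erase, clear.
    for(VmaList< int, VmaStlAllocator<int> >::iterator it = list.begin(); it != list.end(); ++it)
    {
        ++*it; // increment each stored value in place
    }
}
#endif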
2685 
2686 ////////////////////////////////////////////////////////////////////////////////
2687 // class VmaMap
2688 
2689 // Unused in this version.
2690 #if 0
2691 
2692 #if VMA_USE_STL_UNORDERED_MAP
2693 
2694 #define VmaPair std::pair
2695 
2696 #define VMA_MAP_TYPE(KeyT, ValueT) \
2697  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
2698 
2699 #else // #if VMA_USE_STL_UNORDERED_MAP
2700 
2701 template<typename T1, typename T2>
2702 struct VmaPair
2703 {
2704  T1 first;
2705  T2 second;
2706 
2707  VmaPair() : first(), second() { }
2708  VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
2709 };
2710 
2711 /* Class compatible with subset of interface of std::unordered_map.
2712 KeyT, ValueT must be POD because they will be stored in VmaVector.
2713 */
2714 template<typename KeyT, typename ValueT>
2715 class VmaMap
2716 {
2717 public:
2718  typedef VmaPair<KeyT, ValueT> PairType;
2719  typedef PairType* iterator;
2720 
2721  VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }
2722 
2723  iterator begin() { return m_Vector.begin(); }
2724  iterator end() { return m_Vector.end(); }
2725 
2726  void insert(const PairType& pair);
2727  iterator find(const KeyT& key);
2728  void erase(iterator it);
2729 
2730 private:
2731  VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
2732 };
2733 
2734 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
2735 
2736 template<typename FirstT, typename SecondT>
2737 struct VmaPairFirstLess
2738 {
2739  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
2740  {
2741  return lhs.first < rhs.first;
2742  }
2743  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
2744  {
2745  return lhs.first < rhsFirst;
2746  }
2747 };
2748 
2749 template<typename KeyT, typename ValueT>
2750 void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
2751 {
2752  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
2753  m_Vector.data(),
2754  m_Vector.data() + m_Vector.size(),
2755  pair,
2756  VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
2757  VmaVectorInsert(m_Vector, indexToInsert, pair);
2758 }
2759 
2760 template<typename KeyT, typename ValueT>
2761 VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
2762 {
2763  PairType* it = VmaBinaryFindFirstNotLess(
2764  m_Vector.data(),
2765  m_Vector.data() + m_Vector.size(),
2766  key,
2767  VmaPairFirstLess<KeyT, ValueT>());
2768  if((it != m_Vector.end()) && (it->first == key))
2769  {
2770  return it;
2771  }
2772  else
2773  {
2774  return m_Vector.end();
2775  }
2776 }
2777 
2778 template<typename KeyT, typename ValueT>
2779 void VmaMap<KeyT, ValueT>::erase(iterator it)
2780 {
2781  VmaVectorRemove(m_Vector, it - m_Vector.begin());
2782 }
2783 
2784 #endif // #if VMA_USE_STL_UNORDERED_MAP
2785 
2786 #endif // #if 0
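
// The VmaMap above (currently compiled out) is a "flat map": a vector kept
// sorted by key, with binary search (VmaBinaryFindFirstNotLess) for insert and
// find. A minimal standard-library sketch of the same technique, for
// illustration only:
#if 0
#include <algorithm>
#include <utility>
#include <vector>
static int* FlatMapFind(std::vector< std::pair<int, int> >& v, int key)
{
    // std::lower_bound plays the role of VmaBinaryFindFirstNotLess:
    // it returns the first element whose key is not less than `key`.
    std::vector< std::pair<int, int> >::iterator it = std::lower_bound(
        v.begin(), v.end(), key,
        [](const std::pair<int, int>& p, int k) { return p.first < k; });
    return (it != v.end() && it->first == key) ? &it->second : VMA_NULL;
}
#endif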
2787 
2789 
2790 class VmaDeviceMemoryBlock;
2791 
2792 enum VMA_BLOCK_VECTOR_TYPE
2793 {
2794  VMA_BLOCK_VECTOR_TYPE_UNMAPPED,
2795  VMA_BLOCK_VECTOR_TYPE_MAPPED,
2796  VMA_BLOCK_VECTOR_TYPE_COUNT
2797 };
2798 
2799 static VMA_BLOCK_VECTOR_TYPE VmaAllocationCreateFlagsToBlockVectorType(VmaAllocationCreateFlags flags)
2800 {
2801  return (flags & VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT) != 0 ?
2802  VMA_BLOCK_VECTOR_TYPE_MAPPED :
2803  VMA_BLOCK_VECTOR_TYPE_UNMAPPED;
2804 }
2805 
2806 struct VmaAllocation_T
2807 {
2808 public:
2809  enum ALLOCATION_TYPE
2810  {
2811  ALLOCATION_TYPE_NONE,
2812  ALLOCATION_TYPE_BLOCK,
2813  ALLOCATION_TYPE_OWN,
2814  };
2815 
2816  VmaAllocation_T(uint32_t currentFrameIndex) :
2817  m_Alignment(1),
2818  m_Size(0),
2819  m_pUserData(VMA_NULL),
2820  m_Type(ALLOCATION_TYPE_NONE),
2821  m_SuballocationType(VMA_SUBALLOCATION_TYPE_UNKNOWN),
2822  m_LastUseFrameIndex(currentFrameIndex)
2823  {
2824  }
2825 
2826  void InitBlockAllocation(
2827  VmaPool hPool,
2828  VmaDeviceMemoryBlock* block,
2829  VkDeviceSize offset,
2830  VkDeviceSize alignment,
2831  VkDeviceSize size,
2832  VmaSuballocationType suballocationType,
2833  void* pUserData,
2834  bool canBecomeLost)
2835  {
2836  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
2837  VMA_ASSERT(block != VMA_NULL);
2838  m_Type = ALLOCATION_TYPE_BLOCK;
2839  m_Alignment = alignment;
2840  m_Size = size;
2841  m_pUserData = pUserData;
2842  m_SuballocationType = suballocationType;
2843  m_BlockAllocation.m_hPool = hPool;
2844  m_BlockAllocation.m_Block = block;
2845  m_BlockAllocation.m_Offset = offset;
2846  m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
2847  }
2848 
2849  void InitLost()
2850  {
2851  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
2852  VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
2853  m_Type = ALLOCATION_TYPE_BLOCK;
2854  m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
2855  m_BlockAllocation.m_Block = VMA_NULL;
2856  m_BlockAllocation.m_Offset = 0;
2857  m_BlockAllocation.m_CanBecomeLost = true;
2858  }
2859 
2860  void ChangeBlockAllocation(
2861  VmaDeviceMemoryBlock* block,
2862  VkDeviceSize offset)
2863  {
2864  VMA_ASSERT(block != VMA_NULL);
2865  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
2866  m_BlockAllocation.m_Block = block;
2867  m_BlockAllocation.m_Offset = offset;
2868  }
2869 
2870  void InitOwnAllocation(
2871  uint32_t memoryTypeIndex,
2872  VkDeviceMemory hMemory,
2873  VmaSuballocationType suballocationType,
2874  bool persistentMap,
2875  void* pMappedData,
2876  VkDeviceSize size,
2877  void* pUserData)
2878  {
2879  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
2880  VMA_ASSERT(hMemory != VK_NULL_HANDLE);
2881  m_Type = ALLOCATION_TYPE_OWN;
2882  m_Alignment = 0;
2883  m_Size = size;
2884  m_pUserData = pUserData;
2885  m_SuballocationType = suballocationType;
2886  m_OwnAllocation.m_MemoryTypeIndex = memoryTypeIndex;
2887  m_OwnAllocation.m_hMemory = hMemory;
2888  m_OwnAllocation.m_PersistentMap = persistentMap;
2889  m_OwnAllocation.m_pMappedData = pMappedData;
2890  }
2891 
2892  ALLOCATION_TYPE GetType() const { return m_Type; }
2893  VkDeviceSize GetAlignment() const { return m_Alignment; }
2894  VkDeviceSize GetSize() const { return m_Size; }
2895  void* GetUserData() const { return m_pUserData; }
2896  void SetUserData(void* pUserData) { m_pUserData = pUserData; }
2897  VmaSuballocationType GetSuballocationType() const { return m_SuballocationType; }
2898 
2899  VmaDeviceMemoryBlock* GetBlock() const
2900  {
2901  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
2902  return m_BlockAllocation.m_Block;
2903  }
2904  VkDeviceSize GetOffset() const;
2905  VkDeviceMemory GetMemory() const;
2906  uint32_t GetMemoryTypeIndex() const;
2907  VMA_BLOCK_VECTOR_TYPE GetBlockVectorType() const;
2908  void* GetMappedData() const;
2909  bool CanBecomeLost() const;
2910  VmaPool GetPool() const;
2911 
2912  VkResult OwnAllocMapPersistentlyMappedMemory(VmaAllocator hAllocator);
2913  void OwnAllocUnmapPersistentlyMappedMemory(VmaAllocator hAllocator);
2914 
2915  uint32_t GetLastUseFrameIndex() const
2916  {
2917  return m_LastUseFrameIndex.load();
2918  }
2919  bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
2920  {
2921  return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
2922  }
2923  /*
2924  - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
2925  makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
2926  - Else, returns false.
2927 
2928  If hAllocation is already lost, assert - you should not call it then.
2929  If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
2930  */
2931  bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
2932 
2933  void OwnAllocCalcStatsInfo(VmaStatInfo& outInfo)
2934  {
2935  VMA_ASSERT(m_Type == ALLOCATION_TYPE_OWN);
2936  outInfo.blockCount = 1;
2937  outInfo.allocationCount = 1;
2938  outInfo.unusedRangeCount = 0;
2939  outInfo.usedBytes = m_Size;
2940  outInfo.unusedBytes = 0;
2941  outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
2942  outInfo.unusedRangeSizeMin = UINT64_MAX;
2943  outInfo.unusedRangeSizeMax = 0;
2944  }
2945 
2946 private:
2947  VkDeviceSize m_Alignment;
2948  VkDeviceSize m_Size;
2949  void* m_pUserData;
2950  ALLOCATION_TYPE m_Type;
2951  VmaSuballocationType m_SuballocationType;
2952  VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
2953 
2954  // Allocation out of VmaDeviceMemoryBlock.
2955  struct BlockAllocation
2956  {
2957  VmaPool m_hPool; // Null if belongs to general memory.
2958  VmaDeviceMemoryBlock* m_Block;
2959  VkDeviceSize m_Offset;
2960  bool m_CanBecomeLost;
2961  };
2962 
2963  // Allocation for an object that has its own private VkDeviceMemory.
2964  struct OwnAllocation
2965  {
2966  uint32_t m_MemoryTypeIndex;
2967  VkDeviceMemory m_hMemory;
2968  bool m_PersistentMap;
2969  void* m_pMappedData;
2970  };
2971 
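 // The union below is discriminated by m_Type: m_BlockAllocation is valid only
 // when m_Type == ALLOCATION_TYPE_BLOCK and m_OwnAllocation only when it is
 // ALLOCATION_TYPE_OWN, as the switch statements in the accessors below show.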
2972  union
2973  {
2974  // Allocation out of VmaDeviceMemoryBlock.
2975  BlockAllocation m_BlockAllocation;
2976  // Allocation for an object that has its own private VkDeviceMemory.
2977  OwnAllocation m_OwnAllocation;
2978  };
2979 };
2980 
2981 /*
2982 Represents a region of VmaDeviceMemoryBlock that is either assigned to a
2983 VmaAllocation and returned as allocated memory, or free.
2984 */
2985 struct VmaSuballocation
2986 {
2987  VkDeviceSize offset;
2988  VkDeviceSize size;
2989  VmaAllocation hAllocation;
2990  VmaSuballocationType type;
2991 };
2992 
2993 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
2994 
2995 // Cost of making one additional allocation lost, expressed as an equivalent in bytes.
2996 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
2997 
2998 /*
2999 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
3000 
3001 If canMakeOtherLost was false:
3002 - item points to a FREE suballocation.
3003 - itemsToMakeLostCount is 0.
3004 
3005 If canMakeOtherLost was true:
3006 - item points to first of sequence of suballocations, which are either FREE,
3007  or point to VmaAllocations that can become lost.
3008 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
3009  the requested allocation to succeed.
3010 */
3011 struct VmaAllocationRequest
3012 {
3013  VkDeviceSize offset;
3014  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
3015  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
3016  VmaSuballocationList::iterator item;
3017  size_t itemsToMakeLostCount;
3018 
3019  VkDeviceSize CalcCost() const
3020  {
3021  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
3022  }
3023 };
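
// Worked example of the cost model above (illustrative numbers): a request that
// overlaps two lost-able allocations totaling 786432 bytes (256 KiB + 512 KiB)
// has sumItemSize = 786432 and itemsToMakeLostCount = 2, so
// CalcCost() = 786432 + 2 * 1048576 = 2883584. When canMakeOtherLost is used,
// candidate requests with lower cost are preferred.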
3024 
3025 /*
3026 Data structure used for bookkeeping of allocations and unused ranges of memory
3027 in a single VkDeviceMemory block.
3028 */
3029 class VmaBlockMetadata
3030 {
3031 public:
3032  VmaBlockMetadata(VmaAllocator hAllocator);
3033  ~VmaBlockMetadata();
3034  void Init(VkDeviceSize size);
3035 
3036  // Validates all data structures inside this object. If not valid, returns false.
3037  bool Validate() const;
3038  size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
3039  VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
3040  VkDeviceSize GetUnusedRangeSizeMax() const;
3041  // Returns true if this block is empty - contains only a single free suballocation.
3042  bool IsEmpty() const;
3043 
3044  void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
3045  void AddPoolStats(VmaPoolStats& inoutStats) const;
3046 
3047 #if VMA_STATS_STRING_ENABLED
3048  void PrintDetailedMap(class VmaJsonWriter& json) const;
3049 #endif
3050 
3051  // Creates a trivial request for the case when the block is empty.
3052  void CreateFirstAllocationRequest(VmaAllocationRequest* pAllocationRequest);
3053 
3054  // Tries to find a place for suballocation with given parameters inside this block.
3055  // If succeeded, fills pAllocationRequest and returns true.
3056  // If failed, returns false.
3057  bool CreateAllocationRequest(
3058  uint32_t currentFrameIndex,
3059  uint32_t frameInUseCount,
3060  VkDeviceSize bufferImageGranularity,
3061  VkDeviceSize allocSize,
3062  VkDeviceSize allocAlignment,
3063  VmaSuballocationType allocType,
3064  bool canMakeOtherLost,
3065  VmaAllocationRequest* pAllocationRequest);
3066 
3067  bool MakeRequestedAllocationsLost(
3068  uint32_t currentFrameIndex,
3069  uint32_t frameInUseCount,
3070  VmaAllocationRequest* pAllocationRequest);
3071 
3072  uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
3073 
3074  // Makes actual allocation based on request. Request must already be checked and valid.
3075  void Alloc(
3076  const VmaAllocationRequest& request,
3077  VmaSuballocationType type,
3078  VkDeviceSize allocSize,
3079  VmaAllocation hAllocation);
3080 
3081  // Frees suballocation assigned to given memory region.
3082  void Free(const VmaAllocation allocation);
3083 
3084 private:
3085  VkDeviceSize m_Size;
3086  uint32_t m_FreeCount;
3087  VkDeviceSize m_SumFreeSize;
3088  VmaSuballocationList m_Suballocations;
3089  // Suballocations that are free and have size greater than certain threshold.
3090  // Sorted by size, ascending.
3091  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
3092 
3093  bool ValidateFreeSuballocationList() const;
3094 
3095  // Checks if requested suballocation with given parameters can be placed in given pFreeSuballocItem.
3096  // If yes, fills pOffset and returns true. If no, returns false.
3097  bool CheckAllocation(
3098  uint32_t currentFrameIndex,
3099  uint32_t frameInUseCount,
3100  VkDeviceSize bufferImageGranularity,
3101  VkDeviceSize allocSize,
3102  VkDeviceSize allocAlignment,
3103  VmaSuballocationType allocType,
3104  VmaSuballocationList::const_iterator suballocItem,
3105  bool canMakeOtherLost,
3106  VkDeviceSize* pOffset,
3107  size_t* itemsToMakeLostCount,
3108  VkDeviceSize* pSumFreeSize,
3109  VkDeviceSize* pSumItemSize) const;
3110  // Merges the given free suballocation with the following one, which must also be free.
3111  void MergeFreeWithNext(VmaSuballocationList::iterator item);
3112  // Releases given suballocation, making it free.
3113  // Merges it with adjacent free suballocations if applicable.
3114  // Returns iterator to new free suballocation at this place.
3115  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
3116  // Inserts the given free suballocation into the sorted list
3117  // m_FreeSuballocationsBySize, if it is suitable (large enough to register).
3118  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
3119  // Removes the given free suballocation from the sorted list
3120  // m_FreeSuballocationsBySize, if it was registered there.
3121  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
3122 };
3123 
3124 /*
3125 Represents a single block of device memory (`VkDeviceMemory`) with all the
3126 data about its regions (aka suballocations, `VmaAllocation`), assigned and free.
3127 
3128 Thread-safety: This class must be externally synchronized.
3129 */
3130 class VmaDeviceMemoryBlock
3131 {
3132 public:
3133  uint32_t m_MemoryTypeIndex;
3134  VMA_BLOCK_VECTOR_TYPE m_BlockVectorType;
3135  VkDeviceMemory m_hMemory;
3136  VkDeviceSize m_Size;
3137  bool m_PersistentMap;
3138  void* m_pMappedData;
3139  VmaBlockMetadata m_Metadata;
3140 
3141  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
3142 
3143  ~VmaDeviceMemoryBlock()
3144  {
3145  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
3146  }
3147 
3148  // Always call after construction.
3149  void Init(
3150  uint32_t newMemoryTypeIndex,
3151  VMA_BLOCK_VECTOR_TYPE newBlockVectorType,
3152  VkDeviceMemory newMemory,
3153  VkDeviceSize newSize,
3154  bool persistentMap,
3155  void* pMappedData);
3156  // Always call before destruction.
3157  void Destroy(VmaAllocator allocator);
3158 
3159  // Validates all data structures inside this object. If not valid, returns false.
3160  bool Validate() const;
3161 };
3162 
3163 struct VmaPointerLess
3164 {
3165  bool operator()(const void* lhs, const void* rhs) const
3166  {
3167  return lhs < rhs;
3168  }
3169 };
3170 
3171 class VmaDefragmentator;
3172 
3173 /*
3174 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
3175 Vulkan memory type.
3176 
3177 Synchronized internally with a mutex.
3178 */
3179 struct VmaBlockVector
3180 {
3181  VmaBlockVector(
3182  VmaAllocator hAllocator,
3183  uint32_t memoryTypeIndex,
3184  VMA_BLOCK_VECTOR_TYPE blockVectorType,
3185  VkDeviceSize preferredBlockSize,
3186  size_t minBlockCount,
3187  size_t maxBlockCount,
3188  VkDeviceSize bufferImageGranularity,
3189  uint32_t frameInUseCount,
3190  bool isCustomPool);
3191  ~VmaBlockVector();
3192 
3193  VkResult CreateMinBlocks();
3194 
3195  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
3196  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
3197  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
3198  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
3199  VMA_BLOCK_VECTOR_TYPE GetBlockVectorType() const { return m_BlockVectorType; }
3200 
3201  void GetPoolStats(VmaPoolStats* pStats);
3202 
3203  bool IsEmpty() const { return m_Blocks.empty(); }
3204 
3205  VkResult Allocate(
3206  VmaPool hCurrentPool,
3207  uint32_t currentFrameIndex,
3208  const VkMemoryRequirements& vkMemReq,
3209  const VmaAllocationCreateInfo& createInfo,
3210  VmaSuballocationType suballocType,
3211  VmaAllocation* pAllocation);
3212 
3213  void Free(
3214  VmaAllocation hAllocation);
3215 
3216  // Adds statistics of this BlockVector to pStats.
3217  void AddStats(VmaStats* pStats);
3218 
3219 #if VMA_STATS_STRING_ENABLED
3220  void PrintDetailedMap(class VmaJsonWriter& json);
3221 #endif
3222 
3223  void UnmapPersistentlyMappedMemory();
3224  VkResult MapPersistentlyMappedMemory();
3225 
3226  void MakePoolAllocationsLost(
3227  uint32_t currentFrameIndex,
3228  size_t* pLostAllocationCount);
3229 
3230  VmaDefragmentator* EnsureDefragmentator(
3231  VmaAllocator hAllocator,
3232  uint32_t currentFrameIndex);
3233 
3234  VkResult Defragment(
3235  VmaDefragmentationStats* pDefragmentationStats,
3236  VkDeviceSize& maxBytesToMove,
3237  uint32_t& maxAllocationsToMove);
3238 
3239  void DestroyDefragmentator();
3240 
3241 private:
3242  friend class VmaDefragmentator;
3243 
3244  const VmaAllocator m_hAllocator;
3245  const uint32_t m_MemoryTypeIndex;
3246  const VMA_BLOCK_VECTOR_TYPE m_BlockVectorType;
3247  const VkDeviceSize m_PreferredBlockSize;
3248  const size_t m_MinBlockCount;
3249  const size_t m_MaxBlockCount;
3250  const VkDeviceSize m_BufferImageGranularity;
3251  const uint32_t m_FrameInUseCount;
3252  const bool m_IsCustomPool;
3253  VMA_MUTEX m_Mutex;
3254  // Incrementally sorted by sumFreeSize, ascending.
3255  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
3256  /* There can be at most one block that is completely empty - a
3257  hysteresis to avoid the pessimistic case of alternating creation and
3258  destruction of a VkDeviceMemory. */
3259  bool m_HasEmptyBlock;
3260  VmaDefragmentator* m_pDefragmentator;
3261 
3262  // Finds and removes given block from vector.
3263  void Remove(VmaDeviceMemoryBlock* pBlock);
3264 
3265  // Performs single step in sorting m_Blocks. They may not be fully sorted
3266  // after this call.
3267  void IncrementallySortBlocks();
3268 
3269  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
3270 };
3271 
3272 struct VmaPool_T
3273 {
3274 public:
3275  VmaBlockVector m_BlockVector;
3276 
3277  // Takes ownership.
3278  VmaPool_T(
3279  VmaAllocator hAllocator,
3280  const VmaPoolCreateInfo& createInfo);
3281  ~VmaPool_T();
3282 
3283  VmaBlockVector& GetBlockVector() { return m_BlockVector; }
3284 
3285 #if VMA_STATS_STRING_ENABLED
3286  //void PrintDetailedMap(class VmaStringBuilder& sb);
3287 #endif
3288 };
3289 
3290 class VmaDefragmentator
3291 {
3292  const VmaAllocator m_hAllocator;
3293  VmaBlockVector* const m_pBlockVector;
3294  uint32_t m_CurrentFrameIndex;
3295  VMA_BLOCK_VECTOR_TYPE m_BlockVectorType;
3296  VkDeviceSize m_BytesMoved;
3297  uint32_t m_AllocationsMoved;
3298 
3299  struct AllocationInfo
3300  {
3301  VmaAllocation m_hAllocation;
3302  VkBool32* m_pChanged;
3303 
3304  AllocationInfo() :
3305  m_hAllocation(VK_NULL_HANDLE),
3306  m_pChanged(VMA_NULL)
3307  {
3308  }
3309  };
3310 
3311  struct AllocationInfoSizeGreater
3312  {
3313  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
3314  {
3315  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
3316  }
3317  };
3318 
3319  // Used between AddAllocation and Defragment.
3320  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
3321 
3322  struct BlockInfo
3323  {
3324  VmaDeviceMemoryBlock* m_pBlock;
3325  bool m_HasNonMovableAllocations;
3326  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
3327 
3328  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
3329  m_pBlock(VMA_NULL),
3330  m_HasNonMovableAllocations(true),
3331  m_Allocations(pAllocationCallbacks),
3332  m_pMappedDataForDefragmentation(VMA_NULL)
3333  {
3334  }
3335 
3336  void CalcHasNonMovableAllocations()
3337  {
3338  const size_t blockAllocCount = m_pBlock->m_Metadata.GetAllocationCount();
3339  const size_t defragmentAllocCount = m_Allocations.size();
3340  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
3341  }
3342 
3343  void SortAllocationsBySizeDescending()
3344  {
3345  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
3346  }
3347 
3348  VkResult EnsureMapping(VmaAllocator hAllocator, void** ppMappedData);
3349  void Unmap(VmaAllocator hAllocator);
3350 
3351  private:
3352  // Not null if mapped for defragmentation only, not persistently mapped.
3353  void* m_pMappedDataForDefragmentation;
3354  };
3355 
3356  struct BlockPointerLess
3357  {
3358  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
3359  {
3360  return pLhsBlockInfo->m_pBlock < pRhsBlock;
3361  }
3362  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
3363  {
3364  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
3365  }
3366  };
3367 
3368  // 1. Blocks with some non-movable allocations go first.
3369  // 2. Blocks with smaller sumFreeSize go first.
3370  struct BlockInfoCompareMoveDestination
3371  {
3372  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
3373  {
3374  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
3375  {
3376  return true;
3377  }
3378  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
3379  {
3380  return false;
3381  }
3382  if(pLhsBlockInfo->m_pBlock->m_Metadata.GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_Metadata.GetSumFreeSize())
3383  {
3384  return true;
3385  }
3386  return false;
3387  }
3388  };
3389 
3390  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
3391  BlockInfoVector m_Blocks;
3392 
3393  VkResult DefragmentRound(
3394  VkDeviceSize maxBytesToMove,
3395  uint32_t maxAllocationsToMove);
3396 
3397  static bool MoveMakesSense(
3398  size_t dstBlockIndex, VkDeviceSize dstOffset,
3399  size_t srcBlockIndex, VkDeviceSize srcOffset);
3400 
3401 public:
3402  VmaDefragmentator(
3403  VmaAllocator hAllocator,
3404  VmaBlockVector* pBlockVector,
3405  uint32_t currentFrameIndex);
3406 
3407  ~VmaDefragmentator();
3408 
3409  VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
3410  uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
3411 
3412  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
3413 
3414  VkResult Defragment(
3415  VkDeviceSize maxBytesToMove,
3416  uint32_t maxAllocationsToMove);
3417 };
3418 
3419 // Main allocator object.
3420 struct VmaAllocator_T
3421 {
3422  bool m_UseMutex;
3423  VkDevice m_hDevice;
3424  bool m_AllocationCallbacksSpecified;
3425  VkAllocationCallbacks m_AllocationCallbacks;
3426  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
3427  // Non-zero when we are inside UnmapPersistentlyMappedMemory...MapPersistentlyMappedMemory.
3428  // Counter to allow nested calls to these functions.
3429  uint32_t m_UnmapPersistentlyMappedMemoryCounter;
3430 
3431  // Number of bytes free out of the limit, or VK_WHOLE_SIZE if there is no limit for that heap.
3432  VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
3433  VMA_MUTEX m_HeapSizeLimitMutex;
3434 
3435  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
3436  VkPhysicalDeviceMemoryProperties m_MemProps;
3437 
3438  // Default pools.
3439  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES][VMA_BLOCK_VECTOR_TYPE_COUNT];
3440 
3441  // Each vector is sorted by memory (handle value).
3442  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
3443  AllocationVectorType* m_pOwnAllocations[VK_MAX_MEMORY_TYPES][VMA_BLOCK_VECTOR_TYPE_COUNT];
3444  VMA_MUTEX m_OwnAllocationsMutex[VK_MAX_MEMORY_TYPES];
3445 
3446  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
3447  ~VmaAllocator_T();
3448 
3449  const VkAllocationCallbacks* GetAllocationCallbacks() const
3450  {
3451  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
3452  }
3453  const VmaVulkanFunctions& GetVulkanFunctions() const
3454  {
3455  return m_VulkanFunctions;
3456  }
3457 
3458  VkDeviceSize GetBufferImageGranularity() const
3459  {
3460  return VMA_MAX(
3461  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
3462  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
3463  }
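 // Illustrative note (not in the original): with the default
 // VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY of 1 this simply returns the device
 // limit; raising that debug macro forces a larger granularity for testing.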
3464 
3465  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
3466  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
3467 
3468  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
3469  {
3470  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
3471  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
3472  }
3473 
3474  // Main allocation function.
3475  VkResult AllocateMemory(
3476  const VkMemoryRequirements& vkMemReq,
3477  const VmaAllocationCreateInfo& createInfo,
3478  VmaSuballocationType suballocType,
3479  VmaAllocation* pAllocation);
3480 
3481  // Main deallocation function.
3482  void FreeMemory(const VmaAllocation allocation);
3483 
3484  void CalculateStats(VmaStats* pStats);
3485 
3486 #if VMA_STATS_STRING_ENABLED
3487  void PrintDetailedMap(class VmaJsonWriter& json);
3488 #endif
3489 
3490  void UnmapPersistentlyMappedMemory();
3491  VkResult MapPersistentlyMappedMemory();
3492 
3493  VkResult Defragment(
3494  VmaAllocation* pAllocations,
3495  size_t allocationCount,
3496  VkBool32* pAllocationsChanged,
3497  const VmaDefragmentationInfo* pDefragmentationInfo,
3498  VmaDefragmentationStats* pDefragmentationStats);
3499 
3500  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
3501 
3502  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
3503  void DestroyPool(VmaPool pool);
3504  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
3505 
3506  void SetCurrentFrameIndex(uint32_t frameIndex);
3507 
3508  void MakePoolAllocationsLost(
3509  VmaPool hPool,
3510  size_t* pLostAllocationCount);
3511 
3512  void CreateLostAllocation(VmaAllocation* pAllocation);
3513 
3514  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
3515  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
3516 
3517 private:
3518  VkDeviceSize m_PreferredLargeHeapBlockSize;
3519  VkDeviceSize m_PreferredSmallHeapBlockSize;
3520 
3521  VkPhysicalDevice m_PhysicalDevice;
3522  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
3523 
3524  VMA_MUTEX m_PoolsMutex;
3525  // Protected by m_PoolsMutex. Sorted by pointer value.
3526  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
3527 
3528  VmaVulkanFunctions m_VulkanFunctions;
3529 
3530  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
3531 
3532  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
3533 
3534  VkResult AllocateMemoryOfType(
3535  const VkMemoryRequirements& vkMemReq,
3536  const VmaAllocationCreateInfo& createInfo,
3537  uint32_t memTypeIndex,
3538  VmaSuballocationType suballocType,
3539  VmaAllocation* pAllocation);
3540 
3541  // Allocates and registers new VkDeviceMemory specifically for single allocation.
3542  VkResult AllocateOwnMemory(
3543  VkDeviceSize size,
3544  VmaSuballocationType suballocType,
3545  uint32_t memTypeIndex,
3546  bool map,
3547  void* pUserData,
3548  VmaAllocation* pAllocation);
3549 
3550  // Frees the given allocation, which must have been created as Own Memory.
3551  void FreeOwnMemory(VmaAllocation allocation);
3552 };
3553 
3554 ////////////////////////////////////////////////////////////////////////////////
3555 // Memory allocation #2 after VmaAllocator_T definition
3556 
3557 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
3558 {
3559  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
3560 }
3561 
3562 static void VmaFree(VmaAllocator hAllocator, void* ptr)
3563 {
3564  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
3565 }
3566 
3567 template<typename T>
3568 static T* VmaAllocate(VmaAllocator hAllocator)
3569 {
3570  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
3571 }
3572 
3573 template<typename T>
3574 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
3575 {
3576  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
3577 }
3578 
3579 template<typename T>
3580 static void vma_delete(VmaAllocator hAllocator, T* ptr)
3581 {
3582  if(ptr != VMA_NULL)
3583  {
3584  ptr->~T();
3585  VmaFree(hAllocator, ptr);
3586  }
3587 }
3588 
3589 template<typename T>
3590 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
3591 {
3592  if(ptr != VMA_NULL)
3593  {
3594  for(size_t i = count; i--; )
3595  ptr[i].~T();
3596  VmaFree(hAllocator, ptr);
3597  }
3598 }
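
// Illustrative sketch (assumption, not part of the original file): VmaAllocate
// returns raw memory, so object lifetime is handled with placement new on the
// allocating side and vma_delete on the releasing side:
#if 0
#include <new>
struct FooExample { int x; FooExample() : x(0) { } }; // hypothetical type
static void VmaNewDeleteExample(VmaAllocator hAllocator)
{
    FooExample* p = new(VmaAllocate<FooExample>(hAllocator)) FooExample();
    // ... use p ...
    vma_delete(hAllocator, p); // runs ~FooExample(), then VmaFree returns the memory
}
#endif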
3599 
3600 ////////////////////////////////////////////////////////////////////////////////
3601 // VmaStringBuilder
3602 
3603 #if VMA_STATS_STRING_ENABLED
3604 
3605 class VmaStringBuilder
3606 {
3607 public:
3608  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
3609  size_t GetLength() const { return m_Data.size(); }
3610  const char* GetData() const { return m_Data.data(); }
3611 
3612  void Add(char ch) { m_Data.push_back(ch); }
3613  void Add(const char* pStr);
3614  void AddNewLine() { Add('\n'); }
3615  void AddNumber(uint32_t num);
3616  void AddNumber(uint64_t num);
3617  void AddPointer(const void* ptr);
3618 
3619 private:
3620  VmaVector< char, VmaStlAllocator<char> > m_Data;
3621 };
3622 
3623 void VmaStringBuilder::Add(const char* pStr)
3624 {
3625  const size_t strLen = strlen(pStr);
3626  if(strLen > 0)
3627  {
3628  const size_t oldCount = m_Data.size();
3629  m_Data.resize(oldCount + strLen);
3630  memcpy(m_Data.data() + oldCount, pStr, strLen);
3631  }
3632 }
3633 
3634 void VmaStringBuilder::AddNumber(uint32_t num)
3635 {
3636  char buf[11];
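 // 2^32-1 = 4294967295 has 10 decimal digits; +1 for the terminating NUL.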
3637  VmaUint32ToStr(buf, sizeof(buf), num);
3638  Add(buf);
3639 }
3640 
3641 void VmaStringBuilder::AddNumber(uint64_t num)
3642 {
3643  char buf[21];
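 // 2^64-1 = 18446744073709551615 has 20 decimal digits; +1 for the terminating NUL.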
3644  VmaUint64ToStr(buf, sizeof(buf), num);
3645  Add(buf);
3646 }
3647 
3648 void VmaStringBuilder::AddPointer(const void* ptr)
3649 {
3650  char buf[21];
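 // "0x" plus up to 16 hex digits of a 64-bit pointer needs 19 chars including NUL; 21 leaves margin.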
3651  VmaPtrToStr(buf, sizeof(buf), ptr);
3652  Add(buf);
3653 }
3654 
3655 #endif // #if VMA_STATS_STRING_ENABLED
3656 
3657 ////////////////////////////////////////////////////////////////////////////////
3658 // VmaJsonWriter
3659 
3660 #if VMA_STATS_STRING_ENABLED
3661 
3662 class VmaJsonWriter
3663 {
3664 public:
3665  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
3666  ~VmaJsonWriter();
3667 
3668  void BeginObject(bool singleLine = false);
3669  void EndObject();
3670 
3671  void BeginArray(bool singleLine = false);
3672  void EndArray();
3673 
3674  void WriteString(const char* pStr);
3675  void BeginString(const char* pStr = VMA_NULL);
3676  void ContinueString(const char* pStr);
3677  void ContinueString(uint32_t n);
3678  void ContinueString(uint64_t n);
3679  void EndString(const char* pStr = VMA_NULL);
3680 
3681  void WriteNumber(uint32_t n);
3682  void WriteNumber(uint64_t n);
3683  void WriteBool(bool b);
3684  void WriteNull();
3685 
3686 private:
3687  static const char* const INDENT;
3688 
3689  enum COLLECTION_TYPE
3690  {
3691  COLLECTION_TYPE_OBJECT,
3692  COLLECTION_TYPE_ARRAY,
3693  };
3694  struct StackItem
3695  {
3696  COLLECTION_TYPE type;
3697  uint32_t valueCount;
3698  bool singleLineMode;
3699  };
3700 
3701  VmaStringBuilder& m_SB;
3702  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
3703  bool m_InsideString;
3704 
3705  void BeginValue(bool isString);
3706  void WriteIndent(bool oneLess = false);
3707 };
3708 
3709 const char* const VmaJsonWriter::INDENT = " ";
3710 
3711 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
3712  m_SB(sb),
3713  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
3714  m_InsideString(false)
3715 {
3716 }
3717 
3718 VmaJsonWriter::~VmaJsonWriter()
3719 {
3720  VMA_ASSERT(!m_InsideString);
3721  VMA_ASSERT(m_Stack.empty());
3722 }
3723 
3724 void VmaJsonWriter::BeginObject(bool singleLine)
3725 {
3726  VMA_ASSERT(!m_InsideString);
3727 
3728  BeginValue(false);
3729  m_SB.Add('{');
3730 
3731  StackItem item;
3732  item.type = COLLECTION_TYPE_OBJECT;
3733  item.valueCount = 0;
3734  item.singleLineMode = singleLine;
3735  m_Stack.push_back(item);
3736 }
3737 
3738 void VmaJsonWriter::EndObject()
3739 {
3740  VMA_ASSERT(!m_InsideString);
3741 
3742  WriteIndent(true);
3743  m_SB.Add('}');
3744 
3745  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
3746  m_Stack.pop_back();
3747 }
3748 
3749 void VmaJsonWriter::BeginArray(bool singleLine)
3750 {
3751  VMA_ASSERT(!m_InsideString);
3752 
3753  BeginValue(false);
3754  m_SB.Add('[');
3755 
3756  StackItem item;
3757  item.type = COLLECTION_TYPE_ARRAY;
3758  item.valueCount = 0;
3759  item.singleLineMode = singleLine;
3760  m_Stack.push_back(item);
3761 }
3762 
3763 void VmaJsonWriter::EndArray()
3764 {
3765  VMA_ASSERT(!m_InsideString);
3766 
3767  WriteIndent(true);
3768  m_SB.Add(']');
3769 
3770  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
3771  m_Stack.pop_back();
3772 }
3773 
3774 void VmaJsonWriter::WriteString(const char* pStr)
3775 {
3776  BeginString(pStr);
3777  EndString();
3778 }
3779 
3780 void VmaJsonWriter::BeginString(const char* pStr)
3781 {
3782  VMA_ASSERT(!m_InsideString);
3783 
3784  BeginValue(true);
3785  m_SB.Add('"');
3786  m_InsideString = true;
3787  if(pStr != VMA_NULL && pStr[0] != '\0')
3788  {
3789  ContinueString(pStr);
3790  }
3791 }
3792 
3793 void VmaJsonWriter::ContinueString(const char* pStr)
3794 {
3795  VMA_ASSERT(m_InsideString);
3796 
3797  const size_t strLen = strlen(pStr);
3798  for(size_t i = 0; i < strLen; ++i)
3799  {
3800  char ch = pStr[i];
3801  if(ch == '\\')
3802  {
3803  m_SB.Add("\\\\");
3804  }
3805  else if(ch == '"')
3806  {
3807  m_SB.Add("\\\"");
3808  }
3809  else if(ch >= 32)
3810  {
3811  m_SB.Add(ch);
3812  }
3813  else switch(ch)
3814  {
3815  case '\n':
3816  m_SB.Add("\\n");
3817  break;
3818  case '\r':
3819  m_SB.Add("\\r");
3820  break;
3821  case '\t':
3822  m_SB.Add("\\t");
3823  break;
3824  default:
3825  VMA_ASSERT(0 && "Character not currently supported.");
3826  break;
3827  }
3828  }
3829 }
3830 
3831 void VmaJsonWriter::ContinueString(uint32_t n)
3832 {
3833  VMA_ASSERT(m_InsideString);
3834  m_SB.AddNumber(n);
3835 }
3836 
3837 void VmaJsonWriter::ContinueString(uint64_t n)
3838 {
3839  VMA_ASSERT(m_InsideString);
3840  m_SB.AddNumber(n);
3841 }
3842 
3843 void VmaJsonWriter::EndString(const char* pStr)
3844 {
3845  VMA_ASSERT(m_InsideString);
3846  if(pStr != VMA_NULL && pStr[0] != '\0')
3847  {
3848  ContinueString(pStr);
3849  }
3850  m_SB.Add('"');
3851  m_InsideString = false;
3852 }
3853 
3854 void VmaJsonWriter::WriteNumber(uint32_t n)
3855 {
3856  VMA_ASSERT(!m_InsideString);
3857  BeginValue(false);
3858  m_SB.AddNumber(n);
3859 }
3860 
3861 void VmaJsonWriter::WriteNumber(uint64_t n)
3862 {
3863  VMA_ASSERT(!m_InsideString);
3864  BeginValue(false);
3865  m_SB.AddNumber(n);
3866 }
3867 
3868 void VmaJsonWriter::WriteBool(bool b)
3869 {
3870  VMA_ASSERT(!m_InsideString);
3871  BeginValue(false);
3872  m_SB.Add(b ? "true" : "false");
3873 }
3874 
3875 void VmaJsonWriter::WriteNull()
3876 {
3877  VMA_ASSERT(!m_InsideString);
3878  BeginValue(false);
3879  m_SB.Add("null");
3880 }
3881 
3882 void VmaJsonWriter::BeginValue(bool isString)
3883 {
3884  if(!m_Stack.empty())
3885  {
3886  StackItem& currItem = m_Stack.back();
3887  if(currItem.type == COLLECTION_TYPE_OBJECT &&
3888  currItem.valueCount % 2 == 0)
3889  {
3890  VMA_ASSERT(isString);
3891  }
3892 
3893  if(currItem.type == COLLECTION_TYPE_OBJECT &&
3894  currItem.valueCount % 2 != 0)
3895  {
3896  m_SB.Add(": ");
3897  }
3898  else if(currItem.valueCount > 0)
3899  {
3900  m_SB.Add(", ");
3901  WriteIndent();
3902  }
3903  else
3904  {
3905  WriteIndent();
3906  }
3907  ++currItem.valueCount;
3908  }
3909 }
3910 
3911 void VmaJsonWriter::WriteIndent(bool oneLess)
3912 {
3913  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
3914  {
3915  m_SB.AddNewLine();
3916 
3917  size_t count = m_Stack.size();
3918  if(count > 0 && oneLess)
3919  {
3920  --count;
3921  }
3922  for(size_t i = 0; i < count; ++i)
3923  {
3924  m_SB.Add(INDENT);
3925  }
3926  }
3927 }
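
// Illustrative sketch (not part of the original file): how BeginValue() renders
// keys and values. In an object, values at even positions must be strings (keys);
// a ": " is emitted before each value at an odd position. With the single-space
// INDENT above, this sequence:
#if 0
    json.BeginObject();
    json.WriteString("Count"); // even valueCount in an object => this is a key
    json.WriteNumber(2u);      // odd valueCount => ": " is emitted first
    json.EndObject();
#endif
// produces:
// {
//  "Count": 2
// }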
3928 
3929 #endif // #if VMA_STATS_STRING_ENABLED
3930 
3932 
3933 VkDeviceSize VmaAllocation_T::GetOffset() const
3934 {
3935  switch(m_Type)
3936  {
3937  case ALLOCATION_TYPE_BLOCK:
3938  return m_BlockAllocation.m_Offset;
3939  case ALLOCATION_TYPE_OWN:
3940  return 0;
3941  default:
3942  VMA_ASSERT(0);
3943  return 0;
3944  }
3945 }
3946 
3947 VkDeviceMemory VmaAllocation_T::GetMemory() const
3948 {
3949  switch(m_Type)
3950  {
3951  case ALLOCATION_TYPE_BLOCK:
3952  return m_BlockAllocation.m_Block->m_hMemory;
3953  case ALLOCATION_TYPE_OWN:
3954  return m_OwnAllocation.m_hMemory;
3955  default:
3956  VMA_ASSERT(0);
3957  return VK_NULL_HANDLE;
3958  }
3959 }
3960 
3961 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
3962 {
3963  switch(m_Type)
3964  {
3965  case ALLOCATION_TYPE_BLOCK:
3966  return m_BlockAllocation.m_Block->m_MemoryTypeIndex;
3967  case ALLOCATION_TYPE_OWN:
3968  return m_OwnAllocation.m_MemoryTypeIndex;
3969  default:
3970  VMA_ASSERT(0);
3971  return UINT32_MAX;
3972  }
3973 }
3974 
3975 VMA_BLOCK_VECTOR_TYPE VmaAllocation_T::GetBlockVectorType() const
3976 {
3977  switch(m_Type)
3978  {
3979  case ALLOCATION_TYPE_BLOCK:
3980  return m_BlockAllocation.m_Block->m_BlockVectorType;
3981  case ALLOCATION_TYPE_OWN:
3982  return (m_OwnAllocation.m_PersistentMap ? VMA_BLOCK_VECTOR_TYPE_MAPPED : VMA_BLOCK_VECTOR_TYPE_UNMAPPED);
3983  default:
3984  VMA_ASSERT(0);
3985  return VMA_BLOCK_VECTOR_TYPE_COUNT;
3986  }
3987 }
3988 
3989 void* VmaAllocation_T::GetMappedData() const
3990 {
3991  switch(m_Type)
3992  {
3993  case ALLOCATION_TYPE_BLOCK:
3994  if(m_BlockAllocation.m_Block->m_pMappedData != VMA_NULL)
3995  {
3996  return (char*)m_BlockAllocation.m_Block->m_pMappedData + m_BlockAllocation.m_Offset;
3997  }
3998  else
3999  {
4000  return VMA_NULL;
4001  }
4002  break;
4003  case ALLOCATION_TYPE_OWN:
4004  return m_OwnAllocation.m_pMappedData;
4005  default:
4006  VMA_ASSERT(0);
4007  return VMA_NULL;
4008  }
4009 }
4010 
4011 bool VmaAllocation_T::CanBecomeLost() const
4012 {
4013  switch(m_Type)
4014  {
4015  case ALLOCATION_TYPE_BLOCK:
4016  return m_BlockAllocation.m_CanBecomeLost;
4017  case ALLOCATION_TYPE_OWN:
4018  return false;
4019  default:
4020  VMA_ASSERT(0);
4021  return false;
4022  }
4023 }
4024 
4025 VmaPool VmaAllocation_T::GetPool() const
4026 {
4027  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
4028  return m_BlockAllocation.m_hPool;
4029 }
4030 
4031 VkResult VmaAllocation_T::OwnAllocMapPersistentlyMappedMemory(VmaAllocator hAllocator)
4032 {
4033  VMA_ASSERT(m_Type == ALLOCATION_TYPE_OWN);
4034  if(m_OwnAllocation.m_PersistentMap)
4035  {
4036  return (*hAllocator->GetVulkanFunctions().vkMapMemory)(
4037  hAllocator->m_hDevice,
4038  m_OwnAllocation.m_hMemory,
4039  0,
4040  VK_WHOLE_SIZE,
4041  0,
4042  &m_OwnAllocation.m_pMappedData);
4043  }
4044  return VK_SUCCESS;
4045 }
4046 void VmaAllocation_T::OwnAllocUnmapPersistentlyMappedMemory(VmaAllocator hAllocator)
4047 {
4048  VMA_ASSERT(m_Type == ALLOCATION_TYPE_OWN);
4049  if(m_OwnAllocation.m_pMappedData)
4050  {
4051  VMA_ASSERT(m_OwnAllocation.m_PersistentMap);
4052  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_OwnAllocation.m_hMemory);
4053  m_OwnAllocation.m_pMappedData = VMA_NULL;
4054  }
4055 }
4056 
4057 
4058 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
4059 {
4060  VMA_ASSERT(CanBecomeLost());
4061 
4062  /*
4063  Warning: This is a carefully designed algorithm.
4064  Do not modify unless you really know what you're doing :)
4065  */
4066  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
4067  for(;;)
4068  {
4069  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
4070  {
4071  VMA_ASSERT(0);
4072  return false;
4073  }
4074  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
4075  {
4076  return false;
4077  }
4078  else // Last use time earlier than current time.
4079  {
4080  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
4081  {
4082  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
4083  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
4084  return true;
4085  }
4086  }
4087  }
4088 }
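
// The loop above is the standard compare-exchange retry pattern: when the CAS
// fails, compare_exchange_weak refreshes `localLastUseFrameIndex` with the value
// another thread just stored, and the loop re-validates before retrying.
// A generic sketch of the same pattern with std::atomic, for illustration:
#if 0
#include <atomic>
static bool TryTransition(std::atomic<uint32_t>& state, uint32_t forbidden, uint32_t target)
{
    uint32_t observed = state.load();
    for(;;)
    {
        if(observed == forbidden)
        {
            return false; // another thread already moved it; give up
        }
        if(state.compare_exchange_weak(observed, target))
        {
            return true; // we won the race; `state` is now `target`
        }
        // CAS failed: `observed` has been refreshed; loop and re-validate.
    }
}
#endif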
4089 
4090 #if VMA_STATS_STRING_ENABLED
4091 
4092 // Corresponds to values of enum VmaSuballocationType.
4093 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
4094  "FREE",
4095  "UNKNOWN",
4096  "BUFFER",
4097  "IMAGE_UNKNOWN",
4098  "IMAGE_LINEAR",
4099  "IMAGE_OPTIMAL",
4100 };
4101 
4102 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
4103 {
4104  json.BeginObject();
4105 
4106  json.WriteString("Blocks");
4107  json.WriteNumber(stat.blockCount);
4108 
4109  json.WriteString("Allocations");
4110  json.WriteNumber(stat.allocationCount);
4111 
4112  json.WriteString("UnusedRanges");
4113  json.WriteNumber(stat.unusedRangeCount);
4114 
4115  json.WriteString("UsedBytes");
4116  json.WriteNumber(stat.usedBytes);
4117 
4118  json.WriteString("UnusedBytes");
4119  json.WriteNumber(stat.unusedBytes);
4120 
4121  if(stat.allocationCount > 1)
4122  {
4123  json.WriteString("AllocationSize");
4124  json.BeginObject(true);
4125  json.WriteString("Min");
4126  json.WriteNumber(stat.allocationSizeMin);
4127  json.WriteString("Avg");
4128  json.WriteNumber(stat.allocationSizeAvg);
4129  json.WriteString("Max");
4130  json.WriteNumber(stat.allocationSizeMax);
4131  json.EndObject();
4132  }
4133 
4134  if(stat.unusedRangeCount > 1)
4135  {
4136  json.WriteString("UnusedRangeSize");
4137  json.BeginObject(true);
4138  json.WriteString("Min");
4139  json.WriteNumber(stat.unusedRangeSizeMin);
4140  json.WriteString("Avg");
4141  json.WriteNumber(stat.unusedRangeSizeAvg);
4142  json.WriteString("Max");
4143  json.WriteNumber(stat.unusedRangeSizeMax);
4144  json.EndObject();
4145  }
4146 
4147  json.EndObject();
4148 }
4149 
4150 #endif // #if VMA_STATS_STRING_ENABLED
4151 
4152 struct VmaSuballocationItemSizeLess
4153 {
4154  bool operator()(
4155  const VmaSuballocationList::iterator lhs,
4156  const VmaSuballocationList::iterator rhs) const
4157  {
4158  return lhs->size < rhs->size;
4159  }
4160  bool operator()(
4161  const VmaSuballocationList::iterator lhs,
4162  VkDeviceSize rhsSize) const
4163  {
4164  return lhs->size < rhsSize;
4165  }
4166 };
4167 
4168 ////////////////////////////////////////////////////////////////////////////////
4169 // class VmaBlockMetadata
4170 
4171 VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
4172  m_Size(0),
4173  m_FreeCount(0),
4174  m_SumFreeSize(0),
4175  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
4176  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
4177 {
4178 }
4179 
4180 VmaBlockMetadata::~VmaBlockMetadata()
4181 {
4182 }
4183 
4184 void VmaBlockMetadata::Init(VkDeviceSize size)
4185 {
4186  m_Size = size;
4187  m_FreeCount = 1;
4188  m_SumFreeSize = size;
4189 
4190  VmaSuballocation suballoc = {};
4191  suballoc.offset = 0;
4192  suballoc.size = size;
4193  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
4194  suballoc.hAllocation = VK_NULL_HANDLE;
4195 
4196  m_Suballocations.push_back(suballoc);
4197  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
4198  --suballocItem;
4199  m_FreeSuballocationsBySize.push_back(suballocItem);
4200 }
4201 
4202 bool VmaBlockMetadata::Validate() const
4203 {
4204  if(m_Suballocations.empty())
4205  {
4206  return false;
4207  }
4208 
4209  // Expected offset of each suballocation, as calculated from the previous ones.
4210  VkDeviceSize calculatedOffset = 0;
4211  // Expected number of free suballocations as calculated from traversing their list.
4212  uint32_t calculatedFreeCount = 0;
4213  // Expected sum size of free suballocations as calculated from traversing their list.
4214  VkDeviceSize calculatedSumFreeSize = 0;
4215  // Expected number of free suballocations that should be registered in
4216  // m_FreeSuballocationsBySize calculated from traversing their list.
4217  size_t freeSuballocationsToRegister = 0;
4218  // True if the previously visited suballocation was free.
4219  bool prevFree = false;
4220 
4221  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
4222  suballocItem != m_Suballocations.cend();
4223  ++suballocItem)
4224  {
4225  const VmaSuballocation& subAlloc = *suballocItem;
4226 
4227  // Actual offset of this suballocation doesn't match expected one.
4228  if(subAlloc.offset != calculatedOffset)
4229  {
4230  return false;
4231  }
4232 
4233  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
4234  // Two adjacent free suballocations are invalid. They should be merged.
4235  if(prevFree && currFree)
4236  {
4237  return false;
4238  }
4239  prevFree = currFree;
4240 
4241  if(currFree != (subAlloc.hAllocation == VK_NULL_HANDLE))
4242  {
4243  return false;
4244  }
4245 
4246  if(currFree)
4247  {
4248  calculatedSumFreeSize += subAlloc.size;
4249  ++calculatedFreeCount;
4250  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
4251  {
4252  ++freeSuballocationsToRegister;
4253  }
4254  }
4255 
4256  calculatedOffset += subAlloc.size;
4257  }
4258 
4259  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
4260  // match expected one.
4261  if(m_FreeSuballocationsBySize.size() != freeSuballocationsToRegister)
4262  {
4263  return false;
4264  }
4265 
4266  VkDeviceSize lastSize = 0;
4267  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
4268  {
4269  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
4270 
4271  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
4272  if(suballocItem->type != VMA_SUBALLOCATION_TYPE_FREE)
4273  {
4274  return false;
4275  }
4276  // They must be sorted by size ascending.
4277  if(suballocItem->size < lastSize)
4278  {
4279  return false;
4280  }
4281 
4282  lastSize = suballocItem->size;
4283  }
4284 
4285  // Check if totals match the calculated values.
4286  return
4287  ValidateFreeSuballocationList() &&
4288  (calculatedOffset == m_Size) &&
4289  (calculatedSumFreeSize == m_SumFreeSize) &&
4290  (calculatedFreeCount == m_FreeCount);
4291 }
4292 
4293 VkDeviceSize VmaBlockMetadata::GetUnusedRangeSizeMax() const
4294 {
4295  if(!m_FreeSuballocationsBySize.empty())
4296  {
4297  return m_FreeSuballocationsBySize.back()->size;
4298  }
4299  else
4300  {
4301  return 0;
4302  }
4303 }
4304 
4305 bool VmaBlockMetadata::IsEmpty() const
4306 {
4307  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
4308 }
4309 
4310 void VmaBlockMetadata::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
4311 {
4312  outInfo.blockCount = 1;
4313 
4314  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
4315  outInfo.allocationCount = rangeCount - m_FreeCount;
4316  outInfo.unusedRangeCount = m_FreeCount;
4317 
4318  outInfo.unusedBytes = m_SumFreeSize;
4319  outInfo.usedBytes = m_Size - outInfo.unusedBytes;
4320 
4321  outInfo.allocationSizeMin = UINT64_MAX;
4322  outInfo.allocationSizeMax = 0;
4323  outInfo.unusedRangeSizeMin = UINT64_MAX;
4324  outInfo.unusedRangeSizeMax = 0;
4325 
4326  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
4327  suballocItem != m_Suballocations.cend();
4328  ++suballocItem)
4329  {
4330  const VmaSuballocation& suballoc = *suballocItem;
4331  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
4332  {
4333  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
4334  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
4335  }
4336  else
4337  {
4338  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
4339  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
4340  }
4341  }
4342 }
4343 
4344 void VmaBlockMetadata::AddPoolStats(VmaPoolStats& inoutStats) const
4345 {
4346  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
4347 
4348  inoutStats.size += m_Size;
4349  inoutStats.unusedSize += m_SumFreeSize;
4350  inoutStats.allocationCount += rangeCount - m_FreeCount;
4351  inoutStats.unusedRangeCount += m_FreeCount;
4352  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
4353 }
4354 
4355 #if VMA_STATS_STRING_ENABLED
4356 
4357 void VmaBlockMetadata::PrintDetailedMap(class VmaJsonWriter& json) const
4358 {
4359  json.BeginObject();
4360 
4361  json.WriteString("TotalBytes");
4362  json.WriteNumber(m_Size);
4363 
4364  json.WriteString("UnusedBytes");
4365  json.WriteNumber(m_SumFreeSize);
4366 
4367  json.WriteString("Allocations");
4368  json.WriteNumber(m_Suballocations.size() - m_FreeCount);
4369 
4370  json.WriteString("UnusedRanges");
4371  json.WriteNumber(m_FreeCount);
4372 
4373  json.WriteString("Suballocations");
4374  json.BeginArray();
4375  size_t i = 0;
4376  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
4377  suballocItem != m_Suballocations.cend();
4378  ++suballocItem, ++i)
4379  {
4380  json.BeginObject(true);
4381 
4382  json.WriteString("Type");
4383  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[suballocItem->type]);
4384 
4385  json.WriteString("Size");
4386  json.WriteNumber(suballocItem->size);
4387 
4388  json.WriteString("Offset");
4389  json.WriteNumber(suballocItem->offset);
4390 
4391  json.EndObject();
4392  }
4393  json.EndArray();
4394 
4395  json.EndObject();
4396 }
4397 
4398 #endif // #if VMA_STATS_STRING_ENABLED
4399 
4400 /*
4401 How many suitable free suballocations to analyze before choosing the best one.
4402 - Set to 1 to use First-Fit algorithm - the first suitable free suballocation
4403  will be chosen.
4404 - Set to UINT32_MAX to use Best-Fit/Worst-Fit algorithm - all suitable free
4405  suballocations will be analyzed and the best one will be chosen.
4406 - Any other value is also acceptable.
4407 */
4408 //static const uint32_t MAX_SUITABLE_SUBALLOCATIONS_TO_CHECK = 8;
4409 
4410 void VmaBlockMetadata::CreateFirstAllocationRequest(VmaAllocationRequest* pAllocationRequest)
4411 {
4412  VMA_ASSERT(IsEmpty());
4413  pAllocationRequest->offset = 0;
4414  pAllocationRequest->sumFreeSize = m_SumFreeSize;
4415  pAllocationRequest->sumItemSize = 0;
4416  pAllocationRequest->item = m_Suballocations.begin();
4417  pAllocationRequest->itemsToMakeLostCount = 0;
4418 }
4419 
4420 bool VmaBlockMetadata::CreateAllocationRequest(
4421  uint32_t currentFrameIndex,
4422  uint32_t frameInUseCount,
4423  VkDeviceSize bufferImageGranularity,
4424  VkDeviceSize allocSize,
4425  VkDeviceSize allocAlignment,
4426  VmaSuballocationType allocType,
4427  bool canMakeOtherLost,
4428  VmaAllocationRequest* pAllocationRequest)
4429 {
4430  VMA_ASSERT(allocSize > 0);
4431  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
4432  VMA_ASSERT(pAllocationRequest != VMA_NULL);
4433  VMA_HEAVY_ASSERT(Validate());
4434 
4435  // There is not enough total free space in this block to fulfill the request: early return.
4436  if(canMakeOtherLost == false && m_SumFreeSize < allocSize)
4437  {
4438  return false;
4439  }
4440 
4441  // New algorithm, efficiently searching m_FreeSuballocationsBySize.
4442  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
4443  if(freeSuballocCount > 0)
4444  {
4445  if(VMA_BEST_FIT)
4446  {
4447  // Find first free suballocation with size not less than allocSize.
4448  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
4449  m_FreeSuballocationsBySize.data(),
4450  m_FreeSuballocationsBySize.data() + freeSuballocCount,
4451  allocSize,
4452  VmaSuballocationItemSizeLess());
4453  size_t index = it - m_FreeSuballocationsBySize.data();
4454  for(; index < freeSuballocCount; ++index)
4455  {
4456  if(CheckAllocation(
4457  currentFrameIndex,
4458  frameInUseCount,
4459  bufferImageGranularity,
4460  allocSize,
4461  allocAlignment,
4462  allocType,
4463  m_FreeSuballocationsBySize[index],
4464  false, // canMakeOtherLost
4465  &pAllocationRequest->offset,
4466  &pAllocationRequest->itemsToMakeLostCount,
4467  &pAllocationRequest->sumFreeSize,
4468  &pAllocationRequest->sumItemSize))
4469  {
4470  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
4471  return true;
4472  }
4473  }
4474  }
4475  else
4476  {
4477  // Search starting from the biggest suballocations.
4478  for(size_t index = freeSuballocCount; index--; )
4479  {
4480  if(CheckAllocation(
4481  currentFrameIndex,
4482  frameInUseCount,
4483  bufferImageGranularity,
4484  allocSize,
4485  allocAlignment,
4486  allocType,
4487  m_FreeSuballocationsBySize[index],
4488  false, // canMakeOtherLost
4489  &pAllocationRequest->offset,
4490  &pAllocationRequest->itemsToMakeLostCount,
4491  &pAllocationRequest->sumFreeSize,
4492  &pAllocationRequest->sumItemSize))
4493  {
4494  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
4495  return true;
4496  }
4497  }
4498  }
4499  }
4500 
4501  if(canMakeOtherLost)
4502  {
4503  // Brute-force algorithm. TODO: Come up with something better.
4504 
4505  pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
4506  pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;
4507 
4508  VmaAllocationRequest tmpAllocRequest = {};
4509  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
4510  suballocIt != m_Suballocations.end();
4511  ++suballocIt)
4512  {
4513  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
4514  suballocIt->hAllocation->CanBecomeLost())
4515  {
4516  if(CheckAllocation(
4517  currentFrameIndex,
4518  frameInUseCount,
4519  bufferImageGranularity,
4520  allocSize,
4521  allocAlignment,
4522  allocType,
4523  suballocIt,
4524  canMakeOtherLost,
4525  &tmpAllocRequest.offset,
4526  &tmpAllocRequest.itemsToMakeLostCount,
4527  &tmpAllocRequest.sumFreeSize,
4528  &tmpAllocRequest.sumItemSize))
4529  {
4530  tmpAllocRequest.item = suballocIt;
4531 
4532  if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost())
4533  {
4534  *pAllocationRequest = tmpAllocRequest;
4535  }
4536  }
4537  }
4538  }
4539 
4540  if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
4541  {
4542  return true;
4543  }
4544  }
4545 
4546  return false;
4547 }
4548 
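// Illustrative sketch (not part of the library): the best-fit path above is
// essentially a lower_bound over suballocation sizes kept sorted ascending.
// The hypothetical, self-contained helper below shows the same search on a
// plain array of sizes.
static inline size_t VmaExampleFindFirstNotLess(
    const uint64_t* sortedSizes, size_t count, uint64_t allocSize)
{
    // Binary search for the index of the first element >= allocSize.
    size_t lo = 0, hi = count;
    while(lo < hi)
    {
        const size_t mid = lo + (hi - lo) / 2;
        if(sortedSizes[mid] < allocSize)
            lo = mid + 1;
        else
            hi = mid;
    }
    return lo; // Equals count when no free suballocation is large enough.
}
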
4549 bool VmaBlockMetadata::MakeRequestedAllocationsLost(
4550  uint32_t currentFrameIndex,
4551  uint32_t frameInUseCount,
4552  VmaAllocationRequest* pAllocationRequest)
4553 {
4554  while(pAllocationRequest->itemsToMakeLostCount > 0)
4555  {
4556  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
4557  {
4558  ++pAllocationRequest->item;
4559  }
4560  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
4561  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
4562  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
4563  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
4564  {
4565  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
4566  --pAllocationRequest->itemsToMakeLostCount;
4567  }
4568  else
4569  {
4570  return false;
4571  }
4572  }
4573 
4574  VMA_HEAVY_ASSERT(Validate());
4575  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
4576  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
4577 
4578  return true;
4579 }
4580 
4581 uint32_t VmaBlockMetadata::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
4582 {
4583  uint32_t lostAllocationCount = 0;
4584  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
4585  it != m_Suballocations.end();
4586  ++it)
4587  {
4588  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
4589  it->hAllocation->CanBecomeLost() &&
4590  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
4591  {
4592  it = FreeSuballocation(it);
4593  ++lostAllocationCount;
4594  }
4595  }
4596  return lostAllocationCount;
4597 }
4598 
4599 void VmaBlockMetadata::Alloc(
4600  const VmaAllocationRequest& request,
4601  VmaSuballocationType type,
4602  VkDeviceSize allocSize,
4603  VmaAllocation hAllocation)
4604 {
4605  VMA_ASSERT(request.item != m_Suballocations.end());
4606  VmaSuballocation& suballoc = *request.item;
4607  // Given suballocation is a free block.
4608  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
4609  // Given offset is inside this suballocation.
4610  VMA_ASSERT(request.offset >= suballoc.offset);
4611  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
4612  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
4613  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
4614 
4615  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
4616  // it to become used.
4617  UnregisterFreeSuballocation(request.item);
4618 
4619  suballoc.offset = request.offset;
4620  suballoc.size = allocSize;
4621  suballoc.type = type;
4622  suballoc.hAllocation = hAllocation;
4623 
4624  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
4625  if(paddingEnd)
4626  {
4627  VmaSuballocation paddingSuballoc = {};
4628  paddingSuballoc.offset = request.offset + allocSize;
4629  paddingSuballoc.size = paddingEnd;
4630  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
4631  VmaSuballocationList::iterator next = request.item;
4632  ++next;
4633  const VmaSuballocationList::iterator paddingEndItem =
4634  m_Suballocations.insert(next, paddingSuballoc);
4635  RegisterFreeSuballocation(paddingEndItem);
4636  }
4637 
4638  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
4639  if(paddingBegin)
4640  {
4641  VmaSuballocation paddingSuballoc = {};
4642  paddingSuballoc.offset = request.offset - paddingBegin;
4643  paddingSuballoc.size = paddingBegin;
4644  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
4645  const VmaSuballocationList::iterator paddingBeginItem =
4646  m_Suballocations.insert(request.item, paddingSuballoc);
4647  RegisterFreeSuballocation(paddingBeginItem);
4648  }
4649 
4650  // Update totals.
4651  m_FreeCount = m_FreeCount - 1;
4652  if(paddingBegin > 0)
4653  {
4654  ++m_FreeCount;
4655  }
4656  if(paddingEnd > 0)
4657  {
4658  ++m_FreeCount;
4659  }
4660  m_SumFreeSize -= allocSize;
4661 }
4662 
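// Worked example for the split performed above (hypothetical values): given a
// free suballocation at offset 100 with size 300, and a request placed at
// offset 128 with allocSize 200:
//   paddingBegin = 128 - 100       = 28  -> new free suballocation [100, 128)
//   used range                           -> [128, 328)
//   paddingEnd   = 300 - 28 - 200  = 72  -> new free suballocation [328, 400)
// m_FreeCount changes by -1 for the consumed free range plus +1 per non-empty
// padding, i.e. by +1 in this example, and m_SumFreeSize drops by allocSize.
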
4663 void VmaBlockMetadata::Free(const VmaAllocation allocation)
4664 {
4665  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
4666  suballocItem != m_Suballocations.end();
4667  ++suballocItem)
4668  {
4669  VmaSuballocation& suballoc = *suballocItem;
4670  if(suballoc.hAllocation == allocation)
4671  {
4672  FreeSuballocation(suballocItem);
4673  VMA_HEAVY_ASSERT(Validate());
4674  return;
4675  }
4676  }
4677  VMA_ASSERT(0 && "Not found!");
4678 }
4679 
4680 bool VmaBlockMetadata::ValidateFreeSuballocationList() const
4681 {
4682  VkDeviceSize lastSize = 0;
4683  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
4684  {
4685  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
4686 
4687  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
4688  {
4689  VMA_ASSERT(0);
4690  return false;
4691  }
4692  if(it->size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
4693  {
4694  VMA_ASSERT(0);
4695  return false;
4696  }
4697  if(it->size < lastSize)
4698  {
4699  VMA_ASSERT(0);
4700  return false;
4701  }
4702 
4703  lastSize = it->size;
4704  }
4705  return true;
4706 }
4707 
4708 bool VmaBlockMetadata::CheckAllocation(
4709  uint32_t currentFrameIndex,
4710  uint32_t frameInUseCount,
4711  VkDeviceSize bufferImageGranularity,
4712  VkDeviceSize allocSize,
4713  VkDeviceSize allocAlignment,
4714  VmaSuballocationType allocType,
4715  VmaSuballocationList::const_iterator suballocItem,
4716  bool canMakeOtherLost,
4717  VkDeviceSize* pOffset,
4718  size_t* itemsToMakeLostCount,
4719  VkDeviceSize* pSumFreeSize,
4720  VkDeviceSize* pSumItemSize) const
4721 {
4722  VMA_ASSERT(allocSize > 0);
4723  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
4724  VMA_ASSERT(suballocItem != m_Suballocations.cend());
4725  VMA_ASSERT(pOffset != VMA_NULL);
4726 
4727  *itemsToMakeLostCount = 0;
4728  *pSumFreeSize = 0;
4729  *pSumItemSize = 0;
4730 
4731  if(canMakeOtherLost)
4732  {
4733  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
4734  {
4735  *pSumFreeSize = suballocItem->size;
4736  }
4737  else
4738  {
4739  if(suballocItem->hAllocation->CanBecomeLost() &&
4740  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
4741  {
4742  ++*itemsToMakeLostCount;
4743  *pSumItemSize = suballocItem->size;
4744  }
4745  else
4746  {
4747  return false;
4748  }
4749  }
4750 
4751  // Remaining size is too small for this request: Early return.
4752  if(m_Size - suballocItem->offset < allocSize)
4753  {
4754  return false;
4755  }
4756 
4757  // Start from offset equal to beginning of this suballocation.
4758  *pOffset = suballocItem->offset;
4759 
4760  // Apply VMA_DEBUG_MARGIN at the beginning.
4761  if((VMA_DEBUG_MARGIN > 0) && suballocItem != m_Suballocations.cbegin())
4762  {
4763  *pOffset += VMA_DEBUG_MARGIN;
4764  }
4765 
4766  // Apply alignment.
4767  const VkDeviceSize alignment = VMA_MAX(allocAlignment, static_cast<VkDeviceSize>(VMA_DEBUG_ALIGNMENT));
4768  *pOffset = VmaAlignUp(*pOffset, alignment);
4769 
4770  // Check previous suballocations for BufferImageGranularity conflicts.
4771  // Make bigger alignment if necessary.
4772  if(bufferImageGranularity > 1)
4773  {
4774  bool bufferImageGranularityConflict = false;
4775  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
4776  while(prevSuballocItem != m_Suballocations.cbegin())
4777  {
4778  --prevSuballocItem;
4779  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
4780  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
4781  {
4782  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
4783  {
4784  bufferImageGranularityConflict = true;
4785  break;
4786  }
4787  }
4788  else
4789  // Already on previous page.
4790  break;
4791  }
4792  if(bufferImageGranularityConflict)
4793  {
4794  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
4795  }
4796  }
4797 
4798  // Now that we have final *pOffset, check if we are past suballocItem.
4799  // If so, return false - this function should then be called with another suballocItem as the starting point.
4800  if(*pOffset >= suballocItem->offset + suballocItem->size)
4801  {
4802  return false;
4803  }
4804 
4805  // Calculate padding at the beginning based on current offset.
4806  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
4807 
4808  // Calculate required margin at the end if this is not last suballocation.
4809  VmaSuballocationList::const_iterator next = suballocItem;
4810  ++next;
4811  const VkDeviceSize requiredEndMargin =
4812  (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;
4813 
4814  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
4815  // Another early return check.
4816  if(suballocItem->offset + totalSize > m_Size)
4817  {
4818  return false;
4819  }
4820 
4821  // Advance lastSuballocItem until desired size is reached.
4822  // Update itemsToMakeLostCount.
4823  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
4824  if(totalSize > suballocItem->size)
4825  {
4826  VkDeviceSize remainingSize = totalSize - suballocItem->size;
4827  while(remainingSize > 0)
4828  {
4829  ++lastSuballocItem;
4830  if(lastSuballocItem == m_Suballocations.cend())
4831  {
4832  return false;
4833  }
4834  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
4835  {
4836  *pSumFreeSize += lastSuballocItem->size;
4837  }
4838  else
4839  {
4840  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
4841  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
4842  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
4843  {
4844  ++*itemsToMakeLostCount;
4845  *pSumItemSize += lastSuballocItem->size;
4846  }
4847  else
4848  {
4849  return false;
4850  }
4851  }
4852  remainingSize = (lastSuballocItem->size < remainingSize) ?
4853  remainingSize - lastSuballocItem->size : 0;
4854  }
4855  }
4856 
4857  // Check next suballocations for BufferImageGranularity conflicts.
4858  // If conflict exists, we must mark more allocations lost or fail.
4859  if(bufferImageGranularity > 1)
4860  {
4861  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
4862  ++nextSuballocItem;
4863  while(nextSuballocItem != m_Suballocations.cend())
4864  {
4865  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
4866  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
4867  {
4868  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
4869  {
4870  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
4871  if(nextSuballoc.hAllocation->CanBecomeLost() &&
4872  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
4873  {
4874  ++*itemsToMakeLostCount;
4875  }
4876  else
4877  {
4878  return false;
4879  }
4880  }
4881  }
4882  else
4883  {
4884  // Already on next page.
4885  break;
4886  }
4887  ++nextSuballocItem;
4888  }
4889  }
4890  }
4891  else
4892  {
4893  const VmaSuballocation& suballoc = *suballocItem;
4894  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
4895 
4896  *pSumFreeSize = suballoc.size;
4897 
4898  // Size of this suballocation is too small for this request: Early return.
4899  if(suballoc.size < allocSize)
4900  {
4901  return false;
4902  }
4903 
4904  // Start from offset equal to beginning of this suballocation.
4905  *pOffset = suballoc.offset;
4906 
4907  // Apply VMA_DEBUG_MARGIN at the beginning.
4908  if((VMA_DEBUG_MARGIN > 0) && suballocItem != m_Suballocations.cbegin())
4909  {
4910  *pOffset += VMA_DEBUG_MARGIN;
4911  }
4912 
4913  // Apply alignment.
4914  const VkDeviceSize alignment = VMA_MAX(allocAlignment, static_cast<VkDeviceSize>(VMA_DEBUG_ALIGNMENT));
4915  *pOffset = VmaAlignUp(*pOffset, alignment);
4916 
4917  // Check previous suballocations for BufferImageGranularity conflicts.
4918  // Make bigger alignment if necessary.
4919  if(bufferImageGranularity > 1)
4920  {
4921  bool bufferImageGranularityConflict = false;
4922  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
4923  while(prevSuballocItem != m_Suballocations.cbegin())
4924  {
4925  --prevSuballocItem;
4926  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
4927  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
4928  {
4929  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
4930  {
4931  bufferImageGranularityConflict = true;
4932  break;
4933  }
4934  }
4935  else
4936  // Already on previous page.
4937  break;
4938  }
4939  if(bufferImageGranularityConflict)
4940  {
4941  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
4942  }
4943  }
4944 
4945  // Calculate padding at the beginning based on current offset.
4946  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
4947 
4948  // Calculate required margin at the end if this is not last suballocation.
4949  VmaSuballocationList::const_iterator next = suballocItem;
4950  ++next;
4951  const VkDeviceSize requiredEndMargin =
4952  (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;
4953 
4954  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
4955  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
4956  {
4957  return false;
4958  }
4959 
4960  // Check next suballocations for BufferImageGranularity conflicts.
4961  // If conflict exists, allocation cannot be made here.
4962  if(bufferImageGranularity > 1)
4963  {
4964  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
4965  ++nextSuballocItem;
4966  while(nextSuballocItem != m_Suballocations.cend())
4967  {
4968  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
4969  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
4970  {
4971  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
4972  {
4973  return false;
4974  }
4975  }
4976  else
4977  {
4978  // Already on next page.
4979  break;
4980  }
4981  ++nextSuballocItem;
4982  }
4983  }
4984  }
4985 
4986  // All tests passed: Success. pOffset is already filled.
4987  return true;
4988 }
4989 
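// Illustrative sketch (not part of the library): the alignment step above
// rounds *pOffset up to a multiple of the chosen alignment. Assuming
// VmaAlignUp (defined elsewhere in this file) behaves like the hypothetical
// helper below, an offset of 70 with alignment 64 becomes 128.
static inline uint64_t VmaExampleAlignUp(uint64_t val, uint64_t align)
{
    // Round val up to the nearest multiple of align (align must be > 0).
    return (val + align - 1) / align * align;
}
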
4990 void VmaBlockMetadata::MergeFreeWithNext(VmaSuballocationList::iterator item)
4991 {
4992  VMA_ASSERT(item != m_Suballocations.end());
4993  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
4994 
4995  VmaSuballocationList::iterator nextItem = item;
4996  ++nextItem;
4997  VMA_ASSERT(nextItem != m_Suballocations.end());
4998  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
4999 
5000  item->size += nextItem->size;
5001  --m_FreeCount;
5002  m_Suballocations.erase(nextItem);
5003 }
5004 
5005 VmaSuballocationList::iterator VmaBlockMetadata::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
5006 {
5007  // Change this suballocation to be marked as free.
5008  VmaSuballocation& suballoc = *suballocItem;
5009  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
5010  suballoc.hAllocation = VK_NULL_HANDLE;
5011 
5012  // Update totals.
5013  ++m_FreeCount;
5014  m_SumFreeSize += suballoc.size;
5015 
5016  // Merge with previous and/or next suballocation if it's also free.
5017  bool mergeWithNext = false;
5018  bool mergeWithPrev = false;
5019 
5020  VmaSuballocationList::iterator nextItem = suballocItem;
5021  ++nextItem;
5022  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
5023  {
5024  mergeWithNext = true;
5025  }
5026 
5027  VmaSuballocationList::iterator prevItem = suballocItem;
5028  if(suballocItem != m_Suballocations.begin())
5029  {
5030  --prevItem;
5031  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
5032  {
5033  mergeWithPrev = true;
5034  }
5035  }
5036 
5037  if(mergeWithNext)
5038  {
5039  UnregisterFreeSuballocation(nextItem);
5040  MergeFreeWithNext(suballocItem);
5041  }
5042 
5043  if(mergeWithPrev)
5044  {
5045  UnregisterFreeSuballocation(prevItem);
5046  MergeFreeWithNext(prevItem);
5047  RegisterFreeSuballocation(prevItem);
5048  return prevItem;
5049  }
5050  else
5051  {
5052  RegisterFreeSuballocation(suballocItem);
5053  return suballocItem;
5054  }
5055 }
5056 
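// Worked example for the merging above (hypothetical layout): freeing B in
// [A:free][B:used][C:free] first marks B free, then merges C into B
// (MergeFreeWithNext on B), then merges the enlarged B into A
// (MergeFreeWithNext on A), leaving one free suballocation covering all three
// ranges. m_FreeCount goes +1 for the freed B and -1 per merge, so the two
// original free neighbors collapse into a single free range.
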
5057 void VmaBlockMetadata::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
5058 {
5059  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
5060  VMA_ASSERT(item->size > 0);
5061 
5062  // You may want to enable this validation at the beginning or at the end of
5063  // this function, depending on what you want to check.
5064  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
5065 
5066  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
5067  {
5068  if(m_FreeSuballocationsBySize.empty())
5069  {
5070  m_FreeSuballocationsBySize.push_back(item);
5071  }
5072  else
5073  {
5074  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
5075  }
5076  }
5077 
5078  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
5079 }
5080 
5081 
5082 void VmaBlockMetadata::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
5083 {
5084  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
5085  VMA_ASSERT(item->size > 0);
5086 
5087  // You may want to enable this validation at the beginning or at the end of
5088  // this function, depending on what you want to check.
5089  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
5090 
5091  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
5092  {
5093  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
5094  m_FreeSuballocationsBySize.data(),
5095  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
5096  item,
5097  VmaSuballocationItemSizeLess());
5098  for(size_t index = it - m_FreeSuballocationsBySize.data();
5099  index < m_FreeSuballocationsBySize.size();
5100  ++index)
5101  {
5102  if(m_FreeSuballocationsBySize[index] == item)
5103  {
5104  VmaVectorRemove(m_FreeSuballocationsBySize, index);
5105  return;
5106  }
5107  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
5108  }
5109  VMA_ASSERT(0 && "Not found.");
5110  }
5111 
5112  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
5113 }
5114 
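// Worked example for the lookup above (hypothetical sizes): with registered
// free sizes {32, 64, 64, 64, 128} and item->size == 64, the binary search
// lands on the first 64; the loop then walks the run of equal sizes,
// comparing iterators until it finds the exact item, and asserts if it walks
// past the end of that run.
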
5115 ////////////////////////////////////////////////////////////////////////////////
5116 // class VmaDeviceMemoryBlock
5117 
5118 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
5119  m_MemoryTypeIndex(UINT32_MAX),
5120  m_BlockVectorType(VMA_BLOCK_VECTOR_TYPE_COUNT),
5121  m_hMemory(VK_NULL_HANDLE),
5122  m_Size(0),
5123  m_PersistentMap(false),
5124  m_pMappedData(VMA_NULL),
5125  m_Metadata(hAllocator)
5126 {
5127 }
5128 
5129 void VmaDeviceMemoryBlock::Init(
5130  uint32_t newMemoryTypeIndex,
5131  VMA_BLOCK_VECTOR_TYPE newBlockVectorType,
5132  VkDeviceMemory newMemory,
5133  VkDeviceSize newSize,
5134  bool persistentMap,
5135  void* pMappedData)
5136 {
5137  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
5138 
5139  m_MemoryTypeIndex = newMemoryTypeIndex;
5140  m_BlockVectorType = newBlockVectorType;
5141  m_hMemory = newMemory;
5142  m_Size = newSize;
5143  m_PersistentMap = persistentMap;
5144  m_pMappedData = pMappedData;
5145 
5146  m_Metadata.Init(newSize);
5147 }
5148 
5149 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
5150 {
5151  // This is the most important assert in the entire library.
5152  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
5153  VMA_ASSERT(m_Metadata.IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
5154 
5155  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
5156  if(m_pMappedData != VMA_NULL)
5157  {
5158  (allocator->GetVulkanFunctions().vkUnmapMemory)(allocator->m_hDevice, m_hMemory);
5159  m_pMappedData = VMA_NULL;
5160  }
5161 
5162  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_Size, m_hMemory);
5163  m_hMemory = VK_NULL_HANDLE;
5164 }
5165 
5166 bool VmaDeviceMemoryBlock::Validate() const
5167 {
5168  if((m_hMemory == VK_NULL_HANDLE) ||
5169  (m_Size == 0))
5170  {
5171  return false;
5172  }
5173 
5174  return m_Metadata.Validate();
5175 }
5176 
5177 static void InitStatInfo(VmaStatInfo& outInfo)
5178 {
5179  memset(&outInfo, 0, sizeof(outInfo));
5180  outInfo.allocationSizeMin = UINT64_MAX;
5181  outInfo.unusedRangeSizeMin = UINT64_MAX;
5182 }
5183 
5184 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
5185 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
5186 {
5187  inoutInfo.blockCount += srcInfo.blockCount;
5188  inoutInfo.allocationCount += srcInfo.allocationCount;
5189  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
5190  inoutInfo.usedBytes += srcInfo.usedBytes;
5191  inoutInfo.unusedBytes += srcInfo.unusedBytes;
5192  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
5193  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
5194  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
5195  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
5196 }
5197 
5198 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
5199 {
5200  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
5201  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
5202  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
5203  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
5204 }
5205 
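// Illustrative sketch (an assumption about VmaRoundDiv, which is defined
// elsewhere in this file): the averages above use integer division rounded
// to the nearest integer, which for unsigned operands can be written as:
static inline uint64_t VmaExampleRoundDiv(uint64_t x, uint64_t y)
{
    // Divide x by y, rounding to nearest (y must be > 0).
    return (x + y / 2) / y;
}
// E.g. usedBytes = 110 with allocationCount = 3 yields 37, not the
// truncated 36.
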
5206 VmaPool_T::VmaPool_T(
5207  VmaAllocator hAllocator,
5208  const VmaPoolCreateInfo& createInfo) :
5209  m_BlockVector(
5210  hAllocator,
5211  createInfo.memoryTypeIndex,
5212  (createInfo.flags & VMA_POOL_CREATE_PERSISTENT_MAP_BIT) != 0 ?
5213  VMA_BLOCK_VECTOR_TYPE_MAPPED : VMA_BLOCK_VECTOR_TYPE_UNMAPPED,
5214  createInfo.blockSize,
5215  createInfo.minBlockCount,
5216  createInfo.maxBlockCount,
5217  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
5218  createInfo.frameInUseCount,
5219  true) // isCustomPool
5220 {
5221 }
5222 
5223 VmaPool_T::~VmaPool_T()
5224 {
5225 }
5226 
5227 #if VMA_STATS_STRING_ENABLED
5228 
5229 #endif // #if VMA_STATS_STRING_ENABLED
5230 
5231 VmaBlockVector::VmaBlockVector(
5232  VmaAllocator hAllocator,
5233  uint32_t memoryTypeIndex,
5234  VMA_BLOCK_VECTOR_TYPE blockVectorType,
5235  VkDeviceSize preferredBlockSize,
5236  size_t minBlockCount,
5237  size_t maxBlockCount,
5238  VkDeviceSize bufferImageGranularity,
5239  uint32_t frameInUseCount,
5240  bool isCustomPool) :
5241  m_hAllocator(hAllocator),
5242  m_MemoryTypeIndex(memoryTypeIndex),
5243  m_BlockVectorType(blockVectorType),
5244  m_PreferredBlockSize(preferredBlockSize),
5245  m_MinBlockCount(minBlockCount),
5246  m_MaxBlockCount(maxBlockCount),
5247  m_BufferImageGranularity(bufferImageGranularity),
5248  m_FrameInUseCount(frameInUseCount),
5249  m_IsCustomPool(isCustomPool),
5250  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
5251  m_HasEmptyBlock(false),
5252  m_pDefragmentator(VMA_NULL)
5253 {
5254 }
5255 
5256 VmaBlockVector::~VmaBlockVector()
5257 {
5258  VMA_ASSERT(m_pDefragmentator == VMA_NULL);
5259 
5260  for(size_t i = m_Blocks.size(); i--; )
5261  {
5262  m_Blocks[i]->Destroy(m_hAllocator);
5263  vma_delete(m_hAllocator, m_Blocks[i]);
5264  }
5265 }
5266 
5267 VkResult VmaBlockVector::CreateMinBlocks()
5268 {
5269  for(size_t i = 0; i < m_MinBlockCount; ++i)
5270  {
5271  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
5272  if(res != VK_SUCCESS)
5273  {
5274  return res;
5275  }
5276  }
5277  return VK_SUCCESS;
5278 }
5279 
5280 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
5281 {
5282  pStats->size = 0;
5283  pStats->unusedSize = 0;
5284  pStats->allocationCount = 0;
5285  pStats->unusedRangeCount = 0;
5286  pStats->unusedRangeSizeMax = 0;
5287 
5288  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5289 
5290  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
5291  {
5292  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
5293  VMA_ASSERT(pBlock);
5294  VMA_HEAVY_ASSERT(pBlock->Validate());
5295  pBlock->m_Metadata.AddPoolStats(*pStats);
5296  }
5297 }
5298 
5299 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
5300 
5301 VkResult VmaBlockVector::Allocate(
5302  VmaPool hCurrentPool,
5303  uint32_t currentFrameIndex,
5304  const VkMemoryRequirements& vkMemReq,
5305  const VmaAllocationCreateInfo& createInfo,
5306  VmaSuballocationType suballocType,
5307  VmaAllocation* pAllocation)
5308 {
5309  // Validate flags.
5310  if(((createInfo.flags & VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT) != 0) !=
5311  (m_BlockVectorType == VMA_BLOCK_VECTOR_TYPE_MAPPED))
5312  {
5313  VMA_ASSERT(0 && "Usage of VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT must match VMA_POOL_CREATE_PERSISTENT_MAP_BIT.");
5314  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
5315  }
5316 
5317  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5318 
5319  // 1. Search existing allocations. Try to allocate without making other allocations lost.
5320  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
5321  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
5322  {
5323  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
5324  VMA_ASSERT(pCurrBlock);
5325  VmaAllocationRequest currRequest = {};
5326  if(pCurrBlock->m_Metadata.CreateAllocationRequest(
5327  currentFrameIndex,
5328  m_FrameInUseCount,
5329  m_BufferImageGranularity,
5330  vkMemReq.size,
5331  vkMemReq.alignment,
5332  suballocType,
5333  false, // canMakeOtherLost
5334  &currRequest))
5335  {
5336  // Allocate from pCurrBlock.
5337  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
5338 
5339  // We no longer have an empty Allocation.
5340  if(pCurrBlock->m_Metadata.IsEmpty())
5341  {
5342  m_HasEmptyBlock = false;
5343  }
5344 
5345  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex);
5346  pCurrBlock->m_Metadata.Alloc(currRequest, suballocType, vkMemReq.size, *pAllocation);
5347  (*pAllocation)->InitBlockAllocation(
5348  hCurrentPool,
5349  pCurrBlock,
5350  currRequest.offset,
5351  vkMemReq.alignment,
5352  vkMemReq.size,
5353  suballocType,
5354  createInfo.pUserData,
5355  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
5356  VMA_HEAVY_ASSERT(pCurrBlock->Validate());
5357  VMA_DEBUG_LOG(" Returned from existing allocation #%u", (uint32_t)blockIndex);
5358  return VK_SUCCESS;
5359  }
5360  }
5361 
5362  const bool canCreateNewBlock =
5363  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
5364  (m_Blocks.size() < m_MaxBlockCount);
5365 
5366  // 2. Try to create new block.
5367  if(canCreateNewBlock)
5368  {
5369  // 2.1. Start with full preferredBlockSize.
5370  VkDeviceSize blockSize = m_PreferredBlockSize;
5371  size_t newBlockIndex = 0;
5372  VkResult res = CreateBlock(blockSize, &newBlockIndex);
5373  // Allocating blocks of other sizes is allowed only in default pools.
5374  // In custom pools block size is fixed.
5375  if(res < 0 && m_IsCustomPool == false)
5376  {
5377  // 2.2. Try half the size.
5378  blockSize /= 2;
5379  if(blockSize >= vkMemReq.size)
5380  {
5381  res = CreateBlock(blockSize, &newBlockIndex);
5382  if(res < 0)
5383  {
5384  // 2.3. Try quarter the size.
5385  blockSize /= 2;
5386  if(blockSize >= vkMemReq.size)
5387  {
5388  res = CreateBlock(blockSize, &newBlockIndex);
5389  }
5390  }
5391  }
5392  }
5393  if(res == VK_SUCCESS)
5394  {
5395  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
5396  VMA_ASSERT(pBlock->m_Size >= vkMemReq.size);
5397 
5398  // Allocate from pBlock. Because it is empty, allocRequest can be trivially filled.
5399  VmaAllocationRequest allocRequest;
5400  pBlock->m_Metadata.CreateFirstAllocationRequest(&allocRequest);
5401  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex);
5402  pBlock->m_Metadata.Alloc(allocRequest, suballocType, vkMemReq.size, *pAllocation);
5403  (*pAllocation)->InitBlockAllocation(
5404  hCurrentPool,
5405  pBlock,
5406  allocRequest.offset,
5407  vkMemReq.alignment,
5408  vkMemReq.size,
5409  suballocType,
5410  createInfo.pUserData,
5411  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
5412  VMA_HEAVY_ASSERT(pBlock->Validate());
5413  VMA_DEBUG_LOG(" Created new allocation Size=%llu", pBlock->m_Size);
5414 
5415  return VK_SUCCESS;
5416  }
5417  }
5418 
5419  const bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
5420 
5421  // 3. Try to allocate from existing blocks with making other allocations lost.
5422  if(canMakeOtherLost)
5423  {
5424  uint32_t tryIndex = 0;
5425  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
5426  {
5427  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
5428  VmaAllocationRequest bestRequest = {};
5429  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
5430 
5431  // 1. Search existing allocations.
5432  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
5433  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
5434  {
5435  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
5436  VMA_ASSERT(pCurrBlock);
5437  VmaAllocationRequest currRequest = {};
5438  if(pCurrBlock->m_Metadata.CreateAllocationRequest(
5439  currentFrameIndex,
5440  m_FrameInUseCount,
5441  m_BufferImageGranularity,
5442  vkMemReq.size,
5443  vkMemReq.alignment,
5444  suballocType,
5445  canMakeOtherLost,
5446  &currRequest))
5447  {
5448  const VkDeviceSize currRequestCost = currRequest.CalcCost();
5449  if(pBestRequestBlock == VMA_NULL ||
5450  currRequestCost < bestRequestCost)
5451  {
5452  pBestRequestBlock = pCurrBlock;
5453  bestRequest = currRequest;
5454  bestRequestCost = currRequestCost;
5455 
5456  if(bestRequestCost == 0)
5457  {
5458  break;
5459  }
5460  }
5461  }
5462  }
5463 
5464  if(pBestRequestBlock != VMA_NULL)
5465  {
5466  if(pBestRequestBlock->m_Metadata.MakeRequestedAllocationsLost(
5467  currentFrameIndex,
5468  m_FrameInUseCount,
5469  &bestRequest))
5470  {
5471  // We no longer have an empty Allocation.
5472  if(pBestRequestBlock->m_Metadata.IsEmpty())
5473  {
5474  m_HasEmptyBlock = false;
5475  }
5476  // Allocate from this pBlock.
5477  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex);
5478  pBestRequestBlock->m_Metadata.Alloc(bestRequest, suballocType, vkMemReq.size, *pAllocation);
5479  (*pAllocation)->InitBlockAllocation(
5480  hCurrentPool,
5481  pBestRequestBlock,
5482  bestRequest.offset,
5483  vkMemReq.alignment,
5484  vkMemReq.size,
5485  suballocType,
5486  createInfo.pUserData,
5487  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
5488  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
5489  VMA_DEBUG_LOG(" Returned from existing allocation");
5490  return VK_SUCCESS;
5491  }
5492  // else: Some allocations must have been touched while we were here. Next try.
5493  }
5494  else
5495  {
5496  // Could not find place in any of the blocks - break outer loop.
5497  break;
5498  }
5499  }
5500  /* Maximum number of tries exceeded - a very unlikely event when many other
5501  threads are simultaneously touching allocations, making it impossible to mark
5502  them lost at the same time as we try to allocate. */
5503  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
5504  {
5505  return VK_ERROR_TOO_MANY_OBJECTS;
5506  }
5507  }
5508 
5509  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
5510 }
5511 
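// Illustrative sketch (not part of the library): step 2 above tries the full
// preferred block size, then half, then a quarter, stopping once a candidate
// is created or becomes too small for the request. A hypothetical standalone
// version of that policy:
static inline uint64_t VmaExampleChooseBlockSize(
    uint64_t preferredBlockSize,
    uint64_t allocSize,
    bool (*tryCreate)(uint64_t blockSize)) // returns true on success
{
    uint64_t blockSize = preferredBlockSize;
    for(int attempt = 0; attempt < 3; ++attempt)
    {
        if(blockSize < allocSize)
            break; // Even this candidate cannot hold the allocation.
        if(tryCreate(blockSize))
            return blockSize;
        blockSize /= 2; // Try half the previous size.
    }
    return 0; // All attempts failed.
}
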
5512 void VmaBlockVector::Free(
5513  VmaAllocation hAllocation)
5514 {
5515  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
5516 
5517  // Scope for lock.
5518  {
5519  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5520 
5521  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
5522 
5523  pBlock->m_Metadata.Free(hAllocation);
5524  VMA_HEAVY_ASSERT(pBlock->Validate());
5525 
5526  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
5527 
5528  // pBlock became empty after this deallocation.
5529  if(pBlock->m_Metadata.IsEmpty())
5530  {
5531  // We already have an empty Allocation and don't want two, so delete this one.
5532  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
5533  {
5534  pBlockToDelete = pBlock;
5535  Remove(pBlock);
5536  }
5537  // We now have our first empty Allocation.
5538  else
5539  {
5540  m_HasEmptyBlock = true;
5541  }
5542  }
5543  // Must be called after pBlock is used for the last time, because sorting may change the order of blocks in m_Blocks.
5544  IncrementallySortBlocks();
5545  }
5546 
5547  // Destruction of a free Allocation. Deferred until this point, outside of mutex
5548  // lock, for performance reasons.
5549  if(pBlockToDelete != VMA_NULL)
5550  {
5551  VMA_DEBUG_LOG(" Deleted empty allocation");
5552  pBlockToDelete->Destroy(m_hAllocator);
5553  vma_delete(m_hAllocator, pBlockToDelete);
5554  }
5555 }
5556 
5557 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
5558 {
5559  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
5560  {
5561  if(m_Blocks[blockIndex] == pBlock)
5562  {
5563  VmaVectorRemove(m_Blocks, blockIndex);
5564  return;
5565  }
5566  }
5567  VMA_ASSERT(0);
5568 }
5569 
5570 void VmaBlockVector::IncrementallySortBlocks()
5571 {
5572  // Bubble sort only until first swap.
5573  for(size_t i = 1; i < m_Blocks.size(); ++i)
5574  {
5575  if(m_Blocks[i - 1]->m_Metadata.GetSumFreeSize() > m_Blocks[i]->m_Metadata.GetSumFreeSize())
5576  {
5577  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
5578  return;
5579  }
5580  }
5581 }
5582 
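// Illustrative sketch (not part of the library): the method above performs a
// single bubble-sort step per call. Because typically at most one block
// changes its free size between calls, the vector stays nearly sorted and
// repeated calls converge to ascending order by free size. A hypothetical
// array analogue:
static inline void VmaExampleIncrementalSortStep(uint64_t* values, size_t count)
{
    // One pass that stops at the first out-of-order pair.
    for(size_t i = 1; i < count; ++i)
    {
        if(values[i - 1] > values[i])
        {
            const uint64_t tmp = values[i - 1];
            values[i - 1] = values[i];
            values[i] = tmp;
            return;
        }
    }
}
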
5583 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
5584 {
5585  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
5586  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
5587  allocInfo.allocationSize = blockSize;
5588  VkDeviceMemory mem = VK_NULL_HANDLE;
5589  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
5590  if(res < 0)
5591  {
5592  return res;
5593  }
5594 
5595  // New VkDeviceMemory successfully created.
5596 
5597  // Map memory if needed.
5598  void* pMappedData = VMA_NULL;
5599  const bool persistentMap = (m_BlockVectorType == VMA_BLOCK_VECTOR_TYPE_MAPPED);
5600  if(persistentMap && m_hAllocator->m_UnmapPersistentlyMappedMemoryCounter == 0)
5601  {
5602  res = (*m_hAllocator->GetVulkanFunctions().vkMapMemory)(
5603  m_hAllocator->m_hDevice,
5604  mem,
5605  0,
5606  VK_WHOLE_SIZE,
5607  0,
5608  &pMappedData);
5609  if(res < 0)
5610  {
5611  VMA_DEBUG_LOG(" vkMapMemory FAILED");
5612  m_hAllocator->FreeVulkanMemory(m_MemoryTypeIndex, blockSize, mem);
5613  return res;
5614  }
5615  }
5616 
5617  // Create new VmaDeviceMemoryBlock object for it.
5618  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
5619  pBlock->Init(
5620  m_MemoryTypeIndex,
5621  (VMA_BLOCK_VECTOR_TYPE)m_BlockVectorType,
5622  mem,
5623  allocInfo.allocationSize,
5624  persistentMap,
5625  pMappedData);
5626 
5627  m_Blocks.push_back(pBlock);
5628  if(pNewBlockIndex != VMA_NULL)
5629  {
5630  *pNewBlockIndex = m_Blocks.size() - 1;
5631  }
5632 
5633  return VK_SUCCESS;
5634 }
5635 
5636 #if VMA_STATS_STRING_ENABLED
5637 
5638 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
5639 {
5640  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5641 
5642  json.BeginObject();
5643 
5644  if(m_IsCustomPool)
5645  {
5646  json.WriteString("MemoryTypeIndex");
5647  json.WriteNumber(m_MemoryTypeIndex);
5648 
5649  if(m_BlockVectorType == VMA_BLOCK_VECTOR_TYPE_MAPPED)
5650  {
5651  json.WriteString("Mapped");
5652  json.WriteBool(true);
5653  }
5654 
5655  json.WriteString("BlockSize");
5656  json.WriteNumber(m_PreferredBlockSize);
5657 
5658  json.WriteString("BlockCount");
5659  json.BeginObject(true);
5660  if(m_MinBlockCount > 0)
5661  {
5662  json.WriteString("Min");
5663  json.WriteNumber(m_MinBlockCount);
5664  }
5665  if(m_MaxBlockCount < SIZE_MAX)
5666  {
5667  json.WriteString("Max");
5668  json.WriteNumber(m_MaxBlockCount);
5669  }
5670  json.WriteString("Cur");
5671  json.WriteNumber(m_Blocks.size());
5672  json.EndObject();
5673 
5674  if(m_FrameInUseCount > 0)
5675  {
5676  json.WriteString("FrameInUseCount");
5677  json.WriteNumber(m_FrameInUseCount);
5678  }
5679  }
5680  else
5681  {
5682  json.WriteString("PreferredBlockSize");
5683  json.WriteNumber(m_PreferredBlockSize);
5684  }
5685 
5686  json.WriteString("Blocks");
5687  json.BeginArray();
5688  for(size_t i = 0; i < m_Blocks.size(); ++i)
5689  {
5690  m_Blocks[i]->m_Metadata.PrintDetailedMap(json);
5691  }
5692  json.EndArray();
5693 
5694  json.EndObject();
5695 }
5696 
5697 #endif // #if VMA_STATS_STRING_ENABLED
5698 
5699 void VmaBlockVector::UnmapPersistentlyMappedMemory()
5700 {
5701  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5702 
5703  for(size_t i = m_Blocks.size(); i--; )
5704  {
5705  VmaDeviceMemoryBlock* pBlock = m_Blocks[i];
5706  if(pBlock->m_pMappedData != VMA_NULL)
5707  {
5708  VMA_ASSERT(pBlock->m_PersistentMap != false);
5709  (m_hAllocator->GetVulkanFunctions().vkUnmapMemory)(m_hAllocator->m_hDevice, pBlock->m_hMemory);
5710  pBlock->m_pMappedData = VMA_NULL;
5711  }
5712  }
5713 }
5714 
5715 VkResult VmaBlockVector::MapPersistentlyMappedMemory()
5716 {
5717  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5718 
5719  VkResult finalResult = VK_SUCCESS;
5720  for(size_t i = 0, count = m_Blocks.size(); i < count; ++i)
5721  {
5722  VmaDeviceMemoryBlock* pBlock = m_Blocks[i];
5723  if(pBlock->m_PersistentMap)
5724  {
5725  VMA_ASSERT(pBlock->m_pMappedData == VMA_NULL);
5726  VkResult localResult = (*m_hAllocator->GetVulkanFunctions().vkMapMemory)(
5727  m_hAllocator->m_hDevice,
5728  pBlock->m_hMemory,
5729  0,
5730  VK_WHOLE_SIZE,
5731  0,
5732  &pBlock->m_pMappedData);
5733  if(localResult != VK_SUCCESS)
5734  {
5735  finalResult = localResult;
5736  }
5737  }
5738  }
5739  return finalResult;
5740 }
5741 
5742 VmaDefragmentator* VmaBlockVector::EnsureDefragmentator(
5743  VmaAllocator hAllocator,
5744  uint32_t currentFrameIndex)
5745 {
5746  if(m_pDefragmentator == VMA_NULL)
5747  {
5748  m_pDefragmentator = vma_new(m_hAllocator, VmaDefragmentator)(
5749  hAllocator,
5750  this,
5751  currentFrameIndex);
5752  }
5753 
5754  return m_pDefragmentator;
5755 }
5756 
5757 VkResult VmaBlockVector::Defragment(
5758  VmaDefragmentationStats* pDefragmentationStats,
5759  VkDeviceSize& maxBytesToMove,
5760  uint32_t& maxAllocationsToMove)
5761 {
5762  if(m_pDefragmentator == VMA_NULL)
5763  {
5764  return VK_SUCCESS;
5765  }
5766 
5767  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5768 
5769  // Defragment.
5770  VkResult result = m_pDefragmentator->Defragment(maxBytesToMove, maxAllocationsToMove);
5771 
5772  // Accumulate statistics.
5773  if(pDefragmentationStats != VMA_NULL)
5774  {
5775  const VkDeviceSize bytesMoved = m_pDefragmentator->GetBytesMoved();
5776  const uint32_t allocationsMoved = m_pDefragmentator->GetAllocationsMoved();
5777  pDefragmentationStats->bytesMoved += bytesMoved;
5778  pDefragmentationStats->allocationsMoved += allocationsMoved;
5779  VMA_ASSERT(bytesMoved <= maxBytesToMove);
5780  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
5781  maxBytesToMove -= bytesMoved;
5782  maxAllocationsToMove -= allocationsMoved;
5783  }
5784 
5785  // Free empty blocks.
5786  m_HasEmptyBlock = false;
5787  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
5788  {
5789  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
5790  if(pBlock->m_Metadata.IsEmpty())
5791  {
5792  if(m_Blocks.size() > m_MinBlockCount)
5793  {
5794  if(pDefragmentationStats != VMA_NULL)
5795  {
5796  ++pDefragmentationStats->deviceMemoryBlocksFreed;
5797  pDefragmentationStats->bytesFreed += pBlock->m_Size;
5798  }
5799 
5800  VmaVectorRemove(m_Blocks, blockIndex);
5801  pBlock->Destroy(m_hAllocator);
5802  vma_delete(m_hAllocator, pBlock);
5803  }
5804  else
5805  {
5806  m_HasEmptyBlock = true;
5807  }
5808  }
5809  }
5810 
5811  return result;
5812 }
5813 
5814 void VmaBlockVector::DestroyDefragmentator()
5815 {
5816  if(m_pDefragmentator != VMA_NULL)
5817  {
5818  vma_delete(m_hAllocator, m_pDefragmentator);
5819  m_pDefragmentator = VMA_NULL;
5820  }
5821 }
5822 
5823 void VmaBlockVector::MakePoolAllocationsLost(
5824  uint32_t currentFrameIndex,
5825  size_t* pLostAllocationCount)
5826 {
5827  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5828 
5829  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
5830  {
5831  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
5832  VMA_ASSERT(pBlock);
5833  pBlock->m_Metadata.MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
5834  }
5835 }
5836 
5837 void VmaBlockVector::AddStats(VmaStats* pStats)
5838 {
5839  const uint32_t memTypeIndex = m_MemoryTypeIndex;
5840  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
5841 
5842  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5843 
5844  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
5845  {
5846  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
5847  VMA_ASSERT(pBlock);
5848  VMA_HEAVY_ASSERT(pBlock->Validate());
5849  VmaStatInfo allocationStatInfo;
5850  pBlock->m_Metadata.CalcAllocationStatInfo(allocationStatInfo);
5851  VmaAddStatInfo(pStats->total, allocationStatInfo);
5852  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
5853  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
5854  }
5855 }
5856 
5857 ////////////////////////////////////////////////////////////////////////////////
5858 // VmaDefragmentator members definition
5859 
5860 VmaDefragmentator::VmaDefragmentator(
5861  VmaAllocator hAllocator,
5862  VmaBlockVector* pBlockVector,
5863  uint32_t currentFrameIndex) :
5864  m_hAllocator(hAllocator),
5865  m_pBlockVector(pBlockVector),
5866  m_CurrentFrameIndex(currentFrameIndex),
5867  m_BytesMoved(0),
5868  m_AllocationsMoved(0),
5869  m_Allocations(VmaStlAllocator<AllocationInfo>(hAllocator->GetAllocationCallbacks())),
5870  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
5871 {
5872 }
5873 
5874 VmaDefragmentator::~VmaDefragmentator()
5875 {
5876  for(size_t i = m_Blocks.size(); i--; )
5877  {
5878  vma_delete(m_hAllocator, m_Blocks[i]);
5879  }
5880 }
5881 
5882 void VmaDefragmentator::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
5883 {
5884  AllocationInfo allocInfo;
5885  allocInfo.m_hAllocation = hAlloc;
5886  allocInfo.m_pChanged = pChanged;
5887  m_Allocations.push_back(allocInfo);
5888 }
5889 
5890 VkResult VmaDefragmentator::BlockInfo::EnsureMapping(VmaAllocator hAllocator, void** ppMappedData)
5891 {
5892  // It has already been mapped for defragmentation.
5893  if(m_pMappedDataForDefragmentation)
5894  {
5895  *ppMappedData = m_pMappedDataForDefragmentation;
5896  return VK_SUCCESS;
5897  }
5898 
5899  // It is persistently mapped.
5900  if(m_pBlock->m_PersistentMap)
5901  {
5902  VMA_ASSERT(m_pBlock->m_pMappedData != VMA_NULL);
5903  *ppMappedData = m_pBlock->m_pMappedData;
5904  return VK_SUCCESS;
5905  }
5906 
5907  // Map on first usage.
5908  VkResult res = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
5909  hAllocator->m_hDevice,
5910  m_pBlock->m_hMemory,
5911  0,
5912  VK_WHOLE_SIZE,
5913  0,
5914  &m_pMappedDataForDefragmentation);
5915  *ppMappedData = m_pMappedDataForDefragmentation;
5916  return res;
5917 }
5918 
5919 void VmaDefragmentator::BlockInfo::Unmap(VmaAllocator hAllocator)
5920 {
5921  if(m_pMappedDataForDefragmentation != VMA_NULL)
5922  {
5923  (hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_pBlock->m_hMemory);
5924  }
5925 }
5926 
5927 VkResult VmaDefragmentator::DefragmentRound(
5928  VkDeviceSize maxBytesToMove,
5929  uint32_t maxAllocationsToMove)
5930 {
5931  if(m_Blocks.empty())
5932  {
5933  return VK_SUCCESS;
5934  }
5935 
5936  size_t srcBlockIndex = m_Blocks.size() - 1;
5937  size_t srcAllocIndex = SIZE_MAX;
5938  for(;;)
5939  {
5940  // 1. Find next allocation to move.
5941  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
5942  // 1.2. Then start from last to first m_Allocations - they are sorted from largest to smallest.
5943  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
5944  {
5945  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
5946  {
5947  // Finished: no more allocations to process.
5948  if(srcBlockIndex == 0)
5949  {
5950  return VK_SUCCESS;
5951  }
5952  else
5953  {
5954  --srcBlockIndex;
5955  srcAllocIndex = SIZE_MAX;
5956  }
5957  }
5958  else
5959  {
5960  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
5961  }
5962  }
5963 
5964  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
5965  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
5966 
5967  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
5968  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
5969  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
5970  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
5971 
5972  // 2. Try to find new place for this allocation in preceding or current block.
5973  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
5974  {
5975  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
5976  VmaAllocationRequest dstAllocRequest;
5977  if(pDstBlockInfo->m_pBlock->m_Metadata.CreateAllocationRequest(
5978  m_CurrentFrameIndex,
5979  m_pBlockVector->GetFrameInUseCount(),
5980  m_pBlockVector->GetBufferImageGranularity(),
5981  size,
5982  alignment,
5983  suballocType,
5984  false, // canMakeOtherLost
5985  &dstAllocRequest) &&
5986  MoveMakesSense(
5987  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
5988  {
5989  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
5990 
5991  // Reached limit on number of allocations or bytes to move.
5992  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
5993  (m_BytesMoved + size > maxBytesToMove))
5994  {
5995  return VK_INCOMPLETE;
5996  }
5997 
5998  void* pDstMappedData = VMA_NULL;
5999  VkResult res = pDstBlockInfo->EnsureMapping(m_hAllocator, &pDstMappedData);
6000  if(res != VK_SUCCESS)
6001  {
6002  return res;
6003  }
6004 
6005  void* pSrcMappedData = VMA_NULL;
6006  res = pSrcBlockInfo->EnsureMapping(m_hAllocator, &pSrcMappedData);
6007  if(res != VK_SUCCESS)
6008  {
6009  return res;
6010  }
6011 
6012  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
6013  memcpy(
6014  reinterpret_cast<char*>(pDstMappedData) + dstAllocRequest.offset,
6015  reinterpret_cast<char*>(pSrcMappedData) + srcOffset,
6016  static_cast<size_t>(size));
6017 
6018  pDstBlockInfo->m_pBlock->m_Metadata.Alloc(dstAllocRequest, suballocType, size, allocInfo.m_hAllocation);
6019  pSrcBlockInfo->m_pBlock->m_Metadata.Free(allocInfo.m_hAllocation);
6020 
6021  allocInfo.m_hAllocation->ChangeBlockAllocation(pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
6022 
6023  if(allocInfo.m_pChanged != VMA_NULL)
6024  {
6025  *allocInfo.m_pChanged = VK_TRUE;
6026  }
6027 
6028  ++m_AllocationsMoved;
6029  m_BytesMoved += size;
6030 
6031  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
6032 
6033  break;
6034  }
6035  }
6036 
6037  // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round.
6038 
6039  if(srcAllocIndex > 0)
6040  {
6041  --srcAllocIndex;
6042  }
6043  else
6044  {
6045  if(srcBlockIndex > 0)
6046  {
6047  --srcBlockIndex;
6048  srcAllocIndex = SIZE_MAX;
6049  }
6050  else
6051  {
6052  return VK_SUCCESS;
6053  }
6054  }
6055  }
6056 }
6057 
6058 VkResult VmaDefragmentator::Defragment(
6059  VkDeviceSize maxBytesToMove,
6060  uint32_t maxAllocationsToMove)
6061 {
6062  if(m_Allocations.empty())
6063  {
6064  return VK_SUCCESS;
6065  }
6066 
6067  // Create block info for each block.
6068  const size_t blockCount = m_pBlockVector->m_Blocks.size();
6069  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
6070  {
6071  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
6072  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
6073  m_Blocks.push_back(pBlockInfo);
6074  }
6075 
6076  // Sort them by m_pBlock pointer value.
6077  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
6078 
6079  // Move allocation infos from m_Allocations to appropriate m_Blocks[i].m_Allocations.
6080  for(size_t allocIndex = 0, allocCount = m_Allocations.size(); allocIndex < allocCount; ++allocIndex)
6081  {
6082  AllocationInfo& allocInfo = m_Allocations[allocIndex];
6083  // Now that we are inside VmaBlockVector::m_Mutex, we can make the final check whether this allocation was lost.
6084  if(allocInfo.m_hAllocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
6085  {
6086  VmaDeviceMemoryBlock* pBlock = allocInfo.m_hAllocation->GetBlock();
6087  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
6088  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
6089  {
6090  (*it)->m_Allocations.push_back(allocInfo);
6091  }
6092  else
6093  {
6094  VMA_ASSERT(0);
6095  }
6096  }
6097  }
6098  m_Allocations.clear();
6099 
6100  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
6101  {
6102  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
6103  pBlockInfo->CalcHasNonMovableAllocations();
6104  pBlockInfo->SortAllocationsBySizeDescecnding();
6105  }
6106 
6107  // Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
6108  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
6109 
6110  // Execute defragmentation rounds (the main part).
6111  VkResult result = VK_SUCCESS;
6112  for(size_t round = 0; (round < 2) && (result == VK_SUCCESS); ++round)
6113  {
6114  result = DefragmentRound(maxBytesToMove, maxAllocationsToMove);
6115  }
6116 
6117  // Unmap blocks that were mapped for defragmentation.
6118  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
6119  {
6120  m_Blocks[blockIndex]->Unmap(m_hAllocator);
6121  }
6122 
6123  return result;
6124 }
6125 
6126 bool VmaDefragmentator::MoveMakesSense(
6127  size_t dstBlockIndex, VkDeviceSize dstOffset,
6128  size_t srcBlockIndex, VkDeviceSize srcOffset)
6129 {
6130  if(dstBlockIndex < srcBlockIndex)
6131  {
6132  return true;
6133  }
6134  if(dstBlockIndex > srcBlockIndex)
6135  {
6136  return false;
6137  }
6138  if(dstOffset < srcOffset)
6139  {
6140  return true;
6141  }
6142  return false;
6143 }
6144 
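// Usage examples for the ordering above (hypothetical indices and offsets):
//   MoveMakesSense(0, 256, 2,   0) == true   // to an earlier block
//   MoveMakesSense(1,   0, 1, 512) == true   // same block, towards the start
//   MoveMakesSense(1, 512, 1,   0) == false  // would move away from the start
//   MoveMakesSense(2,   0, 1, 512) == false  // to a later block
// The net effect is a strict "pack towards lower block indices and lower
// offsets" policy for the defragmentator.
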
6145 ////////////////////////////////////////////////////////////////////////////////
6146 // VmaAllocator_T
6147 
6148 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
6149  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
6150  m_PhysicalDevice(pCreateInfo->physicalDevice),
6151  m_hDevice(pCreateInfo->device),
6152  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
6153  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
6154  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
6155  m_UnmapPersistentlyMappedMemoryCounter(0),
6156  m_PreferredLargeHeapBlockSize(0),
6157  m_PreferredSmallHeapBlockSize(0),
6158  m_CurrentFrameIndex(0),
6159  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks()))
6160 {
6161  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
6162 
6163  memset(&m_DeviceMemoryCallbacks, 0, sizeof(m_DeviceMemoryCallbacks));
6164  memset(&m_MemProps, 0, sizeof(m_MemProps));
6165  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
6166 
6167  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
6168  memset(&m_pOwnAllocations, 0, sizeof(m_pOwnAllocations));
6169 
6170  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
6171  {
6172  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
6173  }
6174 
6175  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
6176  {
6177  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
6178  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
6179  }
6180 
6181  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
6182 
6183  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
6184  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
6185 
6186  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
6187  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
6188  m_PreferredSmallHeapBlockSize = (pCreateInfo->preferredSmallHeapBlockSize != 0) ?
6189  pCreateInfo->preferredSmallHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_SMALL_HEAP_BLOCK_SIZE);
6190 
6191  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
6192  {
6193  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
6194  {
6195  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
6196  if(limit != VK_WHOLE_SIZE)
6197  {
6198  m_HeapSizeLimit[heapIndex] = limit;
6199  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
6200  {
6201  m_MemProps.memoryHeaps[heapIndex].size = limit;
6202  }
6203  }
6204  }
6205  }
6206 
6207  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
6208  {
6209  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
6210 
6211  for(size_t blockVectorTypeIndex = 0; blockVectorTypeIndex < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorTypeIndex)
6212  {
6213  m_pBlockVectors[memTypeIndex][blockVectorTypeIndex] = vma_new(this, VmaBlockVector)(
6214  this,
6215  memTypeIndex,
6216  static_cast<VMA_BLOCK_VECTOR_TYPE>(blockVectorTypeIndex),
6217  preferredBlockSize,
6218  0,
6219  SIZE_MAX,
6220  GetBufferImageGranularity(),
6221  pCreateInfo->frameInUseCount,
6222  false); // isCustomPool
6223  // No need to call m_pBlockVectors[memTypeIndex][blockVectorTypeIndex]->CreateMinBlocks here,
6224  // because minBlockCount is 0.
6225  m_pOwnAllocations[memTypeIndex][blockVectorTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
6226  }
6227  }
6228 }
6229 
6230 VmaAllocator_T::~VmaAllocator_T()
6231 {
6232  VMA_ASSERT(m_Pools.empty());
6233 
6234  for(size_t i = GetMemoryTypeCount(); i--; )
6235  {
6236  for(size_t j = VMA_BLOCK_VECTOR_TYPE_COUNT; j--; )
6237  {
6238  vma_delete(this, m_pOwnAllocations[i][j]);
6239  vma_delete(this, m_pBlockVectors[i][j]);
6240  }
6241  }
6242 }
6243 
6244 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
6245 {
6246 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
6247  m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
6248  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
6249  m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
6250  m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
6251  m_VulkanFunctions.vkMapMemory = &vkMapMemory;
6252  m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
6253  m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
6254  m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
6255  m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
6256  m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
6257  m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
6258  m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
6259  m_VulkanFunctions.vkCreateImage = &vkCreateImage;
6260  m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
6261 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
6262 
6263  if(pVulkanFunctions != VMA_NULL)
6264  {
6265  m_VulkanFunctions = *pVulkanFunctions;
6266  }
6267 
6268  // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
6269  // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
6270  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
6271  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
6272  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
6273  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
6274  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
6275  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
6276  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
6277  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
6278  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
6279  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
6280  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
6281  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
6282  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
6283  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
6284 }
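
As a sketch of the second option named in the comment above - passing valid pointers via VmaAllocatorCreateInfo::pVulkanFunctions instead of defining VMA_STATIC_VULKAN_FUNCTIONS 1 - a caller linking statically against the Vulkan loader could write the following (illustrative, not part of the library source; pointers obtained from vkGetInstanceProcAddr / vkGetDeviceProcAddr would work equally well):

    VmaVulkanFunctions vulkanFunctions = {};
    vulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
    vulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
    vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    vulkanFunctions.vkFreeMemory = &vkFreeMemory;
    vulkanFunctions.vkMapMemory = &vkMapMemory;
    vulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
    vulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
    vulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
    vulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
    vulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
    vulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
    vulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
    vulkanFunctions.vkCreateImage = &vkCreateImage;
    vulkanFunctions.vkDestroyImage = &vkDestroyImage;

    VmaAllocatorCreateInfo allocatorCreateInfo = {};
    // physicalDevice, device, etc. are assumed to be filled elsewhere.
    allocatorCreateInfo.pVulkanFunctions = &vulkanFunctions;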
6285 
6286 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
6287 {
6288  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
6289  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
6290  return (heapSize <= VMA_SMALL_HEAP_MAX_SIZE) ?
6291  m_PreferredSmallHeapBlockSize : m_PreferredLargeHeapBlockSize;
6292 }
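
A worked example of this threshold; the constants are configurable, so the values below (the library defaults of this era) are illustrative only:

    // Assuming VMA_SMALL_HEAP_MAX_SIZE = 512 MiB,
    // VMA_DEFAULT_SMALL_HEAP_BLOCK_SIZE = 64 MiB and VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE = 256 MiB:
    // heapSize = 256 MiB -> 256 MiB <= 512 MiB -> preferred block size = 64 MiB.
    // heapSize = 8 GiB   -> 8 GiB   >  512 MiB -> preferred block size = 256 MiB.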
6293 
6294 VkResult VmaAllocator_T::AllocateMemoryOfType(
6295  const VkMemoryRequirements& vkMemReq,
6296  const VmaAllocationCreateInfo& createInfo,
6297  uint32_t memTypeIndex,
6298  VmaSuballocationType suballocType,
6299  VmaAllocation* pAllocation)
6300 {
6301  VMA_ASSERT(pAllocation != VMA_NULL);
6302  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, vkMemReq.size);
6303 
6304  uint32_t blockVectorType = VmaAllocationCreateFlagsToBlockVectorType(createInfo.flags);
6305  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex][blockVectorType];
6306  VMA_ASSERT(blockVector);
6307 
6308  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
6309  // Heuristics: Allocate own memory if requested size is greater than half of preferred block size.
6310  const bool ownMemory =
6311  (createInfo.flags & VMA_ALLOCATION_CREATE_OWN_MEMORY_BIT) != 0 ||
6312  VMA_DEBUG_ALWAYS_OWN_MEMORY ||
6313  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
6314  vkMemReq.size > preferredBlockSize / 2);
6315 
6316  if(ownMemory)
6317  {
6318  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
6319  {
6320  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
6321  }
6322  else
6323  {
6324  return AllocateOwnMemory(
6325  vkMemReq.size,
6326  suballocType,
6327  memTypeIndex,
6328  (createInfo.flags & VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT) != 0,
6329  createInfo.pUserData,
6330  pAllocation);
6331  }
6332  }
6333  else
6334  {
6335  VkResult res = blockVector->Allocate(
6336  VK_NULL_HANDLE, // hCurrentPool
6337  m_CurrentFrameIndex.load(),
6338  vkMemReq,
6339  createInfo,
6340  suballocType,
6341  pAllocation);
6342  if(res == VK_SUCCESS)
6343  {
6344  return res;
6345  }
6346 
6347  // Allocating from the block vector failed - try own memory as a last resort.
6348  res = AllocateOwnMemory(
6349  vkMemReq.size,
6350  suballocType,
6351  memTypeIndex,
6352  (createInfo.flags & VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT) != 0,
6353  createInfo.pUserData,
6354  pAllocation);
6355  if(res == VK_SUCCESS)
6356  {
6357  // Succeeded: AllocateOwnMemory function already filled pAllocation, nothing more to do here.
6358  VMA_DEBUG_LOG(" Allocated as OwnMemory");
6359  return VK_SUCCESS;
6360  }
6361  else
6362  {
6363  // Everything failed: Return error code.
6364  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
6365  return res;
6366  }
6367  }
6368 }
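
To make the own-memory heuristic above concrete (the 256 MiB figure is illustrative; the real value comes from GetPreferredBlockSize()):

    // preferredBlockSize = 256 MiB -> threshold = 128 MiB.
    // vkMemReq.size = 200 MiB -> ownMemory = true:  dedicated VkDeviceMemory allocation.
    // vkMemReq.size =  64 MiB -> ownMemory = false: suballocated from a shared block,
    //                            unless VMA_ALLOCATION_CREATE_OWN_MEMORY_BIT forces it.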
6369 
6370 VkResult VmaAllocator_T::AllocateOwnMemory(
6371  VkDeviceSize size,
6372  VmaSuballocationType suballocType,
6373  uint32_t memTypeIndex,
6374  bool map,
6375  void* pUserData,
6376  VmaAllocation* pAllocation)
6377 {
6378  VMA_ASSERT(pAllocation);
6379 
6380  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
6381  allocInfo.memoryTypeIndex = memTypeIndex;
6382  allocInfo.allocationSize = size;
6383 
6384  // Allocate VkDeviceMemory.
6385  VkDeviceMemory hMemory = VK_NULL_HANDLE;
6386  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
6387  if(res < 0)
6388  {
6389  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
6390  return res;
6391  }
6392 
6393  void* pMappedData = nullptr;
6394  if(map)
6395  {
6396  if(m_UnmapPersistentlyMappedMemoryCounter == 0)
6397  {
6398  res = vkMapMemory(m_hDevice, hMemory, 0, VK_WHOLE_SIZE, 0, &pMappedData);
6399  if(res < 0)
6400  {
6401  VMA_DEBUG_LOG(" vkMapMemory FAILED");
6402  FreeVulkanMemory(memTypeIndex, size, hMemory);
6403  return res;
6404  }
6405  }
6406  }
6407 
6408  *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load());
6409  (*pAllocation)->InitOwnAllocation(memTypeIndex, hMemory, suballocType, map, pMappedData, size, pUserData);
6410 
6411  // Register it in m_pOwnAllocations.
6412  {
6413  VmaMutexLock lock(m_OwnAllocationsMutex[memTypeIndex], m_UseMutex);
6414  AllocationVectorType* pOwnAllocations = m_pOwnAllocations[memTypeIndex][map ? VMA_BLOCK_VECTOR_TYPE_MAPPED : VMA_BLOCK_VECTOR_TYPE_UNMAPPED];
6415  VMA_ASSERT(pOwnAllocations);
6416  VmaVectorInsertSorted<VmaPointerLess>(*pOwnAllocations, *pAllocation);
6417  }
6418 
6419  VMA_DEBUG_LOG(" Allocated OwnMemory MemoryTypeIndex=#%u", memTypeIndex);
6420 
6421  return VK_SUCCESS;
6422 }
6423 
6424 VkResult VmaAllocator_T::AllocateMemory(
6425  const VkMemoryRequirements& vkMemReq,
6426  const VmaAllocationCreateInfo& createInfo,
6427  VmaSuballocationType suballocType,
6428  VmaAllocation* pAllocation)
6429 {
6430  if((createInfo.flags & VMA_ALLOCATION_CREATE_OWN_MEMORY_BIT) != 0 &&
6431  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
6432  {
6433  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_OWN_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
6434  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
6435  }
6436  if((createInfo.pool != VK_NULL_HANDLE) &&
6437  ((createInfo.flags & (VMA_ALLOCATION_CREATE_OWN_MEMORY_BIT)) != 0))
6438  {
6439  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_OWN_MEMORY_BIT when pool != null is invalid.");
6440  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
6441  }
6442 
6443  if(createInfo.pool != VK_NULL_HANDLE)
6444  {
6445  return createInfo.pool->m_BlockVector.Allocate(
6446  createInfo.pool,
6447  m_CurrentFrameIndex.load(),
6448  vkMemReq,
6449  createInfo,
6450  suballocType,
6451  pAllocation);
6452  }
6453  else
6454  {
6455  // Bit mask of Vulkan memory types acceptable for this allocation.
6456  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
6457  uint32_t memTypeIndex = UINT32_MAX;
6458  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
6459  if(res == VK_SUCCESS)
6460  {
6461  res = AllocateMemoryOfType(vkMemReq, createInfo, memTypeIndex, suballocType, pAllocation);
6462  // Succeeded on first try.
6463  if(res == VK_SUCCESS)
6464  {
6465  return res;
6466  }
6467  // Allocation from this memory type failed. Try other compatible memory types.
6468  else
6469  {
6470  for(;;)
6471  {
6472  // Remove old memTypeIndex from list of possibilities.
6473  memoryTypeBits &= ~(1u << memTypeIndex);
6474  // Find alternative memTypeIndex.
6475  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
6476  if(res == VK_SUCCESS)
6477  {
6478  res = AllocateMemoryOfType(vkMemReq, createInfo, memTypeIndex, suballocType, pAllocation);
6479  // Allocation from this alternative memory type succeeded.
6480  if(res == VK_SUCCESS)
6481  {
6482  return res;
6483  }
6484  // else: Allocation from this memory type failed. Try next one - next loop iteration.
6485  }
6486  // No other matching memory type index could be found.
6487  else
6488  {
6489  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
6490  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
6491  }
6492  }
6493  }
6494  }
6495  // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
6496  else
6497  return res;
6498  }
6499 }
6500 
6501 void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
6502 {
6503  VMA_ASSERT(allocation);
6504 
6505  if(allocation->CanBecomeLost() == false ||
6506  allocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
6507  {
6508  switch(allocation->GetType())
6509  {
6510  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
6511  {
6512  VmaBlockVector* pBlockVector = VMA_NULL;
6513  VmaPool hPool = allocation->GetPool();
6514  if(hPool != VK_NULL_HANDLE)
6515  {
6516  pBlockVector = &hPool->m_BlockVector;
6517  }
6518  else
6519  {
6520  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
6521  const VMA_BLOCK_VECTOR_TYPE blockVectorType = allocation->GetBlockVectorType();
6522  pBlockVector = m_pBlockVectors[memTypeIndex][blockVectorType];
6523  }
6524  pBlockVector->Free(allocation);
6525  }
6526  break;
6527  case VmaAllocation_T::ALLOCATION_TYPE_OWN:
6528  FreeOwnMemory(allocation);
6529  break;
6530  default:
6531  VMA_ASSERT(0);
6532  }
6533  }
6534 
6535  vma_delete(this, allocation);
6536 }
6537 
6538 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
6539 {
6540  // Initialize.
6541  InitStatInfo(pStats->total);
6542  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
6543  InitStatInfo(pStats->memoryType[i]);
6544  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
6545  InitStatInfo(pStats->memoryHeap[i]);
6546 
6547  // Process default pools.
6548  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
6549  {
6550  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
6551  for(uint32_t blockVectorType = 0; blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorType)
6552  {
6553  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex][blockVectorType];
6554  VMA_ASSERT(pBlockVector);
6555  pBlockVector->AddStats(pStats);
6556  }
6557  }
6558 
6559  // Process custom pools.
6560  {
6561  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
6562  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
6563  {
6564  m_Pools[poolIndex]->GetBlockVector().AddStats(pStats);
6565  }
6566  }
6567 
6568  // Process own allocations.
6569  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
6570  {
6571  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
6572  VmaMutexLock ownAllocationsLock(m_OwnAllocationsMutex[memTypeIndex], m_UseMutex);
6573  for(uint32_t blockVectorType = 0; blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorType)
6574  {
6575  AllocationVectorType* const pOwnAllocVector = m_pOwnAllocations[memTypeIndex][blockVectorType];
6576  VMA_ASSERT(pOwnAllocVector);
6577  for(size_t allocIndex = 0, allocCount = pOwnAllocVector->size(); allocIndex < allocCount; ++allocIndex)
6578  {
6579  VmaStatInfo allocationStatInfo;
6580  (*pOwnAllocVector)[allocIndex]->OwnAllocCalcStatsInfo(allocationStatInfo);
6581  VmaAddStatInfo(pStats->total, allocationStatInfo);
6582  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
6583  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
6584  }
6585  }
6586  }
6587 
6588  // Postprocess.
6589  VmaPostprocessCalcStatInfo(pStats->total);
6590  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
6591  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
6592  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
6593  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
6594 }
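
A minimal caller-side sketch via the public wrapper vmaCalculateStats defined later in this file (the printf formatting is illustrative):

    VmaStats stats;
    vmaCalculateStats(allocator, &stats);
    printf("used: %llu B, unused: %llu B, allocations: %u\n",
        (unsigned long long)stats.total.usedBytes,
        (unsigned long long)stats.total.unusedBytes,
        stats.total.allocationCount);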
6595 
6596 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
6597 
6598 void VmaAllocator_T::UnmapPersistentlyMappedMemory()
6599 {
6600  if(m_UnmapPersistentlyMappedMemoryCounter++ == 0)
6601  {
6602  if(m_PhysicalDeviceProperties.vendorID == VMA_VENDOR_ID_AMD)
6603  {
6604  for(uint32_t memTypeIndex = m_MemProps.memoryTypeCount; memTypeIndex--; )
6605  {
6606  const VkMemoryPropertyFlags memFlags = m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
6607  if((memFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0 &&
6608  (memFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
6609  {
6610  // Process OwnAllocations.
6611  {
6612  VmaMutexLock lock(m_OwnAllocationsMutex[memTypeIndex], m_UseMutex);
6613  AllocationVectorType* pOwnAllocationsVector = m_pOwnAllocations[memTypeIndex][VMA_BLOCK_VECTOR_TYPE_MAPPED];
6614  for(size_t ownAllocIndex = pOwnAllocationsVector->size(); ownAllocIndex--; )
6615  {
6616  VmaAllocation hAlloc = (*pOwnAllocationsVector)[ownAllocIndex];
6617  hAlloc->OwnAllocUnmapPersistentlyMappedMemory(this);
6618  }
6619  }
6620 
6621  // Process normal Allocations.
6622  {
6623  VmaBlockVector* pBlockVector = m_pBlockVectors[memTypeIndex][VMA_BLOCK_VECTOR_TYPE_MAPPED];
6624  pBlockVector->UnmapPersistentlyMappedMemory();
6625  }
6626  }
6627  }
6628 
6629  // Process custom pools.
6630  {
6631  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
6632  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
6633  {
6634  m_Pools[poolIndex]->GetBlockVector().UnmapPersistentlyMappedMemory();
6635  }
6636  }
6637  }
6638  }
6639 }
6640 
6641 VkResult VmaAllocator_T::MapPersistentlyMappedMemory()
6642 {
6643  VMA_ASSERT(m_UnmapPersistentlyMappedMemoryCounter > 0);
6644  if(--m_UnmapPersistentlyMappedMemoryCounter == 0)
6645  {
6646  VkResult finalResult = VK_SUCCESS;
6647  if(m_PhysicalDeviceProperties.vendorID == VMA_VENDOR_ID_AMD)
6648  {
6649  // Process custom pools.
6650  {
6651  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
6652  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
6653  {
6654  m_Pools[poolIndex]->GetBlockVector().MapPersistentlyMappedMemory();
6655  }
6656  }
6657 
6658  for(uint32_t memTypeIndex = 0; memTypeIndex < m_MemProps.memoryTypeCount; ++memTypeIndex)
6659  {
6660  const VkMemoryPropertyFlags memFlags = m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
6661  if((memFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0 &&
6662  (memFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
6663  {
6664  // Process OwnAllocations.
6665  {
6666  VmaMutexLock lock(m_OwnAllocationsMutex[memTypeIndex], m_UseMutex);
6667  AllocationVectorType* pAllocationsVector = m_pOwnAllocations[memTypeIndex][VMA_BLOCK_VECTOR_TYPE_MAPPED];
6668  for(size_t ownAllocIndex = 0, ownAllocCount = pAllocationsVector->size(); ownAllocIndex < ownAllocCount; ++ownAllocIndex)
6669  {
6670  VmaAllocation hAlloc = (*pAllocationsVector)[ownAllocIndex];
6671  hAlloc->OwnAllocMapPersistentlyMappedMemory(this);
6672  }
6673  }
6674 
6675  // Process normal Allocations.
6676  {
6677  VmaBlockVector* pBlockVector = m_pBlockVectors[memTypeIndex][VMA_BLOCK_VECTOR_TYPE_MAPPED];
6678  VkResult localResult = pBlockVector->MapPersistentlyMappedMemory();
6679  if(localResult != VK_SUCCESS)
6680  {
6681  finalResult = localResult;
6682  }
6683  }
6684  }
6685  }
6686  }
6687  return finalResult;
6688  }
6689  else
6690  return VK_SUCCESS;
6691 }
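
Sketch of the intended pairing of these two functions, via the public wrappers vmaUnmapPersistentlyMappedMemory / vmaMapPersistentlyMappedMemory defined later; note that the vendor check above makes this a no-op on non-AMD devices:

    vmaUnmapPersistentlyMappedMemory(allocator);
    // ... submit and wait for work while DEVICE_LOCAL | HOST_VISIBLE memory is unmapped ...
    VkResult res = vmaMapPersistentlyMappedMemory(allocator);
    if(res != VK_SUCCESS)
    {
        // Some persistently mapped block could not be re-mapped.
    }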
6692 
6693 VkResult VmaAllocator_T::Defragment(
6694  VmaAllocation* pAllocations,
6695  size_t allocationCount,
6696  VkBool32* pAllocationsChanged,
6697  const VmaDefragmentationInfo* pDefragmentationInfo,
6698  VmaDefragmentationStats* pDefragmentationStats)
6699 {
6700  if(pAllocationsChanged != VMA_NULL)
6701  {
6702  memset(pAllocationsChanged, 0, allocationCount * sizeof(*pAllocationsChanged));
6703  }
6704  if(pDefragmentationStats != VMA_NULL)
6705  {
6706  memset(pDefragmentationStats, 0, sizeof(*pDefragmentationStats));
6707  }
6708 
6709  if(m_UnmapPersistentlyMappedMemoryCounter > 0)
6710  {
6711  VMA_DEBUG_LOG("ERROR: Cannot defragment when inside vmaUnmapPersistentlyMappedMemory.");
6712  return VK_ERROR_MEMORY_MAP_FAILED;
6713  }
6714 
6715  const uint32_t currentFrameIndex = m_CurrentFrameIndex.load();
6716 
6717  VmaMutexLock poolsLock(m_PoolsMutex, m_UseMutex);
6718 
6719  const size_t poolCount = m_Pools.size();
6720 
6721  // Dispatch pAllocations among defragmentators. Create them in BlockVectors when necessary.
6722  for(size_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
6723  {
6724  VmaAllocation hAlloc = pAllocations[allocIndex];
6725  VMA_ASSERT(hAlloc);
6726  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
6727  // OwnAlloc cannot be defragmented.
6728  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
6729  // Only HOST_VISIBLE memory types can be defragmented.
6730  ((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0) &&
6731  // Lost allocation cannot be defragmented.
6732  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
6733  {
6734  VmaBlockVector* pAllocBlockVector = nullptr;
6735 
6736  const VmaPool hAllocPool = hAlloc->GetPool();
6737  // This allocation belongs to custom pool.
6738  if(hAllocPool != VK_NULL_HANDLE)
6739  {
6740  pAllocBlockVector = &hAllocPool->GetBlockVector();
6741  }
6742  // This allocation belongs to general pool.
6743  else
6744  {
6745  pAllocBlockVector = m_pBlockVectors[memTypeIndex][hAlloc->GetBlockVectorType()];
6746  }
6747 
6748  VmaDefragmentator* const pDefragmentator = pAllocBlockVector->EnsureDefragmentator(this, currentFrameIndex);
6749 
6750  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
6751  &pAllocationsChanged[allocIndex] : VMA_NULL;
6752  pDefragmentator->AddAllocation(hAlloc, pChanged);
6753  }
6754  }
6755 
6756  VkResult result = VK_SUCCESS;
6757 
6758  // ======== Main processing.
6759 
6760  VkDeviceSize maxBytesToMove = SIZE_MAX;
6761  uint32_t maxAllocationsToMove = UINT32_MAX;
6762  if(pDefragmentationInfo != VMA_NULL)
6763  {
6764  maxBytesToMove = pDefragmentationInfo->maxBytesToMove;
6765  maxAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
6766  }
6767 
6768  // Process standard memory.
6769  for(uint32_t memTypeIndex = 0;
6770  (memTypeIndex < GetMemoryTypeCount()) && (result == VK_SUCCESS);
6771  ++memTypeIndex)
6772  {
6773  // Only HOST_VISIBLE memory types can be defragmented.
6774  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
6775  {
6776  for(uint32_t blockVectorType = 0;
6777  (blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT) && (result == VK_SUCCESS);
6778  ++blockVectorType)
6779  {
6780  result = m_pBlockVectors[memTypeIndex][blockVectorType]->Defragment(
6781  pDefragmentationStats,
6782  maxBytesToMove,
6783  maxAllocationsToMove);
6784  }
6785  }
6786  }
6787 
6788  // Process custom pools.
6789  for(size_t poolIndex = 0; (poolIndex < poolCount) && (result == VK_SUCCESS); ++poolIndex)
6790  {
6791  result = m_Pools[poolIndex]->GetBlockVector().Defragment(
6792  pDefragmentationStats,
6793  maxBytesToMove,
6794  maxAllocationsToMove);
6795  }
6796 
6797  // ======== Destroy defragmentators.
6798 
6799  // Process custom pools.
6800  for(size_t poolIndex = poolCount; poolIndex--; )
6801  {
6802  m_Pools[poolIndex]->GetBlockVector().DestroyDefragmentator();
6803  }
6804 
6805  // Process standard memory.
6806  for(uint32_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
6807  {
6808  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
6809  {
6810  for(size_t blockVectorType = VMA_BLOCK_VECTOR_TYPE_COUNT; blockVectorType--; )
6811  {
6812  m_pBlockVectors[memTypeIndex][blockVectorType]->DestroyDefragmentator();
6813  }
6814  }
6815  }
6816 
6817  return result;
6818 }
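
Caller-side sketch through the public vmaDefragment wrapper defined later; allocations and kAllocCount stand for the application's own candidate list (hypothetical names):

    enum { kAllocCount = 8 }; // Illustrative.
    VmaAllocation allocations[kAllocCount] = { /* filled by the application */ };
    VkBool32 changed[kAllocCount] = {};
    VmaDefragmentationStats defragStats = {};
    VkResult res = vmaDefragment(allocator, allocations, kAllocCount,
        changed, VMA_NULL /* no limits on bytes/allocations moved */, &defragStats);
    // Any allocation with changed[i] == VK_TRUE has a new memory/offset; the
    // caller must recreate or rebind the resources that used it.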
6819 
6820 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
6821 {
6822  if(hAllocation->CanBecomeLost())
6823  {
6824  /*
6825  Warning: This is a carefully designed algorithm.
6826  Do not modify unless you really know what you're doing :)
6827  */
6828  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
6829  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
6830  for(;;)
6831  {
6832  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
6833  {
6834  pAllocationInfo->memoryType = UINT32_MAX;
6835  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
6836  pAllocationInfo->offset = 0;
6837  pAllocationInfo->size = hAllocation->GetSize();
6838  pAllocationInfo->pMappedData = VMA_NULL;
6839  pAllocationInfo->pUserData = hAllocation->GetUserData();
6840  return;
6841  }
6842  else if(localLastUseFrameIndex == localCurrFrameIndex)
6843  {
6844  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
6845  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
6846  pAllocationInfo->offset = hAllocation->GetOffset();
6847  pAllocationInfo->size = hAllocation->GetSize();
6848  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
6849  pAllocationInfo->pUserData = hAllocation->GetUserData();
6850  return;
6851  }
6852  else // Last use time earlier than current time.
6853  {
6854  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
6855  {
6856  localLastUseFrameIndex = localCurrFrameIndex;
6857  }
6858  }
6859  }
6860  }
6861  // For allocations that cannot become lost we can skip the code above: there is no need to touch the hAllocation->LastUseFrameIndex atomic.
6862  else
6863  {
6864  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
6865  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
6866  pAllocationInfo->offset = hAllocation->GetOffset();
6867  pAllocationInfo->size = hAllocation->GetSize();
6868  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
6869  pAllocationInfo->pUserData = hAllocation->GetUserData();
6870  }
6871 }
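
From the caller's side, the VMA_FRAME_INDEX_LOST branch above is observable through the returned struct; a sketch using the public vmaGetAllocationInfo wrapper defined later:

    VmaAllocationInfo allocInfo;
    vmaGetAllocationInfo(allocator, allocation, &allocInfo);
    if(allocInfo.deviceMemory == VK_NULL_HANDLE)
    {
        // The allocation became lost: free it and recreate the resource.
    }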
6872 
6873 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
6874 {
6875  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u", pCreateInfo->memoryTypeIndex);
6876 
6877  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
6878 
6879  if(newCreateInfo.maxBlockCount == 0)
6880  {
6881  newCreateInfo.maxBlockCount = SIZE_MAX;
6882  }
6883  if(newCreateInfo.blockSize == 0)
6884  {
6885  newCreateInfo.blockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
6886  }
6887 
6888  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo);
6889 
6890  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
6891  if(res != VK_SUCCESS)
6892  {
6893  vma_delete(this, *pPool);
6894  *pPool = VMA_NULL;
6895  return res;
6896  }
6897 
6898  // Add to m_Pools.
6899  {
6900  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
6901  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
6902  }
6903 
6904  return VK_SUCCESS;
6905 }
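
Usage sketch for the function above, through the public vmaCreatePool wrapper defined later; the zero values deliberately exercise the two defaulting branches just shown:

    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memTypeIndex; // e.g. from vmaFindMemoryTypeIndex.
    poolCreateInfo.blockSize = 0;     // 0 -> CalcPreferredBlockSize() is used.
    poolCreateInfo.maxBlockCount = 0; // 0 -> unlimited (SIZE_MAX).
    VmaPool pool = VK_NULL_HANDLE;
    VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
    // ...
    vmaDestroyPool(allocator, pool);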
6906 
6907 void VmaAllocator_T::DestroyPool(VmaPool pool)
6908 {
6909  // Remove from m_Pools.
6910  {
6911  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
6912  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
6913  VMA_ASSERT(success && "Pool not found in Allocator.");
6914  }
6915 
6916  vma_delete(this, pool);
6917 }
6918 
6919 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
6920 {
6921  pool->m_BlockVector.GetPoolStats(pPoolStats);
6922 }
6923 
6924 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
6925 {
6926  m_CurrentFrameIndex.store(frameIndex);
6927 }
6928 
6929 void VmaAllocator_T::MakePoolAllocationsLost(
6930  VmaPool hPool,
6931  size_t* pLostAllocationCount)
6932 {
6933  hPool->m_BlockVector.MakePoolAllocationsLost(
6934  m_CurrentFrameIndex.load(),
6935  pLostAllocationCount);
6936 }
6937 
6938 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
6939 {
6940  *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST);
6941  (*pAllocation)->InitLost();
6942 }
6943 
6944 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
6945 {
6946  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
6947 
6948  VkResult res;
6949  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
6950  {
6951  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
6952  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
6953  {
6954  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
6955  if(res == VK_SUCCESS)
6956  {
6957  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
6958  }
6959  }
6960  else
6961  {
6962  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
6963  }
6964  }
6965  else
6966  {
6967  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
6968  }
6969 
6970  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
6971  {
6972  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
6973  }
6974 
6975  return res;
6976 }
6977 
6978 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
6979 {
6980  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
6981  {
6982  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
6983  }
6984 
6985  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
6986 
6987  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
6988  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
6989  {
6990  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
6991  m_HeapSizeLimit[heapIndex] += size;
6992  }
6993 }
6994 
6995 void VmaAllocator_T::FreeOwnMemory(VmaAllocation allocation)
6996 {
6997  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_OWN);
6998 
6999  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
7000  {
7001  VmaMutexLock lock(m_OwnAllocationsMutex[memTypeIndex], m_UseMutex);
7002  AllocationVectorType* const pOwnAllocations = m_pOwnAllocations[memTypeIndex][allocation->GetBlockVectorType()];
7003  VMA_ASSERT(pOwnAllocations);
7004  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pOwnAllocations, allocation);
7005  VMA_ASSERT(success);
7006  }
7007 
7008  VkDeviceMemory hMemory = allocation->GetMemory();
7009 
7010  if(allocation->GetMappedData() != VMA_NULL)
7011  {
7012  vkUnmapMemory(m_hDevice, hMemory);
7013  }
7014 
7015  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
7016 
7017  VMA_DEBUG_LOG(" Freed OwnMemory MemoryTypeIndex=%u", memTypeIndex);
7018 }
7019 
7020 #if VMA_STATS_STRING_ENABLED
7021 
7022 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
7023 {
7024  bool ownAllocationsStarted = false;
7025  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
7026  {
7027  VmaMutexLock ownAllocationsLock(m_OwnAllocationsMutex[memTypeIndex], m_UseMutex);
7028  for(uint32_t blockVectorType = 0; blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorType)
7029  {
7030  AllocationVectorType* const pOwnAllocVector = m_pOwnAllocations[memTypeIndex][blockVectorType];
7031  VMA_ASSERT(pOwnAllocVector);
7032  if(pOwnAllocVector->empty() == false)
7033  {
7034  if(ownAllocationsStarted == false)
7035  {
7036  ownAllocationsStarted = true;
7037  json.WriteString("OwnAllocations");
7038  json.BeginObject();
7039  }
7040 
7041  json.BeginString("Type ");
7042  json.ContinueString(memTypeIndex);
7043  if(blockVectorType == VMA_BLOCK_VECTOR_TYPE_MAPPED)
7044  {
7045  json.ContinueString(" Mapped");
7046  }
7047  json.EndString();
7048 
7049  json.BeginArray();
7050 
7051  for(size_t i = 0; i < pOwnAllocVector->size(); ++i)
7052  {
7053  const VmaAllocation hAlloc = (*pOwnAllocVector)[i];
7054  json.BeginObject(true);
7055 
7056  json.WriteString("Size");
7057  json.WriteNumber(hAlloc->GetSize());
7058 
7059  json.WriteString("Type");
7060  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[hAlloc->GetSuballocationType()]);
7061 
7062  json.EndObject();
7063  }
7064 
7065  json.EndArray();
7066  }
7067  }
7068  }
7069  if(ownAllocationsStarted)
7070  {
7071  json.EndObject();
7072  }
7073 
7074  {
7075  bool allocationsStarted = false;
7076  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
7077  {
7078  for(uint32_t blockVectorType = 0; blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorType)
7079  {
7080  if(m_pBlockVectors[memTypeIndex][blockVectorType]->IsEmpty() == false)
7081  {
7082  if(allocationsStarted == false)
7083  {
7084  allocationsStarted = true;
7085  json.WriteString("DefaultPools");
7086  json.BeginObject();
7087  }
7088 
7089  json.BeginString("Type ");
7090  json.ContinueString(memTypeIndex);
7091  if(blockVectorType == VMA_BLOCK_VECTOR_TYPE_MAPPED)
7092  {
7093  json.ContinueString(" Mapped");
7094  }
7095  json.EndString();
7096 
7097  m_pBlockVectors[memTypeIndex][blockVectorType]->PrintDetailedMap(json);
7098  }
7099  }
7100  }
7101  if(allocationsStarted)
7102  {
7103  json.EndObject();
7104  }
7105  }
7106 
7107  {
7108  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
7109  const size_t poolCount = m_Pools.size();
7110  if(poolCount > 0)
7111  {
7112  json.WriteString("Pools");
7113  json.BeginArray();
7114  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
7115  {
7116  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
7117  }
7118  json.EndArray();
7119  }
7120  }
7121 }
7122 
7123 #endif // #if VMA_STATS_STRING_ENABLED
7124 
7125 static VkResult AllocateMemoryForImage(
7126  VmaAllocator allocator,
7127  VkImage image,
7128  const VmaAllocationCreateInfo* pAllocationCreateInfo,
7129  VmaSuballocationType suballocType,
7130  VmaAllocation* pAllocation)
7131 {
7132  VMA_ASSERT(allocator && (image != VK_NULL_HANDLE) && pAllocationCreateInfo && pAllocation);
7133 
7134  VkMemoryRequirements vkMemReq = {};
7135  (*allocator->GetVulkanFunctions().vkGetImageMemoryRequirements)(allocator->m_hDevice, image, &vkMemReq);
7136 
7137  return allocator->AllocateMemory(
7138  vkMemReq,
7139  *pAllocationCreateInfo,
7140  suballocType,
7141  pAllocation);
7142 }
7143 
7144 ////////////////////////////////////////////////////////////////////////////////
7145 // Public interface
7146 
7147 VkResult vmaCreateAllocator(
7148  const VmaAllocatorCreateInfo* pCreateInfo,
7149  VmaAllocator* pAllocator)
7150 {
7151  VMA_ASSERT(pCreateInfo && pAllocator);
7152  VMA_DEBUG_LOG("vmaCreateAllocator");
7153  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
7154  return VK_SUCCESS;
7155 }
7156 
7157 void vmaDestroyAllocator(
7158  VmaAllocator allocator)
7159 {
7160  if(allocator != VK_NULL_HANDLE)
7161  {
7162  VMA_DEBUG_LOG("vmaDestroyAllocator");
7163  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
7164  vma_delete(&allocationCallbacks, allocator);
7165  }
7166 }
7167 
7168 void vmaGetPhysicalDeviceProperties(
7169  VmaAllocator allocator,
7170  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
7171 {
7172  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
7173  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
7174 }
7175 
7176 void vmaGetMemoryProperties(
7177  VmaAllocator allocator,
7178  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
7179 {
7180  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
7181  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
7182 }
7183 
7184 void vmaGetMemoryTypeProperties(
7185  VmaAllocator allocator,
7186  uint32_t memoryTypeIndex,
7187  VkMemoryPropertyFlags* pFlags)
7188 {
7189  VMA_ASSERT(allocator && pFlags);
7190  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
7191  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
7192 }
7193 
7194 void vmaSetCurrentFrameIndex(
7195  VmaAllocator allocator,
7196  uint32_t frameIndex)
7197 {
7198  VMA_ASSERT(allocator);
7199  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
7200 
7201  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7202 
7203  allocator->SetCurrentFrameIndex(frameIndex);
7204 }
7205 
7206 void vmaCalculateStats(
7207  VmaAllocator allocator,
7208  VmaStats* pStats)
7209 {
7210  VMA_ASSERT(allocator && pStats);
7211  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7212  allocator->CalculateStats(pStats);
7213 }
7214 
7215 #if VMA_STATS_STRING_ENABLED
7216 
7217 void vmaBuildStatsString(
7218  VmaAllocator allocator,
7219  char** ppStatsString,
7220  VkBool32 detailedMap)
7221 {
7222  VMA_ASSERT(allocator && ppStatsString);
7223  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7224 
7225  VmaStringBuilder sb(allocator);
7226  {
7227  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
7228  json.BeginObject();
7229 
7230  VmaStats stats;
7231  allocator->CalculateStats(&stats);
7232 
7233  json.WriteString("Total");
7234  VmaPrintStatInfo(json, stats.total);
7235 
7236  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
7237  {
7238  json.BeginString("Heap ");
7239  json.ContinueString(heapIndex);
7240  json.EndString();
7241  json.BeginObject();
7242 
7243  json.WriteString("Size");
7244  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
7245 
7246  json.WriteString("Flags");
7247  json.BeginArray(true);
7248  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
7249  {
7250  json.WriteString("DEVICE_LOCAL");
7251  }
7252  json.EndArray();
7253 
7254  if(stats.memoryHeap[heapIndex].blockCount > 0)
7255  {
7256  json.WriteString("Stats");
7257  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
7258  }
7259 
7260  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
7261  {
7262  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
7263  {
7264  json.BeginString("Type ");
7265  json.ContinueString(typeIndex);
7266  json.EndString();
7267 
7268  json.BeginObject();
7269 
7270  json.WriteString("Flags");
7271  json.BeginArray(true);
7272  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
7273  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
7274  {
7275  json.WriteString("DEVICE_LOCAL");
7276  }
7277  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
7278  {
7279  json.WriteString("HOST_VISIBLE");
7280  }
7281  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
7282  {
7283  json.WriteString("HOST_COHERENT");
7284  }
7285  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
7286  {
7287  json.WriteString("HOST_CACHED");
7288  }
7289  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
7290  {
7291  json.WriteString("LAZILY_ALLOCATED");
7292  }
7293  json.EndArray();
7294 
7295  if(stats.memoryType[typeIndex].blockCount > 0)
7296  {
7297  json.WriteString("Stats");
7298  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
7299  }
7300 
7301  json.EndObject();
7302  }
7303  }
7304 
7305  json.EndObject();
7306  }
7307  if(detailedMap == VK_TRUE)
7308  {
7309  allocator->PrintDetailedMap(json);
7310  }
7311 
7312  json.EndObject();
7313  }
7314 
7315  const size_t len = sb.GetLength();
7316  char* const pChars = vma_new_array(allocator, char, len + 1);
7317  if(len > 0)
7318  {
7319  memcpy(pChars, sb.GetData(), len);
7320  }
7321  pChars[len] = '\0';
7322  *ppStatsString = pChars;
7323 }
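
Typical round-trip for the pair of functions in this section (a sketch; the output is the JSON document built above):

    char* statsString = VMA_NULL;
    vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE: include detailed map.
    // ... write statsString to a log or file ...
    vmaFreeStatsString(allocator, statsString);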
7324 
7325 void vmaFreeStatsString(
7326  VmaAllocator allocator,
7327  char* pStatsString)
7328 {
7329  if(pStatsString != VMA_NULL)
7330  {
7331  VMA_ASSERT(allocator);
7332  size_t len = strlen(pStatsString);
7333  vma_delete_array(allocator, pStatsString, len + 1);
7334  }
7335 }
7336 
7337 #endif // #if VMA_STATS_STRING_ENABLED
7338 
7341 VkResult vmaFindMemoryTypeIndex(
7342  VmaAllocator allocator,
7343  uint32_t memoryTypeBits,
7344  const VmaAllocationCreateInfo* pAllocationCreateInfo,
7345  uint32_t* pMemoryTypeIndex)
7346 {
7347  VMA_ASSERT(allocator != VK_NULL_HANDLE);
7348  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
7349  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
7350 
7351  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
7352  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
7353  if(preferredFlags == 0)
7354  {
7355  preferredFlags = requiredFlags;
7356  }
7357  // preferredFlags, if not 0, must be a superset of requiredFlags.
7358  VMA_ASSERT((requiredFlags & ~preferredFlags) == 0);
7359 
7360  // Convert usage to requiredFlags and preferredFlags.
7361  switch(pAllocationCreateInfo->usage)
7362  {
7363  case VMA_MEMORY_USAGE_UNKNOWN:
7364  break;
7365  case VMA_MEMORY_USAGE_GPU_ONLY:
7366  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
7367  break;
7368  case VMA_MEMORY_USAGE_CPU_ONLY:
7369  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
7370  break;
7371  case VMA_MEMORY_USAGE_CPU_TO_GPU:
7372  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
7373  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
7374  break;
7375  case VMA_MEMORY_USAGE_GPU_TO_CPU:
7376  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
7377  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
7378  break;
7379  default:
7380  break;
7381  }
7382 
7383  if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT) != 0)
7384  {
7385  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
7386  }
7387 
7388  *pMemoryTypeIndex = UINT32_MAX;
7389  uint32_t minCost = UINT32_MAX;
7390  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
7391  memTypeIndex < allocator->GetMemoryTypeCount();
7392  ++memTypeIndex, memTypeBit <<= 1)
7393  {
7394  // This memory type is acceptable according to memoryTypeBits bitmask.
7395  if((memTypeBit & memoryTypeBits) != 0)
7396  {
7397  const VkMemoryPropertyFlags currFlags =
7398  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
7399  // This memory type contains requiredFlags.
7400  if((requiredFlags & ~currFlags) == 0)
7401  {
7402  // Calculate cost as number of bits from preferredFlags not present in this memory type.
7403  uint32_t currCost = CountBitsSet(preferredFlags & ~currFlags);
7404  // Remember memory type with lowest cost.
7405  if(currCost < minCost)
7406  {
7407  *pMemoryTypeIndex = memTypeIndex;
7408  if(currCost == 0)
7409  {
7410  return VK_SUCCESS;
7411  }
7412  minCost = currCost;
7413  }
7414  }
7415  }
7416  }
7417  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
7418 }
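
Usage sketch for vmaFindMemoryTypeIndex; in real code memoryTypeBits would come from vkGetBufferMemoryRequirements or vkGetImageMemoryRequirements for the concrete resource (here it is left as a placeholder variable):

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
    uint32_t memTypeIndex = UINT32_MAX;
    VkResult res = vmaFindMemoryTypeIndex(allocator, memoryTypeBits, &allocCreateInfo, &memTypeIndex);
    if(res == VK_SUCCESS)
    {
        // memTypeIndex is HOST_VISIBLE | HOST_COHERENT, with the fewest missing preferred flags.
    }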
7419 
7420 VkResult vmaCreatePool(
7421  VmaAllocator allocator,
7422  const VmaPoolCreateInfo* pCreateInfo,
7423  VmaPool* pPool)
7424 {
7425  VMA_ASSERT(allocator && pCreateInfo && pPool);
7426 
7427  VMA_DEBUG_LOG("vmaCreatePool");
7428 
7429  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7430 
7431  return allocator->CreatePool(pCreateInfo, pPool);
7432 }
7433 
7434 void vmaDestroyPool(
7435  VmaAllocator allocator,
7436  VmaPool pool)
7437 {
7438  VMA_ASSERT(allocator && pool);
7439 
7440  VMA_DEBUG_LOG("vmaDestroyPool");
7441 
7442  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7443 
7444  allocator->DestroyPool(pool);
7445 }
7446 
7447 void vmaGetPoolStats(
7448  VmaAllocator allocator,
7449  VmaPool pool,
7450  VmaPoolStats* pPoolStats)
7451 {
7452  VMA_ASSERT(allocator && pool && pPoolStats);
7453 
7454  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7455 
7456  allocator->GetPoolStats(pool, pPoolStats);
7457 }
7458 
7459 void vmaMakePoolAllocationsLost(
7460  VmaAllocator allocator,
7461  VmaPool pool,
7462  size_t* pLostAllocationCount)
7463 {
7464  VMA_ASSERT(allocator && pool);
7465 
7466  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7467 
7468  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
7469 }
7470 
7471 VkResult vmaAllocateMemory(
7472  VmaAllocator allocator,
7473  const VkMemoryRequirements* pVkMemoryRequirements,
7474  const VmaAllocationCreateInfo* pCreateInfo,
7475  VmaAllocation* pAllocation,
7476  VmaAllocationInfo* pAllocationInfo)
7477 {
7478  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
7479 
7480  VMA_DEBUG_LOG("vmaAllocateMemory");
7481 
7482  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7483 
7484  VkResult result = allocator->AllocateMemory(
7485  *pVkMemoryRequirements,
7486  *pCreateInfo,
7487  VMA_SUBALLOCATION_TYPE_UNKNOWN,
7488  pAllocation);
7489 
7490  if(pAllocationInfo && result == VK_SUCCESS)
7491  {
7492  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
7493  }
7494 
7495  return result;
7496 }
7497 
7498 VkResult vmaAllocateMemoryForBuffer(
7499  VmaAllocator allocator,
7500  VkBuffer buffer,
7501  const VmaAllocationCreateInfo* pCreateInfo,
7502  VmaAllocation* pAllocation,
7503  VmaAllocationInfo* pAllocationInfo)
7504 {
7505  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
7506 
7507  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
7508 
7509  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7510 
7511  VkMemoryRequirements vkMemReq = {};
7512  (*allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements)(allocator->m_hDevice, buffer, &vkMemReq);
7513 
7514  VkResult result = allocator->AllocateMemory(
7515  vkMemReq,
7516  *pCreateInfo,
7517  VMA_SUBALLOCATION_TYPE_BUFFER,
7518  pAllocation);
7519 
7520  if(pAllocationInfo && result == VK_SUCCESS)
7521  {
7522  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
7523  }
7524 
7525  return result;
7526 }
7527 
7528 VkResult vmaAllocateMemoryForImage(
7529  VmaAllocator allocator,
7530  VkImage image,
7531  const VmaAllocationCreateInfo* pCreateInfo,
7532  VmaAllocation* pAllocation,
7533  VmaAllocationInfo* pAllocationInfo)
7534 {
7535  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
7536 
7537  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
7538 
7539  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7540 
7541  VkResult result = AllocateMemoryForImage(
7542  allocator,
7543  image,
7544  pCreateInfo,
7545  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
7546  pAllocation);
7547 
7548  if(pAllocationInfo && result == VK_SUCCESS)
7549  {
7550  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
7551  }
7552 
7553  return result;
7554 }
7555 
7556 void vmaFreeMemory(
7557  VmaAllocator allocator,
7558  VmaAllocation allocation)
7559 {
7560  VMA_ASSERT(allocator && allocation);
7561 
7562  VMA_DEBUG_LOG("vmaFreeMemory");
7563 
7564  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7565 
7566  allocator->FreeMemory(allocation);
7567 }
7568 
7569 void vmaGetAllocationInfo(
7570  VmaAllocator allocator,
7571  VmaAllocation allocation,
7572  VmaAllocationInfo* pAllocationInfo)
7573 {
7574  VMA_ASSERT(allocator && allocation && pAllocationInfo);
7575 
7576  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7577 
7578  allocator->GetAllocationInfo(allocation, pAllocationInfo);
7579 }
7580 
7581 void vmaSetAllocationUserData(
7582  VmaAllocator allocator,
7583  VmaAllocation allocation,
7584  void* pUserData)
7585 {
7586  VMA_ASSERT(allocator && allocation);
7587 
7588  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7589 
7590  allocation->SetUserData(pUserData);
7591 }
7592 
7593 void vmaCreateLostAllocation(
7594  VmaAllocator allocator,
7595  VmaAllocation* pAllocation)
7596 {
7597  VMA_ASSERT(allocator && pAllocation);
7598 
7599  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
7600 
7601  allocator->CreateLostAllocation(pAllocation);
7602 }
7603 
7604 VkResult vmaMapMemory(
7605  VmaAllocator allocator,
7606  VmaAllocation allocation,
7607  void** ppData)
7608 {
7609  VMA_ASSERT(allocator && allocation && ppData);
7610 
7611  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7612 
7613  return vkMapMemory(allocator->m_hDevice, allocation->GetMemory(),
7614  allocation->GetOffset(), allocation->GetSize(), 0, ppData);
7615 }
7616 
7617 void vmaUnmapMemory(
7618  VmaAllocator allocator,
7619  VmaAllocation allocation)
7620 {
7621  VMA_ASSERT(allocator && allocation);
7622 
7623  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7624 
7625  vkUnmapMemory(allocator->m_hDevice, allocation->GetMemory());
7626 }
7627 
7628 void vmaUnmapPersistentlyMappedMemory(VmaAllocator allocator)
7629 {
7630  VMA_ASSERT(allocator);
7631 
7632  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7633 
7634  allocator->UnmapPersistentlyMappedMemory();
7635 }
7636 
7637 VkResult vmaMapPersistentlyMappedMemory(VmaAllocator allocator)
7638 {
7639  VMA_ASSERT(allocator);
7640 
7641  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7642 
7643  return allocator->MapPersistentlyMappedMemory();
7644 }
7645 
7646 VkResult vmaDefragment(
7647  VmaAllocator allocator,
7648  VmaAllocation* pAllocations,
7649  size_t allocationCount,
7650  VkBool32* pAllocationsChanged,
7651  const VmaDefragmentationInfo *pDefragmentationInfo,
7652  VmaDefragmentationStats* pDefragmentationStats)
7653 {
7654  VMA_ASSERT(allocator && pAllocations);
7655 
7656  VMA_DEBUG_LOG("vmaDefragment");
7657 
7658  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7659 
7660  return allocator->Defragment(pAllocations, allocationCount, pAllocationsChanged, pDefragmentationInfo, pDefragmentationStats);
7661 }
7662 
7663 VkResult vmaCreateBuffer(
7664  VmaAllocator allocator,
7665  const VkBufferCreateInfo* pBufferCreateInfo,
7666  const VmaAllocationCreateInfo* pAllocationCreateInfo,
7667  VkBuffer* pBuffer,
7668  VmaAllocation* pAllocation,
7669  VmaAllocationInfo* pAllocationInfo)
7670 {
7671  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
7672 
7673  VMA_DEBUG_LOG("vmaCreateBuffer");
7674 
7675  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7676 
7677  *pBuffer = VK_NULL_HANDLE;
7678  *pAllocation = VK_NULL_HANDLE;
7679 
7680  // 1. Create VkBuffer.
7681  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
7682  allocator->m_hDevice,
7683  pBufferCreateInfo,
7684  allocator->GetAllocationCallbacks(),
7685  pBuffer);
7686  if(res >= 0)
7687  {
7688  // 2. vkGetBufferMemoryRequirements.
7689  VkMemoryRequirements vkMemReq = {};
7690  (*allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements)(allocator->m_hDevice, *pBuffer, &vkMemReq);
7691 
7692  // 3. Allocate memory using allocator.
7693  res = allocator->AllocateMemory(
7694  vkMemReq,
7695  *pAllocationCreateInfo,
7696  VMA_SUBALLOCATION_TYPE_BUFFER,
7697  pAllocation);
7698  if(res >= 0)
7699  {
7700  // 3. Bind buffer with memory.
7701  res = (*allocator->GetVulkanFunctions().vkBindBufferMemory)(
7702  allocator->m_hDevice,
7703  *pBuffer,
7704  (*pAllocation)->GetMemory(),
7705  (*pAllocation)->GetOffset());
7706  if(res >= 0)
7707  {
7708  // All steps succeeded.
7709  if(pAllocationInfo != VMA_NULL)
7710  {
7711  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
7712  }
7713  return VK_SUCCESS;
7714  }
7715  allocator->FreeMemory(*pAllocation);
7716  *pAllocation = VK_NULL_HANDLE;
7717  return res;
7718  }
7719  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
7720  *pBuffer = VK_NULL_HANDLE;
7721  return res;
7722  }
7723  return res;
7724 }
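
End-to-end sketch for the function above (the buffer parameters are illustrative):

    VkBufferCreateInfo bufferCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufferCreateInfo.size = 65536;
    bufferCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkBuffer buffer = VK_NULL_HANDLE;
    VmaAllocation allocation = VK_NULL_HANDLE;
    VkResult res = vmaCreateBuffer(allocator, &bufferCreateInfo, &allocCreateInfo,
        &buffer, &allocation, VMA_NULL);
    // ...
    vmaDestroyBuffer(allocator, buffer, allocation);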
7725 
7726 void vmaDestroyBuffer(
7727  VmaAllocator allocator,
7728  VkBuffer buffer,
7729  VmaAllocation allocation)
7730 {
7731  if(buffer != VK_NULL_HANDLE)
7732  {
7733  VMA_ASSERT(allocator);
7734 
7735  VMA_DEBUG_LOG("vmaDestroyBuffer");
7736 
7737  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7738 
7739  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
7740 
7741  allocator->FreeMemory(allocation);
7742  }
7743 }
7744 
7745 VkResult vmaCreateImage(
7746  VmaAllocator allocator,
7747  const VkImageCreateInfo* pImageCreateInfo,
7748  const VmaAllocationCreateInfo* pAllocationCreateInfo,
7749  VkImage* pImage,
7750  VmaAllocation* pAllocation,
7751  VmaAllocationInfo* pAllocationInfo)
7752 {
7753  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
7754 
7755  VMA_DEBUG_LOG("vmaCreateImage");
7756 
7757  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7758 
7759  *pImage = VK_NULL_HANDLE;
7760  *pAllocation = VK_NULL_HANDLE;
7761 
7762  // 1. Create VkImage.
7763  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
7764  allocator->m_hDevice,
7765  pImageCreateInfo,
7766  allocator->GetAllocationCallbacks(),
7767  pImage);
7768  if(res >= 0)
7769  {
7770  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
7771  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
7772  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
7773 
7774  // 2. Allocate memory using allocator.
7775  res = AllocateMemoryForImage(allocator, *pImage, pAllocationCreateInfo, suballocType, pAllocation);
7776  if(res >= 0)
7777  {
7778  // 3. Bind image with memory.
7779  res = (*allocator->GetVulkanFunctions().vkBindImageMemory)(
7780  allocator->m_hDevice,
7781  *pImage,
7782  (*pAllocation)->GetMemory(),
7783  (*pAllocation)->GetOffset());
7784  if(res >= 0)
7785  {
7786  // All steps succeeded.
7787  if(pAllocationInfo != VMA_NULL)
7788  {
7789  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
7790  }
7791  return VK_SUCCESS;
7792  }
7793  allocator->FreeMemory(*pAllocation);
7794  *pAllocation = VK_NULL_HANDLE;
7795  return res;
7796  }
7797  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
7798  *pImage = VK_NULL_HANDLE;
7799  return res;
7800  }
7801  return res;
7802 }
7803 
7804 void vmaDestroyImage(
7805  VmaAllocator allocator,
7806  VkImage image,
7807  VmaAllocation allocation)
7808 {
7809  if(image != VK_NULL_HANDLE)
7810  {
7811  VMA_ASSERT(allocator);
7812 
7813  VMA_DEBUG_LOG("vmaDestroyImage");
7814 
7815  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7816 
7817  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
7818 
7819  allocator->FreeMemory(allocation);
7820  }
7821 }
7822 
7823 #endif // #ifdef VMA_IMPLEMENTATION
diff --git a/src/vk_mem_alloc.h b/src/vk_mem_alloc.h index 8900421..f35d717 100644 --- a/src/vk_mem_alloc.h +++ b/src/vk_mem_alloc.h @@ -3023,8 +3023,107 @@ struct VmaAllocationRequest }; /* -Represents a single block of device memory (VkDeviceMemory ) with all the -data about its regions (aka suballocations, VmaAllocation), assigned and free. +Data structure used for bookkeeping of allocations and unused ranges of memory +in a single VkDeviceMemory block. +*/ +class VmaBlockMetadata +{ +public: + VmaBlockMetadata(VmaAllocator hAllocator); + ~VmaBlockMetadata(); + void Init(VkDeviceSize size); + + // Validates all data structures inside this object. If not valid, returns false. + bool Validate() const; + size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; } + VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; } + VkDeviceSize GetUnusedRangeSizeMax() const; + // Returns true if this block is empty - contains only single free suballocation. + bool IsEmpty() const; + + void CalcAllocationStatInfo(VmaStatInfo& outInfo) const; + void AddPoolStats(VmaPoolStats& inoutStats) const; + +#if VMA_STATS_STRING_ENABLED + void PrintDetailedMap(class VmaJsonWriter& json) const; +#endif + + // Creates trivial request for case when block is empty. + void CreateFirstAllocationRequest(VmaAllocationRequest* pAllocationRequest); + + // Tries to find a place for suballocation with given parameters inside this block. + // If succeeded, fills pAllocationRequest and returns true. + // If failed, returns false. + bool CreateAllocationRequest( + uint32_t currentFrameIndex, + uint32_t frameInUseCount, + VkDeviceSize bufferImageGranularity, + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + VmaSuballocationType allocType, + bool canMakeOtherLost, + VmaAllocationRequest* pAllocationRequest); + + bool MakeRequestedAllocationsLost( + uint32_t currentFrameIndex, + uint32_t frameInUseCount, + VmaAllocationRequest* pAllocationRequest); + + uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount); + + // Makes actual allocation based on request. Request must already be checked and valid. + void Alloc( + const VmaAllocationRequest& request, + VmaSuballocationType type, + VkDeviceSize allocSize, + VmaAllocation hAllocation); + + // Frees suballocation assigned to given memory region. + void Free(const VmaAllocation allocation); + +private: + VkDeviceSize m_Size; + uint32_t m_FreeCount; + VkDeviceSize m_SumFreeSize; + VmaSuballocationList m_Suballocations; + // Suballocations that are free and have size greater than certain threshold. + // Sorted by size, ascending. + VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize; + + bool ValidateFreeSuballocationList() const; + + // Checks if requested suballocation with given parameters can be placed in given pFreeSuballocItem. + // If yes, fills pOffset and returns true. If no, returns false. + bool CheckAllocation( + uint32_t currentFrameIndex, + uint32_t frameInUseCount, + VkDeviceSize bufferImageGranularity, + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + VmaSuballocationType allocType, + VmaSuballocationList::const_iterator suballocItem, + bool canMakeOtherLost, + VkDeviceSize* pOffset, + size_t* itemsToMakeLostCount, + VkDeviceSize* pSumFreeSize, + VkDeviceSize* pSumItemSize) const; + // Given free suballocation, it merges it with following one, which must also be free. 
+ void MergeFreeWithNext(VmaSuballocationList::iterator item); + // Releases given suballocation, making it free. + // Merges it with adjacent free suballocations if applicable. + // Returns iterator to new free suballocation at this place. + VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem); + // Given free suballocation, it inserts it into sorted list of + // m_FreeSuballocationsBySize if it's suitable. + void RegisterFreeSuballocation(VmaSuballocationList::iterator item); + // Given free suballocation, it removes it from sorted list of + // m_FreeSuballocationsBySize if it's suitable. + void UnregisterFreeSuballocation(VmaSuballocationList::iterator item); +}; + +/* +Represents a single block of device memory (`VkDeviceMemory`) with all the +data about its regions (aka suballocations, `VmaAllocation`), assigned and free. Thread-safety: This class must be externally synchronized. */ @@ -3037,12 +3136,7 @@ public: VkDeviceSize m_Size; bool m_PersistentMap; void* m_pMappedData; - uint32_t m_FreeCount; - VkDeviceSize m_SumFreeSize; - VmaSuballocationList m_Suballocations; - // Suballocations that are free and have size greater than certain threshold. - // Sorted by size, ascending. - VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize; + VmaBlockMetadata m_Metadata; VmaDeviceMemoryBlock(VmaAllocator hAllocator); @@ -3064,75 +3158,6 @@ public: // Validates all data structures inside this object. If not valid, returns false. bool Validate() const; - - VkDeviceSize GetUnusedRangeSizeMax() const; - - // Tries to find a place for suballocation with given parameters inside this allocation. - // If succeeded, fills pAllocationRequest and returns true. - // If failed, returns false. - bool CreateAllocationRequest( - uint32_t currentFrameIndex, - uint32_t frameInUseCount, - VkDeviceSize bufferImageGranularity, - VkDeviceSize allocSize, - VkDeviceSize allocAlignment, - VmaSuballocationType allocType, - bool canMakeOtherLost, - VmaAllocationRequest* pAllocationRequest); - - bool MakeRequestedAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount, VmaAllocationRequest* pAllocationRequest); - - uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount); - - // Returns true if this allocation is empty - contains only single free suballocation. - bool IsEmpty() const; - - // Makes actual allocation based on request. Request must already be checked - // and valid. - void Alloc( - const VmaAllocationRequest& request, - VmaSuballocationType type, - VkDeviceSize allocSize, - VmaAllocation hAllocation); - - // Frees suballocation assigned to given memory region. - void Free(const VmaAllocation allocation); - -#if VMA_STATS_STRING_ENABLED - void PrintDetailedMap(class VmaJsonWriter& json) const; -#endif - -private: - // Checks if requested suballocation with given parameters can be placed in given pFreeSuballocItem. - // If yes, fills pOffset and returns true. If no, returns false. 
- bool CheckAllocation( - uint32_t currentFrameIndex, - uint32_t frameInUseCount, - VkDeviceSize bufferImageGranularity, - VkDeviceSize allocSize, - VkDeviceSize allocAlignment, - VmaSuballocationType allocType, - VmaSuballocationList::const_iterator suballocItem, - bool canMakeOtherLost, - VkDeviceSize* pOffset, - size_t* itemsToMakeLostCount, - VkDeviceSize* pSumFreeSize, - VkDeviceSize* pSumItemSize) const; - - // Given free suballocation, it merges it with following one, which must also be free. - void MergeFreeWithNext(VmaSuballocationList::iterator item); - // Releases given suballocation, making it free. - // Merges it with adjacent free suballocations if applicable. - // Returns iterator to new free suballocation at this place. - VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem); - // Given free suballocation, it inserts it into sorted list of - // m_FreeSuballocationsBySize if it's suitable. - void RegisterFreeSuballocation(VmaSuballocationList::iterator item); - // Given free suballocation, it removes it from sorted list of - // m_FreeSuballocationsBySize if it's suitable. - void UnregisterFreeSuballocation(VmaSuballocationList::iterator item); - - bool ValidateFreeSuballocationList() const; }; struct VmaPointerLess @@ -3310,8 +3335,7 @@ class VmaDefragmentator void CalcHasNonMovableAllocations() { - const size_t blockAllocCount = - m_pBlock->m_Suballocations.size() - m_pBlock->m_FreeCount; + const size_t blockAllocCount = m_pBlock->m_Metadata.GetAllocationCount(); const size_t defragmentAllocCount = m_Allocations.size(); m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount; } @@ -3355,7 +3379,7 @@ class VmaDefragmentator { return false; } - if(pLhsBlockInfo->m_pBlock->m_SumFreeSize < pRhsBlockInfo->m_pBlock->m_SumFreeSize) + if(pLhsBlockInfo->m_pBlock->m_Metadata.GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_Metadata.GetSumFreeSize()) { return true; } @@ -4141,13 +4165,11 @@ struct VmaSuballocationItemSizeLess } }; -VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) : - m_MemoryTypeIndex(UINT32_MAX), - m_BlockVectorType(VMA_BLOCK_VECTOR_TYPE_COUNT), - m_hMemory(VK_NULL_HANDLE), +//////////////////////////////////////////////////////////////////////////////// +// class VmaBlockMetadata + +VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) : m_Size(0), - m_PersistentMap(false), - m_pMappedData(VMA_NULL), m_FreeCount(0), m_SumFreeSize(0), m_Suballocations(VmaStlAllocator(hAllocator->GetAllocationCallbacks())), @@ -4155,31 +4177,19 @@ VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) : { } -void VmaDeviceMemoryBlock::Init( - uint32_t newMemoryTypeIndex, - VMA_BLOCK_VECTOR_TYPE newBlockVectorType, - VkDeviceMemory newMemory, - VkDeviceSize newSize, - bool persistentMap, - void* pMappedData) +VmaBlockMetadata::~VmaBlockMetadata() { - VMA_ASSERT(m_hMemory == VK_NULL_HANDLE); +} - m_MemoryTypeIndex = newMemoryTypeIndex; - m_BlockVectorType = newBlockVectorType; - m_hMemory = newMemory; - m_Size = newSize; - m_PersistentMap = persistentMap; - m_pMappedData = pMappedData; +void VmaBlockMetadata::Init(VkDeviceSize size) +{ + m_Size = size; m_FreeCount = 1; - m_SumFreeSize = newSize; - - m_Suballocations.clear(); - m_FreeSuballocationsBySize.clear(); + m_SumFreeSize = size; VmaSuballocation suballoc = {}; suballoc.offset = 0; - suballoc.size = newSize; + suballoc.size = size; suballoc.type = VMA_SUBALLOCATION_TYPE_FREE; suballoc.hAllocation = VK_NULL_HANDLE; @@ -4189,28 +4199,9 @@ void 
     m_FreeSuballocationsBySize.push_back(suballocItem);
 }
 
-void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
+bool VmaBlockMetadata::Validate() const
 {
-    // This is the most important assert in the entire library.
-    // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
-    VMA_ASSERT(IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
-
-    VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
-    if(m_pMappedData != VMA_NULL)
-    {
-        (allocator->GetVulkanFunctions().vkUnmapMemory)(allocator->m_hDevice, m_hMemory);
-        m_pMappedData = VMA_NULL;
-    }
-
-    allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_Size, m_hMemory);
-    m_hMemory = VK_NULL_HANDLE;
-}
-
-bool VmaDeviceMemoryBlock::Validate() const
-{
-    if((m_hMemory == VK_NULL_HANDLE) ||
-        (m_Size == 0) ||
-        m_Suballocations.empty())
+    if(m_Suballocations.empty())
     {
         return false;
     }
@@ -4293,12 +4284,13 @@ bool VmaDeviceMemoryBlock::Validate() const
 
     // Check if totals match calculated values.
     return
+        ValidateFreeSuballocationList() &&
         (calculatedOffset == m_Size) &&
         (calculatedSumFreeSize == m_SumFreeSize) &&
         (calculatedFreeCount == m_FreeCount);
 }
 
-VkDeviceSize VmaDeviceMemoryBlock::GetUnusedRangeSizeMax() const
+VkDeviceSize VmaBlockMetadata::GetUnusedRangeSizeMax() const
 {
     if(!m_FreeSuballocationsBySize.empty())
     {
@@ -4310,6 +4302,101 @@ VkDeviceSize VmaDeviceMemoryBlock::GetUnusedRangeSizeMax() const
     }
 }
 
+bool VmaBlockMetadata::IsEmpty() const
+{
+    return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
+}
+
+void VmaBlockMetadata::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
+{
+    outInfo.blockCount = 1;
+
+    const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
+    outInfo.allocationCount = rangeCount - m_FreeCount;
+    outInfo.unusedRangeCount = m_FreeCount;
+
+    outInfo.unusedBytes = m_SumFreeSize;
+    outInfo.usedBytes = m_Size - outInfo.unusedBytes;
+
+    outInfo.allocationSizeMin = UINT64_MAX;
+    outInfo.allocationSizeMax = 0;
+    outInfo.unusedRangeSizeMin = UINT64_MAX;
+    outInfo.unusedRangeSizeMax = 0;
+
+    for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
+        suballocItem != m_Suballocations.cend();
+        ++suballocItem)
+    {
+        const VmaSuballocation& suballoc = *suballocItem;
+        if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
+        {
+            outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
+            outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
+        }
+        else
+        {
+            outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
+            outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
+        }
+    }
+}
+
+void VmaBlockMetadata::AddPoolStats(VmaPoolStats& inoutStats) const
+{
+    const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
+
+    inoutStats.size += m_Size;
+    inoutStats.unusedSize += m_SumFreeSize;
+    inoutStats.allocationCount += rangeCount - m_FreeCount;
+    inoutStats.unusedRangeCount += m_FreeCount;
+    inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
+}
+
+#if VMA_STATS_STRING_ENABLED
+
+void VmaBlockMetadata::PrintDetailedMap(class VmaJsonWriter& json) const
+{
+    json.BeginObject();
+
+    json.WriteString("TotalBytes");
+    json.WriteNumber(m_Size);
+
+    json.WriteString("UnusedBytes");
+    json.WriteNumber(m_SumFreeSize);
+
+    json.WriteString("Allocations");
+    json.WriteNumber(m_Suballocations.size() - m_FreeCount);
+
+    json.WriteString("UnusedRanges");
+    json.WriteNumber(m_FreeCount);
+
+
json.WriteString("Suballocations"); + json.BeginArray(); + size_t i = 0; + for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin(); + suballocItem != m_Suballocations.cend(); + ++suballocItem, ++i) + { + json.BeginObject(true); + + json.WriteString("Type"); + json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[suballocItem->type]); + + json.WriteString("Size"); + json.WriteNumber(suballocItem->size); + + json.WriteString("Offset"); + json.WriteNumber(suballocItem->offset); + + json.EndObject(); + } + json.EndArray(); + + json.EndObject(); +} + +#endif // #if VMA_STATS_STRING_ENABLED + /* How many suitable free suballocations to analyze before choosing best one. - Set to 1 to use First-Fit algorithm - first suitable free suballocation will @@ -4320,7 +4407,17 @@ How many suitable free suballocations to analyze before choosing best one. */ //static const uint32_t MAX_SUITABLE_SUBALLOCATIONS_TO_CHECK = 8; -bool VmaDeviceMemoryBlock::CreateAllocationRequest( +void VmaBlockMetadata::CreateFirstAllocationRequest(VmaAllocationRequest* pAllocationRequest) +{ + VMA_ASSERT(IsEmpty()); + pAllocationRequest->offset = 0; + pAllocationRequest->sumFreeSize = m_SumFreeSize; + pAllocationRequest->sumItemSize = 0; + pAllocationRequest->item = m_Suballocations.begin(); + pAllocationRequest->itemsToMakeLostCount = 0; +} + +bool VmaBlockMetadata::CreateAllocationRequest( uint32_t currentFrameIndex, uint32_t frameInUseCount, VkDeviceSize bufferImageGranularity, @@ -4449,7 +4546,10 @@ bool VmaDeviceMemoryBlock::CreateAllocationRequest( return false; } -bool VmaDeviceMemoryBlock::MakeRequestedAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount, VmaAllocationRequest* pAllocationRequest) +bool VmaBlockMetadata::MakeRequestedAllocationsLost( + uint32_t currentFrameIndex, + uint32_t frameInUseCount, + VmaAllocationRequest* pAllocationRequest) { while(pAllocationRequest->itemsToMakeLostCount > 0) { @@ -4478,7 +4578,7 @@ bool VmaDeviceMemoryBlock::MakeRequestedAllocationsLost(uint32_t currentFrameInd return true; } -uint32_t VmaDeviceMemoryBlock::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) +uint32_t VmaBlockMetadata::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) { uint32_t lostAllocationCount = 0; for(VmaSuballocationList::iterator it = m_Suballocations.begin(); @@ -4496,7 +4596,116 @@ uint32_t VmaDeviceMemoryBlock::MakeAllocationsLost(uint32_t currentFrameIndex, u return lostAllocationCount; } -bool VmaDeviceMemoryBlock::CheckAllocation( +void VmaBlockMetadata::Alloc( + const VmaAllocationRequest& request, + VmaSuballocationType type, + VkDeviceSize allocSize, + VmaAllocation hAllocation) +{ + VMA_ASSERT(request.item != m_Suballocations.end()); + VmaSuballocation& suballoc = *request.item; + // Given suballocation is a free block. + VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE); + // Given offset is inside this suballocation. + VMA_ASSERT(request.offset >= suballoc.offset); + const VkDeviceSize paddingBegin = request.offset - suballoc.offset; + VMA_ASSERT(suballoc.size >= paddingBegin + allocSize); + const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize; + + // Unregister this free suballocation from m_FreeSuballocationsBySize and update + // it to become used. 
+    UnregisterFreeSuballocation(request.item);
+
+    suballoc.offset = request.offset;
+    suballoc.size = allocSize;
+    suballoc.type = type;
+    suballoc.hAllocation = hAllocation;
+
+    // If there are any free bytes remaining at the end, insert new free suballocation after current one.
+    if(paddingEnd)
+    {
+        VmaSuballocation paddingSuballoc = {};
+        paddingSuballoc.offset = request.offset + allocSize;
+        paddingSuballoc.size = paddingEnd;
+        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
+        VmaSuballocationList::iterator next = request.item;
+        ++next;
+        const VmaSuballocationList::iterator paddingEndItem =
+            m_Suballocations.insert(next, paddingSuballoc);
+        RegisterFreeSuballocation(paddingEndItem);
+    }
+
+    // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
+    if(paddingBegin)
+    {
+        VmaSuballocation paddingSuballoc = {};
+        paddingSuballoc.offset = request.offset - paddingBegin;
+        paddingSuballoc.size = paddingBegin;
+        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
+        const VmaSuballocationList::iterator paddingBeginItem =
+            m_Suballocations.insert(request.item, paddingSuballoc);
+        RegisterFreeSuballocation(paddingBeginItem);
+    }
+
+    // Update totals.
+    m_FreeCount = m_FreeCount - 1;
+    if(paddingBegin > 0)
+    {
+        ++m_FreeCount;
+    }
+    if(paddingEnd > 0)
+    {
+        ++m_FreeCount;
+    }
+    m_SumFreeSize -= allocSize;
+}
+
+void VmaBlockMetadata::Free(const VmaAllocation allocation)
+{
+    for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
+        suballocItem != m_Suballocations.end();
+        ++suballocItem)
+    {
+        VmaSuballocation& suballoc = *suballocItem;
+        if(suballoc.hAllocation == allocation)
+        {
+            FreeSuballocation(suballocItem);
+            VMA_HEAVY_ASSERT(Validate());
+            return;
+        }
+    }
+    VMA_ASSERT(0 && "Not found!");
+}
+
+bool VmaBlockMetadata::ValidateFreeSuballocationList() const
+{
+    VkDeviceSize lastSize = 0;
+    for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
+    {
+        const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
+
+        if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
+        {
+            VMA_ASSERT(0);
+            return false;
+        }
+        if(it->size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
+        {
+            VMA_ASSERT(0);
+            return false;
+        }
+        if(it->size < lastSize)
+        {
+            VMA_ASSERT(0);
+            return false;
+        }
+
+        lastSize = it->size;
+    }
+    return true;
+}
+
+bool VmaBlockMetadata::CheckAllocation(
     uint32_t currentFrameIndex,
     uint32_t frameInUseCount,
     VkDeviceSize bufferImageGranularity,
@@ -4778,76 +4987,22 @@ bool VmaDeviceMemoryBlock::CheckAllocation(
     return true;
 }
 
-bool VmaDeviceMemoryBlock::IsEmpty() const
+void VmaBlockMetadata::MergeFreeWithNext(VmaSuballocationList::iterator item)
 {
-    return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
+    VMA_ASSERT(item != m_Suballocations.end());
+    VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
+
+    VmaSuballocationList::iterator nextItem = item;
+    ++nextItem;
+    VMA_ASSERT(nextItem != m_Suballocations.end());
+    VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
+
+    item->size += nextItem->size;
+    --m_FreeCount;
+    m_Suballocations.erase(nextItem);
 }
 
-void VmaDeviceMemoryBlock::Alloc(
-    const VmaAllocationRequest& request,
-    VmaSuballocationType type,
-    VkDeviceSize allocSize,
-    VmaAllocation hAllocation)
-{
-    VMA_ASSERT(request.item != m_Suballocations.end());
-    VmaSuballocation& suballoc = *request.item;
-    // Given suballocation is a free block.
-    VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
-    // Given offset is inside this suballocation.
-    VMA_ASSERT(request.offset >= suballoc.offset);
-    const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
-    VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
-    const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
-
-    // Unregister this free suballocation from m_FreeSuballocationsBySize and update
-    // it to become used.
-    UnregisterFreeSuballocation(request.item);
-
-    suballoc.offset = request.offset;
-    suballoc.size = allocSize;
-    suballoc.type = type;
-    suballoc.hAllocation = hAllocation;
-
-    // If there are any free bytes remaining at the end, insert new free suballocation after current one.
-    if(paddingEnd)
-    {
-        VmaSuballocation paddingSuballoc = {};
-        paddingSuballoc.offset = request.offset + allocSize;
-        paddingSuballoc.size = paddingEnd;
-        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
-        VmaSuballocationList::iterator next = request.item;
-        ++next;
-        const VmaSuballocationList::iterator paddingEndItem =
-            m_Suballocations.insert(next, paddingSuballoc);
-        RegisterFreeSuballocation(paddingEndItem);
-    }
-
-    // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
-    if(paddingBegin)
-    {
-        VmaSuballocation paddingSuballoc = {};
-        paddingSuballoc.offset = request.offset - paddingBegin;
-        paddingSuballoc.size = paddingBegin;
-        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
-        const VmaSuballocationList::iterator paddingBeginItem =
-            m_Suballocations.insert(request.item, paddingSuballoc);
-        RegisterFreeSuballocation(paddingBeginItem);
-    }
-
-    // Update totals.
-    m_FreeCount = m_FreeCount - 1;
-    if(paddingBegin > 0)
-    {
-        ++m_FreeCount;
-    }
-    if(paddingEnd > 0)
-    {
-        ++m_FreeCount;
-    }
-    m_SumFreeSize -= allocSize;
-}
-
-VmaSuballocationList::iterator VmaDeviceMemoryBlock::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
+VmaSuballocationList::iterator VmaBlockMetadata::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
 {
     // Change this suballocation to be marked as free.
VmaSuballocation& suballoc = *suballocItem; @@ -4899,84 +5054,7 @@ VmaSuballocationList::iterator VmaDeviceMemoryBlock::FreeSuballocation(VmaSuball } } -void VmaDeviceMemoryBlock::Free(const VmaAllocation allocation) -{ - for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin(); - suballocItem != m_Suballocations.end(); - ++suballocItem) - { - VmaSuballocation& suballoc = *suballocItem; - if(suballoc.hAllocation == allocation) - { - FreeSuballocation(suballocItem); - VMA_HEAVY_ASSERT(Validate()); - return; - } - } - VMA_ASSERT(0 && "Not found!"); -} - -#if VMA_STATS_STRING_ENABLED - -void VmaDeviceMemoryBlock::PrintDetailedMap(class VmaJsonWriter& json) const -{ - json.BeginObject(); - - json.WriteString("TotalBytes"); - json.WriteNumber(m_Size); - - json.WriteString("UnusedBytes"); - json.WriteNumber(m_SumFreeSize); - - json.WriteString("Allocations"); - json.WriteNumber(m_Suballocations.size() - m_FreeCount); - - json.WriteString("UnusedRanges"); - json.WriteNumber(m_FreeCount); - - json.WriteString("Suballocations"); - json.BeginArray(); - size_t i = 0; - for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin(); - suballocItem != m_Suballocations.cend(); - ++suballocItem, ++i) - { - json.BeginObject(true); - - json.WriteString("Type"); - json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[suballocItem->type]); - - json.WriteString("Size"); - json.WriteNumber(suballocItem->size); - - json.WriteString("Offset"); - json.WriteNumber(suballocItem->offset); - - json.EndObject(); - } - json.EndArray(); - - json.EndObject(); -} - -#endif // #if VMA_STATS_STRING_ENABLED - -void VmaDeviceMemoryBlock::MergeFreeWithNext(VmaSuballocationList::iterator item) -{ - VMA_ASSERT(item != m_Suballocations.end()); - VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE); - - VmaSuballocationList::iterator nextItem = item; - ++nextItem; - VMA_ASSERT(nextItem != m_Suballocations.end()); - VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE); - - item->size += nextItem->size; - --m_FreeCount; - m_Suballocations.erase(nextItem); -} - -void VmaDeviceMemoryBlock::RegisterFreeSuballocation(VmaSuballocationList::iterator item) +void VmaBlockMetadata::RegisterFreeSuballocation(VmaSuballocationList::iterator item) { VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE); VMA_ASSERT(item->size > 0); @@ -5001,7 +5079,7 @@ void VmaDeviceMemoryBlock::RegisterFreeSuballocation(VmaSuballocationList::itera } -void VmaDeviceMemoryBlock::UnregisterFreeSuballocation(VmaSuballocationList::iterator item) +void VmaBlockMetadata::UnregisterFreeSuballocation(VmaSuballocationList::iterator item) { VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE); VMA_ASSERT(item->size > 0); @@ -5034,32 +5112,66 @@ void VmaDeviceMemoryBlock::UnregisterFreeSuballocation(VmaSuballocationList::ite //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList()); } -bool VmaDeviceMemoryBlock::ValidateFreeSuballocationList() const +//////////////////////////////////////////////////////////////////////////////// +// class VmaDeviceMemoryBlock + +VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) : + m_MemoryTypeIndex(UINT32_MAX), + m_BlockVectorType(VMA_BLOCK_VECTOR_TYPE_COUNT), + m_hMemory(VK_NULL_HANDLE), + m_Size(0), + m_PersistentMap(false), + m_pMappedData(VMA_NULL), + m_Metadata(hAllocator) { - VkDeviceSize lastSize = 0; - for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i) +} + +void VmaDeviceMemoryBlock::Init( + uint32_t newMemoryTypeIndex, + VMA_BLOCK_VECTOR_TYPE 
+    VkDeviceMemory newMemory,
+    VkDeviceSize newSize,
+    bool persistentMap,
+    void* pMappedData)
+{
+    VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
+
+    m_MemoryTypeIndex = newMemoryTypeIndex;
+    m_BlockVectorType = newBlockVectorType;
+    m_hMemory = newMemory;
+    m_Size = newSize;
+    m_PersistentMap = persistentMap;
+    m_pMappedData = pMappedData;
+
+    m_Metadata.Init(newSize);
+}
+
+void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
+{
+    // This is the most important assert in the entire library.
+    // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
+    VMA_ASSERT(m_Metadata.IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
+
+    VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
+    if(m_pMappedData != VMA_NULL)
     {
-        const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
-
-        if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
-        {
-            VMA_ASSERT(0);
-            return false;
-        }
-        if(it->size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
-        {
-            VMA_ASSERT(0);
-            return false;
-        }
-        if(it->size < lastSize)
-        {
-            VMA_ASSERT(0);
-            return false;
-        }
-
-        lastSize = it->size;
+        (allocator->GetVulkanFunctions().vkUnmapMemory)(allocator->m_hDevice, m_hMemory);
+        m_pMappedData = VMA_NULL;
     }
-    return true;
+
+    allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_Size, m_hMemory);
+    m_hMemory = VK_NULL_HANDLE;
+}
+
+bool VmaDeviceMemoryBlock::Validate() const
+{
+    if((m_hMemory == VK_NULL_HANDLE) ||
+        (m_Size == 0))
+    {
+        return false;
+    }
+
+    return m_Metadata.Validate();
 }
 
 static void InitStatInfo(VmaStatInfo& outInfo)
@@ -5069,40 +5181,6 @@ static void InitStatInfo(VmaStatInfo& outInfo)
     outInfo.unusedRangeSizeMin = UINT64_MAX;
 }
 
-static void CalcAllocationStatInfo(VmaStatInfo& outInfo, const VmaDeviceMemoryBlock& block)
-{
-    outInfo.blockCount = 1;
-
-    const uint32_t rangeCount = (uint32_t)block.m_Suballocations.size();
-    outInfo.allocationCount = rangeCount - block.m_FreeCount;
-    outInfo.unusedRangeCount = block.m_FreeCount;
-
-    outInfo.unusedBytes = block.m_SumFreeSize;
-    outInfo.usedBytes = block.m_Size - outInfo.unusedBytes;
-
-    outInfo.allocationSizeMin = UINT64_MAX;
-    outInfo.allocationSizeMax = 0;
-    outInfo.unusedRangeSizeMin = UINT64_MAX;
-    outInfo.unusedRangeSizeMax = 0;
-
-    for(VmaSuballocationList::const_iterator suballocItem = block.m_Suballocations.cbegin();
-        suballocItem != block.m_Suballocations.cend();
-        ++suballocItem)
-    {
-        const VmaSuballocation& suballoc = *suballocItem;
-        if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
-        {
-            outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
-            outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
-        }
-        else
-        {
-            outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
-            outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
-        }
-    }
-}
-
 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
 {
@@ -5214,14 +5292,7 @@ void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
         VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
         VMA_ASSERT(pBlock);
         VMA_HEAVY_ASSERT(pBlock->Validate());
-
-        const uint32_t rangeCount = (uint32_t)pBlock->m_Suballocations.size();
-
-        pStats->size += pBlock->m_Size;
-        pStats->unusedSize += pBlock->m_SumFreeSize;
-        pStats->allocationCount += rangeCount - pBlock->m_FreeCount;
-        pStats->unusedRangeCount += pBlock->m_FreeCount;
-        pStats->unusedRangeSizeMax = VMA_MAX(pStats->unusedRangeSizeMax, pBlock->GetUnusedRangeSizeMax());
+        pBlock->m_Metadata.AddPoolStats(*pStats);
     }
 }
 
@@ -5252,7 +5323,7 @@ VkResult VmaBlockVector::Allocate(
         VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
         VMA_ASSERT(pCurrBlock);
         VmaAllocationRequest currRequest = {};
-        if(pCurrBlock->CreateAllocationRequest(
+        if(pCurrBlock->m_Metadata.CreateAllocationRequest(
            currentFrameIndex,
            m_FrameInUseCount,
            m_BufferImageGranularity,
@@ -5266,13 +5337,13 @@ VkResult VmaBlockVector::Allocate(
            VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
 
            // We no longer have an empty Allocation.
-            if(pCurrBlock->IsEmpty())
+            if(pCurrBlock->m_Metadata.IsEmpty())
            {
                m_HasEmptyBlock = false;
            }
 
            *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex);
-            pCurrBlock->Alloc(currRequest, suballocType, vkMemReq.size, *pAllocation);
+            pCurrBlock->m_Metadata.Alloc(currRequest, suballocType, vkMemReq.size, *pAllocation);
            (*pAllocation)->InitBlockAllocation(
                hCurrentPool,
                pCurrBlock,
@@ -5325,11 +5396,10 @@ VkResult VmaBlockVector::Allocate(
            VMA_ASSERT(pBlock->m_Size >= vkMemReq.size);
 
            // Allocate from pBlock. Because it is empty, allocRequest can be trivially filled.
-            VmaAllocationRequest allocRequest = {};
-            allocRequest.item = pBlock->m_Suballocations.begin();
-            allocRequest.offset = 0;
+            VmaAllocationRequest allocRequest;
+            pBlock->m_Metadata.CreateFirstAllocationRequest(&allocRequest);
            *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex);
-            pBlock->Alloc(allocRequest, suballocType, vkMemReq.size, *pAllocation);
+            pBlock->m_Metadata.Alloc(allocRequest, suballocType, vkMemReq.size, *pAllocation);
            (*pAllocation)->InitBlockAllocation(
                hCurrentPool,
                pBlock,
@@ -5365,7 +5435,7 @@ VkResult VmaBlockVector::Allocate(
            VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
            VMA_ASSERT(pCurrBlock);
            VmaAllocationRequest currRequest = {};
-            if(pCurrBlock->CreateAllocationRequest(
+            if(pCurrBlock->m_Metadata.CreateAllocationRequest(
                currentFrameIndex,
                m_FrameInUseCount,
                m_BufferImageGranularity,
@@ -5393,19 +5463,19 @@ VkResult VmaBlockVector::Allocate(
 
    if(pBestRequestBlock != VMA_NULL)
    {
-        if(pBestRequestBlock->MakeRequestedAllocationsLost(
+        if(pBestRequestBlock->m_Metadata.MakeRequestedAllocationsLost(
            currentFrameIndex,
            m_FrameInUseCount,
            &bestRequest))
        {
            // We no longer have an empty Allocation.
-            if(pBestRequestBlock->IsEmpty())
+            if(pBestRequestBlock->m_Metadata.IsEmpty())
            {
                m_HasEmptyBlock = false;
            }
 
            // Allocate from this pBlock.
            *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex);
-            pBestRequestBlock->Alloc(bestRequest, suballocType, vkMemReq.size, *pAllocation);
+            pBestRequestBlock->m_Metadata.Alloc(bestRequest, suballocType, vkMemReq.size, *pAllocation);
            (*pAllocation)->InitBlockAllocation(
                hCurrentPool,
                pBestRequestBlock,
@@ -5450,13 +5520,13 @@ void VmaBlockVector::Free(
 
        VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
 
-        pBlock->Free(hAllocation);
+        pBlock->m_Metadata.Free(hAllocation);
        VMA_HEAVY_ASSERT(pBlock->Validate());
 
        VMA_DEBUG_LOG("  Freed from MemoryTypeIndex=%u", memTypeIndex);
 
        // pBlock became empty after this deallocation.
-        if(pBlock->IsEmpty())
+        if(pBlock->m_Metadata.IsEmpty())
        {
            // Already has empty Allocation. We don't want to have two, so delete this one.
            if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
@@ -5502,7 +5572,7 @@ void VmaBlockVector::IncrementallySortBlocks()
    // Bubble sort only until first swap.
    for(size_t i = 1; i < m_Blocks.size(); ++i)
    {
-        if(m_Blocks[i - 1]->m_SumFreeSize > m_Blocks[i]->m_SumFreeSize)
+        if(m_Blocks[i - 1]->m_Metadata.GetSumFreeSize() > m_Blocks[i]->m_Metadata.GetSumFreeSize())
        {
            VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
            return;
@@ -5617,7 +5687,7 @@ void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
    json.BeginArray();
    for(size_t i = 0; i < m_Blocks.size(); ++i)
    {
-        m_Blocks[i]->PrintDetailedMap(json);
+        m_Blocks[i]->m_Metadata.PrintDetailedMap(json);
    }
    json.EndArray();
 
@@ -5717,7 +5787,7 @@ VkResult VmaBlockVector::Defragment(
    for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    {
        VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
-        if(pBlock->IsEmpty())
+        if(pBlock->m_Metadata.IsEmpty())
        {
            if(m_Blocks.size() > m_MinBlockCount)
            {
@@ -5760,7 +5830,7 @@ void VmaBlockVector::MakePoolAllocationsLost(
    {
        VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
        VMA_ASSERT(pBlock);
-        pBlock->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
+        pBlock->m_Metadata.MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
    }
 }
 
@@ -5777,7 +5847,7 @@ void VmaBlockVector::AddStats(VmaStats* pStats)
        VMA_ASSERT(pBlock);
        VMA_HEAVY_ASSERT(pBlock->Validate());
        VmaStatInfo allocationStatInfo;
-        CalcAllocationStatInfo(allocationStatInfo, *pBlock);
+        pBlock->m_Metadata.CalcAllocationStatInfo(allocationStatInfo);
        VmaAddStatInfo(pStats->total, allocationStatInfo);
        VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
        VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
@@ -5904,7 +5974,7 @@ VkResult VmaDefragmentator::DefragmentRound(
        {
            BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
            VmaAllocationRequest dstAllocRequest;
-            if(pDstBlockInfo->m_pBlock->CreateAllocationRequest(
+            if(pDstBlockInfo->m_pBlock->m_Metadata.CreateAllocationRequest(
                m_CurrentFrameIndex,
                m_pBlockVector->GetFrameInUseCount(),
                m_pBlockVector->GetBufferImageGranularity(),
@@ -5945,8 +6015,8 @@ VkResult VmaDefragmentator::DefragmentRound(
                    reinterpret_cast<char*>(pSrcMappedData) + srcOffset,
                    static_cast<size_t>(size));
 
-                pDstBlockInfo->m_pBlock->Alloc(dstAllocRequest, suballocType, size, allocInfo.m_hAllocation);
-                pSrcBlockInfo->m_pBlock->Free(allocInfo.m_hAllocation);
+                pDstBlockInfo->m_pBlock->m_Metadata.Alloc(dstAllocRequest, suballocType, size, allocInfo.m_hAllocation);
+                pSrcBlockInfo->m_pBlock->m_Metadata.Free(allocInfo.m_hAllocation);
 
                allocInfo.m_hAllocation->ChangeBlockAllocation(pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
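
To see the shape of this refactoring in isolation: after the patch, VmaDeviceMemoryBlock keeps only the Vulkan-side state (memory handle, type index, size, mapping), and every bookkeeping question is forwarded to the embedded VmaBlockMetadata. A minimal compilable sketch of that split, with illustrative names and types rather than the library's real declarations:

// Minimal sketch of the block/metadata split (illustrative names, not VMA's).
#include <cassert>
#include <cstdint>
#include <list>

struct Suballocation { uint64_t offset, size; bool free; };

class BlockMetadata {
public:
    void Init(uint64_t size) {
        m_Size = size;
        m_FreeCount = 1;
        m_SumFreeSize = size;
        // A fresh block is a single free suballocation covering everything.
        m_Suballocations.assign(1, Suballocation{0, size, true});
    }
    // Empty means exactly one suballocation, and it is free.
    bool IsEmpty() const { return m_Suballocations.size() == 1 && m_FreeCount == 1; }
    uint64_t GetSumFreeSize() const { return m_SumFreeSize; }
private:
    uint64_t m_Size = 0;
    uint32_t m_FreeCount = 0;
    uint64_t m_SumFreeSize = 0;
    std::list<Suballocation> m_Suballocations;
};

class DeviceMemoryBlock {
public:
    void Init(uint64_t size) { m_Size = size; m_Metadata.Init(size); }
    // Validate() now checks only the Vulkan-side invariants, then delegates.
    bool Validate() const { return m_Size != 0; /* plus handle checks in VMA */ }
    uint64_t m_Size = 0;
    BlockMetadata m_Metadata; // all suballocation bookkeeping lives here
};

int main() {
    DeviceMemoryBlock block;
    block.Init(1u << 20);
    assert(block.m_Metadata.IsEmpty());
    assert(block.m_Metadata.GetSumFreeSize() == (1u << 20));
    return 0;
}

A side effect of keeping the metadata free of VkDeviceMemory is that it can be exercised without a Vulkan device, which is presumably part of the motivation for the split.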
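The patch keeps m_FreeSuballocationsBySize as a vector of list iterators sorted by size, ascending; that is exactly what ValidateFreeSuballocationList asserts, and it makes registering, unregistering, and best-fit lookup all binary searches. A sketch of that invariant using std::vector and std::lower_bound; the threshold constant and all names here are stand-ins, not VMA's:

// Sketch of the sorted free-list invariant behind Register/UnregisterFreeSuballocation.
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <list>
#include <vector>

struct Sub { uint64_t offset, size; bool free; };
using SubList = std::list<Sub>;
using SubIt   = SubList::iterator;

// Stand-in for VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER.
constexpr uint64_t kMinSizeToRegister = 16;

struct BySize {
    bool operator()(SubIt a, SubIt b) const { return a->size < b->size; }
    bool operator()(SubIt a, uint64_t s) const { return a->size < s; }
};

// Keep the vector sorted by size, ascending; too-small ranges are not tracked.
void RegisterFree(std::vector<SubIt>& bySize, SubIt item) {
    assert(item->free && item->size > 0);
    if (item->size < kMinSizeToRegister) return;
    bySize.insert(std::lower_bound(bySize.begin(), bySize.end(), item, BySize{}), item);
}

void UnregisterFree(std::vector<SubIt>& bySize, SubIt item) {
    if (item->size < kMinSizeToRegister) return;
    // Jump to the first entry of equal size, then scan the equal-size run.
    for (auto it = std::lower_bound(bySize.begin(), bySize.end(), item->size, BySize{});
         it != bySize.end() && (*it)->size == item->size; ++it) {
        if (*it == item) { bySize.erase(it); return; }
    }
    assert(false && "free suballocation was not registered");
}

// Best-fit lookup: the first registered range that is big enough.
SubIt* FindBestFit(std::vector<SubIt>& bySize, uint64_t allocSize) {
    auto it = std::lower_bound(bySize.begin(), bySize.end(), allocSize, BySize{});
    return it != bySize.end() ? &*it : nullptr;
}

int main() {
    SubList subs{ {0, 64, true}, {64, 8, true}, {72, 128, true} };
    std::vector<SubIt> bySize;
    for (auto it = subs.begin(); it != subs.end(); ++it) RegisterFree(bySize, it);
    assert(bySize.size() == 2);                       // the 8-byte range is below the threshold
    assert((*FindBestFit(bySize, 100))->size == 128); // smallest range that fits 100 bytes
    return 0;
}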
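FreeSuballocation plus MergeFreeWithNext together guarantee the suballocation list never contains two adjacent free ranges: a freed range is merged with a free successor and/or predecessor, and only the coalesced result is re-registered under its new size. A condensed sketch of that flow, reusing the types and helpers from the sketch above (the real code also clears hAllocation and updates m_FreeCount and m_SumFreeSize, omitted here):

// What FreeSuballocation + MergeFreeWithNext achieve, condensed.
SubIt FreeAndMerge(SubList& subs, std::vector<SubIt>& bySize, SubIt item) {
    item->free = true;

    // Merge with the following range if it is free (cf. MergeFreeWithNext).
    SubIt next = item;
    ++next;
    if (next != subs.end() && next->free) {
        UnregisterFree(bySize, next);   // its size entry is stale once merged
        item->size += next->size;
        subs.erase(next);
    }

    // Merge with the preceding range if it is free.
    if (item != subs.begin()) {
        SubIt prev = item;
        --prev;
        if (prev->free) {
            UnregisterFree(bySize, prev);
            prev->size += item->size;
            subs.erase(item);
            item = prev;
        }
    }

    RegisterFree(bySize, item);         // a single entry for the coalesced range
    return item;                        // as in VMA, return the resulting free range
}

Note the ordering: each neighbor must be unregistered before its size changes, because the unregister lookup is itself a binary search by the old size.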
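The arithmetic in VmaBlockMetadata::Alloc is worth spelling out: carving an aligned request out of one free range can leave a free remainder before it (paddingBegin) and after it (paddingEnd); one free range is consumed and each nonzero padding re-adds one, while m_SumFreeSize drops by exactly allocSize because the paddings stay free. A small standalone example with made-up numbers (the AlignUp helper is illustrative):

#include <cassert>
#include <cstdint>
#include <cstdio>

static uint64_t AlignUp(uint64_t v, uint64_t a) { return (v + a - 1) / a * a; }

int main()
{
    const uint64_t freeOffset = 100, freeSize = 1000; // one free suballocation
    const uint64_t allocSize = 256, allocAlignment = 128;

    const uint64_t offset = AlignUp(freeOffset, allocAlignment);    // 128
    const uint64_t paddingBegin = offset - freeOffset;              // 28
    assert(freeSize >= paddingBegin + allocSize);
    const uint64_t paddingEnd = freeSize - paddingBegin - allocSize; // 716

    // One free range is consumed; each nonzero padding re-adds a free range.
    const int freeCountDelta = -1 + (paddingBegin > 0) + (paddingEnd > 0); // +1 here
    std::printf("offset=%llu paddingBegin=%llu paddingEnd=%llu freeCountDelta=%d\n",
        (unsigned long long)offset, (unsigned long long)paddingBegin,
        (unsigned long long)paddingEnd, freeCountDelta);
    // m_SumFreeSize decreases by exactly allocSize: the paddings remain free.
    return 0;
}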