From 89f6e446355558a5d19cddc66d70fd33ed840cde Mon Sep 17 00:00:00 2001
From: Adam Sawicki
Date: Wed, 9 Aug 2017 13:06:41 +0200
Subject: [PATCH] Fixed vmaCreateBuffer, vmaCreateImage to always return null
 in *pBuffer, *pImage, *pAllocation if not succeeded. Fixed that in the
 documentation as well.

---
 bin/VulkanSample_Release_2015.exe       | Bin 88064 -> 88064 bytes
 docs/html/group__layer3.html            |   9 +++++----
 docs/html/vk__mem__alloc_8h_source.html |   2 +-
 src/vk_mem_alloc.h                      |  25 ++++++++++++++++++------
 4 files changed, 25 insertions(+), 11 deletions(-)

diff --git a/bin/VulkanSample_Release_2015.exe b/bin/VulkanSample_Release_2015.exe
index 9d9aea515dc1aafa978fa338cbfde585eb8c4043..c7bc6daabbf3c788d34d2ff0851dd35ee26dded5 100644
GIT binary patch
[18923-byte binary delta omitted]
diff --git a/docs/html/group__layer3.html b/docs/html/group__layer3.html
index e7aa1f0..9e929f6 100644
--- a/docs/html/group__layer3.html
+++ b/docs/html/group__layer3.html
@@ -132,17 +132,18 @@ Functions
 [out] pBuffer          Buffer that was created.
 [out] pAllocation      Allocation that was created.
-[out] pAllocationInfo  Optional. Information about allocated memory. It can be later fetched using function VmaGetAllocationInfo().
+[out] pAllocationInfo  Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo().

 This function automatically:

-1. Creates buffer/image.
+1. Creates buffer.
 2. Allocates appropriate memory for it.
-3. Binds the buffer/image with the memory.
+3. Binds the buffer with the memory.

-You do not (and should not) pass returned pMemory to vmaFreeMemory. Only calling vmaDestroyBuffer() / vmaDestroyImage() is required for objects created using vmaCreateBuffer() / vmaCreateImage().
+If any of these operations fail, buffer and allocation are not created, returned value is negative error code, *pBuffer and *pAllocation are null.
+
+If the function succeeded, you must destroy both buffer and allocation when you no longer need them using either convenience function vmaDestroyBuffer() or separately, using vkDestroyBuffer() and vmaFreeMemory().

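A minimal usage sketch of this contract (assumes an already initialized VmaAllocator named allocator; buffer size, usage and memory flags are arbitrary examples, not from the patch):

    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufCreateInfo.size = 65536;
    bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;

    VmaMemoryRequirements memReq = {};
    memReq.requiredFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;

    VkBuffer buffer = VK_NULL_HANDLE;
    VmaAllocation allocation = VK_NULL_HANDLE;
    VkResult res = vmaCreateBuffer(
        allocator, &bufCreateInfo, &memReq, &buffer, &allocation, nullptr);
    if(res != VK_SUCCESS)
    {
        // With this fix, *pBuffer and *pAllocation are guaranteed to be null
        // here, so the failure path needs no cleanup.
        return;
    }
    // ... use the buffer ...
    vmaDestroyBuffer(allocator, buffer, allocation); // destroys the buffer and frees its allocation
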
diff --git a/docs/html/vk__mem__alloc_8h_source.html b/docs/html/vk__mem__alloc_8h_source.html
index 733cebd..8378926 100644
--- a/docs/html/vk__mem__alloc_8h_source.html
+++ b/docs/html/vk__mem__alloc_8h_source.html
@@ -62,7 +62,7 @@ $(function() {
vk_mem_alloc.h
1 //
2 // Copyright (c) 2017 Advanced Micro Devices, Inc. All rights reserved.
3 //
4 // Permission is hereby granted, free of charge, to any person obtaining a copy
5 // of this software and associated documentation files (the "Software"), to deal
6 // in the Software without restriction, including without limitation the rights
7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 // copies of the Software, and to permit persons to whom the Software is
9 // furnished to do so, subject to the following conditions:
10 //
11 // The above copyright notice and this permission notice shall be included in
12 // all copies or substantial portions of the Software.
13 //
14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
20 // THE SOFTWARE.
21 //
22 
23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
25 
161 #include <vulkan/vulkan.h>
162 
164 
168 VK_DEFINE_HANDLE(VmaAllocator)
169 
170 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
172  VmaAllocator allocator,
173  uint32_t memoryType,
174  VkDeviceMemory memory,
175  VkDeviceSize size);
177 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
178  VmaAllocator allocator,
179  uint32_t memoryType,
180  VkDeviceMemory memory,
181  VkDeviceSize size);
182 
188 typedef struct VmaDeviceMemoryCallbacks {
194 
196 typedef enum VmaAllocatorFlagBits {
202 
205 typedef VkFlags VmaAllocatorFlags;
206 
209 {
213 
214  VkPhysicalDevice physicalDevice;
216 
217  VkDevice device;
219 
222 
225 
226  const VkAllocationCallbacks* pAllocationCallbacks;
228 
231 
233 VkResult vmaCreateAllocator(
234  const VmaAllocatorCreateInfo* pCreateInfo,
235  VmaAllocator* pAllocator);
236 
239  VmaAllocator allocator);
240 
246  VmaAllocator allocator,
247  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
248 
254  VmaAllocator allocator,
255  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
256 
264  VmaAllocator allocator,
265  uint32_t memoryTypeIndex,
266  VkMemoryPropertyFlags* pFlags);
267 
268 typedef struct VmaStatInfo
269 {
270  uint32_t AllocationCount;
273  VkDeviceSize UsedBytes;
274  VkDeviceSize UnusedBytes;
275  VkDeviceSize SuballocationSizeMin, SuballocationSizeAvg, SuballocationSizeMax;
276  VkDeviceSize UnusedRangeSizeMin, UnusedRangeSizeAvg, UnusedRangeSizeMax;
277 } VmaStatInfo;
278 
280 struct VmaStats
281 {
282  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
283  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
285 };
286 
288 void vmaCalculateStats(
289  VmaAllocator allocator,
290  VmaStats* pStats);
291 
292 #define VMA_STATS_STRING_ENABLED 1
293 
294 #if VMA_STATS_STRING_ENABLED
295 
297 
300  VmaAllocator allocator,
301  char** ppStatsString,
302  VkBool32 detailedMap);
303 
304 void vmaFreeStatsString(
305  VmaAllocator allocator,
306  char* pStatsString);
307 
308 #endif // #if VMA_STATS_STRING_ENABLED
309 
312 
317 typedef enum VmaMemoryUsage
318 {
324 
327 
330 
334 
346 
362 
366 
367 typedef struct VmaMemoryRequirements
368 {
378  VkMemoryPropertyFlags requiredFlags;
383  VkMemoryPropertyFlags preferredFlags;
385  void* pUserData;
387 
402 VkResult vmaFindMemoryTypeIndex(
403  VmaAllocator allocator,
404  uint32_t memoryTypeBits,
405  const VmaMemoryRequirements* pMemoryRequirements,
406  uint32_t* pMemoryTypeIndex);
407 
410 
415 VK_DEFINE_HANDLE(VmaAllocation)
416 
417 
419 typedef struct VmaAllocationInfo {
424  uint32_t memoryType;
431  VkDeviceMemory deviceMemory;
436  VkDeviceSize offset;
441  VkDeviceSize size;
447  void* pMappedData;
452  void* pUserData;
454 
465 VkResult vmaAllocateMemory(
466  VmaAllocator allocator,
467  const VkMemoryRequirements* pVkMemoryRequirements,
468  const VmaMemoryRequirements* pVmaMemoryRequirements,
469  VmaAllocation* pAllocation,
470  VmaAllocationInfo* pAllocationInfo);
471 
479  VmaAllocator allocator,
480  VkBuffer buffer,
481  const VmaMemoryRequirements* pMemoryRequirements,
482  VmaAllocation* pAllocation,
483  VmaAllocationInfo* pAllocationInfo);
484 
487  VmaAllocator allocator,
488  VkImage image,
489  const VmaMemoryRequirements* pMemoryRequirements,
490  VmaAllocation* pAllocation,
491  VmaAllocationInfo* pAllocationInfo);
492 
494 void vmaFreeMemory(
495  VmaAllocator allocator,
496  VmaAllocation allocation);
497 
500  VmaAllocator allocator,
501  VmaAllocation allocation,
502  VmaAllocationInfo* pAllocationInfo);
503 
506  VmaAllocator allocator,
507  VmaAllocation allocation,
508  void* pUserData);
509 
518 VkResult vmaMapMemory(
519  VmaAllocator allocator,
520  VmaAllocation allocation,
521  void** ppData);
522 
523 void vmaUnmapMemory(
524  VmaAllocator allocator,
525  VmaAllocation allocation);
526 
545 void vmaUnmapPersistentlyMappedMemory(VmaAllocator allocator);
546 
554 VkResult vmaMapPersistentlyMappedMemory(VmaAllocator allocator);
555 
557 typedef struct VmaDefragmentationInfo {
562  VkDeviceSize maxBytesToMove;
569 
571 typedef struct VmaDefragmentationStats {
573  VkDeviceSize bytesMoved;
575  VkDeviceSize bytesFreed;
581 
652 VkResult vmaDefragment(
653  VmaAllocator allocator,
654  VmaAllocation* pAllocations,
655  size_t allocationCount,
656  VkBool32* pAllocationsChanged,
657  const VmaDefragmentationInfo *pDefragmentationInfo,
658  VmaDefragmentationStats* pDefragmentationStats);
659 
662 
682 VkResult vmaCreateBuffer(
683  VmaAllocator allocator,
684  const VkBufferCreateInfo* pCreateInfo,
685  const VmaMemoryRequirements* pMemoryRequirements,
686  VkBuffer* pBuffer,
687  VmaAllocation* pAllocation,
688  VmaAllocationInfo* pAllocationInfo);
689 
690 void vmaDestroyBuffer(
691  VmaAllocator allocator,
692  VkBuffer buffer,
693  VmaAllocation allocation);
694 
696 VkResult vmaCreateImage(
697  VmaAllocator allocator,
698  const VkImageCreateInfo* pCreateInfo,
699  const VmaMemoryRequirements* pMemoryRequirements,
700  VkImage* pImage,
701  VmaAllocation* pAllocation,
702  VmaAllocationInfo* pAllocationInfo);
703 
704 void vmaDestroyImage(
705  VmaAllocator allocator,
706  VkImage image,
707  VmaAllocation allocation);
708 
711 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
712 
713 #ifdef VMA_IMPLEMENTATION
714 #undef VMA_IMPLEMENTATION
715 
716 #include <cstdint>
717 #include <cstdlib>
718 
719 /*******************************************************************************
720 CONFIGURATION SECTION
721 
722 Define some of these macros before each #include of this header or change them
723 here if you need other than the default behavior depending on your environment.
724 */
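// For example (a sketch, not from the original header): configuration macros
// are defined before the include, typically in the one .cpp file that also
// defines VMA_IMPLEMENTATION:
//
//   #define VMA_USE_STL_CONTAINERS 1
//   #define VMA_IMPLEMENTATION
//   #include "vk_mem_alloc.h"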
725 
726 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
727 //#define VMA_USE_STL_CONTAINERS 1
728 
729 /* Set this macro to 1 to make the library include and use STL containers:
730 std::pair, std::vector, std::list, std::unordered_map.
731 
732 Set it to 0 or leave it undefined to make the library use its own implementation
733 of the containers.
734 */
735 #if VMA_USE_STL_CONTAINERS
736  #define VMA_USE_STL_VECTOR 1
737  #define VMA_USE_STL_UNORDERED_MAP 1
738  #define VMA_USE_STL_LIST 1
739 #endif
740 
741 #if VMA_USE_STL_VECTOR
742  #include <vector>
743 #endif
744 
745 #if VMA_USE_STL_UNORDERED_MAP
746  #include <unordered_map>
747 #endif
748 
749 #if VMA_USE_STL_LIST
750  #include <list>
751 #endif
752 
753 /*
754 Following headers are used in this CONFIGURATION section only, so feel free to
755 remove them if not needed.
756 */
757 #include <cassert> // for assert
758 #include <algorithm> // for min, max
759 #include <mutex> // for std::mutex
760 
761 #if !defined(_WIN32)
762  #include <malloc.h> // for aligned_alloc()
763 #endif
764 
765 // Normal assert to check for programmer's errors, especially in Debug configuration.
766 #ifndef VMA_ASSERT
767  #ifdef _DEBUG
768  #define VMA_ASSERT(expr) assert(expr)
769  #else
770  #define VMA_ASSERT(expr)
771  #endif
772 #endif
773 
774 // Assert that will be called very often, like inside data structures, e.g. operator[].
775 // Making it non-empty can make the program slow.
776 #ifndef VMA_HEAVY_ASSERT
777  #ifdef _DEBUG
778  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
779  #else
780  #define VMA_HEAVY_ASSERT(expr)
781  #endif
782 #endif
783 
784 #ifndef VMA_NULL
785  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
786  #define VMA_NULL nullptr
787 #endif
788 
789 #ifndef VMA_ALIGN_OF
790  #define VMA_ALIGN_OF(type) (__alignof(type))
791 #endif
792 
793 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
794  #if defined(_WIN32)
795  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
796  #else
797  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
798  #endif
799 #endif
800 
801 #ifndef VMA_SYSTEM_FREE
802  #if defined(_WIN32)
803  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
804  #else
805  #define VMA_SYSTEM_FREE(ptr) free(ptr)
806  #endif
807 #endif
808 
809 #ifndef VMA_MIN
810  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
811 #endif
812 
813 #ifndef VMA_MAX
814  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
815 #endif
816 
817 #ifndef VMA_SWAP
818  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
819 #endif
820 
821 #ifndef VMA_SORT
822  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
823 #endif
824 
825 #ifndef VMA_DEBUG_LOG
826  #define VMA_DEBUG_LOG(format, ...)
827  /*
828  #define VMA_DEBUG_LOG(format, ...) do { \
829  printf(format, __VA_ARGS__); \
830  printf("\n"); \
831  } while(false)
832  */
833 #endif
834 
835 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
836 #if VMA_STATS_STRING_ENABLED
837  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
838  {
839  _ultoa_s(num, outStr, strLen, 10);
840  }
841  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
842  {
843  _ui64toa_s(num, outStr, strLen, 10);
844  }
845 #endif
846 
847 #ifndef VMA_MUTEX
848  class VmaMutex
849  {
850  public:
851  VmaMutex() { }
852  ~VmaMutex() { }
853  void Lock() { m_Mutex.lock(); }
854  void Unlock() { m_Mutex.unlock(); }
855  private:
856  std::mutex m_Mutex;
857  };
858  #define VMA_MUTEX VmaMutex
859 #endif
860 
861 #ifndef VMA_BEST_FIT
862 
874  #define VMA_BEST_FIT (1)
875 #endif
876 
877 #ifndef VMA_DEBUG_ALWAYS_OWN_MEMORY
878 
882  #define VMA_DEBUG_ALWAYS_OWN_MEMORY (0)
883 #endif
884 
885 #ifndef VMA_DEBUG_ALIGNMENT
886 
890  #define VMA_DEBUG_ALIGNMENT (1)
891 #endif
892 
893 #ifndef VMA_DEBUG_MARGIN
894 
898  #define VMA_DEBUG_MARGIN (0)
899 #endif
900 
901 #ifndef VMA_DEBUG_GLOBAL_MUTEX
902 
906  #define VMA_DEBUG_GLOBAL_MUTEX (0)
907 #endif
908 
909 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
910 
914  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
915 #endif
916 
917 #ifndef VMA_SMALL_HEAP_MAX_SIZE
918  #define VMA_SMALL_HEAP_MAX_SIZE (512 * 1024 * 1024)
920 #endif
921 
922 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
923  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256 * 1024 * 1024)
925 #endif
926 
927 #ifndef VMA_DEFAULT_SMALL_HEAP_BLOCK_SIZE
928  #define VMA_DEFAULT_SMALL_HEAP_BLOCK_SIZE (64 * 1024 * 1024)
930 #endif
931 
932 /*******************************************************************************
933 END OF CONFIGURATION
934 */
935 
936 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
937  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
938 
939 // Returns number of bits set to 1 in (v).
940 static inline uint32_t CountBitsSet(uint32_t v)
941 {
942  uint32_t c = v - ((v >> 1) & 0x55555555);
943  c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
944  c = ((c >> 4) + c) & 0x0F0F0F0F;
945  c = ((c >> 8) + c) & 0x00FF00FF;
946  c = ((c >> 16) + c) & 0x0000FFFF;
947  return c;
948 }
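// Worked example (illustrative): each step above is a branch-free SWAR
// ("SIMD within a register") reduction that sums bits in 2-, 4-, 8- and
// 16-bit fields. E.g. CountBitsSet(13) == 3 (13 = 0b1101) and
// CountBitsSet(0xF0F0F0F0) == 16.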
949 
950 // Aligns given value up to the nearest multiple of align value. For example: VmaAlignUp(11, 8) = 16.
951 // Use types like uint32_t, uint64_t as T.
952 template <typename T>
953 static inline T VmaAlignUp(T val, T align)
954 {
955  return (val + align - 1) / align * align;
956 }
957 
958 // Division with mathematical rounding to nearest number.
959 template <typename T>
960 inline T VmaRoundDiv(T x, T y)
961 {
962  return (x + (y / (T)2)) / y;
963 }
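// Example (illustrative): VmaRoundDiv(10, 4) == 3 because (10 + 2) / 4 == 3,
// while plain integer division 10 / 4 would truncate to 2.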
964 
965 #ifndef VMA_SORT
966 
967 template<typename Iterator, typename Compare>
968 Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
969 {
970  Iterator centerValue = end; --centerValue;
971  Iterator insertIndex = beg;
972  for(Iterator i = beg; i < centerValue; ++i)
973  {
974  if(cmp(*i, *centerValue))
975  {
976  if(insertIndex != i)
977  {
978  VMA_SWAP(*i, *insertIndex);
979  }
980  ++insertIndex;
981  }
982  }
983  if(insertIndex != centerValue)
984  {
985  VMA_SWAP(*insertIndex, *centerValue);
986  }
987  return insertIndex;
988 }
989 
990 template<typename Iterator, typename Compare>
991 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
992 {
993  if(beg < end)
994  {
995  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
996  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
997  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
998  }
999 }
1000 
1001 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
1002 
1003 #endif // #ifndef VMA_SORT
1004 
1005 /*
1006 Returns true if two memory blocks occupy overlapping pages.
1007 ResourceA must be at a lower memory offset than ResourceB.
1008 
1009 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
1010 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
1011 */
1012 static inline bool VmaBlocksOnSamePage(
1013  VkDeviceSize resourceAOffset,
1014  VkDeviceSize resourceASize,
1015  VkDeviceSize resourceBOffset,
1016  VkDeviceSize pageSize)
1017 {
1018  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
1019  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
1020  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
1021  VkDeviceSize resourceBStart = resourceBOffset;
1022  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
1023  return resourceAEndPage == resourceBStartPage;
1024 }
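// Worked example (illustrative), with pageSize = 4096: resource A at offset 0
// with size 4000 ends at byte 3999, which lies on page 0 (3999 & ~4095 == 0).
// A resource B starting at offset 4096 starts on page 1, so the function
// returns false; if B started at offset 4000 instead, both would share page 0
// and the function would return true.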
1025 
1026 enum VmaSuballocationType
1027 {
1028  VMA_SUBALLOCATION_TYPE_FREE = 0,
1029  VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
1030  VMA_SUBALLOCATION_TYPE_BUFFER = 2,
1031  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
1032  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
1033  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
1034  VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
1035 };
1036 
1037 /*
1038 Returns true if given suballocation types could conflict and must respect
1039 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
1040 or linear image and another one is optimal image. If type is unknown, behave
1041 conservatively.
1042 */
1043 static inline bool VmaIsBufferImageGranularityConflict(
1044  VmaSuballocationType suballocType1,
1045  VmaSuballocationType suballocType2)
1046 {
1047  if(suballocType1 > suballocType2)
1048  {
1049  VMA_SWAP(suballocType1, suballocType2);
1050  }
1051 
1052  switch(suballocType1)
1053  {
1054  case VMA_SUBALLOCATION_TYPE_FREE:
1055  return false;
1056  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
1057  return true;
1058  case VMA_SUBALLOCATION_TYPE_BUFFER:
1059  return
1060  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
1061  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
1062  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
1063  return
1064  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
1065  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
1066  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
1067  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
1068  return
1069  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
1070  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
1071  return false;
1072  default:
1073  VMA_ASSERT(0);
1074  return true;
1075  }
1076 }
1077 
1078 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
1079 struct VmaMutexLock
1080 {
1081 public:
1082  VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
1083  m_pMutex(useMutex ? &mutex : VMA_NULL)
1084  {
1085  if(m_pMutex)
1086  {
1087  m_pMutex->Lock();
1088  }
1089  }
1090 
1091  ~VmaMutexLock()
1092  {
1093  if(m_pMutex)
1094  {
1095  m_pMutex->Unlock();
1096  }
1097  }
1098 
1099 private:
1100  VMA_MUTEX* m_pMutex;
1101 };
1102 
1103 #if VMA_DEBUG_GLOBAL_MUTEX
1104  static VMA_MUTEX gDebugGlobalMutex;
1105  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex);
1106 #else
1107  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
1108 #endif
1109 
1110 // Minimum size of a free suballocation to register it in the free suballocation collection.
1111 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
1112 
1113 /*
1114 Performs binary search and returns iterator to the first element that is greater
1115 than or equal to (key), according to comparison (cmp).
1116 
1117 Cmp should return true if the first argument is less than the second argument.
1118 
1119 The returned iterator points to the found element, if present in the collection,
1120 or to the place where a new element with value (key) should be inserted.
1121 */
1122 template <typename IterT, typename KeyT, typename CmpT>
1123 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpT cmp)
1124 {
1125  size_t down = 0, up = (end - beg);
1126  while(down < up)
1127  {
1128  const size_t mid = (down + up) / 2;
1129  if(cmp(*(beg+mid), key))
1130  {
1131  down = mid + 1;
1132  }
1133  else
1134  {
1135  up = mid;
1136  }
1137  }
1138  return beg + down;
1139 }
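// Usage sketch (illustrative, not from the original header): finding the
// insertion point in an ascending array of sizes.
//
//   VkDeviceSize sizes[] = { 16, 64, 256 };
//   VkDeviceSize* it = VmaBinaryFindFirstNotLess(
//       sizes, sizes + 3, (VkDeviceSize)64,
//       [](VkDeviceSize a, VkDeviceSize b) { return a < b; });
//   // it points to 64; for key 100 it would point to 256,
//   // and for key 1000 it would equal sizes + 3 (the end).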
1140 
1142 // Memory allocation
1143 
1144 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
1145 {
1146  if((pAllocationCallbacks != VMA_NULL) &&
1147  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
1148  {
1149  return (*pAllocationCallbacks->pfnAllocation)(
1150  pAllocationCallbacks->pUserData,
1151  size,
1152  alignment,
1153  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1154  }
1155  else
1156  {
1157  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
1158  }
1159 }
1160 
1161 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
1162 {
1163  if((pAllocationCallbacks != VMA_NULL) &&
1164  (pAllocationCallbacks->pfnFree != VMA_NULL))
1165  {
1166  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
1167  }
1168  else
1169  {
1170  VMA_SYSTEM_FREE(ptr);
1171  }
1172 }
1173 
1174 template<typename T>
1175 static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
1176 {
1177  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
1178 }
1179 
1180 template<typename T>
1181 static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
1182 {
1183  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
1184 }
1185 
1186 #define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
1187 
1188 #define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
1189 
1190 template<typename T>
1191 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
1192 {
1193  ptr->~T();
1194  VmaFree(pAllocationCallbacks, ptr);
1195 }
1196 
1197 template<typename T>
1198 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
1199 {
1200  if(ptr != VMA_NULL)
1201  {
1202  for(size_t i = count; i--; )
1203  {
1204  ptr[i].~T();
1205  }
1206  VmaFree(pAllocationCallbacks, ptr);
1207  }
1208 }
1209 
1210 // STL-compatible allocator.
1211 template<typename T>
1212 class VmaStlAllocator
1213 {
1214 public:
1215  const VkAllocationCallbacks* const m_pCallbacks;
1216  typedef T value_type;
1217 
1218  VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
1219  template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }
1220 
1221  T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
1222  void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
1223 
1224  template<typename U>
1225  bool operator==(const VmaStlAllocator<U>& rhs) const
1226  {
1227  return m_pCallbacks == rhs.m_pCallbacks;
1228  }
1229  template<typename U>
1230  bool operator!=(const VmaStlAllocator<U>& rhs) const
1231  {
1232  return m_pCallbacks != rhs.m_pCallbacks;
1233  }
1234 
1235  VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
1236 };
1237 
1238 #if VMA_USE_STL_VECTOR
1239 
1240 #define VmaVector std::vector
1241 
1242 template<typename T, typename allocatorT>
1243 static void VectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
1244 {
1245  vec.insert(vec.begin() + index, item);
1246 }
1247 
1248 template<typename T, typename allocatorT>
1249 static void VectorRemove(std::vector<T, allocatorT>& vec, size_t index)
1250 {
1251  vec.erase(vec.begin() + index);
1252 }
1253 
1254 #else // #if VMA_USE_STL_VECTOR
1255 
1256 /* Class with interface compatible with a subset of std::vector.
1257 T must be POD because constructors and destructors are not called and memcpy is
1258 used for these objects. */
1259 template<typename T, typename AllocatorT>
1260 class VmaVector
1261 {
1262 public:
1263  VmaVector(const AllocatorT& allocator) :
1264  m_Allocator(allocator),
1265  m_pArray(VMA_NULL),
1266  m_Count(0),
1267  m_Capacity(0)
1268  {
1269  }
1270 
1271  VmaVector(size_t count, const AllocatorT& allocator) :
1272  m_Allocator(allocator),
1273  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
1274  m_Count(count),
1275  m_Capacity(count)
1276  {
1277  }
1278 
1279  VmaVector(const VmaVector<T, AllocatorT>& src) :
1280  m_Allocator(src.m_Allocator),
1281  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
1282  m_Count(src.m_Count),
1283  m_Capacity(src.m_Count)
1284  {
1285  if(m_Count != 0)
1286  {
1287  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
1288  }
1289  }
1290 
1291  ~VmaVector()
1292  {
1293  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
1294  }
1295 
1296  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
1297  {
1298  if(&rhs != this)
1299  {
1300  Resize(rhs.m_Count);
1301  if(m_Count != 0)
1302  {
1303  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
1304  }
1305  }
1306  return *this;
1307  }
1308 
1309  bool empty() const { return m_Count == 0; }
1310  size_t size() const { return m_Count; }
1311  T* data() { return m_pArray; }
1312  const T* data() const { return m_pArray; }
1313 
1314  T& operator[](size_t index)
1315  {
1316  VMA_HEAVY_ASSERT(index < m_Count);
1317  return m_pArray[index];
1318  }
1319  const T& operator[](size_t index) const
1320  {
1321  VMA_HEAVY_ASSERT(index < m_Count);
1322  return m_pArray[index];
1323  }
1324 
1325  T& front()
1326  {
1327  VMA_HEAVY_ASSERT(m_Count > 0);
1328  return m_pArray[0];
1329  }
1330  const T& front() const
1331  {
1332  VMA_HEAVY_ASSERT(m_Count > 0);
1333  return m_pArray[0];
1334  }
1335  T& back()
1336  {
1337  VMA_HEAVY_ASSERT(m_Count > 0);
1338  return m_pArray[m_Count - 1];
1339  }
1340  const T& back() const
1341  {
1342  VMA_HEAVY_ASSERT(m_Count > 0);
1343  return m_pArray[m_Count - 1];
1344  }
1345 
1346  void reserve(size_t newCapacity, bool freeMemory = false)
1347  {
1348  newCapacity = VMA_MAX(newCapacity, m_Count);
1349 
1350  if((newCapacity < m_Capacity) && !freeMemory)
1351  {
1352  newCapacity = m_Capacity;
1353  }
1354 
1355  if(newCapacity != m_Capacity)
1356  {
1357  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
1358  if(m_Count != 0)
1359  {
1360  memcpy(newArray, m_pArray, m_Count * sizeof(T));
1361  }
1362  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
1363  m_Capacity = newCapacity;
1364  m_pArray = newArray;
1365  }
1366  }
1367 
1368  void resize(size_t newCount, bool freeMemory = false)
1369  {
1370  size_t newCapacity = m_Capacity;
1371  if(newCount > m_Capacity)
1372  {
1373  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
1374  }
1375  else if(freeMemory)
1376  {
1377  newCapacity = newCount;
1378  }
1379 
1380  if(newCapacity != m_Capacity)
1381  {
1382  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
1383  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
1384  if(elementsToCopy != 0)
1385  {
1386  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
1387  }
1388  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
1389  m_Capacity = newCapacity;
1390  m_pArray = newArray;
1391  }
1392 
1393  m_Count = newCount;
1394  }
1395 
1396  void clear(bool freeMemory = false)
1397  {
1398  resize(0, freeMemory);
1399  }
1400 
1401  void insert(size_t index, const T& src)
1402  {
1403  VMA_HEAVY_ASSERT(index <= m_Count);
1404  const size_t oldCount = size();
1405  resize(oldCount + 1);
1406  if(index < oldCount)
1407  {
1408  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
1409  }
1410  m_pArray[index] = src;
1411  }
1412 
1413  void remove(size_t index)
1414  {
1415  VMA_HEAVY_ASSERT(index < m_Count);
1416  const size_t oldCount = size();
1417  if(index < oldCount - 1)
1418  {
1419  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
1420  }
1421  resize(oldCount - 1);
1422  }
1423 
1424  void push_back(const T& src)
1425  {
1426  const size_t newIndex = size();
1427  resize(newIndex + 1);
1428  m_pArray[newIndex] = src;
1429  }
1430 
1431  void pop_back()
1432  {
1433  VMA_HEAVY_ASSERT(m_Count > 0);
1434  resize(size() - 1);
1435  }
1436 
1437  void push_front(const T& src)
1438  {
1439  insert(0, src);
1440  }
1441 
1442  void pop_front()
1443  {
1444  VMA_HEAVY_ASSERT(m_Count > 0);
1445  remove(0);
1446  }
1447 
1448  typedef T* iterator;
1449 
1450  iterator begin() { return m_pArray; }
1451  iterator end() { return m_pArray + m_Count; }
1452 
1453 private:
1454  AllocatorT m_Allocator;
1455  T* m_pArray;
1456  size_t m_Count;
1457  size_t m_Capacity;
1458 };
1459 
1460 template<typename T, typename allocatorT>
1461 static void VectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
1462 {
1463  vec.insert(index, item);
1464 }
1465 
1466 template<typename T, typename allocatorT>
1467 static void VectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
1468 {
1469  vec.remove(index);
1470 }
1471 
1472 #endif // #if VMA_USE_STL_VECTOR
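// Usage sketch (illustrative): VmaVector is typically instantiated with
// VmaStlAllocator, so that all of its heap traffic goes through the
// user-provided VkAllocationCallbacks:
//
//   VmaVector< uint32_t, VmaStlAllocator<uint32_t> > v(
//       VmaStlAllocator<uint32_t>(pAllocationCallbacks));
//   v.push_back(42u);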
1473 
1475 // class VmaPoolAllocator
1476 
1477 /*
1478 Allocator for objects of type T using a list of arrays (pools) to speed up
1479 allocation. The number of elements that can be allocated is not bounded, because
1480 the allocator can create multiple blocks.
1481 */
1482 template<typename T>
1483 class VmaPoolAllocator
1484 {
1485 public:
1486  VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
1487  ~VmaPoolAllocator();
1488  void Clear();
1489  T* Alloc();
1490  void Free(T* ptr);
1491 
1492 private:
1493  union Item
1494  {
1495  uint32_t NextFreeIndex;
1496  T Value;
1497  };
1498 
1499  struct ItemBlock
1500  {
1501  Item* pItems;
1502  uint32_t FirstFreeIndex;
1503  };
1504 
1505  const VkAllocationCallbacks* m_pAllocationCallbacks;
1506  size_t m_ItemsPerBlock;
1507  VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;
1508 
1509  ItemBlock& CreateNewBlock();
1510 };
1511 
1512 template<typename T>
1513 VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
1514  m_pAllocationCallbacks(pAllocationCallbacks),
1515  m_ItemsPerBlock(itemsPerBlock),
1516  m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
1517 {
1518  VMA_ASSERT(itemsPerBlock > 0);
1519 }
1520 
1521 template<typename T>
1522 VmaPoolAllocator<T>::~VmaPoolAllocator()
1523 {
1524  Clear();
1525 }
1526 
1527 template<typename T>
1528 void VmaPoolAllocator<T>::Clear()
1529 {
1530  for(size_t i = m_ItemBlocks.size(); i--; )
1531  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
1532  m_ItemBlocks.clear();
1533 }
1534 
1535 template<typename T>
1536 T* VmaPoolAllocator<T>::Alloc()
1537 {
1538  for(size_t i = m_ItemBlocks.size(); i--; )
1539  {
1540  ItemBlock& block = m_ItemBlocks[i];
1541  // This block has some free items: Use first one.
1542  if(block.FirstFreeIndex != UINT32_MAX)
1543  {
1544  Item* const pItem = &block.pItems[block.FirstFreeIndex];
1545  block.FirstFreeIndex = pItem->NextFreeIndex;
1546  return &pItem->Value;
1547  }
1548  }
1549 
1550  // No block has a free item: Create a new block and use it.
1551  ItemBlock& newBlock = CreateNewBlock();
1552  Item* const pItem = &newBlock.pItems[0];
1553  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
1554  return &pItem->Value;
1555 }
1556 
1557 template<typename T>
1558 void VmaPoolAllocator<T>::Free(T* ptr)
1559 {
1560  // Search all memory blocks to find ptr.
1561  for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
1562  {
1563  ItemBlock& block = m_ItemBlocks[i];
1564 
1565  // Casting to union.
1566  Item* pItemPtr;
1567  memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
1568 
1569  // Check if pItemPtr is in address range of this block.
1570  if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
1571  {
1572  const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
1573  pItemPtr->NextFreeIndex = block.FirstFreeIndex;
1574  block.FirstFreeIndex = index;
1575  return;
1576  }
1577  }
1578  VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
1579 }
1580 
1581 template<typename T>
1582 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
1583 {
1584  ItemBlock newBlock = {
1585  vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };
1586 
1587  m_ItemBlocks.push_back(newBlock);
1588 
1589  // Set up a singly-linked list of all free items in this block.
1590  for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
1591  newBlock.pItems[i].NextFreeIndex = i + 1;
1592  newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
1593  return m_ItemBlocks.back();
1594 }
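// Usage sketch (illustrative): the pool hands out items from an intrusive
// free list kept inside fixed-size blocks, so Alloc()/Free() usually avoid
// a heap allocation per item:
//
//   VmaPoolAllocator<uint32_t> pool(pAllocationCallbacks, 128);
//   uint32_t* p = pool.Alloc(); // pops the first free slot of some block
//   *p = 7;
//   pool.Free(p);               // pushes the slot back onto its block's free list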
1595 
1597 // class VmaRawList, VmaList
1598 
1599 #if VMA_USE_STL_LIST
1600 
1601 #define VmaList std::list
1602 
1603 #else // #if VMA_USE_STL_LIST
1604 
1605 template<typename T>
1606 struct VmaListItem
1607 {
1608  VmaListItem* pPrev;
1609  VmaListItem* pNext;
1610  T Value;
1611 };
1612 
1613 // Doubly linked list.
1614 template<typename T>
1615 class VmaRawList
1616 {
1617 public:
1618  typedef VmaListItem<T> ItemType;
1619 
1620  VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
1621  ~VmaRawList();
1622  void Clear();
1623 
1624  size_t GetCount() const { return m_Count; }
1625  bool IsEmpty() const { return m_Count == 0; }
1626 
1627  ItemType* Front() { return m_pFront; }
1628  const ItemType* Front() const { return m_pFront; }
1629  ItemType* Back() { return m_pBack; }
1630  const ItemType* Back() const { return m_pBack; }
1631 
1632  ItemType* PushBack();
1633  ItemType* PushFront();
1634  ItemType* PushBack(const T& value);
1635  ItemType* PushFront(const T& value);
1636  void PopBack();
1637  void PopFront();
1638 
1639  // Item can be null - it means PushBack.
1640  ItemType* InsertBefore(ItemType* pItem);
1641  // Item can be null - it means PushFront.
1642  ItemType* InsertAfter(ItemType* pItem);
1643 
1644  ItemType* InsertBefore(ItemType* pItem, const T& value);
1645  ItemType* InsertAfter(ItemType* pItem, const T& value);
1646 
1647  void Remove(ItemType* pItem);
1648 
1649 private:
1650  const VkAllocationCallbacks* const m_pAllocationCallbacks;
1651  VmaPoolAllocator<ItemType> m_ItemAllocator;
1652  ItemType* m_pFront;
1653  ItemType* m_pBack;
1654  size_t m_Count;
1655 
1656  // Declared but not defined, to block the copy constructor and assignment operator.
1657  VmaRawList(const VmaRawList<T>& src);
1658  VmaRawList<T>& operator=(const VmaRawList<T>& rhs);
1659 };
1660 
1661 template<typename T>
1662 VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
1663  m_pAllocationCallbacks(pAllocationCallbacks),
1664  m_ItemAllocator(pAllocationCallbacks, 128),
1665  m_pFront(VMA_NULL),
1666  m_pBack(VMA_NULL),
1667  m_Count(0)
1668 {
1669 }
1670 
1671 template<typename T>
1672 VmaRawList<T>::~VmaRawList()
1673 {
1674  // Intentionally not calling Clear, because that would spend unnecessary
1675  // computation returning all items to m_ItemAllocator as free.
1676 }
1677 
1678 template<typename T>
1679 void VmaRawList<T>::Clear()
1680 {
1681  if(IsEmpty() == false)
1682  {
1683  ItemType* pItem = m_pBack;
1684  while(pItem != VMA_NULL)
1685  {
1686  ItemType* const pPrevItem = pItem->pPrev;
1687  m_ItemAllocator.Free(pItem);
1688  pItem = pPrevItem;
1689  }
1690  m_pFront = VMA_NULL;
1691  m_pBack = VMA_NULL;
1692  m_Count = 0;
1693  }
1694 }
1695 
1696 template<typename T>
1697 VmaListItem<T>* VmaRawList<T>::PushBack()
1698 {
1699  ItemType* const pNewItem = m_ItemAllocator.Alloc();
1700  pNewItem->pNext = VMA_NULL;
1701  if(IsEmpty())
1702  {
1703  pNewItem->pPrev = VMA_NULL;
1704  m_pFront = pNewItem;
1705  m_pBack = pNewItem;
1706  m_Count = 1;
1707  }
1708  else
1709  {
1710  pNewItem->pPrev = m_pBack;
1711  m_pBack->pNext = pNewItem;
1712  m_pBack = pNewItem;
1713  ++m_Count;
1714  }
1715  return pNewItem;
1716 }
1717 
1718 template<typename T>
1719 VmaListItem<T>* VmaRawList<T>::PushFront()
1720 {
1721  ItemType* const pNewItem = m_ItemAllocator.Alloc();
1722  pNewItem->pPrev = VMA_NULL;
1723  if(IsEmpty())
1724  {
1725  pNewItem->pNext = VMA_NULL;
1726  m_pFront = pNewItem;
1727  m_pBack = pNewItem;
1728  m_Count = 1;
1729  }
1730  else
1731  {
1732  pNewItem->pNext = m_pFront;
1733  m_pFront->pPrev = pNewItem;
1734  m_pFront = pNewItem;
1735  ++m_Count;
1736  }
1737  return pNewItem;
1738 }
1739 
1740 template<typename T>
1741 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
1742 {
1743  ItemType* const pNewItem = PushBack();
1744  pNewItem->Value = value;
1745  return pNewItem;
1746 }
1747 
1748 template<typename T>
1749 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
1750 {
1751  ItemType* const pNewItem = PushFront();
1752  pNewItem->Value = value;
1753  return pNewItem;
1754 }
1755 
1756 template<typename T>
1757 void VmaRawList<T>::PopBack()
1758 {
1759  VMA_HEAVY_ASSERT(m_Count > 0);
1760  ItemType* const pBackItem = m_pBack;
1761  ItemType* const pPrevItem = pBackItem->pPrev;
1762  if(pPrevItem != VMA_NULL)
1763  {
1764  pPrevItem->pNext = VMA_NULL;
1765  }
1766  m_pBack = pPrevItem;
1767  m_ItemAllocator.Free(pBackItem);
1768  --m_Count;
1769 }
1770 
1771 template<typename T>
1772 void VmaRawList<T>::PopFront()
1773 {
1774  VMA_HEAVY_ASSERT(m_Count > 0);
1775  ItemType* const pFrontItem = m_pFront;
1776  ItemType* const pNextItem = pFrontItem->pNext;
1777  if(pNextItem != VMA_NULL)
1778  {
1779  pNextItem->pPrev = VMA_NULL;
1780  }
1781  m_pFront = pNextItem;
1782  m_ItemAllocator.Free(pFrontItem);
1783  --m_Count;
1784 }
1785 
1786 template<typename T>
1787 void VmaRawList<T>::Remove(ItemType* pItem)
1788 {
1789  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
1790  VMA_HEAVY_ASSERT(m_Count > 0);
1791 
1792  if(pItem->pPrev != VMA_NULL)
1793  {
1794  pItem->pPrev->pNext = pItem->pNext;
1795  }
1796  else
1797  {
1798  VMA_HEAVY_ASSERT(m_pFront == pItem);
1799  m_pFront = pItem->pNext;
1800  }
1801 
1802  if(pItem->pNext != VMA_NULL)
1803  {
1804  pItem->pNext->pPrev = pItem->pPrev;
1805  }
1806  else
1807  {
1808  VMA_HEAVY_ASSERT(m_pBack == pItem);
1809  m_pBack = pItem->pPrev;
1810  }
1811 
1812  m_ItemAllocator.Free(pItem);
1813  --m_Count;
1814 }
1815 
1816 template<typename T>
1817 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
1818 {
1819  if(pItem != VMA_NULL)
1820  {
1821  ItemType* const prevItem = pItem->pPrev;
1822  ItemType* const newItem = m_ItemAllocator.Alloc();
1823  newItem->pPrev = prevItem;
1824  newItem->pNext = pItem;
1825  pItem->pPrev = newItem;
1826  if(prevItem != VMA_NULL)
1827  {
1828  prevItem->pNext = newItem;
1829  }
1830  else
1831  {
1832  VMA_HEAVY_ASSERT(m_pFront == pItem);
1833  m_pFront = newItem;
1834  }
1835  ++m_Count;
1836  return newItem;
1837  }
1838  else
1839  return PushBack();
1840 }
1841 
1842 template<typename T>
1843 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
1844 {
1845  if(pItem != VMA_NULL)
1846  {
1847  ItemType* const nextItem = pItem->pNext;
1848  ItemType* const newItem = m_ItemAllocator.Alloc();
1849  newItem->pNext = nextItem;
1850  newItem->pPrev = pItem;
1851  pItem->pNext = newItem;
1852  if(nextItem != VMA_NULL)
1853  {
1854  nextItem->pPrev = newItem;
1855  }
1856  else
1857  {
1858  VMA_HEAVY_ASSERT(m_pBack == pItem);
1859  m_pBack = newItem;
1860  }
1861  ++m_Count;
1862  return newItem;
1863  }
1864  else
1865  return PushFront();
1866 }
1867 
1868 template<typename T>
1869 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
1870 {
1871  ItemType* const newItem = InsertBefore(pItem);
1872  newItem->Value = value;
1873  return newItem;
1874 }
1875 
1876 template<typename T>
1877 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
1878 {
1879  ItemType* const newItem = InsertAfter(pItem);
1880  newItem->Value = value;
1881  return newItem;
1882 }
1883 
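// Illustrative sketch, not part of the library: typical VmaRawList usage, in
// the style of the member functions above. pAllocationCallbacks is assumed to
// be a valid const VkAllocationCallbacks* (VmaList below passes
// allocator.m_pCallbacks to this same constructor).
/*
VmaRawList<int> list(pAllocationCallbacks);
VmaListItem<int>* const pFirst = list.PushBack(1);   // List: 1
list.PushFront(0);                                   // List: 0, 1
list.PushBack(2);                                    // List: 0, 1, 2
for(VmaListItem<int>* pItem = list.Front(); pItem != VMA_NULL; pItem = pItem->pNext)
{
    // Visit pItem->Value here.
}
list.Remove(pFirst);                                 // List: 0, 2
*/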
1884 template<typename T, typename AllocatorT>
1885 class VmaList
1886 {
1887 public:
1888  class iterator
1889  {
1890  public:
1891  iterator() :
1892  m_pList(VMA_NULL),
1893  m_pItem(VMA_NULL)
1894  {
1895  }
1896 
1897  T& operator*() const
1898  {
1899  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
1900  return m_pItem->Value;
1901  }
1902  T* operator->() const
1903  {
1904  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
1905  return &m_pItem->Value;
1906  }
1907 
1908  iterator& operator++()
1909  {
1910  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
1911  m_pItem = m_pItem->pNext;
1912  return *this;
1913  }
1914  iterator& operator--()
1915  {
1916  if(m_pItem != VMA_NULL)
1917  {
1918  m_pItem = m_pItem->pPrev;
1919  }
1920  else
1921  {
1922  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
1923  m_pItem = m_pList->Back();
1924  }
1925  return *this;
1926  }
1927 
1928  iterator operator++(int)
1929  {
1930  iterator result = *this;
1931  ++*this;
1932  return result;
1933  }
1934  iterator operator--(int)
1935  {
1936  iterator result = *this;
1937  --*this;
1938  return result;
1939  }
1940 
1941  bool operator==(const iterator& rhs) const
1942  {
1943  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
1944  return m_pItem == rhs.m_pItem;
1945  }
1946  bool operator!=(const iterator& rhs) const
1947  {
1948  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
1949  return m_pItem != rhs.m_pItem;
1950  }
1951 
1952  private:
1953  VmaRawList<T>* m_pList;
1954  VmaListItem<T>* m_pItem;
1955 
1956  iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
1957  m_pList(pList),
1958  m_pItem(pItem)
1959  {
1960  }
1961 
1962  friend class VmaList<T, AllocatorT>;
1963  friend class VmaList<T, AllocatorT>::const_iterator;
1964  };
1965 
1966  class const_iterator
1967  {
1968  public:
1969  const_iterator() :
1970  m_pList(VMA_NULL),
1971  m_pItem(VMA_NULL)
1972  {
1973  }
1974 
1975  const_iterator(const iterator& src) :
1976  m_pList(src.m_pList),
1977  m_pItem(src.m_pItem)
1978  {
1979  }
1980 
1981  const T& operator*() const
1982  {
1983  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
1984  return m_pItem->Value;
1985  }
1986  const T* operator->() const
1987  {
1988  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
1989  return &m_pItem->Value;
1990  }
1991 
1992  const_iterator& operator++()
1993  {
1994  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
1995  m_pItem = m_pItem->pNext;
1996  return *this;
1997  }
1998  const_iterator& operator--()
1999  {
2000  if(m_pItem != VMA_NULL)
2001  {
2002  m_pItem = m_pItem->pPrev;
2003  }
2004  else
2005  {
2006  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
2007  m_pItem = m_pList->Back();
2008  }
2009  return *this;
2010  }
2011 
2012  const_iterator operator++(int)
2013  {
2014  const_iterator result = *this;
2015  ++*this;
2016  return result;
2017  }
2018  const_iterator operator--(int)
2019  {
2020  const_iterator result = *this;
2021  --*this;
2022  return result;
2023  }
2024 
2025  bool operator==(const const_iterator& rhs) const
2026  {
2027  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
2028  return m_pItem == rhs.m_pItem;
2029  }
2030  bool operator!=(const const_iterator& rhs) const
2031  {
2032  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
2033  return m_pItem != rhs.m_pItem;
2034  }
2035 
2036  private:
2037  const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
2038  m_pList(pList),
2039  m_pItem(pItem)
2040  {
2041  }
2042 
2043  const VmaRawList<T>* m_pList;
2044  const VmaListItem<T>* m_pItem;
2045 
2046  friend class VmaList<T, AllocatorT>;
2047  };
2048 
2049  VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }
2050 
2051  bool empty() const { return m_RawList.IsEmpty(); }
2052  size_t size() const { return m_RawList.GetCount(); }
2053 
2054  iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
2055  iterator end() { return iterator(&m_RawList, VMA_NULL); }
2056 
2057  const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
2058  const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
2059 
2060  void clear() { m_RawList.Clear(); }
2061  void push_back(const T& value) { m_RawList.PushBack(value); }
2062  void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
2063  iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
2064 
2065 private:
2066  VmaRawList<T> m_RawList;
2067 };
2068 
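// Illustrative sketch, not part of the library: VmaList usage through its
// STL-like interface, mirroring how VmaSuballocationList is manipulated later
// in this file. pAllocationCallbacks is assumed to be valid.
/*
typedef VmaList< int, VmaStlAllocator<int> > IntList;
IntList list(VmaStlAllocator<int>(pAllocationCallbacks));
list.push_back(10);
list.push_back(30);
IntList::iterator it = list.begin();
++it;                                  // Points at 30.
it = list.insert(it, 20);              // List: 10, 20, 30; it points at 20.
list.erase(it);                        // List: 10, 30.
*/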
2069 #endif // #if VMA_USE_STL_LIST
2070 
2071 ////////////////////////////////////////////////////////////////////////////////
2072 // class VmaMap
2073 
2074 #if VMA_USE_STL_UNORDERED_MAP
2075 
2076 #define VmaPair std::pair
2077 
2078 #define VMA_MAP_TYPE(KeyT, ValueT) \
2079  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
2080 
2081 #else // #if VMA_USE_STL_UNORDERED_MAP
2082 
2083 template<typename T1, typename T2>
2084 struct VmaPair
2085 {
2086  T1 first;
2087  T2 second;
2088 
2089  VmaPair() : first(), second() { }
2090  VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
2091 };
2092 
2093 /* Class compatible with a subset of the interface of std::unordered_map.
2094 KeyT, ValueT must be POD because they will be stored in VmaVector.
2095 */
2096 template<typename KeyT, typename ValueT>
2097 class VmaMap
2098 {
2099 public:
2100  typedef VmaPair<KeyT, ValueT> PairType;
2101  typedef PairType* iterator;
2102 
2103  VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }
2104 
2105  iterator begin() { return m_Vector.begin(); }
2106  iterator end() { return m_Vector.end(); }
2107 
2108  void insert(const PairType& pair);
2109  iterator find(const KeyT& key);
2110  void erase(iterator it);
2111 
2112 private:
2113  VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
2114 };
2115 
2116 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
2117 
2118 template<typename FirstT, typename SecondT>
2119 struct VmaPairFirstLess
2120 {
2121  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
2122  {
2123  return lhs.first < rhs.first;
2124  }
2125  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
2126  {
2127  return lhs.first < rhsFirst;
2128  }
2129 };
2130 
2131 template<typename KeyT, typename ValueT>
2132 void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
2133 {
2134  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
2135  m_Vector.data(),
2136  m_Vector.data() + m_Vector.size(),
2137  pair,
2138  VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
2139  VectorInsert(m_Vector, indexToInsert, pair);
2140 }
2141 
2142 template<typename KeyT, typename ValueT>
2143 VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
2144 {
2145  PairType* it = VmaBinaryFindFirstNotLess(
2146  m_Vector.data(),
2147  m_Vector.data() + m_Vector.size(),
2148  key,
2149  VmaPairFirstLess<KeyT, ValueT>());
2150  if((it != m_Vector.end()) && (it->first == key))
2151  {
2152  return it;
2153  }
2154  else
2155  {
2156  return m_Vector.end();
2157  }
2158 }
2159 
2160 template<typename KeyT, typename ValueT>
2161 void VmaMap<KeyT, ValueT>::erase(iterator it)
2162 {
2163  VectorRemove(m_Vector, it - m_Vector.begin());
2164 }
2165 
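// Illustrative sketch, not part of the library: VmaMap keeps its pairs sorted
// by key inside a VmaVector, so insert() is a binary search plus a vector
// insert and find() is O(log n). pAllocationCallbacks is assumed to be valid.
/*
typedef VmaMap<uint32_t, uint32_t> CountMap;
CountMap map(VmaStlAllocator< VmaPair<uint32_t, uint32_t> >(pAllocationCallbacks));
map.insert(VmaPair<uint32_t, uint32_t>(7, 1));
CountMap::iterator it = map.find(7);
if(it != map.end())
{
    ++it->second;
    map.erase(it);
}
*/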
2166 #endif // #if VMA_USE_STL_UNORDERED_MAP
2167 
2168 ////////////////////////////////////////////////////////////////////////////////
2169 
2170 class VmaBlock;
2171 
2172 enum VMA_BLOCK_VECTOR_TYPE
2173 {
2174  VMA_BLOCK_VECTOR_TYPE_UNMAPPED,
2175  VMA_BLOCK_VECTOR_TYPE_MAPPED,
2176  VMA_BLOCK_VECTOR_TYPE_COUNT
2177 };
2178 
2179 static VMA_BLOCK_VECTOR_TYPE VmaMemoryRequirementFlagsToBlockVectorType(VmaMemoryRequirementFlags flags)
2180 {
2181  return (flags & VMA_MEMORY_REQUIREMENT_PERSISTENT_MAP_BIT) != 0 ?
2182  VMA_BLOCK_VECTOR_TYPE_MAPPED :
2183  VMA_BLOCK_VECTOR_TYPE_UNMAPPED;
2184 }
2185 
2186 struct VmaAllocation_T
2187 {
2188 public:
2189  enum ALLOCATION_TYPE
2190  {
2191  ALLOCATION_TYPE_NONE,
2192  ALLOCATION_TYPE_BLOCK,
2193  ALLOCATION_TYPE_OWN,
2194  };
2195 
2196  VmaAllocation_T()
2197  {
2198  memset(this, 0, sizeof(VmaAllocation_T));
2199  }
2200 
2201  void InitBlockAllocation(
2202  VmaBlock* block,
2203  VkDeviceSize offset,
2204  VkDeviceSize alignment,
2205  VkDeviceSize size,
2206  VmaSuballocationType suballocationType,
2207  void* pUserData)
2208  {
2209  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
2210  VMA_ASSERT(block != VMA_NULL);
2211  m_Type = ALLOCATION_TYPE_BLOCK;
2212  m_Alignment = alignment;
2213  m_Size = size;
2214  m_pUserData = pUserData;
2215  m_SuballocationType = suballocationType;
2216  m_BlockAllocation.m_Block = block;
2217  m_BlockAllocation.m_Offset = offset;
2218  }
2219 
2220  void ChangeBlockAllocation(
2221  VmaBlock* block,
2222  VkDeviceSize offset)
2223  {
2224  VMA_ASSERT(block != VMA_NULL);
2225  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
2226  m_BlockAllocation.m_Block = block;
2227  m_BlockAllocation.m_Offset = offset;
2228  }
2229 
2230  void InitOwnAllocation(
2231  uint32_t memoryTypeIndex,
2232  VkDeviceMemory hMemory,
2233  VmaSuballocationType suballocationType,
2234  bool persistentMap,
2235  void* pMappedData,
2236  VkDeviceSize size,
2237  void* pUserData)
2238  {
2239  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
2240  VMA_ASSERT(hMemory != VK_NULL_HANDLE);
2241  m_Type = ALLOCATION_TYPE_OWN;
2242  m_Alignment = 0;
2243  m_Size = size;
2244  m_pUserData = pUserData;
2245  m_SuballocationType = suballocationType;
2246  m_OwnAllocation.m_MemoryTypeIndex = memoryTypeIndex;
2247  m_OwnAllocation.m_hMemory = hMemory;
2248  m_OwnAllocation.m_PersistentMap = persistentMap;
2249  m_OwnAllocation.m_pMappedData = pMappedData;
2250  }
2251 
2252  ALLOCATION_TYPE GetType() const { return m_Type; }
2253  VkDeviceSize GetAlignment() const { return m_Alignment; }
2254  VkDeviceSize GetSize() const { return m_Size; }
2255  void* GetUserData() const { return m_pUserData; }
2256  void SetUserData(void* pUserData) { m_pUserData = pUserData; }
2257  VmaSuballocationType GetSuballocationType() const { return m_SuballocationType; }
2258 
2259  VmaBlock* GetBlock() const
2260  {
2261  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
2262  return m_BlockAllocation.m_Block;
2263  }
2264  VkDeviceSize GetOffset() const
2265  {
2266  return (m_Type == ALLOCATION_TYPE_BLOCK) ? m_BlockAllocation.m_Offset : 0;
2267  }
2268  VkDeviceMemory GetMemory() const;
2269  uint32_t GetMemoryTypeIndex() const;
2270  VMA_BLOCK_VECTOR_TYPE GetBlockVectorType() const;
2271  void* GetMappedData() const;
2272 
2273  VkResult OwnAllocMapPersistentlyMappedMemory(VkDevice hDevice)
2274  {
2275  VMA_ASSERT(m_Type == ALLOCATION_TYPE_OWN);
2276  if(m_OwnAllocation.m_PersistentMap)
2277  {
2278  return vkMapMemory(hDevice, m_OwnAllocation.m_hMemory, 0, VK_WHOLE_SIZE, 0, &m_OwnAllocation.m_pMappedData);
2279  }
2280  return VK_SUCCESS;
2281  }
2282  void OwnAllocUnmapPersistentlyMappedMemory(VkDevice hDevice)
2283  {
2284  VMA_ASSERT(m_Type == ALLOCATION_TYPE_OWN);
2285  if(m_OwnAllocation.m_pMappedData)
2286  {
2287  VMA_ASSERT(m_OwnAllocation.m_PersistentMap);
2288  vkUnmapMemory(hDevice, m_OwnAllocation.m_hMemory);
2289  m_OwnAllocation.m_pMappedData = VMA_NULL;
2290  }
2291  }
2292 
2293 private:
2294  VkDeviceSize m_Alignment;
2295  VkDeviceSize m_Size;
2296  void* m_pUserData;
2297  ALLOCATION_TYPE m_Type;
2298  VmaSuballocationType m_SuballocationType;
2299 
2300  union
2301  {
2302  // Allocation out of VmaBlock.
2303  struct BlockAllocation
2304  {
2305  VmaBlock* m_Block;
2306  VkDeviceSize m_Offset;
2307  } m_BlockAllocation;
2308 
2309  // Allocation for an object that has its own private VkDeviceMemory.
2310  struct OwnAllocation
2311  {
2312  uint32_t m_MemoryTypeIndex;
2313  VkDeviceMemory m_hMemory;
2314  bool m_PersistentMap;
2315  void* m_pMappedData;
2316  } m_OwnAllocation;
2317  };
2318 };
2319 
2320 /*
2321 Represents a region of a VmaBlock that is either free or assigned and
2322 returned to the user as an allocated memory block.
2323 */
2324 struct VmaSuballocation
2325 {
2326  VkDeviceSize offset;
2327  VkDeviceSize size;
2328  VmaSuballocationType type;
2329 };
2330 
2331 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
2332 
2333 // Parameters of an allocation.
2334 struct VmaAllocationRequest
2335 {
2336  VmaSuballocationList::iterator freeSuballocationItem;
2337  VkDeviceSize offset;
2338 };
2339 
2340 /* Single block of memory - a VkDeviceMemory together with the data about its
2341 regions, assigned or free. */
2342 class VmaBlock
2343 {
2344 public:
2345  uint32_t m_MemoryTypeIndex;
2346  VMA_BLOCK_VECTOR_TYPE m_BlockVectorType;
2347  VkDeviceMemory m_hMemory;
2348  VkDeviceSize m_Size;
2349  bool m_PersistentMap;
2350  void* m_pMappedData;
2351  uint32_t m_FreeCount;
2352  VkDeviceSize m_SumFreeSize;
2353  VmaSuballocationList m_Suballocations;
2354  // Suballocations that are free and have size greater than certain threshold.
2355  // Sorted by size, ascending.
2356  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
2357 
2358  VmaBlock(VmaAllocator hAllocator);
2359 
2360  ~VmaBlock()
2361  {
2362  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
2363  }
2364 
2365  // Always call after construction.
2366  void Init(
2367  uint32_t newMemoryTypeIndex,
2368  VMA_BLOCK_VECTOR_TYPE newBlockVectorType,
2369  VkDeviceMemory newMemory,
2370  VkDeviceSize newSize,
2371  bool persistentMap,
2372  void* pMappedData);
2373  // Always call before destruction.
2374  void Destroy(VmaAllocator allocator);
2375 
2376  // Validates all data structures inside this object. If not valid, returns false.
2377  bool Validate() const;
2378 
2379  // Tries to find a place for suballocation with given parameters inside this allocation.
2380  // If succeeded, fills pAllocationRequest and returns true.
2381  // If failed, returns false.
2382  bool CreateAllocationRequest(
2383  VkDeviceSize bufferImageGranularity,
2384  VkDeviceSize allocSize,
2385  VkDeviceSize allocAlignment,
2386  VmaSuballocationType allocType,
2387  VmaAllocationRequest* pAllocationRequest);
2388 
2389  // Checks if requested suballocation with given parameters can be placed in given pFreeSuballocItem.
2390  // If yes, fills pOffset and returns true. If no, returns false.
2391  bool CheckAllocation(
2392  VkDeviceSize bufferImageGranularity,
2393  VkDeviceSize allocSize,
2394  VkDeviceSize allocAlignment,
2395  VmaSuballocationType allocType,
2396  VmaSuballocationList::const_iterator freeSuballocItem,
2397  VkDeviceSize* pOffset) const;
2398 
2399  // Returns true if this allocation is empty - contains only a single free suballocation.
2400  bool IsEmpty() const;
2401 
2402  // Makes actual allocation based on request. Request must already be checked
2403  // and valid.
2404  void Alloc(
2405  const VmaAllocationRequest& request,
2406  VmaSuballocationType type,
2407  VkDeviceSize allocSize);
2408 
2409  // Frees suballocation assigned to given memory region.
2410  void Free(const VmaAllocation allocation);
2411 
2412 #if VMA_STATS_STRING_ENABLED
2413  void PrintDetailedMap(class VmaStringBuilder& sb) const;
2414 #endif
2415 
2416 private:
2417  // Given a free suballocation, merges it with the following one, which must also be free.
2418  void MergeFreeWithNext(VmaSuballocationList::iterator item);
2419  // Releases given suballocation, making it free. Merges it with adjacent free
2420  // suballocations if applicable.
2421  void FreeSuballocation(VmaSuballocationList::iterator suballocItem);
2422  // Given a free suballocation, inserts it into the sorted list of
2423  // m_FreeSuballocationsBySize if it is large enough to qualify.
2424  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
2425  // Given a free suballocation, removes it from the sorted list of
2426  // m_FreeSuballocationsBySize if it was large enough to be registered there.
2427  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
2428 };
2429 
2430 struct VmaPointerLess
2431 {
2432  bool operator()(const void* lhs, const void* rhs) const
2433  {
2434  return lhs < rhs;
2435  }
2436 };
2437 
2438 /* Sequence of VmaBlock. Represents memory blocks allocated for a specific
2439 Vulkan memory type. */
2440 struct VmaBlockVector
2441 {
2442  // Incrementally sorted by sumFreeSize, ascending.
2443  VmaVector< VmaBlock*, VmaStlAllocator<VmaBlock*> > m_Blocks;
2444 
2445  VmaBlockVector(VmaAllocator hAllocator);
2446  ~VmaBlockVector();
2447 
2448  bool IsEmpty() const { return m_Blocks.empty(); }
2449 
2450  // Finds and removes given block from vector.
2451  void Remove(VmaBlock* pBlock);
2452 
2453  // Performs single step in sorting m_Blocks. They may not be fully sorted
2454  // after this call.
2455  void IncrementallySortBlocks();
2456 
2457  // Adds statistics of this BlockVector to pStats.
2458  void AddStats(VmaStats* pStats, uint32_t memTypeIndex, uint32_t memHeapIndex) const;
2459 
2460 #if VMA_STATS_STRING_ENABLED
2461  void PrintDetailedMap(class VmaStringBuilder& sb) const;
2462 #endif
2463 
2464  void UnmapPersistentlyMappedMemory();
2465  VkResult MapPersistentlyMappedMemory();
2466 
2467 private:
2468  VmaAllocator m_hAllocator;
2469 };
2470 
2471 // Main allocator object.
2472 struct VmaAllocator_T
2473 {
2474  bool m_UseMutex;
2475  VkDevice m_hDevice;
2476  bool m_AllocationCallbacksSpecified;
2477  VkAllocationCallbacks m_AllocationCallbacks;
2478  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
2479  VkDeviceSize m_PreferredLargeHeapBlockSize;
2480  VkDeviceSize m_PreferredSmallHeapBlockSize;
2481  // Non-zero when we are inside UnmapPersistentlyMappedMemory...MapPersistentlyMappedMemory.
2482  // Counter to allow nested calls to these functions.
2483  uint32_t m_UnmapPersistentlyMappedMemoryCounter;
2484 
2485  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
2486  VkPhysicalDeviceMemoryProperties m_MemProps;
2487 
2488  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES][VMA_BLOCK_VECTOR_TYPE_COUNT];
2489  /* There can be at most one block that is completely empty - a hysteresis to
2490  avoid the pessimistic case of alternating creation and destruction of a
2491  VkDeviceMemory. */
2492  bool m_HasEmptyBlock[VK_MAX_MEMORY_TYPES];
2493  VMA_MUTEX m_BlocksMutex[VK_MAX_MEMORY_TYPES];
2494 
2495  // Each vector is sorted by memory (handle value).
2496  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
2497  AllocationVectorType* m_pOwnAllocations[VK_MAX_MEMORY_TYPES][VMA_BLOCK_VECTOR_TYPE_COUNT];
2498  VMA_MUTEX m_OwnAllocationsMutex[VK_MAX_MEMORY_TYPES];
2499 
2500  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
2501  ~VmaAllocator_T();
2502 
2503  const VkAllocationCallbacks* GetAllocationCallbacks() const
2504  {
2505  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
2506  }
2507 
2508  VkDeviceSize GetPreferredBlockSize(uint32_t memTypeIndex) const;
2509 
2510  VkDeviceSize GetBufferImageGranularity() const
2511  {
2512  return VMA_MAX(
2513  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
2514  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
2515  }
2516 
2517  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
2518  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
2519 
2520  // Main allocation function.
2521  VkResult AllocateMemory(
2522  const VkMemoryRequirements& vkMemReq,
2523  const VmaMemoryRequirements& vmaMemReq,
2524  VmaSuballocationType suballocType,
2525  VmaAllocation* pAllocation);
2526 
2527  // Main deallocation function.
2528  void FreeMemory(const VmaAllocation allocation);
2529 
2530  void CalculateStats(VmaStats* pStats);
2531 
2532 #if VMA_STATS_STRING_ENABLED
2533  void PrintDetailedMap(class VmaStringBuilder& sb);
2534 #endif
2535 
2536  void UnmapPersistentlyMappedMemory();
2537  VkResult MapPersistentlyMappedMemory();
2538 
2539  VkResult Defragment(
2540  VmaAllocation* pAllocations,
2541  size_t allocationCount,
2542  VkBool32* pAllocationsChanged,
2543  const VmaDefragmentationInfo* pDefragmentationInfo,
2544  VmaDefragmentationStats* pDefragmentationStats);
2545 
2546  static void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
2547 
2548 private:
2549  VkPhysicalDevice m_PhysicalDevice;
2550 
2551  VkResult AllocateMemoryOfType(
2552  const VkMemoryRequirements& vkMemReq,
2553  const VmaMemoryRequirements& vmaMemReq,
2554  uint32_t memTypeIndex,
2555  VmaSuballocationType suballocType,
2556  VmaAllocation* pAllocation);
2557 
2558  // Allocates and registers new VkDeviceMemory specifically for a single allocation.
2559  VkResult AllocateOwnMemory(
2560  VkDeviceSize size,
2561  VmaSuballocationType suballocType,
2562  uint32_t memTypeIndex,
2563  bool map,
2564  void* pUserData,
2565  VmaAllocation* pAllocation);
2566 
2567  // Frees an allocation created as Own Memory - one with its own, dedicated VkDeviceMemory.
2568  void FreeOwnMemory(VmaAllocation allocation);
2569 };
2570 
2571 ////////////////////////////////////////////////////////////////////////////////
2572 // Memory allocation #2 after VmaAllocator_T definition
2573 
2574 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
2575 {
2576  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
2577 }
2578 
2579 static void VmaFree(VmaAllocator hAllocator, void* ptr)
2580 {
2581  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
2582 }
2583 
2584 template<typename T>
2585 static T* VmaAllocate(VmaAllocator hAllocator)
2586 {
2587  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
2588 }
2589 
2590 template<typename T>
2591 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
2592 {
2593  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
2594 }
2595 
2596 template<typename T>
2597 static void vma_delete(VmaAllocator hAllocator, T* ptr)
2598 {
2599  if(ptr != VMA_NULL)
2600  {
2601  ptr->~T();
2602  VmaFree(hAllocator, ptr);
2603  }
2604 }
2605 
2606 template<typename T>
2607 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
2608 {
2609  if(ptr != VMA_NULL)
2610  {
2611  for(size_t i = count; i--; )
2612  ptr[i].~T();
2613  VmaFree(hAllocator, ptr);
2614  }
2615 }
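// Illustrative sketch, not part of the library: VmaAllocate and vma_delete
// pair raw allocation with explicit construction and destruction. A typical
// pattern (placement-new requires <new>) looks like this:
/*
VmaBlock* const pBlock = new(VmaAllocate<VmaBlock>(hAllocator)) VmaBlock(hAllocator);
// ... use pBlock ...
vma_delete(hAllocator, pBlock); // Calls ~VmaBlock() and returns the memory.
*/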
2616 
2617 ////////////////////////////////////////////////////////////////////////////////
2618 // VmaStringBuilder
2619 
2620 #if VMA_STATS_STRING_ENABLED
2621 
2622 class VmaStringBuilder
2623 {
2624 public:
2625  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
2626  size_t GetLength() const { return m_Data.size(); }
2627  const char* GetData() const { return m_Data.data(); }
2628 
2629  void Add(char ch) { m_Data.push_back(ch); }
2630  void Add(const char* pStr);
2631  void AddNewLine() { Add('\n'); }
2632  void AddNumber(uint32_t num);
2633  void AddNumber(uint64_t num);
2634  void AddBool(bool b) { Add(b ? "true" : "false"); }
2635  void AddNull() { Add("null"); }
2636  void AddString(const char* pStr);
2637 
2638 private:
2639  VmaVector< char, VmaStlAllocator<char> > m_Data;
2640 };
2641 
2642 void VmaStringBuilder::Add(const char* pStr)
2643 {
2644  const size_t strLen = strlen(pStr);
2645  if(strLen > 0)
2646  {
2647  const size_t oldCount = m_Data.size();
2648  m_Data.resize(oldCount + strLen);
2649  memcpy(m_Data.data() + oldCount, pStr, strLen);
2650  }
2651 }
2652 
2653 void VmaStringBuilder::AddNumber(uint32_t num)
2654 {
2655  char buf[11];
2656  VmaUint32ToStr(buf, sizeof(buf), num);
2657  Add(buf);
2658 }
2659 
2660 void VmaStringBuilder::AddNumber(uint64_t num)
2661 {
2662  char buf[21];
2663  VmaUint64ToStr(buf, sizeof(buf), num);
2664  Add(buf);
2665 }
2666 
2667 void VmaStringBuilder::AddString(const char* pStr)
2668 {
2669  Add('"');
2670  const size_t strLen = strlen(pStr);
2671  for(size_t i = 0; i < strLen; ++i)
2672  {
2673  char ch = pStr[i];
2674  if(ch == '\\')
2675  {
2676  Add("\\\\");
2677  }
2678  else if(ch == '"')
2679  {
2680  Add("\\\"");
2681  }
2682  else if(ch >= 32)
2683  {
2684  Add(ch);
2685  }
2686  else switch(ch)
2687  {
2688  case '\n':
2689  Add("\\n");
2690  break;
2691  case '\r':
2692  Add("\\r");
2693  break;
2694  case '\t':
2695  Add("\\t");
2696  break;
2697  default:
2698  VMA_ASSERT(0 && "Character not currently supported.");
2699  break;
2700  }
2701  }
2702  Add('"');
2703 }
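// Illustrative sketch, not part of the library: building a JSON fragment with
// VmaStringBuilder, assuming hAllocator is a valid VmaAllocator.
/*
VmaStringBuilder sb(hAllocator);
sb.Add("{ \"Count\": ");
sb.AddNumber(42u);                     // Selects the uint32_t overload.
sb.Add(", \"Name\": ");
sb.AddString("buffer\n1");             // Quoted, with the newline escaped as \n.
sb.Add(" }");
// GetData()/GetLength() now describe: { "Count": 42, "Name": "buffer\n1" }
*/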
2704 
2705 ////////////////////////////////////////////////////////////////////////////////
2706 
2707 VkDeviceMemory VmaAllocation_T::GetMemory() const
2708 {
2709  return (m_Type == ALLOCATION_TYPE_BLOCK) ?
2710  m_BlockAllocation.m_Block->m_hMemory : m_OwnAllocation.m_hMemory;
2711 }
2712 
2713 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
2714 {
2715  return (m_Type == ALLOCATION_TYPE_BLOCK) ?
2716  m_BlockAllocation.m_Block->m_MemoryTypeIndex : m_OwnAllocation.m_MemoryTypeIndex;
2717 }
2718 
2719 VMA_BLOCK_VECTOR_TYPE VmaAllocation_T::GetBlockVectorType() const
2720 {
2721  return (m_Type == ALLOCATION_TYPE_BLOCK) ?
2722  m_BlockAllocation.m_Block->m_BlockVectorType :
2723  (m_OwnAllocation.m_PersistentMap ? VMA_BLOCK_VECTOR_TYPE_MAPPED : VMA_BLOCK_VECTOR_TYPE_UNMAPPED);
2724 }
2725 
2726 void* VmaAllocation_T::GetMappedData() const
2727 {
2728  switch(m_Type)
2729  {
2730  case ALLOCATION_TYPE_BLOCK:
2731  if(m_BlockAllocation.m_Block->m_pMappedData != VMA_NULL)
2732  {
2733  return (char*)m_BlockAllocation.m_Block->m_pMappedData + m_BlockAllocation.m_Offset;
2734  }
2735  else
2736  {
2737  return VMA_NULL;
2738  }
2739  break;
2740  case ALLOCATION_TYPE_OWN:
2741  return m_OwnAllocation.m_pMappedData;
2742  default:
2743  VMA_ASSERT(0);
2744  return VMA_NULL;
2745  }
2746 }
2747 
2748 // Names correspond to values of enum VmaSuballocationType.
2749 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
2750  "FREE",
2751  "UNKNOWN",
2752  "BUFFER",
2753  "IMAGE_UNKNOWN",
2754  "IMAGE_LINEAR",
2755  "IMAGE_OPTIMAL",
2756 };
2757 
2758 static void VmaPrintStatInfo(VmaStringBuilder& sb, const VmaStatInfo& stat)
2759 {
2760  sb.Add("{ \"Allocations\": ");
2761  sb.AddNumber(stat.AllocationCount);
2762  sb.Add(", \"Suballocations\": ");
2763  sb.AddNumber(stat.SuballocationCount);
2764  sb.Add(", \"UnusedRanges\": ");
2765  sb.AddNumber(stat.UnusedRangeCount);
2766  sb.Add(", \"UsedBytes\": ");
2767  sb.AddNumber(stat.UsedBytes);
2768  sb.Add(", \"UnusedBytes\": ");
2769  sb.AddNumber(stat.UnusedBytes);
2770  sb.Add(", \"SuballocationSize\": { \"Min\": ");
2771  sb.AddNumber(stat.SuballocationSizeMin);
2772  sb.Add(", \"Avg\": ");
2773  sb.AddNumber(stat.SuballocationSizeAvg);
2774  sb.Add(", \"Max\": ");
2775  sb.AddNumber(stat.SuballocationSizeMax);
2776  sb.Add(" }, \"UnusedRangeSize\": { \"Min\": ");
2777  sb.AddNumber(stat.UnusedRangeSizeMin);
2778  sb.Add(", \"Avg\": ");
2779  sb.AddNumber(stat.UnusedRangeSizeAvg);
2780  sb.Add(", \"Max\": ");
2781  sb.AddNumber(stat.UnusedRangeSizeMax);
2782  sb.Add(" } }");
2783 }
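// For reference, one VmaStatInfo printed by the function above has this shape
// (values are illustrative):
// { "Allocations": 2, "Suballocations": 5, "UnusedRanges": 3,
//   "UsedBytes": 65536, "UnusedBytes": 1024,
//   "SuballocationSize": { "Min": 256, "Avg": 13107, "Max": 32768 },
//   "UnusedRangeSize": { "Min": 128, "Avg": 341, "Max": 512 } }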
2784 
2785 #endif // #if VMA_STATS_STRING_ENABLED
2786 
2787 struct VmaSuballocationItemSizeLess
2788 {
2789  bool operator()(
2790  const VmaSuballocationList::iterator lhs,
2791  const VmaSuballocationList::iterator rhs) const
2792  {
2793  return lhs->size < rhs->size;
2794  }
2795  bool operator()(
2796  const VmaSuballocationList::iterator lhs,
2797  VkDeviceSize rhsSize) const
2798  {
2799  return lhs->size < rhsSize;
2800  }
2801 };
2802 
2803 VmaBlock::VmaBlock(VmaAllocator hAllocator) :
2804  m_MemoryTypeIndex(UINT32_MAX),
2805  m_BlockVectorType(VMA_BLOCK_VECTOR_TYPE_COUNT),
2806  m_hMemory(VK_NULL_HANDLE),
2807  m_Size(0),
2808  m_PersistentMap(false),
2809  m_pMappedData(VMA_NULL),
2810  m_FreeCount(0),
2811  m_SumFreeSize(0),
2812  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
2813  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
2814 {
2815 }
2816 
2817 void VmaBlock::Init(
2818  uint32_t newMemoryTypeIndex,
2819  VMA_BLOCK_VECTOR_TYPE newBlockVectorType,
2820  VkDeviceMemory newMemory,
2821  VkDeviceSize newSize,
2822  bool persistentMap,
2823  void* pMappedData)
2824 {
2825  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
2826 
2827  m_MemoryTypeIndex = newMemoryTypeIndex;
2828  m_BlockVectorType = newBlockVectorType;
2829  m_hMemory = newMemory;
2830  m_Size = newSize;
2831  m_PersistentMap = persistentMap;
2832  m_pMappedData = pMappedData;
2833  m_FreeCount = 1;
2834  m_SumFreeSize = newSize;
2835 
2836  m_Suballocations.clear();
2837  m_FreeSuballocationsBySize.clear();
2838 
2839  VmaSuballocation suballoc = {};
2840  suballoc.offset = 0;
2841  suballoc.size = newSize;
2842  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
2843 
2844  m_Suballocations.push_back(suballoc);
2845  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
2846  --suballocItem;
2847  m_FreeSuballocationsBySize.push_back(suballocItem);
2848 }
2849 
2850 void VmaBlock::Destroy(VmaAllocator allocator)
2851 {
2852  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
2853  if(m_pMappedData != VMA_NULL)
2854  {
2855  vkUnmapMemory(allocator->m_hDevice, m_hMemory);
2856  m_pMappedData = VMA_NULL;
2857  }
2858 
2859  // Callback.
2860  if(allocator->m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
2861  {
2862  (*allocator->m_DeviceMemoryCallbacks.pfnFree)(allocator, m_MemoryTypeIndex, m_hMemory, m_Size);
2863  }
2864 
2865  vkFreeMemory(allocator->m_hDevice, m_hMemory, allocator->GetAllocationCallbacks());
2866  m_hMemory = VK_NULL_HANDLE;
2867 }
2868 
2869 bool VmaBlock::Validate() const
2870 {
2871  if((m_hMemory == VK_NULL_HANDLE) ||
2872  (m_Size == 0) ||
2873  m_Suballocations.empty())
2874  {
2875  return false;
2876  }
2877 
2878  // Expected offset of new suballocation as calculated from previous ones.
2879  VkDeviceSize calculatedOffset = 0;
2880  // Expected number of free suballocations as calculated from traversing their list.
2881  uint32_t calculatedFreeCount = 0;
2882  // Expected sum size of free suballocations as calculated from traversing their list.
2883  VkDeviceSize calculatedSumFreeSize = 0;
2884  // Expected number of free suballocations that should be registered in
2885  // m_FreeSuballocationsBySize calculated from traversing their list.
2886  size_t freeSuballocationsToRegister = 0;
2888  // True if previously visited suballocation was free.
2888  bool prevFree = false;
2889 
2890  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
2891  suballocItem != m_Suballocations.cend();
2892  ++suballocItem)
2893  {
2894  const VmaSuballocation& subAlloc = *suballocItem;
2895 
2896  // Actual offset of this suballocation doesn't match expected one.
2897  if(subAlloc.offset != calculatedOffset)
2898  {
2899  return false;
2900  }
2901 
2902  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
2903  // Two adjacent free suballocations are invalid. They should be merged.
2904  if(prevFree && currFree)
2905  {
2906  return false;
2907  }
2908  prevFree = currFree;
2909 
2910  if(currFree)
2911  {
2912  calculatedSumFreeSize += subAlloc.size;
2913  ++calculatedFreeCount;
2914  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
2915  {
2916  ++freeSuballocationsToRegister;
2917  }
2918  }
2919 
2920  calculatedOffset += subAlloc.size;
2921  }
2922 
2923  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
2924  // match expected one.
2925  if(m_FreeSuballocationsBySize.size() != freeSuballocationsToRegister)
2926  {
2927  return false;
2928  }
2929 
2930  VkDeviceSize lastSize = 0;
2931  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
2932  {
2933  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
2934 
2935  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
2936  if(suballocItem->type != VMA_SUBALLOCATION_TYPE_FREE)
2937  {
2938  return false;
2939  }
2940  // They must be sorted by size ascending.
2941  if(suballocItem->size < lastSize)
2942  {
2943  return false;
2944  }
2945 
2946  lastSize = suballocItem->size;
2947  }
2948 
2949  // Check if totals match calculated values.
2950  return
2951  (calculatedOffset == m_Size) &&
2952  (calculatedSumFreeSize == m_SumFreeSize) &&
2953  (calculatedFreeCount == m_FreeCount);
2954 }
2955 
2956 /*
2957 How many suitable free suballocations to analyze before choosing the best one.
2958 - Set to 1 to use First-Fit algorithm - first suitable free suballocation will
2959  be chosen.
2960 - Set to UINT32_MAX to use Best-Fit/Worst-Fit algorithm - all suitable free
2961  suballocations will be analyzed and the best one will be chosen.
2962 - Any other value is also acceptable.
2963 */
2964 //static const uint32_t MAX_SUITABLE_SUBALLOCATIONS_TO_CHECK = 8;
2965 
2966 bool VmaBlock::CreateAllocationRequest(
2967  VkDeviceSize bufferImageGranularity,
2968  VkDeviceSize allocSize,
2969  VkDeviceSize allocAlignment,
2970  VmaSuballocationType allocType,
2971  VmaAllocationRequest* pAllocationRequest)
2972 {
2973  VMA_ASSERT(allocSize > 0);
2974  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
2975  VMA_ASSERT(pAllocationRequest != VMA_NULL);
2976  VMA_HEAVY_ASSERT(Validate());
2977 
2978  // There is not enough total free space in this allocation to fulfill the request: Early return.
2979  if(m_SumFreeSize < allocSize)
2980  {
2981  return false;
2982  }
2983 
2984  // Old brute-force algorithm, linearly searching suballocations.
2985  /*
2986  uint32_t suitableSuballocationsFound = 0;
2987  for(VmaSuballocationList::iterator suballocItem = suballocations.Front();
2988  suballocItem != VMA_NULL &&
2989  suitableSuballocationsFound < MAX_SUITABLE_SUBALLOCATIONS_TO_CHECK;
2990  suballocItem = suballocItem->Next)
2991  {
2992  if(suballocItem->Value.type == VMA_SUBALLOCATION_TYPE_FREE)
2993  {
2994  VkDeviceSize offset = 0, cost = 0;
2995  if(CheckAllocation(bufferImageGranularity, allocSize, allocAlignment, allocType, suballocItem, &offset, &cost))
2996  {
2997  ++suitableSuballocationsFound;
2998  if(cost < costLimit)
2999  {
3000  pAllocationRequest->freeSuballocationItem = suballocItem;
3001  pAllocationRequest->offset = offset;
3002  pAllocationRequest->cost = cost;
3003  if(cost == 0)
3004  return true;
3005  costLimit = cost;
3006  betterSuballocationFound = true;
3007  }
3008  }
3009  }
3010  }
3011  */
3012 
3013  // New algorithm, efficiently searching freeSuballocationsBySize.
3014  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
3015  if(freeSuballocCount > 0)
3016  {
3017  if(VMA_BEST_FIT)
3018  {
3019  // Find first free suballocation with size not less than allocSize.
3020  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
3021  m_FreeSuballocationsBySize.data(),
3022  m_FreeSuballocationsBySize.data() + freeSuballocCount,
3023  allocSize,
3024  VmaSuballocationItemSizeLess());
3025  size_t index = it - m_FreeSuballocationsBySize.data();
3026  for(; index < freeSuballocCount; ++index)
3027  {
3028  VkDeviceSize offset = 0;
3029  const VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[index];
3030  if(CheckAllocation(bufferImageGranularity, allocSize, allocAlignment, allocType, suballocItem, &offset))
3031  {
3032  pAllocationRequest->freeSuballocationItem = suballocItem;
3033  pAllocationRequest->offset = offset;
3034  return true;
3035  }
3036  }
3037  }
3038  else
3039  {
3040  // Search starting from the biggest suballocations.
3041  for(size_t index = freeSuballocCount; index--; )
3042  {
3043  VkDeviceSize offset = 0;
3044  const VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[index];
3045  if(CheckAllocation(bufferImageGranularity, allocSize, allocAlignment, allocType, suballocItem, &offset))
3046  {
3047  pAllocationRequest->freeSuballocationItem = suballocItem;
3048  pAllocationRequest->offset = offset;
3049  return true;
3050  }
3051  }
3052  }
3053  }
3054 
3055  return false;
3056 }
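// Note (illustrative): with VMA_BEST_FIT enabled, the binary search above jumps
// straight to the smallest free suballocation with size >= allocSize. E.g. for
// registered sizes [64, 256, 1024] and allocSize = 200 it lands on 256, and only
// scans forward if alignment or granularity checks reject that candidate.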
3057 
3058 bool VmaBlock::CheckAllocation(
3059  VkDeviceSize bufferImageGranularity,
3060  VkDeviceSize allocSize,
3061  VkDeviceSize allocAlignment,
3062  VmaSuballocationType allocType,
3063  VmaSuballocationList::const_iterator freeSuballocItem,
3064  VkDeviceSize* pOffset) const
3065 {
3066  VMA_ASSERT(allocSize > 0);
3067  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
3068  VMA_ASSERT(freeSuballocItem != m_Suballocations.cend());
3069  VMA_ASSERT(pOffset != VMA_NULL);
3070 
3071  const VmaSuballocation& suballoc = *freeSuballocItem;
3072  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
3073 
3074  // Size of this suballocation is too small for this request: Early return.
3075  if(suballoc.size < allocSize)
3076  {
3077  return false;
3078  }
3079 
3080  // Start from offset equal to beginning of this suballocation.
3081  *pOffset = suballoc.offset;
3082 
3083  // Apply VMA_DEBUG_MARGIN at the beginning.
3084  if((VMA_DEBUG_MARGIN > 0) && freeSuballocItem != m_Suballocations.cbegin())
3085  {
3086  *pOffset += VMA_DEBUG_MARGIN;
3087  }
3088 
3089  // Apply alignment.
3090  const VkDeviceSize alignment = VMA_MAX(allocAlignment, static_cast<VkDeviceSize>(VMA_DEBUG_ALIGNMENT));
3091  *pOffset = VmaAlignUp(*pOffset, alignment);
3092 
3093  // Check previous suballocations for BufferImageGranularity conflicts.
3094  // Increase the alignment if necessary.
3095  if(bufferImageGranularity > 1)
3096  {
3097  bool bufferImageGranularityConflict = false;
3098  VmaSuballocationList::const_iterator prevSuballocItem = freeSuballocItem;
3099  while(prevSuballocItem != m_Suballocations.cbegin())
3100  {
3101  --prevSuballocItem;
3102  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
3103  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
3104  {
3105  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
3106  {
3107  bufferImageGranularityConflict = true;
3108  break;
3109  }
3110  }
3111  else
3112  // Already on previous page.
3113  break;
3114  }
3115  if(bufferImageGranularityConflict)
3116  {
3117  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
3118  }
3119  }
3120 
3121  // Calculate padding at the beginning based on current offset.
3122  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
3123 
3124  // Calculate required margin at the end if this is not last suballocation.
3125  VmaSuballocationList::const_iterator next = freeSuballocItem;
3126  ++next;
3127  const VkDeviceSize requiredEndMargin =
3128  (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;
3129 
3130  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
3131  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
3132  {
3133  return false;
3134  }
3135 
3136  // Check next suballocations for BufferImageGranularity conflicts.
3137  // If conflict exists, allocation cannot be made here.
3138  if(bufferImageGranularity > 1)
3139  {
3140  VmaSuballocationList::const_iterator nextSuballocItem = freeSuballocItem;
3141  ++nextSuballocItem;
3142  while(nextSuballocItem != m_Suballocations.cend())
3143  {
3144  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
3145  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
3146  {
3147  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
3148  {
3149  return false;
3150  }
3151  }
3152  else
3153  {
3154  // Already on next page.
3155  break;
3156  }
3157  ++nextSuballocItem;
3158  }
3159  }
3160 
3161  // All tests passed: Success. pOffset is already filled.
3162  return true;
3163 }
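// Worked example (illustrative): given a free suballocation at offset 1000 of
// size 600, with allocAlignment = 256, VMA_DEBUG_MARGIN = 0 and
// bufferImageGranularity = 1: *pOffset starts at 1000 and VmaAlignUp rounds it
// to 1024, so paddingBegin = 24. A request of allocSize = 512 succeeds because
// 24 + 512 + 0 <= 600.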
3164 
3165 bool VmaBlock::IsEmpty() const
3166 {
3167  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
3168 }
3169 
3170 void VmaBlock::Alloc(
3171  const VmaAllocationRequest& request,
3172  VmaSuballocationType type,
3173  VkDeviceSize allocSize)
3174 {
3175  VMA_ASSERT(request.freeSuballocationItem != m_Suballocations.end());
3176  VmaSuballocation& suballoc = *request.freeSuballocationItem;
3177  // Given suballocation is a free block.
3178  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
3179  // Given offset is inside this suballocation.
3180  VMA_ASSERT(request.offset >= suballoc.offset);
3181  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
3182  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
3183  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
3184 
3185  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
3186  // it to become used.
3187  UnregisterFreeSuballocation(request.freeSuballocationItem);
3188 
3189  suballoc.offset = request.offset;
3190  suballoc.size = allocSize;
3191  suballoc.type = type;
3192 
3193  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
3194  if(paddingEnd)
3195  {
3196  VmaSuballocation paddingSuballoc = {};
3197  paddingSuballoc.offset = request.offset + allocSize;
3198  paddingSuballoc.size = paddingEnd;
3199  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
3200  VmaSuballocationList::iterator next = request.freeSuballocationItem;
3201  ++next;
3202  const VmaSuballocationList::iterator paddingEndItem =
3203  m_Suballocations.insert(next, paddingSuballoc);
3204  RegisterFreeSuballocation(paddingEndItem);
3205  }
3206 
3207  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
3208  if(paddingBegin)
3209  {
3210  VmaSuballocation paddingSuballoc = {};
3211  paddingSuballoc.offset = request.offset - paddingBegin;
3212  paddingSuballoc.size = paddingBegin;
3213  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
3214  const VmaSuballocationList::iterator paddingBeginItem =
3215  m_Suballocations.insert(request.freeSuballocationItem, paddingSuballoc);
3216  RegisterFreeSuballocation(paddingBeginItem);
3217  }
3218 
3219  // Update totals.
3220  m_FreeCount = m_FreeCount - 1;
3221  if(paddingBegin > 0)
3222  {
3223  ++m_FreeCount;
3224  }
3225  if(paddingEnd > 0)
3226  {
3227  ++m_FreeCount;
3228  }
3229  m_SumFreeSize -= allocSize;
3230 }
3231 
3232 void VmaBlock::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
3233 {
3234  // Change this suballocation to be marked as free.
3235  VmaSuballocation& suballoc = *suballocItem;
3236  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
3237 
3238  // Update totals.
3239  ++m_FreeCount;
3240  m_SumFreeSize += suballoc.size;
3241 
3242  // Merge with previous and/or next suballocation if it's also free.
3243  bool mergeWithNext = false;
3244  bool mergeWithPrev = false;
3245 
3246  VmaSuballocationList::iterator nextItem = suballocItem;
3247  ++nextItem;
3248  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
3249  {
3250  mergeWithNext = true;
3251  }
3252 
3253  VmaSuballocationList::iterator prevItem = suballocItem;
3254  if(suballocItem != m_Suballocations.begin())
3255  {
3256  --prevItem;
3257  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
3258  {
3259  mergeWithPrev = true;
3260  }
3261  }
3262 
3263  if(mergeWithNext)
3264  {
3265  UnregisterFreeSuballocation(nextItem);
3266  MergeFreeWithNext(suballocItem);
3267  }
3268 
3269  if(mergeWithPrev)
3270  {
3271  UnregisterFreeSuballocation(prevItem);
3272  MergeFreeWithNext(prevItem);
3273  RegisterFreeSuballocation(prevItem);
3274  }
3275  else
3276  RegisterFreeSuballocation(suballocItem);
3277 }
3278 
3279 void VmaBlock::Free(const VmaAllocation allocation)
3280 {
3281  const VkDeviceSize allocationOffset = allocation->GetOffset();
3282  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
3283  suballocItem != m_Suballocations.end();
3284  ++suballocItem)
3285  {
3286  VmaSuballocation& suballoc = *suballocItem;
3287  if(suballoc.offset == allocationOffset)
3288  {
3289  FreeSuballocation(suballocItem);
3290  VMA_HEAVY_ASSERT(Validate());
3291  return;
3292  }
3293  }
3294  VMA_ASSERT(0 && "Not found!");
3295 }
3296 
3297 #if VMA_STATS_STRING_ENABLED
3298 
3299 void VmaBlock::PrintDetailedMap(class VmaStringBuilder& sb) const
3300 {
3301  sb.Add("{\n\t\t\t\"Bytes\": ");
3302  sb.AddNumber(m_Size);
3303  sb.Add(",\n\t\t\t\"FreeBytes\": ");
3304  sb.AddNumber(m_SumFreeSize);
3305  sb.Add(",\n\t\t\t\"Suballocations\": ");
3306  sb.AddNumber(m_Suballocations.size());
3307  sb.Add(",\n\t\t\t\"FreeSuballocations\": ");
3308  sb.AddNumber(m_FreeCount);
3309  sb.Add(",\n\t\t\t\"SuballocationList\": [");
3310 
3311  size_t i = 0;
3312  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
3313  suballocItem != m_Suballocations.cend();
3314  ++suballocItem, ++i)
3315  {
3316  if(i > 0)
3317  {
3318  sb.Add(",\n\t\t\t\t{ \"Type\": ");
3319  }
3320  else
3321  {
3322  sb.Add("\n\t\t\t\t{ \"Type\": ");
3323  }
3324  sb.AddString(VMA_SUBALLOCATION_TYPE_NAMES[suballocItem->type]);
3325  sb.Add(", \"Size\": ");
3326  sb.AddNumber(suballocItem->size);
3327  sb.Add(", \"Offset\": ");
3328  sb.AddNumber(suballocItem->offset);
3329  sb.Add(" }");
3330  }
3331 
3332  sb.Add("\n\t\t\t]\n\t\t}");
3333 }
3334 
3335 #endif // #if VMA_STATS_STRING_ENABLED
3336 
3337 void VmaBlock::MergeFreeWithNext(VmaSuballocationList::iterator item)
3338 {
3339  VMA_ASSERT(item != m_Suballocations.end());
3340  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
3341 
3342  VmaSuballocationList::iterator nextItem = item;
3343  ++nextItem;
3344  VMA_ASSERT(nextItem != m_Suballocations.end());
3345  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
3346 
3347  item->size += nextItem->size;
3348  --m_FreeCount;
3349  m_Suballocations.erase(nextItem);
3350 }
3351 
3352 void VmaBlock::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
3353 {
3354  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
3355  VMA_ASSERT(item->size > 0);
3356 
3357  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
3358  {
3359  if(m_FreeSuballocationsBySize.empty())
3360  {
3361  m_FreeSuballocationsBySize.push_back(item);
3362  }
3363  else
3364  {
3365  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
3366  m_FreeSuballocationsBySize.data(),
3367  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
3368  item,
3369  VmaSuballocationItemSizeLess());
3370  size_t index = it - m_FreeSuballocationsBySize.data();
3371  VectorInsert(m_FreeSuballocationsBySize, index, item);
3372  }
3373  }
3374 }
3375 
3376 void VmaBlock::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
3377 {
3378  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
3379  VMA_ASSERT(item->size > 0);
3380 
3381  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
3382  {
3383  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
3384  m_FreeSuballocationsBySize.data(),
3385  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
3386  item,
3387  VmaSuballocationItemSizeLess());
3388  for(size_t index = it - m_FreeSuballocationsBySize.data();
3389  index < m_FreeSuballocationsBySize.size();
3390  ++index)
3391  {
3392  if(m_FreeSuballocationsBySize[index] == item)
3393  {
3394  VectorRemove(m_FreeSuballocationsBySize, index);
3395  return;
3396  }
3397  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
3398  }
3399  VMA_ASSERT(0 && "Not found.");
3400  }
3401 }
3402 
3403 static void InitStatInfo(VmaStatInfo& outInfo)
3404 {
3405  memset(&outInfo, 0, sizeof(outInfo));
3406  outInfo.SuballocationSizeMin = UINT64_MAX;
3407  outInfo.UnusedRangeSizeMin = UINT64_MAX;
3408 }
3409 
3410 static void CalcAllocationStatInfo(VmaStatInfo& outInfo, const VmaBlock& alloc)
3411 {
3412  outInfo.AllocationCount = 1;
3413 
3414  const uint32_t rangeCount = (uint32_t)alloc.m_Suballocations.size();
3415  outInfo.SuballocationCount = rangeCount - alloc.m_FreeCount;
3416  outInfo.UnusedRangeCount = alloc.m_FreeCount;
3417 
3418  outInfo.UnusedBytes = alloc.m_SumFreeSize;
3419  outInfo.UsedBytes = alloc.m_Size - outInfo.UnusedBytes;
3420 
3421  outInfo.SuballocationSizeMin = UINT64_MAX;
3422  outInfo.SuballocationSizeMax = 0;
3423  outInfo.UnusedRangeSizeMin = UINT64_MAX;
3424  outInfo.UnusedRangeSizeMax = 0;
3425 
3426  for(VmaSuballocationList::const_iterator suballocItem = alloc.m_Suballocations.cbegin();
3427  suballocItem != alloc.m_Suballocations.cend();
3428  ++suballocItem)
3429  {
3430  const VmaSuballocation& suballoc = *suballocItem;
3431  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
3432  {
3433  outInfo.SuballocationSizeMin = VMA_MIN(outInfo.SuballocationSizeMin, suballoc.size);
3434  outInfo.SuballocationSizeMax = VMA_MAX(outInfo.SuballocationSizeMax, suballoc.size);
3435  }
3436  else
3437  {
3438  outInfo.UnusedRangeSizeMin = VMA_MIN(outInfo.UnusedRangeSizeMin, suballoc.size);
3439  outInfo.UnusedRangeSizeMax = VMA_MAX(outInfo.UnusedRangeSizeMax, suballoc.size);
3440  }
3441  }
3442 }
3443 
3444 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
3445 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
3446 {
3447  inoutInfo.AllocationCount += srcInfo.AllocationCount;
3448  inoutInfo.SuballocationCount += srcInfo.SuballocationCount;
3449  inoutInfo.UnusedRangeCount += srcInfo.UnusedRangeCount;
3450  inoutInfo.UsedBytes += srcInfo.UsedBytes;
3451  inoutInfo.UnusedBytes += srcInfo.UnusedBytes;
3452  inoutInfo.SuballocationSizeMin = VMA_MIN(inoutInfo.SuballocationSizeMin, srcInfo.SuballocationSizeMin);
3453  inoutInfo.SuballocationSizeMax = VMA_MAX(inoutInfo.SuballocationSizeMax, srcInfo.SuballocationSizeMax);
3454  inoutInfo.UnusedRangeSizeMin = VMA_MIN(inoutInfo.UnusedRangeSizeMin, srcInfo.UnusedRangeSizeMin);
3455  inoutInfo.UnusedRangeSizeMax = VMA_MAX(inoutInfo.UnusedRangeSizeMax, srcInfo.UnusedRangeSizeMax);
3456 }
3457 
3458 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
3459 {
3460  inoutInfo.SuballocationSizeAvg = (inoutInfo.SuballocationCount > 0) ?
3461  VmaRoundDiv<VkDeviceSize>(inoutInfo.UsedBytes, inoutInfo.SuballocationCount) : 0;
3462  inoutInfo.UnusedRangeSizeAvg = (inoutInfo.UnusedRangeCount > 0) ?
3463  VmaRoundDiv<VkDeviceSize>(inoutInfo.UnusedBytes, inoutInfo.UnusedRangeCount) : 0;
3464 }
3465 
3466 VmaBlockVector::VmaBlockVector(VmaAllocator hAllocator) :
3467  m_hAllocator(hAllocator),
3468  m_Blocks(VmaStlAllocator<VmaBlock*>(hAllocator->GetAllocationCallbacks()))
3469 {
3470 }
3471 
3472 VmaBlockVector::~VmaBlockVector()
3473 {
3474  for(size_t i = m_Blocks.size(); i--; )
3475  {
3476  m_Blocks[i]->Destroy(m_hAllocator);
3477  vma_delete(m_hAllocator, m_Blocks[i]);
3478  }
3479 }
3480 
3481 void VmaBlockVector::Remove(VmaBlock* pBlock)
3482 {
3483  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
3484  {
3485  if(m_Blocks[blockIndex] == pBlock)
3486  {
3487  VectorRemove(m_Blocks, blockIndex);
3488  return;
3489  }
3490  }
3491  VMA_ASSERT(0);
3492 }
3493 
3494 void VmaBlockVector::IncrementallySortBlocks()
3495 {
3496  // Bubble sort only until first swap.
3497  for(size_t i = 1; i < m_Blocks.size(); ++i)
3498  {
3499  if(m_Blocks[i - 1]->m_SumFreeSize > m_Blocks[i]->m_SumFreeSize)
3500  {
3501  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
3502  return;
3503  }
3504  }
3505 }
3506 
3507 #if VMA_STATS_STRING_ENABLED
3508 
3509 void VmaBlockVector::PrintDetailedMap(class VmaStringBuilder& sb) const
3510 {
3511  for(size_t i = 0; i < m_Blocks.size(); ++i)
3512  {
3513  if(i > 0)
3514  {
3515  sb.Add(",\n\t\t");
3516  }
3517  else
3518  {
3519  sb.Add("\n\t\t");
3520  }
3521  m_Blocks[i]->PrintDetailedMap(sb);
3522  }
3523 }
3524 
3525 #endif // #if VMA_STATS_STRING_ENABLED
3526 
3527 void VmaBlockVector::UnmapPersistentlyMappedMemory()
3528 {
3529  for(size_t i = m_Blocks.size(); i--; )
3530  {
3531  VmaBlock* pBlock = m_Blocks[i];
3532  if(pBlock->m_pMappedData != VMA_NULL)
3533  {
3534  VMA_ASSERT(pBlock->m_PersistentMap);
3535  vkUnmapMemory(m_hAllocator->m_hDevice, pBlock->m_hMemory);
3536  pBlock->m_pMappedData = VMA_NULL;
3537  }
3538  }
3539 }
3540 
3541 VkResult VmaBlockVector::MapPersistentlyMappedMemory()
3542 {
3543  VkResult finalResult = VK_SUCCESS;
3544  for(size_t i = 0, count = m_Blocks.size(); i < count; ++i)
3545  {
3546  VmaBlock* pBlock = m_Blocks[i];
3547  if(pBlock->m_PersistentMap)
3548  {
3549  VMA_ASSERT(pBlock->m_pMappedData == VMA_NULL);
3550  VkResult localResult = vkMapMemory(m_hAllocator->m_hDevice, pBlock->m_hMemory, 0, VK_WHOLE_SIZE, 0, &pBlock->m_pMappedData);
3551  if(localResult != VK_SUCCESS)
3552  {
3553  finalResult = localResult;
3554  }
3555  }
3556  }
3557  return finalResult;
3558 }
3559 
3560 void VmaBlockVector::AddStats(VmaStats* pStats, uint32_t memTypeIndex, uint32_t memHeapIndex) const
3561 {
3562  for(uint32_t allocIndex = 0; allocIndex < m_Blocks.size(); ++allocIndex)
3563  {
3564  const VmaBlock* const pBlock = m_Blocks[allocIndex];
3565  VMA_ASSERT(pBlock);
3566  VMA_HEAVY_ASSERT(pBlock->Validate());
3567  VmaStatInfo allocationStatInfo;
3568  CalcAllocationStatInfo(allocationStatInfo, *pBlock);
3569  VmaAddStatInfo(pStats->total, allocationStatInfo);
3570  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
3571  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
3572  }
3573 }
3574 
3575 ////////////////////////////////////////////////////////////////////////////////
3576 // VmaDefragmentator
3577 
3578 class VmaDefragmentator
3579 {
3580  VkDevice m_hDevice;
3581  const VkAllocationCallbacks* m_pAllocationCallbacks;
3582  VkDeviceSize m_BufferImageGranularity;
3583  uint32_t m_MemTypeIndex;
3584  VMA_BLOCK_VECTOR_TYPE m_BlockVectorType;
3585  VkDeviceSize m_BytesMoved;
3586  uint32_t m_AllocationsMoved;
3587 
3588  struct AllocationInfo
3589  {
3590  VmaAllocation m_hAllocation;
3591  VkBool32* m_pChanged;
3592 
3593  AllocationInfo() :
3594  m_hAllocation(VK_NULL_HANDLE),
3595  m_pChanged(VMA_NULL)
3596  {
3597  }
3598  };
3599 
3600  struct AllocationInfoSizeGreater
3601  {
3602  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
3603  {
3604  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
3605  }
3606  };
3607 
3608  // Used between AddAllocation and Defragment.
3609  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
3610 
3611  struct BlockInfo
3612  {
3613  VmaBlock* m_pBlock;
3614  bool m_HasNonMovableAllocations;
3615  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
3616 
3617  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
3618  m_pBlock(VMA_NULL),
3619  m_HasNonMovableAllocations(true),
3620  m_Allocations(pAllocationCallbacks),
3621  m_pMappedDataForDefragmentation(VMA_NULL)
3622  {
3623  }
3624 
3625  void CalcHasNonMovableAllocations()
3626  {
3627  const size_t blockAllocCount =
3628  m_pBlock->m_Suballocations.size() - m_pBlock->m_FreeCount;
3629  const size_t defragmentAllocCount = m_Allocations.size();
3630  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
3631  }
3632 
3633  void SortAllocationsBySizeDescecnding()
3634  {
3635  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
3636  }
3637 
3638  VkResult EnsureMapping(VkDevice hDevice, void** ppMappedData)
3639  {
3640  // It has already been mapped for defragmentation.
3641  if(m_pMappedDataForDefragmentation)
3642  {
3643  *ppMappedData = m_pMappedDataForDefragmentation;
3644  return VK_SUCCESS;
3645  }
3646 
3647  // It is persistently mapped.
3648  if(m_pBlock->m_PersistentMap)
3649  {
3650  VMA_ASSERT(m_pBlock->m_pMappedData != VMA_NULL);
3651  *ppMappedData = m_pBlock->m_pMappedData;
3652  return VK_SUCCESS;
3653  }
3654 
3655  // Map on first usage.
3656  VkResult res = vkMapMemory(hDevice, m_pBlock->m_hMemory, 0, VK_WHOLE_SIZE, 0, &m_pMappedDataForDefragmentation);
3657  *ppMappedData = m_pMappedDataForDefragmentation;
3658  return res;
3659  }
3660 
3661  void Unmap(VkDevice hDevice)
3662  {
3663  if(m_pMappedDataForDefragmentation != VMA_NULL)
3664  {
3665  vkUnmapMemory(hDevice, m_pBlock->m_hMemory);
3666  }
3667  }
3668 
3669  private:
3670  // Not null if mapped for defragmentation only, not persistently mapped.
3671  void* m_pMappedDataForDefragmentation;
3672  };
3673 
3674  struct BlockPointerLess
3675  {
3676  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaBlock* pRhsBlock) const
3677  {
3678  return pLhsBlockInfo->m_pBlock < pRhsBlock;
3679  }
3680  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
3681  {
3682  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
3683  }
3684  };
3685 
3686  // 1. Blocks with some non-movable allocations go first.
3687  // 2. Blocks with smaller sumFreeSize go first.
3688  struct BlockInfoCompareMoveDestination
3689  {
3690  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
3691  {
3692  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
3693  {
3694  return true;
3695  }
3696  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
3697  {
3698  return false;
3699  }
3700  if(pLhsBlockInfo->m_pBlock->m_SumFreeSize < pRhsBlockInfo->m_pBlock->m_SumFreeSize)
3701  {
3702  return true;
3703  }
3704  return false;
3705  }
3706  };
3707 
3708  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
3709  BlockInfoVector m_Blocks;
3710 
3711  VkResult DefragmentRound(
3712  VkDeviceSize maxBytesToMove,
3713  uint32_t maxAllocationsToMove);
3714 
3715  static bool MoveMakesSense(
3716  size_t dstBlockIndex, VkDeviceSize dstOffset,
3717  size_t srcBlockIndex, VkDeviceSize srcOffset);
3718 
3719 public:
3720  VmaDefragmentator(
3721  VkDevice hDevice,
3722  const VkAllocationCallbacks* pAllocationCallbacks,
3723  VkDeviceSize bufferImageGranularity,
3724  uint32_t memTypeIndex,
3725  VMA_BLOCK_VECTOR_TYPE blockVectorType);
3726 
3727  ~VmaDefragmentator();
3728 
3729  VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
3730  uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
3731 
3732  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
3733 
3734  VkResult Defragment(
3735  VmaBlockVector* pBlockVector,
3736  VkDeviceSize maxBytesToMove,
3737  uint32_t maxAllocationsToMove);
3738 };
3739 
3740 VmaDefragmentator::VmaDefragmentator(
3741  VkDevice hDevice,
3742  const VkAllocationCallbacks* pAllocationCallbacks,
3743  VkDeviceSize bufferImageGranularity,
3744  uint32_t memTypeIndex,
3745  VMA_BLOCK_VECTOR_TYPE blockVectorType) :
3746  m_hDevice(hDevice),
3747  m_pAllocationCallbacks(pAllocationCallbacks),
3748  m_BufferImageGranularity(bufferImageGranularity),
3749  m_MemTypeIndex(memTypeIndex),
3750  m_BlockVectorType(blockVectorType),
3751  m_BytesMoved(0),
3752  m_AllocationsMoved(0),
3753  m_Allocations(VmaStlAllocator<AllocationInfo>(pAllocationCallbacks)),
3754  m_Blocks(VmaStlAllocator<BlockInfo*>(pAllocationCallbacks))
3755 {
3756 }
3757 
3758 VmaDefragmentator::~VmaDefragmentator()
3759 {
3760  for(size_t i = m_Blocks.size(); i--; )
3761  {
3762  vma_delete(m_pAllocationCallbacks, m_Blocks[i]);
3763  }
3764 }
3765 
3766 void VmaDefragmentator::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
3767 {
3768  AllocationInfo allocInfo;
3769  allocInfo.m_hAllocation = hAlloc;
3770  allocInfo.m_pChanged = pChanged;
3771  m_Allocations.push_back(allocInfo);
3772 }
3773 
3774 VkResult VmaDefragmentator::DefragmentRound(
3775  VkDeviceSize maxBytesToMove,
3776  uint32_t maxAllocationsToMove)
3777 {
3778  if(m_Blocks.empty())
3779  {
3780  return VK_SUCCESS;
3781  }
3782 
3783  size_t srcBlockIndex = m_Blocks.size() - 1;
3784  size_t srcAllocIndex = SIZE_MAX;
3785  for(;;)
3786  {
3787  // 1. Find next allocation to move.
3788  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
3789  // 1.2. Then start from last to first m_Allocations - they are sorted from largest to smallest.
3790  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
3791  {
3792  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
3793  {
3794  // Finished: no more allocations to process.
3795  if(srcBlockIndex == 0)
3796  {
3797  return VK_SUCCESS;
3798  }
3799  else
3800  {
3801  --srcBlockIndex;
3802  srcAllocIndex = SIZE_MAX;
3803  }
3804  }
3805  else
3806  {
3807  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
3808  }
3809  }
3810 
3811  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
3812  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
3813 
3814  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
3815  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
3816  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
3817  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
3818 
3819  // 2. Try to find new place for this allocation in preceding or current block.
3820  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
3821  {
3822  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
3823  VmaAllocationRequest dstAllocRequest;
3824  if(pDstBlockInfo->m_pBlock->CreateAllocationRequest(
3825  m_BufferImageGranularity,
3826  size,
3827  alignment,
3828  suballocType,
3829  &dstAllocRequest) &&
3830  MoveMakesSense(
3831  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
3832  {
3833  // Reached limit on number of allocations or bytes to move.
3834  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
3835  (m_BytesMoved + size > maxBytesToMove))
3836  {
3837  return VK_INCOMPLETE;
3838  }
3839 
3840  void* pDstMappedData = VMA_NULL;
3841  VkResult res = pDstBlockInfo->EnsureMapping(m_hDevice, &pDstMappedData);
3842  if(res != VK_SUCCESS)
3843  {
3844  return res;
3845  }
3846 
3847  void* pSrcMappedData = VMA_NULL;
3848  res = pSrcBlockInfo->EnsureMapping(m_hDevice, &pSrcMappedData);
3849  if(res != VK_SUCCESS)
3850  {
3851  return res;
3852  }
3853 
3854  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
3855  memcpy(
3856  reinterpret_cast<char*>(pDstMappedData) + dstAllocRequest.offset,
3857  reinterpret_cast<char*>(pSrcMappedData) + srcOffset,
3858  size);
3859 
3860  pDstBlockInfo->m_pBlock->Alloc(dstAllocRequest, suballocType, size);
3861  pSrcBlockInfo->m_pBlock->Free(allocInfo.m_hAllocation);
3862 
3863  allocInfo.m_hAllocation->ChangeBlockAllocation(pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
3864 
3865  if(allocInfo.m_pChanged != VMA_NULL)
3866  {
3867  *allocInfo.m_pChanged = VK_TRUE;
3868  }
3869 
3870  ++m_AllocationsMoved;
3871  m_BytesMoved += size;
3872 
3873  VectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
3874 
3875  break;
3876  }
3877  }
3878 
3879  // If not moved, this allocInfo remains in pSrcBlockInfo->m_Allocations for the next round.
3880 
3881  if(srcAllocIndex > 0)
3882  {
3883  --srcAllocIndex;
3884  }
3885  else
3886  {
3887  if(srcBlockIndex > 0)
3888  {
3889  --srcBlockIndex;
3890  srcAllocIndex = SIZE_MAX;
3891  }
3892  else
3893  {
3894  return VK_SUCCESS;
3895  }
3896  }
3897  }
3898 }
3899 
3900 VkResult VmaDefragmentator::Defragment(
3901  VmaBlockVector* pBlockVector,
3902  VkDeviceSize maxBytesToMove,
3903  uint32_t maxAllocationsToMove)
3904 {
3905  if(m_Allocations.empty())
3906  {
3907  return VK_SUCCESS;
3908  }
3909 
3910  // Create block info for each block.
3911  const size_t blockCount = pBlockVector->m_Blocks.size();
3912  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
3913  {
3914  BlockInfo* pBlockInfo = vma_new(m_pAllocationCallbacks, BlockInfo)(m_pAllocationCallbacks);
3915  pBlockInfo->m_pBlock = pBlockVector->m_Blocks[blockIndex];
3916  m_Blocks.push_back(pBlockInfo);
3917  }
3918 
3919  // Sort them by m_pBlock pointer value.
3920  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
3921 
3922  // Move allocation infos from m_Allocations to appropriate m_Blocks[i].m_Allocations.
3923  for(size_t allocIndex = 0, allocCount = m_Allocations.size(); allocIndex < allocCount; ++allocIndex)
3924  {
3925  AllocationInfo& allocInfo = m_Allocations[allocIndex];
3926  VmaBlock* pBlock = allocInfo.m_hAllocation->GetBlock();
3927  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
3928  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
3929  {
3930  (*it)->m_Allocations.push_back(allocInfo);
3931  }
3932  else
3933  {
3934  VMA_ASSERT(0);
3935  }
3936  }
3937  m_Allocations.clear();
3938 
3939  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
3940  {
3941  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
3942  pBlockInfo->CalcHasNonMovableAllocations();
3943  pBlockInfo->SortAllocationsBySizeDescending();
3944  }
3945 
3946  // Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
3947  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
3948 
3949  // Execute defragmentation round (the main part).
3950  VkResult result = VK_SUCCESS;
3951  for(size_t round = 0; (round < 2) && (result == VK_SUCCESS); ++round)
3952  {
3953  result = DefragmentRound(maxBytesToMove, maxAllocationsToMove);
3954  }
3955 
3956  // Unmap blocks that were mapped for defragmentation.
3957  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
3958  {
3959  m_Blocks[blockIndex]->Unmap(m_hDevice);
3960  }
3961 
3962  return result;
3963 }
3964 
3965 bool VmaDefragmentator::MoveMakesSense(
3966  size_t dstBlockIndex, VkDeviceSize dstOffset,
3967  size_t srcBlockIndex, VkDeviceSize srcOffset)
3968 {
3969  if(dstBlockIndex < srcBlockIndex)
3970  {
3971  return true;
3972  }
3973  if(dstBlockIndex > srcBlockIndex)
3974  {
3975  return false;
3976  }
3977  if(dstOffset < srcOffset)
3978  {
3979  return true;
3980  }
3981  return false;
3982 }
3983 
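// Editor's note (illustrative, not part of the original source): MoveMakesSense
// only accepts moves toward the front of the sorted block list. For example, a
// move from block 2 to block 0 is accepted, a move from block 0 to block 2 is
// rejected, and within a single block only a move to a lower offset is accepted.
// Every accepted move therefore compacts data toward "destination" blocks.
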
3985 // VmaAllocator_T
3986 
3987 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
3988  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
3989  m_PhysicalDevice(pCreateInfo->physicalDevice),
3990  m_hDevice(pCreateInfo->device),
3991  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
3992  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
3993  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
3994  m_PreferredLargeHeapBlockSize(0),
3995  m_PreferredSmallHeapBlockSize(0),
3996  m_UnmapPersistentlyMappedMemoryCounter(0)
3997 {
3998  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
3999 
4000  memset(&m_DeviceMemoryCallbacks, 0, sizeof(m_DeviceMemoryCallbacks));
4001  memset(&m_MemProps, 0, sizeof(m_MemProps));
4002  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
4003 
4004  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
4005  memset(&m_HasEmptyBlock, 0, sizeof(m_HasEmptyBlock));
4006  memset(&m_pOwnAllocations, 0, sizeof(m_pOwnAllocations));
4007 
4008  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
4009  {
4010  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
4011  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
4012  }
4013 
4014  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
4015  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
4016  m_PreferredSmallHeapBlockSize = (pCreateInfo->preferredSmallHeapBlockSize != 0) ?
4017  pCreateInfo->preferredSmallHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_SMALL_HEAP_BLOCK_SIZE);
4018 
4019  vkGetPhysicalDeviceProperties(m_PhysicalDevice, &m_PhysicalDeviceProperties);
4020  vkGetPhysicalDeviceMemoryProperties(m_PhysicalDevice, &m_MemProps);
4021 
4022  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
4023  {
4024  for(size_t j = 0; j < VMA_BLOCK_VECTOR_TYPE_COUNT; ++j)
4025  {
4026  m_pBlockVectors[i][j] = vma_new(this, VmaBlockVector)(this);
4027  m_pOwnAllocations[i][j] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
4028  }
4029  }
4030 }
4031 
4032 VmaAllocator_T::~VmaAllocator_T()
4033 {
4034  for(size_t i = GetMemoryTypeCount(); i--; )
4035  {
4036  for(size_t j = VMA_BLOCK_VECTOR_TYPE_COUNT; j--; )
4037  {
4038  vma_delete(this, m_pOwnAllocations[i][j]);
4039  vma_delete(this, m_pBlockVectors[i][j]);
4040  }
4041  }
4042 }
4043 
4044 VkDeviceSize VmaAllocator_T::GetPreferredBlockSize(uint32_t memTypeIndex) const
4045 {
4046  VkDeviceSize heapSize = m_MemProps.memoryHeaps[m_MemProps.memoryTypes[memTypeIndex].heapIndex].size;
4047  return (heapSize <= VMA_SMALL_HEAP_MAX_SIZE) ?
4048  m_PreferredSmallHeapBlockSize : m_PreferredLargeHeapBlockSize;
4049 }
4050 
4051 VkResult VmaAllocator_T::AllocateMemoryOfType(
4052  const VkMemoryRequirements& vkMemReq,
4053  const VmaMemoryRequirements& vmaMemReq,
4054  uint32_t memTypeIndex,
4055  VmaSuballocationType suballocType,
4056  VmaAllocation* pAllocation)
4057 {
4058  VMA_ASSERT(pAllocation != VMA_NULL);
4059  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, vkMemReq.size);
4060 
4061  const VkDeviceSize preferredBlockSize = GetPreferredBlockSize(memTypeIndex);
4062  // Heuristic: allocate own memory if the requested size is greater than half of the preferred block size.
4063  const bool ownMemory =
4064  (vmaMemReq.flags & VMA_MEMORY_REQUIREMENT_OWN_MEMORY_BIT) != 0 ||
4065  VMA_DEBUG_ALWAYS_OWN_MEMORY ||
4066  ((vmaMemReq.flags & VMA_MEMORY_REQUIREMENT_NEVER_ALLOCATE_BIT) == 0 &&
4067  vkMemReq.size > preferredBlockSize / 2);
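 // Editor's note (illustrative): assuming the default VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
 // of 256 MiB, a 150 MiB request exceeds preferredBlockSize / 2 = 128 MiB and is
 // therefore given its own dedicated VkDeviceMemory instead of a suballocation.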
4068 
4069  if(ownMemory)
4070  {
4071  if((vmaMemReq.flags & VMA_MEMORY_REQUIREMENT_NEVER_ALLOCATE_BIT) != 0)
4072  {
4073  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
4074  }
4075  else
4076  {
4077  return AllocateOwnMemory(
4078  vkMemReq.size,
4079  suballocType,
4080  memTypeIndex,
4081  (vmaMemReq.flags & VMA_MEMORY_REQUIREMENT_PERSISTENT_MAP_BIT) != 0, // map
4082  vmaMemReq.pUserData,
4083  pAllocation);
4084  }
4085  }
4086  else
4087  {
4088  uint32_t blockVectorType = VmaMemoryRequirementFlagsToBlockVectorType(vmaMemReq.flags);
4089 
4090  VmaMutexLock lock(m_BlocksMutex[memTypeIndex], m_UseMutex);
4091  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex][blockVectorType];
4092  VMA_ASSERT(blockVector);
4093 
4094  // 1. Search existing allocations.
4095  // Forward order - prefer blocks with smallest amount of free space.
4096  for(size_t allocIndex = 0; allocIndex < blockVector->m_Blocks.size(); ++allocIndex )
4097  {
4098  VmaBlock* const pBlock = blockVector->m_Blocks[allocIndex];
4099  VMA_ASSERT(pBlock);
4100  VmaAllocationRequest allocRequest = {};
4101  // Check if can allocate from pBlock.
4102  if(pBlock->CreateAllocationRequest(
4103  GetBufferImageGranularity(),
4104  vkMemReq.size,
4105  vkMemReq.alignment,
4106  suballocType,
4107  &allocRequest))
4108  {
4109  // We no longer have an empty block.
4110  if(pBlock->IsEmpty())
4111  {
4112  m_HasEmptyBlock[memTypeIndex] = false;
4113  }
4114  // Allocate from this pBlock.
4115  pBlock->Alloc(allocRequest, suballocType, vkMemReq.size);
4116  *pAllocation = vma_new(this, VmaAllocation_T)();
4117  (*pAllocation)->InitBlockAllocation(
4118  pBlock,
4119  allocRequest.offset,
4120  vkMemReq.alignment,
4121  vkMemReq.size,
4122  suballocType,
4123  vmaMemReq.pUserData);
4124  VMA_HEAVY_ASSERT(pBlock->Validate());
4125  VMA_DEBUG_LOG(" Returned from existing allocation #%u", (uint32_t)allocIndex);
4126  return VK_SUCCESS;
4127  }
4128  }
4129 
4130  // 2. Create new block.
4131  if((vmaMemReq.flags & VMA_MEMORY_REQUIREMENT_NEVER_ALLOCATE_BIT) != 0)
4132  {
4133  VMA_DEBUG_LOG(" FAILED due to VMA_MEMORY_REQUIREMENT_NEVER_ALLOCATE_BIT");
4134  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
4135  }
4136  else
4137  {
4138  // Start with full preferredBlockSize.
4139  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
4140  allocInfo.memoryTypeIndex = memTypeIndex;
4141  allocInfo.allocationSize = preferredBlockSize;
4142  VkDeviceMemory mem = VK_NULL_HANDLE;
4143  VkResult res = vkAllocateMemory(m_hDevice, &allocInfo, GetAllocationCallbacks(), &mem);
4144  if(res < 0)
4145  {
4146  // 3. Try half the size.
4147  allocInfo.allocationSize /= 2;
4148  if(allocInfo.allocationSize >= vkMemReq.size)
4149  {
4150  res = vkAllocateMemory(m_hDevice, &allocInfo, GetAllocationCallbacks(), &mem);
4151  if(res < 0)
4152  {
4153  // 4. Try quarter the size.
4154  allocInfo.allocationSize /= 2;
4155  if(allocInfo.allocationSize >= vkMemReq.size)
4156  {
4157  res = vkAllocateMemory(m_hDevice, &allocInfo, GetAllocationCallbacks(), &mem);
4158  }
4159  }
4160  }
4161  }
4162  if(res < 0)
4163  {
4164  // 5. Try OwnAlloc.
4165  res = AllocateOwnMemory(
4166  vkMemReq.size,
4167  suballocType,
4168  memTypeIndex,
4169  (vmaMemReq.flags & VMA_MEMORY_REQUIREMENT_PERSISTENT_MAP_BIT) != 0, // map
4170  vmaMemReq.pUserData,
4171  pAllocation);
4172  if(res == VK_SUCCESS)
4173  {
4174  // Succeeded: AllocateOwnMemory already filled *pAllocation; nothing more to do here.
4175  VMA_DEBUG_LOG(" Allocated as OwnMemory");
4176  return VK_SUCCESS;
4177  }
4178  else
4179  {
4180  // Everything failed: Return error code.
4181  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
4182  return res;
4183  }
4184  }
4185 
4186  // New VkDeviceMemory successfully created.
4187 
4188  // Map memory if needed.
4189  void* pMappedData = VMA_NULL;
4190  const bool persistentMap = (vmaMemReq.flags & VMA_MEMORY_REQUIREMENT_PERSISTENT_MAP_BIT) != 0;
4191  if(persistentMap && m_UnmapPersistentlyMappedMemoryCounter == 0)
4192  {
4193  res = vkMapMemory(m_hDevice, mem, 0, VK_WHOLE_SIZE, 0, &pMappedData);
4194  if(res < 0)
4195  {
4196  VMA_DEBUG_LOG(" vkMapMemory FAILED");
4197  vkFreeMemory(m_hDevice, mem, GetAllocationCallbacks());
4198  return res;
4199  }
4200  }
4201 
4202  // Callback.
4203  if(m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
4204  {
4205  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, memTypeIndex, mem, allocInfo.allocationSize);
4206  }
4207 
4208  // Create a new block for it.
4209  VmaBlock* const pBlock = vma_new(this, VmaBlock)(this);
4210  pBlock->Init(
4211  memTypeIndex,
4212  (VMA_BLOCK_VECTOR_TYPE)blockVectorType,
4213  mem,
4214  allocInfo.allocationSize,
4215  persistentMap,
4216  pMappedData);
4217 
4218  blockVector->m_Blocks.push_back(pBlock);
4219 
4220  // Allocate from pBlock. Because it is empty, allocRequest can be trivially filled.
4221  VmaAllocationRequest allocRequest = {};
4222  allocRequest.freeSuballocationItem = pBlock->m_Suballocations.begin();
4223  allocRequest.offset = 0;
4224  pBlock->Alloc(allocRequest, suballocType, vkMemReq.size);
4225  *pAllocation = vma_new(this, VmaAllocation_T)();
4226  (*pAllocation)->InitBlockAllocation(
4227  pBlock,
4228  allocRequest.offset,
4229  vkMemReq.alignment,
4230  vkMemReq.size,
4231  suballocType,
4232  vmaMemReq.pUserData);
4233  VMA_HEAVY_ASSERT(pBlock->Validate());
4234  VMA_DEBUG_LOG(" Created new allocation Size=%llu", allocInfo.allocationSize);
4235  return VK_SUCCESS;
4236  }
4237  }
4238 }
4239 
4240 VkResult VmaAllocator_T::AllocateOwnMemory(
4241  VkDeviceSize size,
4242  VmaSuballocationType suballocType,
4243  uint32_t memTypeIndex,
4244  bool map,
4245  void* pUserData,
4246  VmaAllocation* pAllocation)
4247 {
4248  VMA_ASSERT(pAllocation);
4249 
4250  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
4251  allocInfo.memoryTypeIndex = memTypeIndex;
4252  allocInfo.allocationSize = size;
4253 
4254  // Allocate VkDeviceMemory.
4255  VkDeviceMemory hMemory = VK_NULL_HANDLE;
4256  VkResult res = vkAllocateMemory(m_hDevice, &allocInfo, GetAllocationCallbacks(), &hMemory);
4257  if(res < 0)
4258  {
4259  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
4260  return res;
4261  }
4262 
4263  void* pMappedData = VMA_NULL;
4264  if(map)
4265  {
4266  if(m_UnmapPersistentlyMappedMemoryCounter == 0)
4267  {
4268  res = vkMapMemory(m_hDevice, hMemory, 0, VK_WHOLE_SIZE, 0, &pMappedData);
4269  if(res < 0)
4270  {
4271  VMA_DEBUG_LOG(" vkMapMemory FAILED");
4272  vkFreeMemory(m_hDevice, hMemory, GetAllocationCallbacks());
4273  return res;
4274  }
4275  }
4276  }
4277 
4278  // Callback.
4279  if(m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
4280  {
4281  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, memTypeIndex, hMemory, size);
4282  }
4283 
4284  *pAllocation = vma_new(this, VmaAllocation_T)();
4285  (*pAllocation)->InitOwnAllocation(memTypeIndex, hMemory, suballocType, map, pMappedData, size, pUserData);
4286 
4287  // Register it in m_pOwnAllocations.
4288  {
4289  VmaMutexLock lock(m_OwnAllocationsMutex[memTypeIndex], m_UseMutex);
4290  AllocationVectorType* pOwnAllocations = m_pOwnAllocations[memTypeIndex][map ? VMA_BLOCK_VECTOR_TYPE_MAPPED : VMA_BLOCK_VECTOR_TYPE_UNMAPPED];
4291  VMA_ASSERT(pOwnAllocations);
4292  VmaAllocation* const pOwnAllocationsBeg = pOwnAllocations->data();
4293  VmaAllocation* const pOwnAllocationsEnd = pOwnAllocationsBeg + pOwnAllocations->size();
4294  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
4295  pOwnAllocationsBeg,
4296  pOwnAllocationsEnd,
4297  *pAllocation,
4298  VmaPointerLess()) - pOwnAllocationsBeg;
4299  VectorInsert(*pOwnAllocations, indexToInsert, *pAllocation);
4300  }
4301 
4302  VMA_DEBUG_LOG(" Allocated OwnMemory MemoryTypeIndex=#%u", memTypeIndex);
4303 
4304  return VK_SUCCESS;
4305 }
4306 
4307 VkResult VmaAllocator_T::AllocateMemory(
4308  const VkMemoryRequirements& vkMemReq,
4309  const VmaMemoryRequirements& vmaMemReq,
4310  VmaSuballocationType suballocType,
4311  VmaAllocation* pAllocation)
4312 {
4313  if((vmaMemReq.flags & VMA_MEMORY_REQUIREMENT_OWN_MEMORY_BIT) != 0 &&
4314  (vmaMemReq.flags & VMA_MEMORY_REQUIREMENT_NEVER_ALLOCATE_BIT) != 0)
4315  {
4316  VMA_ASSERT(0 && "Specifying VMA_MEMORY_REQUIREMENT_OWN_MEMORY_BIT together with VMA_MEMORY_REQUIREMENT_NEVER_ALLOCATE_BIT makes no sense.");
4317  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
4318  }
4319 
4320  // Bit mask of Vulkan memory types acceptable for this allocation.
4321  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
4322  uint32_t memTypeIndex = UINT32_MAX;
4323  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &vmaMemReq, &memTypeIndex);
4324  if(res == VK_SUCCESS)
4325  {
4326  res = AllocateMemoryOfType(vkMemReq, vmaMemReq, memTypeIndex, suballocType, pAllocation);
4327  // Succeeded on first try.
4328  if(res == VK_SUCCESS)
4329  {
4330  return res;
4331  }
4332  // Allocation from this memory type failed. Try other compatible memory types.
4333  else
4334  {
4335  for(;;)
4336  {
4337  // Remove old memTypeIndex from list of possibilities.
4338  memoryTypeBits &= ~(1u << memTypeIndex);
4339  // Find alternative memTypeIndex.
4340  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &vmaMemReq, &memTypeIndex);
4341  if(res == VK_SUCCESS)
4342  {
4343  res = AllocateMemoryOfType(vkMemReq, vmaMemReq, memTypeIndex, suballocType, pAllocation);
4344  // Allocation from this alternative memory type succeeded.
4345  if(res == VK_SUCCESS)
4346  {
4347  return res;
4348  }
4349  // else: Allocation from this memory type failed. Try next one - next loop iteration.
4350  }
4351  // No other matching memory type index could be found.
4352  else
4353  {
4354  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
4355  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
4356  }
4357  }
4358  }
4359  }
4360  // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
4361  else
4362  return res;
4363 }
4364 
4365 void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
4366 {
4367  VMA_ASSERT(allocation);
4368 
4369  if(allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK)
4370  {
4371  VmaBlock* pBlockToDelete = VMA_NULL;
4372 
4373  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
4374  const VMA_BLOCK_VECTOR_TYPE blockVectorType = allocation->GetBlockVectorType();
4375  {
4376  VmaMutexLock lock(m_BlocksMutex[memTypeIndex], m_UseMutex);
4377 
4378  VmaBlockVector* pBlockVector = m_pBlockVectors[memTypeIndex][blockVectorType];
4379  VmaBlock* pBlock = allocation->GetBlock();
4380 
4381  pBlock->Free(allocation);
4382  VMA_HEAVY_ASSERT(pBlock->Validate());
4383 
4384  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", memTypeIndex);
4385 
4386  // pBlock became empty after this deallocation.
4387  if(pBlock->IsEmpty())
4388  {
4389  // We already have an empty block - we don't want two, so delete this one.
4390  if(m_HasEmptyBlock[memTypeIndex])
4391  {
4392  pBlockToDelete = pBlock;
4393  pBlockVector->Remove(pBlock);
4394  }
4395  // We now have our first empty block.
4396  else
4397  {
4398  m_HasEmptyBlock[memTypeIndex] = true;
4399  }
4400  }
4401  // Must be called after pBlock is no longer used, because sorting may reorder m_Blocks.
4402  pBlockVector->IncrementallySortBlocks();
4403  }
4404  // Destruction of an empty block. Deferred until this point, outside of the mutex
4405  // lock, for performance reasons.
4406  if(pBlockToDelete != VMA_NULL)
4407  {
4408  VMA_DEBUG_LOG(" Deleted empty allocation");
4409  pBlockToDelete->Destroy(this);
4410  vma_delete(this, pBlockToDelete);
4411  }
4412 
4413  vma_delete(this, allocation);
4414  }
4415  else // VmaAllocation_T::ALLOCATION_TYPE_OWN
4416  {
4417  FreeOwnMemory(allocation);
4418  }
4419 }
4420 
4421 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
4422 {
4423  InitStatInfo(pStats->total);
4424  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
4425  InitStatInfo(pStats->memoryType[i]);
4426  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
4427  InitStatInfo(pStats->memoryHeap[i]);
4428 
4429  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
4430  {
4431  VmaMutexLock allocationsLock(m_BlocksMutex[memTypeIndex], m_UseMutex);
4432  const uint32_t heapIndex = m_MemProps.memoryTypes[memTypeIndex].heapIndex;
4433  for(uint32_t blockVectorType = 0; blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorType)
4434  {
4435  const VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex][blockVectorType];
4436  VMA_ASSERT(pBlockVector);
4437  pBlockVector->AddStats(pStats, memTypeIndex, heapIndex);
4438  }
4439  }
4440 
4441  VmaPostprocessCalcStatInfo(pStats->total);
4442  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
4443  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
4444  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
4445  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
4446 }
4447 
4448 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
4449 
4450 void VmaAllocator_T::UnmapPersistentlyMappedMemory()
4451 {
4452  if(m_UnmapPersistentlyMappedMemoryCounter++ == 0)
4453  {
4454  if(m_PhysicalDeviceProperties.vendorID == VMA_VENDOR_ID_AMD)
4455  {
4456  for(size_t memTypeIndex = m_MemProps.memoryTypeCount; memTypeIndex--; )
4457  {
4458  const VkMemoryPropertyFlags memFlags = m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
4459  if((memFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0 &&
4460  (memFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
4461  {
4462  // Process OwnAllocations.
4463  {
4464  VmaMutexLock lock(m_OwnAllocationsMutex[memTypeIndex], m_UseMutex);
4465  AllocationVectorType* pOwnAllocationsVector = m_pOwnAllocations[memTypeIndex][VMA_BLOCK_VECTOR_TYPE_MAPPED];
4466  for(size_t ownAllocIndex = pOwnAllocationsVector->size(); ownAllocIndex--; )
4467  {
4468  VmaAllocation hAlloc = (*pOwnAllocationsVector)[ownAllocIndex];
4469  hAlloc->OwnAllocUnmapPersistentlyMappedMemory(m_hDevice);
4470  }
4471  }
4472 
4473  // Process normal Allocations.
4474  {
4475  VmaMutexLock lock(m_BlocksMutex[memTypeIndex], m_UseMutex);
4476  VmaBlockVector* pBlockVector = m_pBlockVectors[memTypeIndex][VMA_BLOCK_VECTOR_TYPE_MAPPED];
4477  pBlockVector->UnmapPersistentlyMappedMemory();
4478  }
4479  }
4480  }
4481  }
4482  }
4483 }
4484 
4485 VkResult VmaAllocator_T::MapPersistentlyMappedMemory()
4486 {
4487  VMA_ASSERT(m_UnmapPersistentlyMappedMemoryCounter > 0);
4488  if(--m_UnmapPersistentlyMappedMemoryCounter == 0)
4489  {
4490  VkResult finalResult = VK_SUCCESS;
4491  if(m_PhysicalDeviceProperties.vendorID == VMA_VENDOR_ID_AMD)
4492  {
4493  for(size_t memTypeIndex = 0; memTypeIndex < m_MemProps.memoryTypeCount; ++memTypeIndex)
4494  {
4495  const VkMemoryPropertyFlags memFlags = m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
4496  if((memFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0 &&
4497  (memFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
4498  {
4499  // Process OwnAllocations.
4500  {
4501  VmaMutexLock lock(m_OwnAllocationsMutex[memTypeIndex], m_UseMutex);
4502  AllocationVectorType* pAllocationsVector = m_pOwnAllocations[memTypeIndex][VMA_BLOCK_VECTOR_TYPE_MAPPED];
4503  for(size_t ownAllocIndex = 0, ownAllocCount = pAllocationsVector->size(); ownAllocIndex < ownAllocCount; ++ownAllocIndex)
4504  {
4505  VmaAllocation hAlloc = (*pAllocationsVector)[ownAllocIndex];
4506  hAlloc->OwnAllocMapPersistentlyMappedMemory(m_hDevice);
4507  }
4508  }
4509 
4510  // Process normal Allocations.
4511  {
4512  VmaMutexLock lock(m_BlocksMutex[memTypeIndex], m_UseMutex);
4513  VmaBlockVector* pBlockVector = m_pBlockVectors[memTypeIndex][VMA_BLOCK_VECTOR_TYPE_MAPPED];
4514  VkResult localResult = pBlockVector->MapPersistentlyMappedMemory();
4515  if(localResult != VK_SUCCESS)
4516  {
4517  finalResult = localResult;
4518  }
4519  }
4520  }
4521  }
4522  }
4523  return finalResult;
4524  }
4525  else
4526  return VK_SUCCESS;
4527 }
4528 
4529 VkResult VmaAllocator_T::Defragment(
4530  VmaAllocation* pAllocations,
4531  size_t allocationCount,
4532  VkBool32* pAllocationsChanged,
4533  const VmaDefragmentationInfo* pDefragmentationInfo,
4534  VmaDefragmentationStats* pDefragmentationStats)
4535 {
4536  if(pAllocationsChanged != VMA_NULL)
4537  {
4538  memset(pAllocationsChanged, 0, allocationCount * sizeof(*pAllocationsChanged));
4539  }
4540  if(pDefragmentationStats != VMA_NULL)
4541  {
4542  memset(pDefragmentationStats, 0, sizeof(*pDefragmentationStats));
4543  }
4544 
4545  if(m_UnmapPersistentlyMappedMemoryCounter > 0)
4546  {
4547  VMA_DEBUG_LOG("ERROR: Cannot defragment when inside vmaUnmapPersistentlyMappedMemory.");
4548  return VK_ERROR_MEMORY_MAP_FAILED;
4549  }
4550 
4551  // Initialize defragmentators per memory type.
4552  const VkDeviceSize bufferImageGranularity = GetBufferImageGranularity();
4553  VmaDefragmentator* pDefragmentators[VK_MAX_MEMORY_TYPES][VMA_BLOCK_VECTOR_TYPE_COUNT];
4554  memset(pDefragmentators, 0, sizeof(pDefragmentators));
4555  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
4556  {
4557  // Only HOST_VISIBLE memory types can be defragmented.
4558  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
4559  {
4560  for(uint32_t blockVectorType = 0; blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorType)
4561  {
4562  pDefragmentators[memTypeIndex][blockVectorType] = vma_new(this, VmaDefragmentator)(
4563  m_hDevice,
4564  GetAllocationCallbacks(),
4565  bufferImageGranularity,
4566  memTypeIndex,
4567  (VMA_BLOCK_VECTOR_TYPE)blockVectorType);
4568  }
4569  }
4570  }
4571 
4572  // Dispatch pAllocations among defragmentators.
4573  for(size_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
4574  {
4575  VmaAllocation hAlloc = pAllocations[allocIndex];
4576  VMA_ASSERT(hAlloc);
4577  if(hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK)
4578  {
4579  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
4580  // Only HOST_VISIBLE memory types can be defragmented.
4581  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
4582  {
4583  const VMA_BLOCK_VECTOR_TYPE blockVectorType = hAlloc->GetBlockVectorType();
4584  VkBool32* pChanged = (pAllocationsChanged != VMA_NULL) ?
4585  &pAllocationsChanged[allocIndex] : VMA_NULL;
4586  pDefragmentators[memTypeIndex][blockVectorType]->AddAllocation(hAlloc, pChanged);
4587  }
4588  // else: skip this allocation, cannot move it.
4589  }
4590  // else ALLOCATION_TYPE_OWN: skip this allocation, nothing to defragment.
4591  }
4592 
4593  VkResult result = VK_SUCCESS;
4594 
4595  // Main processing.
4596  VkDeviceSize maxBytesToMove = VK_WHOLE_SIZE;
4597  uint32_t maxAllocationsToMove = UINT32_MAX;
4598  if(pDefragmentationInfo != VMA_NULL)
4599  {
4600  maxBytesToMove = pDefragmentationInfo->maxBytesToMove;
4601  maxAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
4602  }
4603  for(uint32_t memTypeIndex = 0;
4604  (memTypeIndex < GetMemoryTypeCount()) && (result == VK_SUCCESS);
4605  ++memTypeIndex)
4606  {
4607  // Only HOST_VISIBLE memory types can be defragmented.
4608  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
4609  {
4610  VmaMutexLock lock(m_BlocksMutex[memTypeIndex], m_UseMutex);
4611 
4612  for(uint32_t blockVectorType = 0;
4613  (blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT) && (result == VK_SUCCESS);
4614  ++blockVectorType)
4615  {
4616  VmaBlockVector* pBlockVector = m_pBlockVectors[memTypeIndex][blockVectorType];
4617 
4618  // Defragment.
4619  result = pDefragmentators[memTypeIndex][blockVectorType]->Defragment(pBlockVector, maxBytesToMove, maxAllocationsToMove);
4620 
4621  // Accumulate statistics.
4622  if(pDefragmentationStats != VMA_NULL)
4623  {
4624  const VkDeviceSize bytesMoved = pDefragmentators[memTypeIndex][blockVectorType]->GetBytesMoved();
4625  const uint32_t allocationsMoved = pDefragmentators[memTypeIndex][blockVectorType]->GetAllocationsMoved();
4626  pDefragmentationStats->bytesMoved += bytesMoved;
4627  pDefragmentationStats->allocationsMoved += allocationsMoved;
4628  VMA_ASSERT(bytesMoved <= maxBytesToMove);
4629  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
4630  maxBytesToMove -= bytesMoved;
4631  maxAllocationsToMove -= allocationsMoved;
4632  }
4633 
4634  // Free empty blocks.
4635  for(size_t blockIndex = pBlockVector->m_Blocks.size(); blockIndex--; )
4636  {
4637  VmaBlock* pBlock = pBlockVector->m_Blocks[blockIndex];
4638  if(pBlock->IsEmpty())
4639  {
4640  if(pDefragmentationStats != VMA_NULL)
4641  {
4642  ++pDefragmentationStats->deviceMemoryBlocksFreed;
4643  pDefragmentationStats->bytesFreed += pBlock->m_Size;
4644  }
4645 
4646  VectorRemove(pBlockVector->m_Blocks, blockIndex);
4647  pBlock->Destroy(this);
4648  vma_delete(this, pBlock);
4649  }
4650  }
4651 
4652  // All block vector types processed: we can be sure that all empty blocks have been freed.
4653  if(blockVectorType == VMA_BLOCK_VECTOR_TYPE_COUNT - 1)
4654  {
4655  m_HasEmptyBlock[memTypeIndex] = false;
4656  }
4657  }
4658  }
4659  }
4660 
4661  // Destroy defragmentators.
4662  for(uint32_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
4663  {
4664  for(size_t blockVectorType = VMA_BLOCK_VECTOR_TYPE_COUNT; blockVectorType--; )
4665  {
4666  vma_delete(this, pDefragmentators[memTypeIndex][blockVectorType]);
4667  }
4668  }
4669 
4670  return result;
4671 }
4672 
4673 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
4674 {
4675  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
4676  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
4677  pAllocationInfo->offset = hAllocation->GetOffset();
4678  pAllocationInfo->size = hAllocation->GetSize();
4679  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
4680  pAllocationInfo->pUserData = hAllocation->GetUserData();
4681 }
4682 
4683 void VmaAllocator_T::FreeOwnMemory(VmaAllocation allocation)
4684 {
4685  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_OWN);
4686 
4687  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
4688  {
4689  VmaMutexLock lock(m_OwnAllocationsMutex[memTypeIndex], m_UseMutex);
4690  AllocationVectorType* const pOwnAllocations = m_pOwnAllocations[memTypeIndex][allocation->GetBlockVectorType()];
4691  VMA_ASSERT(pOwnAllocations);
4692  VmaAllocation* const pOwnAllocationsBeg = pOwnAllocations->data();
4693  VmaAllocation* const pOwnAllocationsEnd = pOwnAllocationsBeg + pOwnAllocations->size();
4694  VmaAllocation* const pOwnAllocationIt = VmaBinaryFindFirstNotLess(
4695  pOwnAllocationsBeg,
4696  pOwnAllocationsEnd,
4697  allocation,
4698  VmaPointerLess());
4699  if(pOwnAllocationIt != pOwnAllocationsEnd)
4700  {
4701  const size_t ownAllocationIndex = pOwnAllocationIt - pOwnAllocationsBeg;
4702  VectorRemove(*pOwnAllocations, ownAllocationIndex);
4703  }
4704  else
4705  {
4706  VMA_ASSERT(0);
4707  }
4708  }
4709 
4710  VkDeviceMemory hMemory = allocation->GetMemory();
4711 
4712  // Callback.
4713  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
4714  {
4715  (*m_DeviceMemoryCallbacks.pfnFree)(this, memTypeIndex, hMemory, allocation->GetSize());
4716  }
4717 
4718  if(allocation->GetMappedData() != VMA_NULL)
4719  {
4720  vkUnmapMemory(m_hDevice, hMemory);
4721  }
4722 
4723  vkFreeMemory(m_hDevice, hMemory, GetAllocationCallbacks());
4724 
4725  VMA_DEBUG_LOG(" Freed OwnMemory MemoryTypeIndex=%u", memTypeIndex);
4726 
4727  vma_delete(this, allocation);
4728 }
4729 
4730 #if VMA_STATS_STRING_ENABLED
4731 
4732 void VmaAllocator_T::PrintDetailedMap(VmaStringBuilder& sb)
4733 {
4734  bool ownAllocationsStarted = false;
4735  for(size_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
4736  {
4737  VmaMutexLock ownAllocationsLock(m_OwnAllocationsMutex[memTypeIndex], m_UseMutex);
4738  for(uint32_t blockVectorType = 0; blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorType)
4739  {
4740  AllocationVectorType* const pOwnAllocVector = m_pOwnAllocations[memTypeIndex][blockVectorType];
4741  VMA_ASSERT(pOwnAllocVector);
4742  if(pOwnAllocVector->empty() == false)
4743  {
4744  if(ownAllocationsStarted)
4745  {
4746  sb.Add(",\n\t\"Type ");
4747  }
4748  else
4749  {
4750  sb.Add(",\n\"OwnAllocations\": {\n\t\"Type ");
4751  ownAllocationsStarted = true;
4752  }
4753  sb.AddNumber(memTypeIndex);
4754  if(blockVectorType == VMA_BLOCK_VECTOR_TYPE_MAPPED)
4755  {
4756  sb.Add(" Mapped");
4757  }
4758  sb.Add("\": [");
4759 
4760  for(size_t i = 0; i < pOwnAllocVector->size(); ++i)
4761  {
4762  const VmaAllocation hAlloc = (*pOwnAllocVector)[i];
4763  if(i > 0)
4764  {
4765  sb.Add(",\n\t\t{ \"Size\": ");
4766  }
4767  else
4768  {
4769  sb.Add("\n\t\t{ \"Size\": ");
4770  }
4771  sb.AddNumber(hAlloc->GetSize());
4772  sb.Add(", \"Type\": ");
4773  sb.AddString(VMA_SUBALLOCATION_TYPE_NAMES[hAlloc->GetSuballocationType()]);
4774  sb.Add(" }");
4775  }
4776 
4777  sb.Add("\n\t]");
4778  }
4779  }
4780  }
4781  if(ownAllocationsStarted)
4782  {
4783  sb.Add("\n}");
4784  }
4785 
4786  {
4787  bool allocationsStarted = false;
4788  for(size_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
4789  {
4790  VmaMutexLock globalAllocationsLock(m_BlocksMutex[memTypeIndex], m_UseMutex);
4791  for(uint32_t blockVectorType = 0; blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorType)
4792  {
4793  if(m_pBlockVectors[memTypeIndex][blockVectorType]->IsEmpty() == false)
4794  {
4795  if(allocationsStarted)
4796  {
4797  sb.Add(",\n\t\"Type ");
4798  }
4799  else
4800  {
4801  sb.Add(",\n\"Allocations\": {\n\t\"Type ");
4802  allocationsStarted = true;
4803  }
4804  sb.AddNumber(memTypeIndex);
4805  if(blockVectorType == VMA_BLOCK_VECTOR_TYPE_MAPPED)
4806  {
4807  sb.Add(" Mapped");
4808  }
4809  sb.Add("\": [");
4810 
4811  m_pBlockVectors[memTypeIndex][blockVectorType]->PrintDetailedMap(sb);
4812 
4813  sb.Add("\n\t]");
4814  }
4815  }
4816  }
4817  if(allocationsStarted)
4818  {
4819  sb.Add("\n}");
4820  }
4821  }
4822 }
4823 
4824 #endif // #if VMA_STATS_STRING_ENABLED
4825 
4826 static VkResult AllocateMemoryForImage(
4827  VmaAllocator allocator,
4828  VkImage image,
4829  const VmaMemoryRequirements* pMemoryRequirements,
4830  VmaSuballocationType suballocType,
4831  VmaAllocation* pAllocation)
4832 {
4833  VMA_ASSERT(allocator && (image != VK_NULL_HANDLE) && pMemoryRequirements && pAllocation);
4834 
4835  VkMemoryRequirements vkMemReq = {};
4836  vkGetImageMemoryRequirements(allocator->m_hDevice, image, &vkMemReq);
4837 
4838  return allocator->AllocateMemory(
4839  vkMemReq,
4840  *pMemoryRequirements,
4841  suballocType,
4842  pAllocation);
4843 }
4844 
4846 // Public interface
4847 
4848 VkResult vmaCreateAllocator(
4849  const VmaAllocatorCreateInfo* pCreateInfo,
4850  VmaAllocator* pAllocator)
4851 {
4852  VMA_ASSERT(pCreateInfo && pAllocator);
4853  VMA_DEBUG_LOG("vmaCreateAllocator");
4854  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
4855  return VK_SUCCESS;
4856 }
4857 
4858 void vmaDestroyAllocator(
4859  VmaAllocator allocator)
4860 {
4861  if(allocator != VK_NULL_HANDLE)
4862  {
4863  VMA_DEBUG_LOG("vmaDestroyAllocator");
4864  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
4865  vma_delete(&allocationCallbacks, allocator);
4866  }
4867 }
4868 
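/* Editor's note: a minimal usage sketch for the two functions above - not part
of the original source. Assumes `physicalDevice` and `device` are valid handles
created by the application.

\code
VmaAllocatorCreateInfo allocatorInfo = {};
allocatorInfo.physicalDevice = physicalDevice;
allocatorInfo.device = device;

VmaAllocator allocator = VK_NULL_HANDLE;
VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
if(res == VK_SUCCESS)
{
    // ... create buffers, images and allocations ...
    vmaDestroyAllocator(allocator);
}
\endcode
*/
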
4869 void vmaGetPhysicalDeviceProperties(
4870  VmaAllocator allocator,
4871  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
4872 {
4873  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
4874  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
4875 }
4876 
4877 void vmaGetMemoryProperties(
4878  VmaAllocator allocator,
4879  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
4880 {
4881  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
4882  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
4883 }
4884 
4885 void vmaGetMemoryTypeProperties(
4886  VmaAllocator allocator,
4887  uint32_t memoryTypeIndex,
4888  VkMemoryPropertyFlags* pFlags)
4889 {
4890  VMA_ASSERT(allocator && pFlags);
4891  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
4892  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
4893 }
4894 
4895 void vmaCalculateStats(
4896  VmaAllocator allocator,
4897  VmaStats* pStats)
4898 {
4899  VMA_ASSERT(allocator && pStats);
4900  VMA_DEBUG_GLOBAL_MUTEX_LOCK
4901  allocator->CalculateStats(pStats);
4902 }
4903 
4904 #if VMA_STATS_STRING_ENABLED
4905 
4906 void vmaBuildStatsString(
4907  VmaAllocator allocator,
4908  char** ppStatsString,
4909  VkBool32 detailedMap)
4910 {
4911  VMA_ASSERT(allocator && ppStatsString);
4912  VMA_DEBUG_GLOBAL_MUTEX_LOCK
4913 
4914  VmaStringBuilder sb(allocator);
4915  {
4916  VmaStats stats;
4917  allocator->CalculateStats(&stats);
4918 
4919  sb.Add("{\n\"Total\": ");
4920  VmaPrintStatInfo(sb, stats.total);
4921 
4922  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
4923  {
4924  sb.Add(",\n\"Heap ");
4925  sb.AddNumber(heapIndex);
4926  sb.Add("\": {\n\t\"Size\": ");
4927  sb.AddNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
4928  sb.Add(",\n\t\"Flags\": ");
4929  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
4930  {
4931  sb.AddString("DEVICE_LOCAL");
4932  }
4933  else
4934  {
4935  sb.AddString("");
4936  }
4937  if(stats.memoryHeap[heapIndex].AllocationCount > 0)
4938  {
4939  sb.Add(",\n\t\"Stats\": ");
4940  VmaPrintStatInfo(sb, stats.memoryHeap[heapIndex]);
4941  }
4942 
4943  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
4944  {
4945  if(allocator->m_MemProps.memoryTypes[typeIndex].heapIndex == heapIndex)
4946  {
4947  sb.Add(",\n\t\"Type ");
4948  sb.AddNumber(typeIndex);
4949  sb.Add("\": {\n\t\t\"Flags\": \"");
4950  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
4951  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
4952  {
4953  sb.Add(" DEVICE_LOCAL");
4954  }
4955  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
4956  {
4957  sb.Add(" HOST_VISIBLE");
4958  }
4959  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
4960  {
4961  sb.Add(" HOST_COHERENT");
4962  }
4963  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
4964  {
4965  sb.Add(" HOST_CACHED");
4966  }
4967  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
4968  {
4969  sb.Add(" LAZILY_ALLOCATED");
4970  }
4971  sb.Add("\"");
4972  if(stats.memoryType[typeIndex].AllocationCount > 0)
4973  {
4974  sb.Add(",\n\t\t\"Stats\": ");
4975  VmaPrintStatInfo(sb, stats.memoryType[typeIndex]);
4976  }
4977  sb.Add("\n\t}");
4978  }
4979  }
4980  sb.Add("\n}");
4981  }
4982  if(detailedMap == VK_TRUE)
4983  {
4984  allocator->PrintDetailedMap(sb);
4985  }
4986  sb.Add("\n}\n");
4987  }
4988 
4989  const size_t len = sb.GetLength();
4990  char* const pChars = vma_new_array(allocator, char, len + 1);
4991  if(len > 0)
4992  {
4993  memcpy(pChars, sb.GetData(), len);
4994  }
4995  pChars[len] = '\0';
4996  *ppStatsString = pChars;
4997 }
4998 
4999 void vmaFreeStatsString(
5000  VmaAllocator allocator,
5001  char* pStatsString)
5002 {
5003  if(pStatsString != VMA_NULL)
5004  {
5005  VMA_ASSERT(allocator);
5006  size_t len = strlen(pStatsString);
5007  vma_delete_array(allocator, pStatsString, len + 1);
5008  }
5009 }
5010 
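/* Editor's note: illustrative sketch, not part of the original source. Dumps
the JSON statistics built above; assumes <cstdio> is available.

\code
char* statsString = nullptr;
vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE = include detailed map
printf("%s\n", statsString);
vmaFreeStatsString(allocator, statsString);
\endcode
*/
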
5011 #endif // #if VMA_STATS_STRING_ENABLED
5012 
5015 VkResult vmaFindMemoryTypeIndex(
5016  VmaAllocator allocator,
5017  uint32_t memoryTypeBits,
5018  const VmaMemoryRequirements* pMemoryRequirements,
5019  uint32_t* pMemoryTypeIndex)
5020 {
5021  VMA_ASSERT(allocator != VK_NULL_HANDLE);
5022  VMA_ASSERT(pMemoryRequirements != VMA_NULL);
5023  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
5024 
5025  uint32_t requiredFlags = pMemoryRequirements->requiredFlags;
5026  uint32_t preferredFlags = pMemoryRequirements->preferredFlags;
5027  if(preferredFlags == 0)
5028  {
5029  preferredFlags = requiredFlags;
5030  }
5031  // preferredFlags, if not 0, must be a superset of requiredFlags.
5032  VMA_ASSERT((requiredFlags & ~preferredFlags) == 0);
5033 
5034  // Convert usage to requiredFlags and preferredFlags.
5035  switch(pMemoryRequirements->usage)
5036  {
5037  case VMA_MEMORY_USAGE_UNKNOWN:
5038  break;
5039  case VMA_MEMORY_USAGE_GPU_ONLY:
5040  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
5041  break;
5042  case VMA_MEMORY_USAGE_CPU_ONLY:
5043  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
5044  break;
5045  case VMA_MEMORY_USAGE_CPU_TO_GPU:
5046  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
5047  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
5048  break;
5049  case VMA_MEMORY_USAGE_GPU_TO_CPU:
5050  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
5051  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
5052  break;
5053  default:
5054  break;
5055  }
5056 
5057  if((pMemoryRequirements->flags & VMA_MEMORY_REQUIREMENT_PERSISTENT_MAP_BIT) != 0)
5058  {
5059  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
5060  }
5061 
5062  *pMemoryTypeIndex = UINT32_MAX;
5063  uint32_t minCost = UINT32_MAX;
5064  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
5065  memTypeIndex < allocator->GetMemoryTypeCount();
5066  ++memTypeIndex, memTypeBit <<= 1)
5067  {
5068  // This memory type is acceptable according to memoryTypeBits bitmask.
5069  if((memTypeBit & memoryTypeBits) != 0)
5070  {
5071  const VkMemoryPropertyFlags currFlags =
5072  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
5073  // This memory type contains requiredFlags.
5074  if((requiredFlags & ~currFlags) == 0)
5075  {
5076  // Calculate cost as number of bits from preferredFlags not present in this memory type.
5077  uint32_t currCost = CountBitsSet(preferredFlags & ~currFlags);
5078  // Remember memory type with lowest cost.
5079  if(currCost < minCost)
5080  {
5081  *pMemoryTypeIndex = memTypeIndex;
5082  if(currCost == 0)
5083  {
5084  return VK_SUCCESS;
5085  }
5086  minCost = currCost;
5087  }
5088  }
5089  }
5090  }
5091  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
5092 }
5093 
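/* Editor's note: illustrative sketch, not part of the original source, showing
how the cost-based search above is typically driven. `memoryTypeBits` is assumed
to come from a vkGet*MemoryRequirements query.

\code
VmaMemoryRequirements memReq = {};
memReq.usage = VMA_MEMORY_USAGE_GPU_ONLY;

uint32_t memTypeIndex = UINT32_MAX;
VkResult res = vmaFindMemoryTypeIndex(allocator, memoryTypeBits, &memReq, &memTypeIndex);
// On VK_SUCCESS, memTypeIndex is the type that satisfies all requiredFlags and
// misses the fewest preferredFlags bits.
\endcode
*/
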
5094 VkResult vmaAllocateMemory(
5095  VmaAllocator allocator,
5096  const VkMemoryRequirements* pVkMemoryRequirements,
5097  const VmaMemoryRequirements* pVmaMemoryRequirements,
5098  VmaAllocation* pAllocation,
5099  VmaAllocationInfo* pAllocationInfo)
5100 {
5101  VMA_ASSERT(allocator && pVkMemoryRequirements && pVmaMemoryRequirements && pAllocation);
5102 
5103  VMA_DEBUG_LOG("vmaAllocateMemory");
5104 
5105  VMA_DEBUG_GLOBAL_MUTEX_LOCK
5106 
5107  VkResult res = allocator->AllocateMemory(
5108  *pVkMemoryRequirements,
5109  *pVmaMemoryRequirements,
5110  VMA_SUBALLOCATION_TYPE_UNKNOWN,
5111  pAllocation);
5112  if(res == VK_SUCCESS && pAllocationInfo != VMA_NULL)
5113  {
5114  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
5115  }
5116  return res;
5117 }
5118 
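/* Editor's note: illustrative sketch, not part of the original source.
`vkMemReq` is assumed to be filled by vkGetBufferMemoryRequirements or
vkGetImageMemoryRequirements.

\code
VmaMemoryRequirements memReq = {};
memReq.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VmaAllocation allocation = VK_NULL_HANDLE;
VmaAllocationInfo allocInfo = {};
VkResult res = vmaAllocateMemory(allocator, &vkMemReq, &memReq, &allocation, &allocInfo);
if(res == VK_SUCCESS)
{
    // Bind allocInfo.deviceMemory at allocInfo.offset, use it, then:
    vmaFreeMemory(allocator, allocation);
}
\endcode
*/
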
5119 VkResult vmaAllocateMemoryForBuffer(
5120  VmaAllocator allocator,
5121  VkBuffer buffer,
5122  const VmaMemoryRequirements* pMemoryRequirements,
5123  VmaAllocation* pAllocation,
5124  VmaAllocationInfo* pAllocationInfo)
5125 {
5126  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pMemoryRequirements && pAllocation);
5127 
5128  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
5129 
5130  VMA_DEBUG_GLOBAL_MUTEX_LOCK
5131 
5132  VkMemoryRequirements vkMemReq = {};
5133  vkGetBufferMemoryRequirements(allocator->m_hDevice, buffer, &vkMemReq);
5134 
5135  VkResult res = allocator->AllocateMemory(
5136  vkMemReq,
5137  *pMemoryRequirements,
5138  VMA_SUBALLOCATION_TYPE_BUFFER,
5139  pAllocation);
5140  if(res == VK_SUCCESS && pAllocationInfo != VMA_NULL)
5141  {
5142  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
5143  }
5144  return res;
5145 }
5146 
5147 VkResult vmaAllocateMemoryForImage(
5148  VmaAllocator allocator,
5149  VkImage image,
5150  const VmaMemoryRequirements* pMemoryRequirements,
5151  VmaAllocation* pAllocation,
5152  VmaAllocationInfo* pAllocationInfo)
5153 {
5154  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pMemoryRequirements && pAllocation);
5155 
5156  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
5157 
5158  VMA_DEBUG_GLOBAL_MUTEX_LOCK
5159 
5160  VkResult res = AllocateMemoryForImage(
5161  allocator,
5162  image,
5163  pMemoryRequirements,
5164  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
5165  pAllocation);
5166  if(res == VK_SUCCESS && pAllocationInfo != VMA_NULL)
5167  {
5168  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
5169  }
5170  return res;
5171 }
5172 
5173 void vmaFreeMemory(
5174  VmaAllocator allocator,
5175  VmaAllocation allocation)
5176 {
5177  VMA_ASSERT(allocator && allocation);
5178 
5179  VMA_DEBUG_LOG("vmaFreeMemory");
5180 
5181  VMA_DEBUG_GLOBAL_MUTEX_LOCK
5182 
5183  allocator->FreeMemory(allocation);
5184 }
5185 
5186 void vmaGetAllocationInfo(
5187  VmaAllocator allocator,
5188  VmaAllocation allocation,
5189  VmaAllocationInfo* pAllocationInfo)
5190 {
5191  VMA_ASSERT(allocator && allocation && pAllocationInfo);
5192 
5193  VMA_DEBUG_GLOBAL_MUTEX_LOCK
5194 
5195  allocator->GetAllocationInfo(allocation, pAllocationInfo);
5196 }
5197 
5198 void vmaSetAllocationUserData(
5199  VmaAllocator allocator,
5200  VmaAllocation allocation,
5201  void* pUserData)
5202 {
5203  VMA_ASSERT(allocator && allocation);
5204 
5205  VMA_DEBUG_GLOBAL_MUTEX_LOCK
5206 
5207  allocation->SetUserData(pUserData);
5208 }
5209 
5210 VkResult vmaMapMemory(
5211  VmaAllocator allocator,
5212  VmaAllocation allocation,
5213  void** ppData)
5214 {
5215  VMA_ASSERT(allocator && allocation && ppData);
5216 
5217  VMA_DEBUG_GLOBAL_MUTEX_LOCK
5218 
5219  return vkMapMemory(allocator->m_hDevice, allocation->GetMemory(),
5220  allocation->GetOffset(), allocation->GetSize(), 0, ppData);
5221 }
5222 
5223 void vmaUnmapMemory(
5224  VmaAllocator allocator,
5225  VmaAllocation allocation)
5226 {
5227  VMA_ASSERT(allocator && allocation);
5228 
5229  VMA_DEBUG_GLOBAL_MUTEX_LOCK
5230 
5231  vkUnmapMemory(allocator->m_hDevice, allocation->GetMemory());
5232 }
5233 
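/* Editor's note: illustrative sketch, not part of the original source. Copies
`data`/`dataSize` (assumed caller-provided) into a HOST_VISIBLE allocation.

\code
void* pMapped = nullptr;
VkResult res = vmaMapMemory(allocator, allocation, &pMapped);
if(res == VK_SUCCESS)
{
    memcpy(pMapped, data, (size_t)dataSize);
    vmaUnmapMemory(allocator, allocation);
}
\endcode
*/
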
5234 void vmaUnmapPersistentlyMappedMemory(VmaAllocator allocator)
5235 {
5236  VMA_ASSERT(allocator);
5237 
5238  VMA_DEBUG_GLOBAL_MUTEX_LOCK
5239 
5240  allocator->UnmapPersistentlyMappedMemory();
5241 }
5242 
5243 VkResult vmaMapPersistentlyMappedMemory(VmaAllocator allocator)
5244 {
5245  VMA_ASSERT(allocator);
5246 
5247  VMA_DEBUG_GLOBAL_MUTEX_LOCK
5248 
5249  return allocator->MapPersistentlyMappedMemory();
5250 }
5251 
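/* Editor's note: illustrative sketch, not part of the original source. These
two calls bracket a period when persistently mapped memory must be temporarily
unmapped (the vendor-specific handling above targets DEVICE_LOCAL + HOST_VISIBLE
memory on AMD hardware).

\code
vmaUnmapPersistentlyMappedMemory(allocator);
// ... work that requires the memory to be unmapped ...
VkResult res = vmaMapPersistentlyMappedMemory(allocator);
// After success, pointers previously returned in VmaAllocationInfo::pMappedData
// may have changed - query them again with vmaGetAllocationInfo.
\endcode
*/
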
5252 VkResult vmaDefragment(
5253  VmaAllocator allocator,
5254  VmaAllocation* pAllocations,
5255  size_t allocationCount,
5256  VkBool32* pAllocationsChanged,
5257  const VmaDefragmentationInfo *pDefragmentationInfo,
5258  VmaDefragmentationStats* pDefragmentationStats)
5259 {
5260  VMA_ASSERT(allocator && pAllocations);
5261 
5262  VMA_DEBUG_LOG("vmaDefragment");
5263 
5264  VMA_DEBUG_GLOBAL_MUTEX_LOCK
5265 
5266  return allocator->Defragment(pAllocations, allocationCount, pAllocationsChanged, pDefragmentationInfo, pDefragmentationStats);
5267 }
5268 
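/* Editor's note: illustrative sketch, not part of the original source.
`allocations`/`allocationCount` are assumed to be the application's movable,
HOST_VISIBLE allocations; assumes <vector> is available.

\code
std::vector<VkBool32> changed(allocationCount);
VmaDefragmentationStats stats = {};
VkResult res = vmaDefragment(
    allocator, allocations, allocationCount, changed.data(),
    nullptr, // no limits on bytes/allocations moved
    &stats);
if(res == VK_SUCCESS)
{
    for(size_t i = 0; i < allocationCount; ++i)
    {
        if(changed[i])
        {
            // Allocation i moved: recreate/rebind any buffer or image that
            // used it and refresh cached VmaAllocationInfo.
        }
    }
}
\endcode
*/
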
5269 VkResult vmaCreateBuffer(
5270  VmaAllocator allocator,
5271  const VkBufferCreateInfo* pCreateInfo,
5272  const VmaMemoryRequirements* pMemoryRequirements,
5273  VkBuffer* pBuffer,
5274  VmaAllocation* pAllocation,
5275  VmaAllocationInfo* pAllocationInfo)
5276 {
5277  VMA_ASSERT(allocator && pCreateInfo && pMemoryRequirements && pBuffer && pAllocation);
5278 
5279  VMA_DEBUG_LOG("vmaCreateBuffer");
5280 
5281  VMA_DEBUG_GLOBAL_MUTEX_LOCK
5282 
5283  // 1. Create VkBuffer.
5284  VkResult res = vkCreateBuffer(allocator->m_hDevice, pCreateInfo, allocator->GetAllocationCallbacks(), pBuffer);
5285  if(res >= 0)
5286  {
5287  // 2. vkGetBufferMemoryRequirements.
5288  VkMemoryRequirements vkMemReq = {};
5289  vkGetBufferMemoryRequirements(allocator->m_hDevice, *pBuffer, &vkMemReq);
5290 
5291  // 3. Allocate memory using allocator.
5292  res = allocator->AllocateMemory(
5293  vkMemReq,
5294  *pMemoryRequirements,
5295  VMA_SUBALLOCATION_TYPE_BUFFER,
5296  pAllocation);
5297  if(res >= 0)
5298  {
5299  // 4. Bind buffer with memory.
5300  res = vkBindBufferMemory(allocator->m_hDevice, *pBuffer, (*pAllocation)->GetMemory(), (*pAllocation)->GetOffset());
5301  if(res >= 0)
5302  {
5303  // All steps succeeded.
5304  if(pAllocationInfo != VMA_NULL)
5305  {
5306  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
5307  }
5308  return VK_SUCCESS;
5309  }
5310  allocator->FreeMemory(*pAllocation);
5311  return res;
5312  }
5313  vkDestroyBuffer(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
5314  return res;
5315  }
5316  return res;
5317 }
5318 
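/* Editor's note: illustrative sketch, not part of the original source, of the
create-allocate-bind path implemented above. Size and usage flags are placeholders.

\code
VkBufferCreateInfo bufInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufInfo.size = 65536;
bufInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

VmaMemoryRequirements memReq = {};
memReq.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VkBuffer buffer = VK_NULL_HANDLE;
VmaAllocation allocation = VK_NULL_HANDLE;
VkResult res = vmaCreateBuffer(allocator, &bufInfo, &memReq, &buffer, &allocation, nullptr);
if(res == VK_SUCCESS)
{
    // ... use buffer ...
    vmaDestroyBuffer(allocator, buffer, allocation);
}
\endcode
*/
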
5319 void vmaDestroyBuffer(
5320  VmaAllocator allocator,
5321  VkBuffer buffer,
5322  VmaAllocation allocation)
5323 {
5324  if(buffer != VK_NULL_HANDLE)
5325  {
5326  VMA_ASSERT(allocator);
5327 
5328  VMA_DEBUG_LOG("vmaDestroyBuffer");
5329 
5330  VMA_DEBUG_GLOBAL_MUTEX_LOCK
5331 
5332  vkDestroyBuffer(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
5333 
5334  allocator->FreeMemory(allocation);
5335  }
5336 }
5337 
5338 VkResult vmaCreateImage(
5339  VmaAllocator allocator,
5340  const VkImageCreateInfo* pCreateInfo,
5341  const VmaMemoryRequirements* pMemoryRequirements,
5342  VkImage* pImage,
5343  VmaAllocation* pAllocation,
5344  VmaAllocationInfo* pAllocationInfo)
5345 {
5346  VMA_ASSERT(allocator && pCreateInfo && pMemoryRequirements && pImage && pAllocation);
5347 
5348  VMA_DEBUG_LOG("vmaCreateImage");
5349 
5350  VMA_DEBUG_GLOBAL_MUTEX_LOCK
5351 
5352  // 1. Create VkImage.
5353  VkResult res = vkCreateImage(allocator->m_hDevice, pCreateInfo, allocator->GetAllocationCallbacks(), pImage);
5354  if(res >= 0)
5355  {
5357  VmaSuballocationType suballocType = pCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
5358  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
5359  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
5360 
5361  // 2. Allocate memory using allocator.
5362  res = AllocateMemoryForImage(allocator, *pImage, pMemoryRequirements, suballocType, pAllocation);
5363  if(res >= 0)
5364  {
5365  // 3. Bind image with memory.
5366  res = vkBindImageMemory(allocator->m_hDevice, *pImage, (*pAllocation)->GetMemory(), (*pAllocation)->GetOffset());
5367  if(res >= 0)
5368  {
5369  // All steps succeeded.
5370  if(pAllocationInfo != VMA_NULL)
5371  {
5372  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
5373  }
5374  return VK_SUCCESS;
5375  }
5376  allocator->FreeMemory(*pAllocation);
5377  return res;
5378  }
5379  vkDestroyImage(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
5380  return res;
5381  }
5382  return res;
5383 }
5384 
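/* Editor's note: illustrative sketch, not part of the original source,
mirroring the buffer example; extent, format and usage are placeholders.

\code
VkImageCreateInfo imageInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
imageInfo.imageType = VK_IMAGE_TYPE_2D;
imageInfo.extent = { 1024, 1024, 1 };
imageInfo.mipLevels = 1;
imageInfo.arrayLayers = 1;
imageInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
imageInfo.tiling = VK_IMAGE_TILING_OPTIMAL; // selects VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL above
imageInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
imageInfo.samples = VK_SAMPLE_COUNT_1_BIT;

VmaMemoryRequirements memReq = {};
memReq.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VkImage image = VK_NULL_HANDLE;
VmaAllocation allocation = VK_NULL_HANDLE;
VkResult res = vmaCreateImage(allocator, &imageInfo, &memReq, &image, &allocation, nullptr);
if(res == VK_SUCCESS)
{
    // ... use image ...
    vmaDestroyImage(allocator, image, allocation);
}
\endcode
*/
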
5385 void vmaDestroyImage(
5386  VmaAllocator allocator,
5387  VkImage image,
5388  VmaAllocation allocation)
5389 {
5390  if(image != VK_NULL_HANDLE)
5391  {
5392  VMA_ASSERT(allocator);
5393 
5394  VMA_DEBUG_LOG("vmaDestroyImage");
5395 
5396  VMA_DEBUG_GLOBAL_MUTEX_LOCK
5397 
5398  vkDestroyImage(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
5399 
5400  allocator->FreeMemory(allocation);
5401  }
5402 }
5403 
5404 #endif // #ifdef VMA_IMPLEMENTATION
1 //
2 // Copyright (c) 2017 Advanced Micro Devices, Inc. All rights reserved.
3 //
4 // Permission is hereby granted, free of charge, to any person obtaining a copy
5 // of this software and associated documentation files (the "Software"), to deal
6 // in the Software without restriction, including without limitation the rights
7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 // copies of the Software, and to permit persons to whom the Software is
9 // furnished to do so, subject to the following conditions:
10 //
11 // The above copyright notice and this permission notice shall be included in
12 // all copies or substantial portions of the Software.
13 //
14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
20 // THE SOFTWARE.
21 //
22 
23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
25 
161 #include <vulkan/vulkan.h>
162 
164 
168 VK_DEFINE_HANDLE(VmaAllocator)
169 
170 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
172  VmaAllocator allocator,
173  uint32_t memoryType,
174  VkDeviceMemory memory,
175  VkDeviceSize size);
177 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
178  VmaAllocator allocator,
179  uint32_t memoryType,
180  VkDeviceMemory memory,
181  VkDeviceSize size);
182 
188 typedef struct VmaDeviceMemoryCallbacks {
194 
196 typedef enum VmaAllocatorFlagBits {
202 
205 typedef VkFlags VmaAllocatorFlags;
206 
209 {
213 
214  VkPhysicalDevice physicalDevice;
216 
217  VkDevice device;
219 
222 
225 
226  const VkAllocationCallbacks* pAllocationCallbacks;
228 
231 
233 VkResult vmaCreateAllocator(
234  const VmaAllocatorCreateInfo* pCreateInfo,
235  VmaAllocator* pAllocator);
236 
239  VmaAllocator allocator);
240 
246  VmaAllocator allocator,
247  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
248 
254  VmaAllocator allocator,
255  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
256 
264  VmaAllocator allocator,
265  uint32_t memoryTypeIndex,
266  VkMemoryPropertyFlags* pFlags);
267 
268 typedef struct VmaStatInfo
269 {
270  uint32_t AllocationCount;
273  VkDeviceSize UsedBytes;
274  VkDeviceSize UnusedBytes;
275  VkDeviceSize SuballocationSizeMin, SuballocationSizeAvg, SuballocationSizeMax;
276  VkDeviceSize UnusedRangeSizeMin, UnusedRangeSizeAvg, UnusedRangeSizeMax;
277 } VmaStatInfo;
278 
280 struct VmaStats
281 {
282  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
283  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
285 };
286 
288 void vmaCalculateStats(
289  VmaAllocator allocator,
290  VmaStats* pStats);
291 
292 #define VMA_STATS_STRING_ENABLED 1
293 
294 #if VMA_STATS_STRING_ENABLED
295 
297 
300  VmaAllocator allocator,
301  char** ppStatsString,
302  VkBool32 detailedMap);
303 
304 void vmaFreeStatsString(
305  VmaAllocator allocator,
306  char* pStatsString);
307 
308 #endif // #if VMA_STATS_STRING_ENABLED
309 
312 
317 typedef enum VmaMemoryUsage
318 {
324 
327 
330 
334 
346 
362 
366 
367 typedef struct VmaMemoryRequirements
368 {
378  VkMemoryPropertyFlags requiredFlags;
383  VkMemoryPropertyFlags preferredFlags;
385  void* pUserData;
387 
402 VkResult vmaFindMemoryTypeIndex(
403  VmaAllocator allocator,
404  uint32_t memoryTypeBits,
405  const VmaMemoryRequirements* pMemoryRequirements,
406  uint32_t* pMemoryTypeIndex);
407 
410 
415 VK_DEFINE_HANDLE(VmaAllocation)
416 
417 
419 typedef struct VmaAllocationInfo {
424  uint32_t memoryType;
431  VkDeviceMemory deviceMemory;
436  VkDeviceSize offset;
441  VkDeviceSize size;
447  void* pMappedData;
452  void* pUserData;
454 
465 VkResult vmaAllocateMemory(
466  VmaAllocator allocator,
467  const VkMemoryRequirements* pVkMemoryRequirements,
468  const VmaMemoryRequirements* pVmaMemoryRequirements,
469  VmaAllocation* pAllocation,
470  VmaAllocationInfo* pAllocationInfo);
471 
479  VmaAllocator allocator,
480  VkBuffer buffer,
481  const VmaMemoryRequirements* pMemoryRequirements,
482  VmaAllocation* pAllocation,
483  VmaAllocationInfo* pAllocationInfo);
484 
487  VmaAllocator allocator,
488  VkImage image,
489  const VmaMemoryRequirements* pMemoryRequirements,
490  VmaAllocation* pAllocation,
491  VmaAllocationInfo* pAllocationInfo);
492 
494 void vmaFreeMemory(
495  VmaAllocator allocator,
496  VmaAllocation allocation);
497 
500  VmaAllocator allocator,
501  VmaAllocation allocation,
502  VmaAllocationInfo* pAllocationInfo);
503 
506  VmaAllocator allocator,
507  VmaAllocation allocation,
508  void* pUserData);
509 
518 VkResult vmaMapMemory(
519  VmaAllocator allocator,
520  VmaAllocation allocation,
521  void** ppData);
522 
523 void vmaUnmapMemory(
524  VmaAllocator allocator,
525  VmaAllocation allocation);
526 
545 void vmaUnmapPersistentlyMappedMemory(VmaAllocator allocator);
546 
554 VkResult vmaMapPersistentlyMappedMemory(VmaAllocator allocator);
555 
557 typedef struct VmaDefragmentationInfo {
562  VkDeviceSize maxBytesToMove;
569 
571 typedef struct VmaDefragmentationStats {
573  VkDeviceSize bytesMoved;
575  VkDeviceSize bytesFreed;
581 
652 VkResult vmaDefragment(
653  VmaAllocator allocator,
654  VmaAllocation* pAllocations,
655  size_t allocationCount,
656  VkBool32* pAllocationsChanged,
657  const VmaDefragmentationInfo *pDefragmentationInfo,
658  VmaDefragmentationStats* pDefragmentationStats);
659 
662 
685 VkResult vmaCreateBuffer(
686  VmaAllocator allocator,
687  const VkBufferCreateInfo* pCreateInfo,
688  const VmaMemoryRequirements* pMemoryRequirements,
689  VkBuffer* pBuffer,
690  VmaAllocation* pAllocation,
691  VmaAllocationInfo* pAllocationInfo);
692 
693 void vmaDestroyBuffer(
694  VmaAllocator allocator,
695  VkBuffer buffer,
696  VmaAllocation allocation);
697 
699 VkResult vmaCreateImage(
700  VmaAllocator allocator,
701  const VkImageCreateInfo* pCreateInfo,
702  const VmaMemoryRequirements* pMemoryRequirements,
703  VkImage* pImage,
704  VmaAllocation* pAllocation,
705  VmaAllocationInfo* pAllocationInfo);
706 
707 void vmaDestroyImage(
708  VmaAllocator allocator,
709  VkImage image,
710  VmaAllocation allocation);
711 
714 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
715 
716 #ifdef VMA_IMPLEMENTATION
717 #undef VMA_IMPLEMENTATION
718 
719 #include <cstdint>
720 #include <cstdlib>
721 
722 /*******************************************************************************
723 CONFIGURATION SECTION
724 
725 Define some of these macros before each #include of this header, or change
726 them here, if you need non-default behavior in your environment.
727 */
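// For example, a build might override some of these macros before including
// this header (MyEngineAssert is a hypothetical engine macro, shown only to
// illustrate the pattern):
//
//   #define VMA_ASSERT(expr) MyEngineAssert(expr)
//   #define VMA_USE_STL_CONTAINERS 1
//   #define VMA_IMPLEMENTATION
//   #include "vk_mem_alloc.h"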
728 
729 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
730 //#define VMA_USE_STL_CONTAINERS 1
731 
732 /* Set this macro to 1 to make the library include and use STL containers:
733 std::pair, std::vector, std::list, std::unordered_map.
734 
735 Set it to 0 or leave it undefined to make the library use its own
736 implementation of the containers.
737 */
738 #if VMA_USE_STL_CONTAINERS
739  #define VMA_USE_STL_VECTOR 1
740  #define VMA_USE_STL_UNORDERED_MAP 1
741  #define VMA_USE_STL_LIST 1
742 #endif
743 
744 #if VMA_USE_STL_VECTOR
745  #include <vector>
746 #endif
747 
748 #if VMA_USE_STL_UNORDERED_MAP
749  #include <unordered_map>
750 #endif
751 
752 #if VMA_USE_STL_LIST
753  #include <list>
754 #endif
755 
756 /*
757 The following headers are used only in this CONFIGURATION section, so feel
758 free to remove them if they are not needed.
759 */
760 #include <cassert> // for assert
761 #include <algorithm> // for min, max
762 #include <mutex> // for std::mutex
763 
764 #if !defined(_WIN32)
765  #include <malloc.h> // for aligned_alloc() (C11 also declares it in <stdlib.h>)
766 #endif
767 
768 // Normal assert to check for programmer's errors, especially in Debug configuration.
769 #ifndef VMA_ASSERT
770  #ifdef _DEBUG
771  #define VMA_ASSERT(expr) assert(expr)
772  #else
773  #define VMA_ASSERT(expr)
774  #endif
775 #endif
776 
777 // Assert that is called very often, e.g. inside data structures such as operator[].
778 // Making it non-empty can make the program slow.
779 #ifndef VMA_HEAVY_ASSERT
780  #ifdef _DEBUG
781  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
782  #else
783  #define VMA_HEAVY_ASSERT(expr)
784  #endif
785 #endif
786 
787 #ifndef VMA_NULL
788  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
789  #define VMA_NULL nullptr
790 #endif
791 
792 #ifndef VMA_ALIGN_OF
793  #define VMA_ALIGN_OF(type) (__alignof(type))
794 #endif
795 
796 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
797  #if defined(_WIN32)
798  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
799  #else
800  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
801  #endif
802 #endif
803 
804 #ifndef VMA_SYSTEM_FREE
805  #if defined(_WIN32)
806  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
807  #else
808  #define VMA_SYSTEM_FREE(ptr) free(ptr)
809  #endif
810 #endif
811 
812 #ifndef VMA_MIN
813  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
814 #endif
815 
816 #ifndef VMA_MAX
817  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
818 #endif
819 
820 #ifndef VMA_SWAP
821  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
822 #endif
823 
824 #ifndef VMA_SORT
825  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
826 #endif
827 
828 #ifndef VMA_DEBUG_LOG
829  #define VMA_DEBUG_LOG(format, ...)
830  /*
831  #define VMA_DEBUG_LOG(format, ...) do { \
832  printf(format, __VA_ARGS__); \
833  printf("\n"); \
834  } while(false)
835  */
836 #endif
837 
838 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
839 #if VMA_STATS_STRING_ENABLED
840  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
841  {
842  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
843  }
844  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
845  {
846  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
847  }
848 #endif
849 
850 #ifndef VMA_MUTEX
851  class VmaMutex
852  {
853  public:
854  VmaMutex() { }
855  ~VmaMutex() { }
856  void Lock() { m_Mutex.lock(); }
857  void Unlock() { m_Mutex.unlock(); }
858  private:
859  std::mutex m_Mutex;
860  };
861  #define VMA_MUTEX VmaMutex
862 #endif
863 
864 #ifndef VMA_BEST_FIT
865 
877  #define VMA_BEST_FIT (1)
878 #endif
879 
880 #ifndef VMA_DEBUG_ALWAYS_OWN_MEMORY
881 
885  #define VMA_DEBUG_ALWAYS_OWN_MEMORY (0)
886 #endif
887 
888 #ifndef VMA_DEBUG_ALIGNMENT
889 
893  #define VMA_DEBUG_ALIGNMENT (1)
894 #endif
895 
896 #ifndef VMA_DEBUG_MARGIN
897 
901  #define VMA_DEBUG_MARGIN (0)
902 #endif
903 
904 #ifndef VMA_DEBUG_GLOBAL_MUTEX
905 
909  #define VMA_DEBUG_GLOBAL_MUTEX (0)
910 #endif
911 
912 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
913 
917  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
918 #endif
919 
920 #ifndef VMA_SMALL_HEAP_MAX_SIZE
921  #define VMA_SMALL_HEAP_MAX_SIZE (512 * 1024 * 1024)
923 #endif
924 
925 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
926  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256 * 1024 * 1024)
928 #endif
929 
930 #ifndef VMA_DEFAULT_SMALL_HEAP_BLOCK_SIZE
931  #define VMA_DEFAULT_SMALL_HEAP_BLOCK_SIZE (64 * 1024 * 1024)
933 #endif
934 
935 /*******************************************************************************
936 END OF CONFIGURATION
937 */
938 
939 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
940  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
941 
942 // Returns number of bits set to 1 in (v).
943 static inline uint32_t CountBitsSet(uint32_t v)
944 {
945  uint32_t c = v - ((v >> 1) & 0x55555555);
946  c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
947  c = ((c >> 4) + c) & 0x0F0F0F0F;
948  c = ((c >> 8) + c) & 0x00FF00FF;
949  c = ((c >> 16) + c) & 0x0000FFFF;
950  return c;
951 }
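// The function above is a classic SWAR ("SIMD within a register") popcount:
// each step adds the bit counts of adjacent 1-, 2-, 4-, 8- and finally 16-bit
// fields in parallel. For example, CountBitsSet(0xB) == 3, since 0xB is 1011
// in binary.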
952 
953 // Aligns given value up to the nearest multiple of align. For example: VmaAlignUp(11, 8) = 16.
954 // Use types like uint32_t, uint64_t as T.
955 template <typename T>
956 static inline T VmaAlignUp(T val, T align)
957 {
958  return (val + align - 1) / align * align;
959 }
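// Note: the formula above works for any positive align value. When align is
// known to be a power of two, an equivalent bitwise form (not used here) is:
//   (val + align - 1) & ~(align - 1)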
960 
961 // Division with mathematical rounding to the nearest integer.
962 template <typename T>
963 inline T VmaRoundDiv(T x, T y)
964 {
965  return (x + (y / (T)2)) / y;
966 }
967 
968 #ifndef VMA_SORT
969 
970 template<typename Iterator, typename Compare>
971 Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
972 {
973  Iterator centerValue = end; --centerValue;
974  Iterator insertIndex = beg;
975  for(Iterator i = beg; i < centerValue; ++i)
976  {
977  if(cmp(*i, *centerValue))
978  {
979  if(insertIndex != i)
980  {
981  VMA_SWAP(*i, *insertIndex);
982  }
983  ++insertIndex;
984  }
985  }
986  if(insertIndex != centerValue)
987  {
988  VMA_SWAP(*insertIndex, *centerValue);
989  }
990  return insertIndex;
991 }
992 
993 template<typename Iterator, typename Compare>
994 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
995 {
996  if(beg < end)
997  {
998  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
999  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
1000  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
1001  }
1002 }
1003 
1004 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
1005 
1006 #endif // #ifndef VMA_SORT
1007 
1008 /*
1009 Returns true if two memory blocks occupy overlapping pages.
1010 ResourceA must be at a lower memory offset than ResourceB.
1011 
1012 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
1013 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
1014 */
1015 static inline bool VmaBlocksOnSamePage(
1016  VkDeviceSize resourceAOffset,
1017  VkDeviceSize resourceASize,
1018  VkDeviceSize resourceBOffset,
1019  VkDeviceSize pageSize)
1020 {
1021  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
1022  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
1023  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
1024  VkDeviceSize resourceBStart = resourceBOffset;
1025  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
1026  return resourceAEndPage == resourceBStartPage;
1027 }
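// Worked example, assuming pageSize (bufferImageGranularity) = 1024:
// resource A at offset 0 with size 512 ends at byte 511, which lies on page 0;
// resource B at offset 1000 also starts on page 0, so the function returns
// true and the two resources may conflict under bufferImageGranularity rules.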
1028 
1029 enum VmaSuballocationType
1030 {
1031  VMA_SUBALLOCATION_TYPE_FREE = 0,
1032  VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
1033  VMA_SUBALLOCATION_TYPE_BUFFER = 2,
1034  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
1035  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
1036  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
1037  VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
1038 };
1039 
1040 /*
1041 Returns true if given suballocation types could conflict and must respect
1042 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
1043 or linear image and the other is an optimal image. If a type is unknown, the
1044 function behaves conservatively.
1045 */
1046 static inline bool VmaIsBufferImageGranularityConflict(
1047  VmaSuballocationType suballocType1,
1048  VmaSuballocationType suballocType2)
1049 {
1050  if(suballocType1 > suballocType2)
1051  {
1052  VMA_SWAP(suballocType1, suballocType2);
1053  }
1054 
1055  switch(suballocType1)
1056  {
1057  case VMA_SUBALLOCATION_TYPE_FREE:
1058  return false;
1059  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
1060  return true;
1061  case VMA_SUBALLOCATION_TYPE_BUFFER:
1062  return
1063  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
1064  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
1065  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
1066  return
1067  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
1068  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
1069  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
1070  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
1071  return
1072  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
1073  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
1074  return false;
1075  default:
1076  VMA_ASSERT(0);
1077  return true;
1078  }
1079 }
1080 
1081 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
1082 struct VmaMutexLock
1083 {
1084 public:
1085  VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
1086  m_pMutex(useMutex ? &mutex : VMA_NULL)
1087  {
1088  if(m_pMutex)
1089  {
1090  m_pMutex->Lock();
1091  }
1092  }
1093 
1094  ~VmaMutexLock()
1095  {
1096  if(m_pMutex)
1097  {
1098  m_pMutex->Unlock();
1099  }
1100  }
1101 
1102 private:
1103  VMA_MUTEX* m_pMutex;
1104 };
1105 
1106 #if VMA_DEBUG_GLOBAL_MUTEX
1107  static VMA_MUTEX gDebugGlobalMutex;
1108  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex);
1109 #else
1110  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
1111 #endif
1112 
1113 // Minimum size of a free suballocation to register it in the free suballocation collection.
1114 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
1115 
1116 /*
1117 Performs binary search and returns an iterator to the first element that is
1118 greater than or equal to (key), according to comparison (cmp).
1119 
1120 Cmp should return true if its first argument is less than its second argument.
1121 
1122 The returned iterator points to the found element, if present in the
1123 collection, or to the place where a new element with value (key) should be inserted.
1124 */
1125 template <typename IterT, typename KeyT, typename CmpT>
1126 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpT cmp)
1127 {
1128  size_t down = 0, up = (end - beg);
1129  while(down < up)
1130  {
1131  const size_t mid = (down + up) / 2;
1132  if(cmp(*(beg+mid), key))
1133  {
1134  down = mid + 1;
1135  }
1136  else
1137  {
1138  up = mid;
1139  }
1140  }
1141  return beg + down;
1142 }
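// In effect this is equivalent to std::lower_bound. A minimal usage sketch,
// assuming a sorted array (VmaExampleLowerBound is a hypothetical helper):
static void VmaExampleLowerBound()
{
    uint32_t sorted[] = { 1, 3, 5, 7 };
    uint32_t* it = VmaBinaryFindFirstNotLess(
        sorted, sorted + 4, 5u,
        [](uint32_t a, uint32_t b) { return a < b; });
    VMA_ASSERT(it == sorted + 2 && *it == 5); // First element not less than 5.
}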
1143 
1145 // Memory allocation
1146 
1147 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
1148 {
1149  if((pAllocationCallbacks != VMA_NULL) &&
1150  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
1151  {
1152  return (*pAllocationCallbacks->pfnAllocation)(
1153  pAllocationCallbacks->pUserData,
1154  size,
1155  alignment,
1156  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1157  }
1158  else
1159  {
1160  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
1161  }
1162 }
1163 
1164 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
1165 {
1166  if((pAllocationCallbacks != VMA_NULL) &&
1167  (pAllocationCallbacks->pfnFree != VMA_NULL))
1168  {
1169  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
1170  }
1171  else
1172  {
1173  VMA_SYSTEM_FREE(ptr);
1174  }
1175 }
1176 
1177 template<typename T>
1178 static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
1179 {
1180  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
1181 }
1182 
1183 template<typename T>
1184 static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
1185 {
1186  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
1187 }
1188 
1189 #define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
1190 
1191 #define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
1192 
1193 template<typename T>
1194 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
1195 {
1196  ptr->~T();
1197  VmaFree(pAllocationCallbacks, ptr);
1198 }
1199 
1200 template<typename T>
1201 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
1202 {
1203  if(ptr != VMA_NULL)
1204  {
1205  for(size_t i = count; i--; )
1206  {
1207  ptr[i].~T();
1208  }
1209  VmaFree(pAllocationCallbacks, ptr);
1210  }
1211 }
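// A usage sketch of the allocation helpers above (VmaExampleNewDelete is a
// hypothetical helper; VMA_NULL callbacks fall back to the system allocator):
static void VmaExampleNewDelete()
{
    const VkAllocationCallbacks* callbacks = VMA_NULL;
    VMA_MUTEX* m = vma_new(callbacks, VMA_MUTEX)(); // Allocate and construct.
    vma_delete(callbacks, m); // Destruct and free.
}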
1212 
1213 // STL-compatible allocator.
1214 template<typename T>
1215 class VmaStlAllocator
1216 {
1217 public:
1218  const VkAllocationCallbacks* const m_pCallbacks;
1219  typedef T value_type;
1220 
1221  VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
1222  template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }
1223 
1224  T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
1225  void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
1226 
1227  template<typename U>
1228  bool operator==(const VmaStlAllocator<U>& rhs) const
1229  {
1230  return m_pCallbacks == rhs.m_pCallbacks;
1231  }
1232  template<typename U>
1233  bool operator!=(const VmaStlAllocator<U>& rhs) const
1234  {
1235  return m_pCallbacks != rhs.m_pCallbacks;
1236  }
1237 
1238  VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
1239 };
1240 
1241 #if VMA_USE_STL_VECTOR
1242 
1243 #define VmaVector std::vector
1244 
1245 template<typename T, typename allocatorT>
1246 static void VectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
1247 {
1248  vec.insert(vec.begin() + index, item);
1249 }
1250 
1251 template<typename T, typename allocatorT>
1252 static void VectorRemove(std::vector<T, allocatorT>& vec, size_t index)
1253 {
1254  vec.erase(vec.begin() + index);
1255 }
1256 
1257 #else // #if VMA_USE_STL_VECTOR
1258 
1259 /* Class with interface compatible with subset of std::vector.
1260 T must be POD because constructors and destructors are not called and memcpy is
1261 used for these objects. */
1262 template<typename T, typename AllocatorT>
1263 class VmaVector
1264 {
1265 public:
1266  VmaVector(const AllocatorT& allocator) :
1267  m_Allocator(allocator),
1268  m_pArray(VMA_NULL),
1269  m_Count(0),
1270  m_Capacity(0)
1271  {
1272  }
1273 
1274  VmaVector(size_t count, const AllocatorT& allocator) :
1275  m_Allocator(allocator),
1276  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
1277  m_Count(count),
1278  m_Capacity(count)
1279  {
1280  }
1281 
1282  VmaVector(const VmaVector<T, AllocatorT>& src) :
1283  m_Allocator(src.m_Allocator),
1284  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
1285  m_Count(src.m_Count),
1286  m_Capacity(src.m_Count)
1287  {
1288  if(m_Count != 0)
1289  {
1290  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
1291  }
1292  }
1293 
1294  ~VmaVector()
1295  {
1296  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
1297  }
1298 
1299  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
1300  {
1301  if(&rhs != this)
1302  {
1303  Resize(rhs.m_Count);
1304  if(m_Count != 0)
1305  {
1306  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
1307  }
1308  }
1309  return *this;
1310  }
1311 
1312  bool empty() const { return m_Count == 0; }
1313  size_t size() const { return m_Count; }
1314  T* data() { return m_pArray; }
1315  const T* data() const { return m_pArray; }
1316 
1317  T& operator[](size_t index)
1318  {
1319  VMA_HEAVY_ASSERT(index < m_Count);
1320  return m_pArray[index];
1321  }
1322  const T& operator[](size_t index) const
1323  {
1324  VMA_HEAVY_ASSERT(index < m_Count);
1325  return m_pArray[index];
1326  }
1327 
1328  T& front()
1329  {
1330  VMA_HEAVY_ASSERT(m_Count > 0);
1331  return m_pArray[0];
1332  }
1333  const T& front() const
1334  {
1335  VMA_HEAVY_ASSERT(m_Count > 0);
1336  return m_pArray[0];
1337  }
1338  T& back()
1339  {
1340  VMA_HEAVY_ASSERT(m_Count > 0);
1341  return m_pArray[m_Count - 1];
1342  }
1343  const T& back() const
1344  {
1345  VMA_HEAVY_ASSERT(m_Count > 0);
1346  return m_pArray[m_Count - 1];
1347  }
1348 
1349  void reserve(size_t newCapacity, bool freeMemory = false)
1350  {
1351  newCapacity = VMA_MAX(newCapacity, m_Count);
1352 
1353  if((newCapacity < m_Capacity) && !freeMemory)
1354  {
1355  newCapacity = m_Capacity;
1356  }
1357 
1358  if(newCapacity != m_Capacity)
1359  {
1360  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
1361  if(m_Count != 0)
1362  {
1363  memcpy(newArray, m_pArray, m_Count * sizeof(T));
1364  }
1365  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
1366  m_Capacity = newCapacity;
1367  m_pArray = newArray;
1368  }
1369  }
1370 
1371  void resize(size_t newCount, bool freeMemory = false)
1372  {
1373  size_t newCapacity = m_Capacity;
1374  if(newCount > m_Capacity)
1375  {
1376  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
1377  }
1378  else if(freeMemory)
1379  {
1380  newCapacity = newCount;
1381  }
1382 
1383  if(newCapacity != m_Capacity)
1384  {
1385  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
1386  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
1387  if(elementsToCopy != 0)
1388  {
1389  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
1390  }
1391  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
1392  m_Capacity = newCapacity;
1393  m_pArray = newArray;
1394  }
1395 
1396  m_Count = newCount;
1397  }
1398 
1399  void clear(bool freeMemory = false)
1400  {
1401  resize(0, freeMemory);
1402  }
1403 
1404  void insert(size_t index, const T& src)
1405  {
1406  VMA_HEAVY_ASSERT(index <= m_Count);
1407  const size_t oldCount = size();
1408  resize(oldCount + 1);
1409  if(index < oldCount)
1410  {
1411  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
1412  }
1413  m_pArray[index] = src;
1414  }
1415 
1416  void remove(size_t index)
1417  {
1418  VMA_HEAVY_ASSERT(index < m_Count);
1419  const size_t oldCount = size();
1420  if(index < oldCount - 1)
1421  {
1422  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
1423  }
1424  resize(oldCount - 1);
1425  }
1426 
1427  void push_back(const T& src)
1428  {
1429  const size_t newIndex = size();
1430  resize(newIndex + 1);
1431  m_pArray[newIndex] = src;
1432  }
1433 
1434  void pop_back()
1435  {
1436  VMA_HEAVY_ASSERT(m_Count > 0);
1437  resize(size() - 1);
1438  }
1439 
1440  void push_front(const T& src)
1441  {
1442  insert(0, src);
1443  }
1444 
1445  void pop_front()
1446  {
1447  VMA_HEAVY_ASSERT(m_Count > 0);
1448  remove(0);
1449  }
1450 
1451  typedef T* iterator;
1452 
1453  iterator begin() { return m_pArray; }
1454  iterator end() { return m_pArray + m_Count; }
1455 
1456 private:
1457  AllocatorT m_Allocator;
1458  T* m_pArray;
1459  size_t m_Count;
1460  size_t m_Capacity;
1461 };
1462 
1463 template<typename T, typename allocatorT>
1464 static void VectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
1465 {
1466  vec.insert(index, item);
1467 }
1468 
1469 template<typename T, typename allocatorT>
1470 static void VectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
1471 {
1472  vec.remove(index);
1473 }
1474 
1475 #endif // #if VMA_USE_STL_VECTOR
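// A usage sketch of the vector and its STL-compatible allocator
// (VmaExampleVector is a hypothetical helper; VectorInsert works in both the
// custom and the std::vector configuration):
static void VmaExampleVector()
{
    VmaStlAllocator<uint32_t> alloc(VMA_NULL);
    VmaVector< uint32_t, VmaStlAllocator<uint32_t> > v(alloc);
    v.push_back(42u);
    VectorInsert(v, 0, 7u); // v now contains { 7, 42 }.
}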
1476 
1478 // class VmaPoolAllocator
1479 
1480 /*
1481 Allocator for objects of type T using a list of arrays (pools) to speed up
1482 allocation. The number of elements that can be allocated is not bounded,
1483 because the allocator can create multiple blocks.
1484 */
1485 template<typename T>
1486 class VmaPoolAllocator
1487 {
1488 public:
1489  VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
1490  ~VmaPoolAllocator();
1491  void Clear();
1492  T* Alloc();
1493  void Free(T* ptr);
1494 
1495 private:
1496  union Item
1497  {
1498  uint32_t NextFreeIndex;
1499  T Value;
1500  };
1501 
1502  struct ItemBlock
1503  {
1504  Item* pItems;
1505  uint32_t FirstFreeIndex;
1506  };
1507 
1508  const VkAllocationCallbacks* m_pAllocationCallbacks;
1509  size_t m_ItemsPerBlock;
1510  VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;
1511 
1512  ItemBlock& CreateNewBlock();
1513 };
1514 
1515 template<typename T>
1516 VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
1517  m_pAllocationCallbacks(pAllocationCallbacks),
1518  m_ItemsPerBlock(itemsPerBlock),
1519  m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
1520 {
1521  VMA_ASSERT(itemsPerBlock > 0);
1522 }
1523 
1524 template<typename T>
1525 VmaPoolAllocator<T>::~VmaPoolAllocator()
1526 {
1527  Clear();
1528 }
1529 
1530 template<typename T>
1531 void VmaPoolAllocator<T>::Clear()
1532 {
1533  for(size_t i = m_ItemBlocks.size(); i--; )
1534  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
1535  m_ItemBlocks.clear();
1536 }
1537 
1538 template<typename T>
1539 T* VmaPoolAllocator<T>::Alloc()
1540 {
1541  for(size_t i = m_ItemBlocks.size(); i--; )
1542  {
1543  ItemBlock& block = m_ItemBlocks[i];
1544  // This block has some free items: Use first one.
1545  if(block.FirstFreeIndex != UINT32_MAX)
1546  {
1547  Item* const pItem = &block.pItems[block.FirstFreeIndex];
1548  block.FirstFreeIndex = pItem->NextFreeIndex;
1549  return &pItem->Value;
1550  }
1551  }
1552 
1553  // No block has free item: Create new one and use it.
1554  ItemBlock& newBlock = CreateNewBlock();
1555  Item* const pItem = &newBlock.pItems[0];
1556  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
1557  return &pItem->Value;
1558 }
1559 
1560 template<typename T>
1561 void VmaPoolAllocator<T>::Free(T* ptr)
1562 {
1563  // Search all memory blocks to find ptr.
1564  for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
1565  {
1566  ItemBlock& block = m_ItemBlocks[i];
1567 
1568  // Casting to union.
1569  Item* pItemPtr;
1570  memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
1571 
1572  // Check if pItemPtr is in address range of this block.
1573  if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
1574  {
1575  const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
1576  pItemPtr->NextFreeIndex = block.FirstFreeIndex;
1577  block.FirstFreeIndex = index;
1578  return;
1579  }
1580  }
1581  VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
1582 }
1583 
1584 template<typename T>
1585 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
1586 {
1587  ItemBlock newBlock = {
1588  vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };
1589 
1590  m_ItemBlocks.push_back(newBlock);
1591 
1592  // Setup singly-linked list of all free items in this block.
1593  for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
1594  newBlock.pItems[i].NextFreeIndex = i + 1;
1595  newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
1596  return m_ItemBlocks.back();
1597 }
1598 
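// A usage sketch of the pool allocator above (VmaExamplePool is a
// hypothetical helper, assuming default allocation callbacks and 32 items
// per block):
static void VmaExamplePool()
{
    VmaPoolAllocator<uint32_t> pool(VMA_NULL, 32);
    uint32_t* p = pool.Alloc(); // Served from a per-block free list.
    *p = 123;
    pool.Free(p); // Returns the item to its block's free list.
}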
1600 // class VmaRawList, VmaList
1601 
1602 #if VMA_USE_STL_LIST
1603 
1604 #define VmaList std::list
1605 
1606 #else // #if VMA_USE_STL_LIST
1607 
1608 template<typename T>
1609 struct VmaListItem
1610 {
1611  VmaListItem* pPrev;
1612  VmaListItem* pNext;
1613  T Value;
1614 };
1615 
1616 // Doubly linked list.
1617 template<typename T>
1618 class VmaRawList
1619 {
1620 public:
1621  typedef VmaListItem<T> ItemType;
1622 
1623  VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
1624  ~VmaRawList();
1625  void Clear();
1626 
1627  size_t GetCount() const { return m_Count; }
1628  bool IsEmpty() const { return m_Count == 0; }
1629 
1630  ItemType* Front() { return m_pFront; }
1631  const ItemType* Front() const { return m_pFront; }
1632  ItemType* Back() { return m_pBack; }
1633  const ItemType* Back() const { return m_pBack; }
1634 
1635  ItemType* PushBack();
1636  ItemType* PushFront();
1637  ItemType* PushBack(const T& value);
1638  ItemType* PushFront(const T& value);
1639  void PopBack();
1640  void PopFront();
1641 
1642  // Item can be null - it means PushBack.
1643  ItemType* InsertBefore(ItemType* pItem);
1644  // Item can be null - it means PushFront.
1645  ItemType* InsertAfter(ItemType* pItem);
1646 
1647  ItemType* InsertBefore(ItemType* pItem, const T& value);
1648  ItemType* InsertAfter(ItemType* pItem, const T& value);
1649 
1650  void Remove(ItemType* pItem);
1651 
1652 private:
1653  const VkAllocationCallbacks* const m_pAllocationCallbacks;
1654  VmaPoolAllocator<ItemType> m_ItemAllocator;
1655  ItemType* m_pFront;
1656  ItemType* m_pBack;
1657  size_t m_Count;
1658 
1659  // Declared but not defined, to block the copy constructor and assignment operator.
1660  VmaRawList(const VmaRawList<T>& src);
1661  VmaRawList<T>& operator=(const VmaRawList<T>& rhs);
1662 };
1663 
1664 template<typename T>
1665 VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
1666  m_pAllocationCallbacks(pAllocationCallbacks),
1667  m_ItemAllocator(pAllocationCallbacks, 128),
1668  m_pFront(VMA_NULL),
1669  m_pBack(VMA_NULL),
1670  m_Count(0)
1671 {
1672 }
1673 
1674 template<typename T>
1675 VmaRawList<T>::~VmaRawList()
1676 {
1677  // Intentionally not calling Clear, because that would perform unnecessary
1678  // computation to return all items to m_ItemAllocator as free.
1679 }
1680 
1681 template<typename T>
1682 void VmaRawList<T>::Clear()
1683 {
1684  if(IsEmpty() == false)
1685  {
1686  ItemType* pItem = m_pBack;
1687  while(pItem != VMA_NULL)
1688  {
1689  ItemType* const pPrevItem = pItem->pPrev;
1690  m_ItemAllocator.Free(pItem);
1691  pItem = pPrevItem;
1692  }
1693  m_pFront = VMA_NULL;
1694  m_pBack = VMA_NULL;
1695  m_Count = 0;
1696  }
1697 }
1698 
1699 template<typename T>
1700 VmaListItem<T>* VmaRawList<T>::PushBack()
1701 {
1702  ItemType* const pNewItem = m_ItemAllocator.Alloc();
1703  pNewItem->pNext = VMA_NULL;
1704  if(IsEmpty())
1705  {
1706  pNewItem->pPrev = VMA_NULL;
1707  m_pFront = pNewItem;
1708  m_pBack = pNewItem;
1709  m_Count = 1;
1710  }
1711  else
1712  {
1713  pNewItem->pPrev = m_pBack;
1714  m_pBack->pNext = pNewItem;
1715  m_pBack = pNewItem;
1716  ++m_Count;
1717  }
1718  return pNewItem;
1719 }
1720 
1721 template<typename T>
1722 VmaListItem<T>* VmaRawList<T>::PushFront()
1723 {
1724  ItemType* const pNewItem = m_ItemAllocator.Alloc();
1725  pNewItem->pPrev = VMA_NULL;
1726  if(IsEmpty())
1727  {
1728  pNewItem->pNext = VMA_NULL;
1729  m_pFront = pNewItem;
1730  m_pBack = pNewItem;
1731  m_Count = 1;
1732  }
1733  else
1734  {
1735  pNewItem->pNext = m_pFront;
1736  m_pFront->pPrev = pNewItem;
1737  m_pFront = pNewItem;
1738  ++m_Count;
1739  }
1740  return pNewItem;
1741 }
1742 
1743 template<typename T>
1744 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
1745 {
1746  ItemType* const pNewItem = PushBack();
1747  pNewItem->Value = value;
1748  return pNewItem;
1749 }
1750 
1751 template<typename T>
1752 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
1753 {
1754  ItemType* const pNewItem = PushFront();
1755  pNewItem->Value = value;
1756  return pNewItem;
1757 }
1758 
1759 template<typename T>
1760 void VmaRawList<T>::PopBack()
1761 {
1762  VMA_HEAVY_ASSERT(m_Count > 0);
1763  ItemType* const pBackItem = m_pBack;
1764  ItemType* const pPrevItem = pBackItem->pPrev;
1765  if(pPrevItem != VMA_NULL)
1766  {
1767  pPrevItem->pNext = VMA_NULL;
1768  }
1769  m_pBack = pPrevItem;
1770  m_ItemAllocator.Free(pBackItem);
1771  --m_Count;
1772 }
1773 
1774 template<typename T>
1775 void VmaRawList<T>::PopFront()
1776 {
1777  VMA_HEAVY_ASSERT(m_Count > 0);
1778  ItemType* const pFrontItem = m_pFront;
1779  ItemType* const pNextItem = pFrontItem->pNext;
1780  if(pNextItem != VMA_NULL)
1781  {
1782  pNextItem->pPrev = VMA_NULL;
1783  }
1784  m_pFront = pNextItem;
1785  m_ItemAllocator.Free(pFrontItem);
1786  --m_Count;
1787 }
1788 
1789 template<typename T>
1790 void VmaRawList<T>::Remove(ItemType* pItem)
1791 {
1792  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
1793  VMA_HEAVY_ASSERT(m_Count > 0);
1794 
1795  if(pItem->pPrev != VMA_NULL)
1796  {
1797  pItem->pPrev->pNext = pItem->pNext;
1798  }
1799  else
1800  {
1801  VMA_HEAVY_ASSERT(m_pFront == pItem);
1802  m_pFront = pItem->pNext;
1803  }
1804 
1805  if(pItem->pNext != VMA_NULL)
1806  {
1807  pItem->pNext->pPrev = pItem->pPrev;
1808  }
1809  else
1810  {
1811  VMA_HEAVY_ASSERT(m_pBack == pItem);
1812  m_pBack = pItem->pPrev;
1813  }
1814 
1815  m_ItemAllocator.Free(pItem);
1816  --m_Count;
1817 }
1818 
1819 template<typename T>
1820 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
1821 {
1822  if(pItem != VMA_NULL)
1823  {
1824  ItemType* const prevItem = pItem->pPrev;
1825  ItemType* const newItem = m_ItemAllocator.Alloc();
1826  newItem->pPrev = prevItem;
1827  newItem->pNext = pItem;
1828  pItem->pPrev = newItem;
1829  if(prevItem != VMA_NULL)
1830  {
1831  prevItem->pNext = newItem;
1832  }
1833  else
1834  {
1835  VMA_HEAVY_ASSERT(m_pFront == pItem);
1836  m_pFront = newItem;
1837  }
1838  ++m_Count;
1839  return newItem;
1840  }
1841  else
1842  return PushBack();
1843 }
1844 
1845 template<typename T>
1846 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
1847 {
1848  if(pItem != VMA_NULL)
1849  {
1850  ItemType* const nextItem = pItem->pNext;
1851  ItemType* const newItem = m_ItemAllocator.Alloc();
1852  newItem->pNext = nextItem;
1853  newItem->pPrev = pItem;
1854  pItem->pNext = newItem;
1855  if(nextItem != VMA_NULL)
1856  {
1857  nextItem->pPrev = newItem;
1858  }
1859  else
1860  {
1861  VMA_HEAVY_ASSERT(m_pBack == pItem);
1862  m_pBack = newItem;
1863  }
1864  ++m_Count;
1865  return newItem;
1866  }
1867  else
1868  return PushFront();
1869 }
1870 
1871 template<typename T>
1872 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
1873 {
1874  ItemType* const newItem = InsertBefore(pItem);
1875  newItem->Value = value;
1876  return newItem;
1877 }
1878 
1879 template<typename T>
1880 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
1881 {
1882  ItemType* const newItem = InsertAfter(pItem);
1883  newItem->Value = value;
1884  return newItem;
1885 }
1886 
1887 template<typename T, typename AllocatorT>
1888 class VmaList
1889 {
1890 public:
1891  class iterator
1892  {
1893  public:
1894  iterator() :
1895  m_pList(VMA_NULL),
1896  m_pItem(VMA_NULL)
1897  {
1898  }
1899 
1900  T& operator*() const
1901  {
1902  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
1903  return m_pItem->Value;
1904  }
1905  T* operator->() const
1906  {
1907  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
1908  return &m_pItem->Value;
1909  }
1910 
1911  iterator& operator++()
1912  {
1913  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
1914  m_pItem = m_pItem->pNext;
1915  return *this;
1916  }
1917  iterator& operator--()
1918  {
1919  if(m_pItem != VMA_NULL)
1920  {
1921  m_pItem = m_pItem->pPrev;
1922  }
1923  else
1924  {
1925  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
1926  m_pItem = m_pList->Back();
1927  }
1928  return *this;
1929  }
1930 
1931  iterator operator++(int)
1932  {
1933  iterator result = *this;
1934  ++*this;
1935  return result;
1936  }
1937  iterator operator--(int)
1938  {
1939  iterator result = *this;
1940  --*this;
1941  return result;
1942  }
1943 
1944  bool operator==(const iterator& rhs) const
1945  {
1946  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
1947  return m_pItem == rhs.m_pItem;
1948  }
1949  bool operator!=(const iterator& rhs) const
1950  {
1951  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
1952  return m_pItem != rhs.m_pItem;
1953  }
1954 
1955  private:
1956  VmaRawList<T>* m_pList;
1957  VmaListItem<T>* m_pItem;
1958 
1959  iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
1960  m_pList(pList),
1961  m_pItem(pItem)
1962  {
1963  }
1964 
1965  friend class VmaList<T, AllocatorT>;
1966  friend class VmaList<T, AllocatorT>::const_iterator;
1967  };
1968 
1969  class const_iterator
1970  {
1971  public:
1972  const_iterator() :
1973  m_pList(VMA_NULL),
1974  m_pItem(VMA_NULL)
1975  {
1976  }
1977 
1978  const_iterator(const iterator& src) :
1979  m_pList(src.m_pList),
1980  m_pItem(src.m_pItem)
1981  {
1982  }
1983 
1984  const T& operator*() const
1985  {
1986  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
1987  return m_pItem->Value;
1988  }
1989  const T* operator->() const
1990  {
1991  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
1992  return &m_pItem->Value;
1993  }
1994 
1995  const_iterator& operator++()
1996  {
1997  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
1998  m_pItem = m_pItem->pNext;
1999  return *this;
2000  }
2001  const_iterator& operator--()
2002  {
2003  if(m_pItem != VMA_NULL)
2004  {
2005  m_pItem = m_pItem->pPrev;
2006  }
2007  else
2008  {
2009  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
2010  m_pItem = m_pList->Back();
2011  }
2012  return *this;
2013  }
2014 
2015  const_iterator operator++(int)
2016  {
2017  const_iterator result = *this;
2018  ++*this;
2019  return result;
2020  }
2021  const_iterator operator--(int)
2022  {
2023  const_iterator result = *this;
2024  --*this;
2025  return result;
2026  }
2027 
2028  bool operator==(const const_iterator& rhs) const
2029  {
2030  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
2031  return m_pItem == rhs.m_pItem;
2032  }
2033  bool operator!=(const const_iterator& rhs) const
2034  {
2035  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
2036  return m_pItem != rhs.m_pItem;
2037  }
2038 
2039  private:
2040  const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
2041  m_pList(pList),
2042  m_pItem(pItem)
2043  {
2044  }
2045 
2046  const VmaRawList<T>* m_pList;
2047  const VmaListItem<T>* m_pItem;
2048 
2049  friend class VmaList<T, AllocatorT>;
2050  };
2051 
2052  VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }
2053 
2054  bool empty() const { return m_RawList.IsEmpty(); }
2055  size_t size() const { return m_RawList.GetCount(); }
2056 
2057  iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
2058  iterator end() { return iterator(&m_RawList, VMA_NULL); }
2059 
2060  const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
2061  const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
2062 
2063  void clear() { m_RawList.Clear(); }
2064  void push_back(const T& value) { m_RawList.PushBack(value); }
2065  void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
2066  iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
2067 
2068 private:
2069  VmaRawList<T> m_RawList;
2070 };
2071 
2072 #endif // #if VMA_USE_STL_LIST
2073 
2075 // class VmaMap
2076 
2077 #if VMA_USE_STL_UNORDERED_MAP
2078 
2079 #define VmaPair std::pair
2080 
2081 #define VMA_MAP_TYPE(KeyT, ValueT) \
2082  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
2083 
2084 #else // #if VMA_USE_STL_UNORDERED_MAP
2085 
2086 template<typename T1, typename T2>
2087 struct VmaPair
2088 {
2089  T1 first;
2090  T2 second;
2091 
2092  VmaPair() : first(), second() { }
2093  VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
2094 };
2095 
2096 /* Class compatible with subset of interface of std::unordered_map.
2097 KeyT, ValueT must be POD because they will be stored in VmaVector.
2098 */
2099 template<typename KeyT, typename ValueT>
2100 class VmaMap
2101 {
2102 public:
2103  typedef VmaPair<KeyT, ValueT> PairType;
2104  typedef PairType* iterator;
2105 
2106  VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }
2107 
2108  iterator begin() { return m_Vector.begin(); }
2109  iterator end() { return m_Vector.end(); }
2110 
2111  void insert(const PairType& pair);
2112  iterator find(const KeyT& key);
2113  void erase(iterator it);
2114 
2115 private:
2116  VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
2117 };
2118 
2119 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
2120 
2121 template<typename FirstT, typename SecondT>
2122 struct VmaPairFirstLess
2123 {
2124  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
2125  {
2126  return lhs.first < rhs.first;
2127  }
2128  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
2129  {
2130  return lhs.first < rhsFirst;
2131  }
2132 };
2133 
2134 template<typename KeyT, typename ValueT>
2135 void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
2136 {
2137  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
2138  m_Vector.data(),
2139  m_Vector.data() + m_Vector.size(),
2140  pair,
2141  VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
2142  VectorInsert(m_Vector, indexToInsert, pair);
2143 }
2144 
2145 template<typename KeyT, typename ValueT>
2146 VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
2147 {
2148  PairType* it = VmaBinaryFindFirstNotLess(
2149  m_Vector.data(),
2150  m_Vector.data() + m_Vector.size(),
2151  key,
2152  VmaPairFirstLess<KeyT, ValueT>());
2153  if((it != m_Vector.end()) && (it->first == key))
2154  {
2155  return it;
2156  }
2157  else
2158  {
2159  return m_Vector.end();
2160  }
2161 }
2162 
2163 template<typename KeyT, typename ValueT>
2164 void VmaMap<KeyT, ValueT>::erase(iterator it)
2165 {
2166  VectorRemove(m_Vector, it - m_Vector.begin());
2167 }
2168 
2169 #endif // #if VMA_USE_STL_UNORDERED_MAP
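// A usage sketch of the map in either configuration (VmaExampleMap is a
// hypothetical helper, assuming default allocation callbacks):
static void VmaExampleMap()
{
    VmaStlAllocator< VmaPair<uint32_t, float> > alloc(VMA_NULL);
    VMA_MAP_TYPE(uint32_t, float) m(alloc);
    m.insert(VmaPair<uint32_t, float>(1, 2.5f));
    VMA_ASSERT(m.find(1) != m.end()); // Key 1 is present.
}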
2170 
2172 
2173 class VmaBlock;
2174 
2175 enum VMA_BLOCK_VECTOR_TYPE
2176 {
2177  VMA_BLOCK_VECTOR_TYPE_UNMAPPED,
2178  VMA_BLOCK_VECTOR_TYPE_MAPPED,
2179  VMA_BLOCK_VECTOR_TYPE_COUNT
2180 };
2181 
2182 static VMA_BLOCK_VECTOR_TYPE VmaMemoryRequirementFlagsToBlockVectorType(VmaMemoryRequirementFlags flags)
2183 {
2184  return (flags & VMA_MEMORY_REQUIREMENT_PERSISTENT_MAP_BIT) != 0 ?
2185  VMA_BLOCK_VECTOR_TYPE_MAPPED :
2186  VMA_BLOCK_VECTOR_TYPE_UNMAPPED;
2187 }
2188 
2189 struct VmaAllocation_T
2190 {
2191 public:
2192  enum ALLOCATION_TYPE
2193  {
2194  ALLOCATION_TYPE_NONE,
2195  ALLOCATION_TYPE_BLOCK,
2196  ALLOCATION_TYPE_OWN,
2197  };
2198 
2199  VmaAllocation_T()
2200  {
2201  memset(this, 0, sizeof(VmaAllocation_T));
2202  }
2203 
2204  void InitBlockAllocation(
2205  VmaBlock* block,
2206  VkDeviceSize offset,
2207  VkDeviceSize alignment,
2208  VkDeviceSize size,
2209  VmaSuballocationType suballocationType,
2210  void* pUserData)
2211  {
2212  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
2213  VMA_ASSERT(block != VMA_NULL);
2214  m_Type = ALLOCATION_TYPE_BLOCK;
2215  m_Alignment = alignment;
2216  m_Size = size;
2217  m_pUserData = pUserData;
2218  m_SuballocationType = suballocationType;
2219  m_BlockAllocation.m_Block = block;
2220  m_BlockAllocation.m_Offset = offset;
2221  }
2222 
2223  void ChangeBlockAllocation(
2224  VmaBlock* block,
2225  VkDeviceSize offset)
2226  {
2227  VMA_ASSERT(block != VMA_NULL);
2228  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
2229  m_BlockAllocation.m_Block = block;
2230  m_BlockAllocation.m_Offset = offset;
2231  }
2232 
2233  void InitOwnAllocation(
2234  uint32_t memoryTypeIndex,
2235  VkDeviceMemory hMemory,
2236  VmaSuballocationType suballocationType,
2237  bool persistentMap,
2238  void* pMappedData,
2239  VkDeviceSize size,
2240  void* pUserData)
2241  {
2242  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
2243  VMA_ASSERT(hMemory != VK_NULL_HANDLE);
2244  m_Type = ALLOCATION_TYPE_OWN;
2245  m_Alignment = 0;
2246  m_Size = size;
2247  m_pUserData = pUserData;
2248  m_SuballocationType = suballocationType;
2249  m_OwnAllocation.m_MemoryTypeIndex = memoryTypeIndex;
2250  m_OwnAllocation.m_hMemory = hMemory;
2251  m_OwnAllocation.m_PersistentMap = persistentMap;
2252  m_OwnAllocation.m_pMappedData = pMappedData;
2253  }
2254 
2255  ALLOCATION_TYPE GetType() const { return m_Type; }
2256  VkDeviceSize GetAlignment() const { return m_Alignment; }
2257  VkDeviceSize GetSize() const { return m_Size; }
2258  void* GetUserData() const { return m_pUserData; }
2259  void SetUserData(void* pUserData) { m_pUserData = pUserData; }
2260  VmaSuballocationType GetSuballocationType() const { return m_SuballocationType; }
2261 
2262  VmaBlock* GetBlock() const
2263  {
2264  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
2265  return m_BlockAllocation.m_Block;
2266  }
2267  VkDeviceSize GetOffset() const
2268  {
2269  return (m_Type == ALLOCATION_TYPE_BLOCK) ? m_BlockAllocation.m_Offset : 0;
2270  }
2271  VkDeviceMemory GetMemory() const;
2272  uint32_t GetMemoryTypeIndex() const;
2273  VMA_BLOCK_VECTOR_TYPE GetBlockVectorType() const;
2274  void* GetMappedData() const;
2275 
2276  VkResult OwnAllocMapPersistentlyMappedMemory(VkDevice hDevice)
2277  {
2278  VMA_ASSERT(m_Type == ALLOCATION_TYPE_OWN);
2279  if(m_OwnAllocation.m_PersistentMap)
2280  {
2281  return vkMapMemory(hDevice, m_OwnAllocation.m_hMemory, 0, VK_WHOLE_SIZE, 0, &m_OwnAllocation.m_pMappedData);
2282  }
2283  return VK_SUCCESS;
2284  }
2285  void OwnAllocUnmapPersistentlyMappedMemory(VkDevice hDevice)
2286  {
2287  VMA_ASSERT(m_Type == ALLOCATION_TYPE_OWN);
2288  if(m_OwnAllocation.m_pMappedData)
2289  {
2290  VMA_ASSERT(m_OwnAllocation.m_PersistentMap);
2291  vkUnmapMemory(hDevice, m_OwnAllocation.m_hMemory);
2292  m_OwnAllocation.m_pMappedData = VMA_NULL;
2293  }
2294  }
2295 
2296 private:
2297  VkDeviceSize m_Alignment;
2298  VkDeviceSize m_Size;
2299  void* m_pUserData;
2300  ALLOCATION_TYPE m_Type;
2301  VmaSuballocationType m_SuballocationType;
2302 
2303  // Allocation out of VmaBlock.
2304  struct BlockAllocation
2305  {
2306  VmaBlock* m_Block;
2307  VkDeviceSize m_Offset;
2308  };
2309 
2310  // Allocation for an object that has its own private VkDeviceMemory.
2311  struct OwnAllocation
2312  {
2313  uint32_t m_MemoryTypeIndex;
2314  VkDeviceMemory m_hMemory;
2315  bool m_PersistentMap;
2316  void* m_pMappedData;
2317  };
2318 
2319  union
2320  {
2321  // Allocation out of VmaBlock.
2322  BlockAllocation m_BlockAllocation;
2323  // Allocation for an object that has its own private VkDeviceMemory.
2324  OwnAllocation m_OwnAllocation;
2325  };
2326 };
2327 
2328 /*
2329 Represents a region of a VmaBlock that is either free, or assigned and
2330 returned as an allocated memory block.
2331 */
2332 struct VmaSuballocation
2333 {
2334  VkDeviceSize offset;
2335  VkDeviceSize size;
2336  VmaSuballocationType type;
2337 };
2338 
2339 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
2340 
2341 // Parameters of an allocation.
2342 struct VmaAllocationRequest
2343 {
2344  VmaSuballocationList::iterator freeSuballocationItem;
2345  VkDeviceSize offset;
2346 };
2347 
2348 /* Single block of memory - VkDeviceMemory with all the data about its regions
2349 assigned or free. */
2350 class VmaBlock
2351 {
2352 public:
2353  uint32_t m_MemoryTypeIndex;
2354  VMA_BLOCK_VECTOR_TYPE m_BlockVectorType;
2355  VkDeviceMemory m_hMemory;
2356  VkDeviceSize m_Size;
2357  bool m_PersistentMap;
2358  void* m_pMappedData;
2359  uint32_t m_FreeCount;
2360  VkDeviceSize m_SumFreeSize;
2361  VmaSuballocationList m_Suballocations;
2362  // Suballocations that are free and have a size greater than a certain threshold.
2363  // Sorted by size, ascending.
2364  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
2365 
2366  VmaBlock(VmaAllocator hAllocator);
2367 
2368  ~VmaBlock()
2369  {
2370  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
2371  }
2372 
2373  // Always call after construction.
2374  void Init(
2375  uint32_t newMemoryTypeIndex,
2376  VMA_BLOCK_VECTOR_TYPE newBlockVectorType,
2377  VkDeviceMemory newMemory,
2378  VkDeviceSize newSize,
2379  bool persistentMap,
2380  void* pMappedData);
2381  // Always call before destruction.
2382  void Destroy(VmaAllocator allocator);
2383 
2384  // Validates all data structures inside this object. If not valid, returns false.
2385  bool Validate() const;
2386 
2387  // Tries to find a place for a suballocation with the given parameters inside this block.
2388  // If it succeeds, fills pAllocationRequest and returns true.
2389  // If it fails, returns false.
2390  bool CreateAllocationRequest(
2391  VkDeviceSize bufferImageGranularity,
2392  VkDeviceSize allocSize,
2393  VkDeviceSize allocAlignment,
2394  VmaSuballocationType allocType,
2395  VmaAllocationRequest* pAllocationRequest);
2396 
2397  // Checks if a requested suballocation with the given parameters can be placed in the given freeSuballocItem.
2398  // If yes, fills pOffset and returns true. If no, returns false.
2399  bool CheckAllocation(
2400  VkDeviceSize bufferImageGranularity,
2401  VkDeviceSize allocSize,
2402  VkDeviceSize allocAlignment,
2403  VmaSuballocationType allocType,
2404  VmaSuballocationList::const_iterator freeSuballocItem,
2405  VkDeviceSize* pOffset) const;
2406 
2407  // Returns true if this block is empty - contains only a single free suballocation.
2408  bool IsEmpty() const;
2409 
2410  // Makes actual allocation based on request. Request must already be checked
2411  // and valid.
2412  void Alloc(
2413  const VmaAllocationRequest& request,
2414  VmaSuballocationType type,
2415  VkDeviceSize allocSize);
2416 
2417  // Frees suballocation assigned to given memory region.
2418  void Free(const VmaAllocation allocation);
2419 
2420 #if VMA_STATS_STRING_ENABLED
2421  void PrintDetailedMap(class VmaStringBuilder& sb) const;
2422 #endif
2423 
2424 private:
2425  // Given a free suballocation, merges it with the following one, which must also be free.
2426  void MergeFreeWithNext(VmaSuballocationList::iterator item);
2427  // Releases the given suballocation, making it free. Merges it with adjacent free
2428  // suballocations if applicable.
2429  void FreeSuballocation(VmaSuballocationList::iterator suballocItem);
2430  // Given a free suballocation, inserts it into the sorted list
2431  // m_FreeSuballocationsBySize if it is suitable.
2432  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
2433  // Given a free suballocation, removes it from the sorted list
2434  // m_FreeSuballocationsBySize if it is suitable.
2435  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
2436 };
2437 
2438 struct VmaPointerLess
2439 {
2440  bool operator()(const void* lhs, const void* rhs) const
2441  {
2442  return lhs < rhs;
2443  }
2444 };
2445 
2446 /* Sequence of VmaBlock. Represents memory blocks allocated for a specific
2447 Vulkan memory type. */
2448 struct VmaBlockVector
2449 {
2450  // Incrementally sorted by sumFreeSize, ascending.
2451  VmaVector< VmaBlock*, VmaStlAllocator<VmaBlock*> > m_Blocks;
2452 
2453  VmaBlockVector(VmaAllocator hAllocator);
2454  ~VmaBlockVector();
2455 
2456  bool IsEmpty() const { return m_Blocks.empty(); }
2457 
2458  // Finds and removes given block from vector.
2459  void Remove(VmaBlock* pBlock);
2460 
2461  // Performs single step in sorting m_Blocks. They may not be fully sorted
2462  // after this call.
2463  void IncrementallySortBlocks();
2464 
2465  // Adds statistics of this BlockVector to pStats.
2466  void AddStats(VmaStats* pStats, uint32_t memTypeIndex, uint32_t memHeapIndex) const;
2467 
2468 #if VMA_STATS_STRING_ENABLED
2469  void PrintDetailedMap(class VmaStringBuilder& sb) const;
2470 #endif
2471 
2472  void UnmapPersistentlyMappedMemory();
2473  VkResult MapPersistentlyMappedMemory();
2474 
2475 private:
2476  VmaAllocator m_hAllocator;
2477 };
2478 
2479 // Main allocator object.
2480 struct VmaAllocator_T
2481 {
2482  bool m_UseMutex;
2483  VkDevice m_hDevice;
2484  bool m_AllocationCallbacksSpecified;
2485  VkAllocationCallbacks m_AllocationCallbacks;
2486  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
2487  VkDeviceSize m_PreferredLargeHeapBlockSize;
2488  VkDeviceSize m_PreferredSmallHeapBlockSize;
2489  // Non-zero when we are inside UnmapPersistentlyMappedMemory...MapPersistentlyMappedMemory.
2490  // Counter to allow nested calls to these functions.
2491  uint32_t m_UnmapPersistentlyMappedMemoryCounter;
2492 
2493  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
2494  VkPhysicalDeviceMemoryProperties m_MemProps;
2495 
2496  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES][VMA_BLOCK_VECTOR_TYPE_COUNT];
2497  /* There can be at most one memory block that is completely empty - a
2498  hysteresis to avoid the pessimistic case of alternating creation and
2499  destruction of a VkDeviceMemory. */
2500  bool m_HasEmptyBlock[VK_MAX_MEMORY_TYPES];
2501  VMA_MUTEX m_BlocksMutex[VK_MAX_MEMORY_TYPES];
2502 
2503  // Each vector is sorted by memory (handle value).
2504  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
2505  AllocationVectorType* m_pOwnAllocations[VK_MAX_MEMORY_TYPES][VMA_BLOCK_VECTOR_TYPE_COUNT];
2506  VMA_MUTEX m_OwnAllocationsMutex[VK_MAX_MEMORY_TYPES];
2507 
2508  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
2509  ~VmaAllocator_T();
2510 
2511  const VkAllocationCallbacks* GetAllocationCallbacks() const
2512  {
2513  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
2514  }
2515 
2516  VkDeviceSize GetPreferredBlockSize(uint32_t memTypeIndex) const;
2517 
2518  VkDeviceSize GetBufferImageGranularity() const
2519  {
2520  return VMA_MAX(
2521  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
2522  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
2523  }
2524 
2525  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
2526  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
2527 
2528  // Main allocation function.
2529  VkResult AllocateMemory(
2530  const VkMemoryRequirements& vkMemReq,
2531  const VmaMemoryRequirements& vmaMemReq,
2532  VmaSuballocationType suballocType,
2533  VmaAllocation* pAllocation);
2534 
2535  // Main deallocation function.
2536  void FreeMemory(const VmaAllocation allocation);
2537 
2538  void CalculateStats(VmaStats* pStats);
2539 
2540 #if VMA_STATS_STRING_ENABLED
2541  void PrintDetailedMap(class VmaStringBuilder& sb);
2542 #endif
2543 
2544  void UnmapPersistentlyMappedMemory();
2545  VkResult MapPersistentlyMappedMemory();
2546 
2547  VkResult Defragment(
2548  VmaAllocation* pAllocations,
2549  size_t allocationCount,
2550  VkBool32* pAllocationsChanged,
2551  const VmaDefragmentationInfo* pDefragmentationInfo,
2552  VmaDefragmentationStats* pDefragmentationStats);
2553 
2554  static void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
2555 
2556 private:
2557  VkPhysicalDevice m_PhysicalDevice;
2558 
2559  VkResult AllocateMemoryOfType(
2560  const VkMemoryRequirements& vkMemReq,
2561  const VmaMemoryRequirements& vmaMemReq,
2562  uint32_t memTypeIndex,
2563  VmaSuballocationType suballocType,
2564  VmaAllocation* pAllocation);
2565 
2566  // Allocates and registers new VkDeviceMemory specifically for a single allocation.
2567  VkResult AllocateOwnMemory(
2568  VkDeviceSize size,
2569  VmaSuballocationType suballocType,
2570  uint32_t memTypeIndex,
2571  bool map,
2572  void* pUserData,
2573  VmaAllocation* pAllocation);
2574 
2575  // Frees given allocation created as Own Memory.
2576  void FreeOwnMemory(VmaAllocation allocation);
2577 };
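// Illustrative note, not part of the library: the m_HasEmptyBlock hysteresis
// means a workload that repeatedly creates and destroys one resource reuses a
// single empty block instead of paying for vkAllocateMemory/vkFreeMemory on
// every iteration; only a second, simultaneously empty block gets released.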
2578 
2579 ////////////////////////////////////////////////////////////////////////////////
2580 // Memory allocation #2 after VmaAllocator_T definition
2581 
2582 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
2583 {
2584  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
2585 }
2586 
2587 static void VmaFree(VmaAllocator hAllocator, void* ptr)
2588 {
2589  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
2590 }
2591 
2592 template<typename T>
2593 static T* VmaAllocate(VmaAllocator hAllocator)
2594 {
2595  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
2596 }
2597 
2598 template<typename T>
2599 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
2600 {
2601  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
2602 }
2603 
2604 template<typename T>
2605 static void vma_delete(VmaAllocator hAllocator, T* ptr)
2606 {
2607  if(ptr != VMA_NULL)
2608  {
2609  ptr->~T();
2610  VmaFree(hAllocator, ptr);
2611  }
2612 }
2613 
2614 template<typename T>
2615 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
2616 {
2617  if(ptr != VMA_NULL)
2618  {
2619  for(size_t i = count; i--; )
2620  ptr[i].~T();
2621  VmaFree(hAllocator, ptr);
2622  }
2623 }
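// Illustrative sketch, not part of the library: VmaAllocate/VmaAllocateArray
// return raw, suitably aligned storage; construction is separate, and
// vma_delete/vma_delete_array run the destructors (in reverse order for
// arrays) before freeing. With a hypothetical type MyObject:
/*
MyObject* const p = VmaAllocate<MyObject>(hAllocator); // raw storage only
new(p) MyObject();                                     // construct in place
vma_delete(hAllocator, p);                             // ~MyObject() + VmaFree
*/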
2624 
2625 ////////////////////////////////////////////////////////////////////////////////
2626 // VmaStringBuilder
2627 
2628 #if VMA_STATS_STRING_ENABLED
2629 
2630 class VmaStringBuilder
2631 {
2632 public:
2633  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
2634  size_t GetLength() const { return m_Data.size(); }
2635  const char* GetData() const { return m_Data.data(); }
2636 
2637  void Add(char ch) { m_Data.push_back(ch); }
2638  void Add(const char* pStr);
2639  void AddNewLine() { Add('\n'); }
2640  void AddNumber(uint32_t num);
2641  void AddNumber(uint64_t num);
2642  void AddBool(bool b) { Add(b ? "true" : "false"); }
2643  void AddNull() { Add("null"); }
2644  void AddString(const char* pStr);
2645 
2646 private:
2647  VmaVector< char, VmaStlAllocator<char> > m_Data;
2648 };
2649 
2650 void VmaStringBuilder::Add(const char* pStr)
2651 {
2652  const size_t strLen = strlen(pStr);
2653  if(strLen > 0)
2654  {
2655  const size_t oldCount = m_Data.size();
2656  m_Data.resize(oldCount + strLen);
2657  memcpy(m_Data.data() + oldCount, pStr, strLen);
2658  }
2659 }
2660 
2661 void VmaStringBuilder::AddNumber(uint32_t num)
2662 {
2663  char buf[11];
2664  VmaUint32ToStr(buf, sizeof(buf), num);
2665  Add(buf);
2666 }
2667 
2668 void VmaStringBuilder::AddNumber(uint64_t num)
2669 {
2670  char buf[21];
2671  VmaUint64ToStr(buf, sizeof(buf), num);
2672  Add(buf);
2673 }
2674 
2675 void VmaStringBuilder::AddString(const char* pStr)
2676 {
2677  Add('"');
2678  const size_t strLen = strlen(pStr);
2679  for(size_t i = 0; i < strLen; ++i)
2680  {
2681  char ch = pStr[i];
2682  if(ch == '\\')
2683  {
2684  Add("\\\\");
2685  }
2686  else if(ch == '"')
2687  {
2688  Add("\\\"");
2689  }
2690  else if(ch >= 32)
2691  {
2692  Add(ch);
2693  }
2694  else switch(ch)
2695  {
2696  case '\n':
2697  Add("\\n");
2698  break;
2699  case '\r':
2700  Add("\\r");
2701  break;
2702  case '\t':
2703  Add("\\t");
2704  break;
2705  default:
2706  VMA_ASSERT(0 && "Character not currently supported.");
2707  break;
2708  }
2709  }
2710  Add('"');
2711 }
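// Illustrative example, not part of the library:
/*
sb.AddString("a\"b\nc\\d");
// Appends: "a\"b\nc\\d" - the quote, newline and backslash are emitted as the
// two-character escapes \" \n \\ between surrounding double quotes, keeping
// the output valid JSON.
*/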
2712 
2713 ////////////////////////////////////////////////////////////////////////////////
2714 
2715 VkDeviceMemory VmaAllocation_T::GetMemory() const
2716 {
2717  return (m_Type == ALLOCATION_TYPE_BLOCK) ?
2718  m_BlockAllocation.m_Block->m_hMemory : m_OwnAllocation.m_hMemory;
2719 }
2720 
2721 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
2722 {
2723  return (m_Type == ALLOCATION_TYPE_BLOCK) ?
2724  m_BlockAllocation.m_Block->m_MemoryTypeIndex : m_OwnAllocation.m_MemoryTypeIndex;
2725 }
2726 
2727 VMA_BLOCK_VECTOR_TYPE VmaAllocation_T::GetBlockVectorType() const
2728 {
2729  return (m_Type == ALLOCATION_TYPE_BLOCK) ?
2730  m_BlockAllocation.m_Block->m_BlockVectorType :
2731  (m_OwnAllocation.m_PersistentMap ? VMA_BLOCK_VECTOR_TYPE_MAPPED : VMA_BLOCK_VECTOR_TYPE_UNMAPPED);
2732 }
2733 
2734 void* VmaAllocation_T::GetMappedData() const
2735 {
2736  switch(m_Type)
2737  {
2738  case ALLOCATION_TYPE_BLOCK:
2739  if(m_BlockAllocation.m_Block->m_pMappedData != VMA_NULL)
2740  {
2741  return (char*)m_BlockAllocation.m_Block->m_pMappedData + m_BlockAllocation.m_Offset;
2742  }
2743  else
2744  {
2745  return VMA_NULL;
2746  }
2747  break;
2748  case ALLOCATION_TYPE_OWN:
2749  return m_OwnAllocation.m_pMappedData;
2750  default:
2751  VMA_ASSERT(0);
2752  return VMA_NULL;
2753  }
2754 }
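// Illustrative note, not part of the library: for a block allocation the
// mapped pointer is the block's base mapping plus the suballocation offset,
// so multiple allocations in one persistently mapped block address disjoint
// ranges of the same vkMapMemory region.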
2755 
2756 // Correspond to values of enum VmaSuballocationType.
2757 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
2758  "FREE",
2759  "UNKNOWN",
2760  "BUFFER",
2761  "IMAGE_UNKNOWN",
2762  "IMAGE_LINEAR",
2763  "IMAGE_OPTIMAL",
2764 };
2765 
2766 static void VmaPrintStatInfo(VmaStringBuilder& sb, const VmaStatInfo& stat)
2767 {
2768  sb.Add("{ \"Allocations\": ");
2769  sb.AddNumber(stat.AllocationCount);
2770  sb.Add(", \"Suballocations\": ");
2771  sb.AddNumber(stat.SuballocationCount);
2772  sb.Add(", \"UnusedRanges\": ");
2773  sb.AddNumber(stat.UnusedRangeCount);
2774  sb.Add(", \"UsedBytes\": ");
2775  sb.AddNumber(stat.UsedBytes);
2776  sb.Add(", \"UnusedBytes\": ");
2777  sb.AddNumber(stat.UnusedBytes);
2778  sb.Add(", \"SuballocationSize\": { \"Min\": ");
2779  sb.AddNumber(stat.SuballocationSizeMin);
2780  sb.Add(", \"Avg\": ");
2781  sb.AddNumber(stat.SuballocationSizeAvg);
2782  sb.Add(", \"Max\": ");
2783  sb.AddNumber(stat.SuballocationSizeMax);
2784  sb.Add(" }, \"UnusedRangeSize\": { \"Min\": ");
2785  sb.AddNumber(stat.UnusedRangeSizeMin);
2786  sb.Add(", \"Avg\": ");
2787  sb.AddNumber(stat.UnusedRangeSizeAvg);
2788  sb.Add(", \"Max\": ");
2789  sb.AddNumber(stat.UnusedRangeSizeMax);
2790  sb.Add(" } }");
2791 }
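// Illustrative example, not part of the library, with hypothetical numbers:
// for a block holding two used suballocations (256 and 512 bytes) and one
// 256-byte free range, VmaPrintStatInfo emits:
// { "Allocations": 1, "Suballocations": 2, "UnusedRanges": 1,
//   "UsedBytes": 768, "UnusedBytes": 256,
//   "SuballocationSize": { "Min": 256, "Avg": 384, "Max": 512 },
//   "UnusedRangeSize": { "Min": 256, "Avg": 256, "Max": 256 } }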
2792 
2793 #endif // #if VMA_STATS_STRING_ENABLED
2794 
2795 struct VmaSuballocationItemSizeLess
2796 {
2797  bool operator()(
2798  const VmaSuballocationList::iterator lhs,
2799  const VmaSuballocationList::iterator rhs) const
2800  {
2801  return lhs->size < rhs->size;
2802  }
2803  bool operator()(
2804  const VmaSuballocationList::iterator lhs,
2805  VkDeviceSize rhsSize) const
2806  {
2807  return lhs->size < rhsSize;
2808  }
2809 };
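// Illustrative sketch, not part of the library: the second operator() lets the
// comparator binary-search m_FreeSuballocationsBySize directly with a plain
// VkDeviceSize key, without building a dummy list item. Hypothetical locals:
/*
VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
    vec.data(), vec.data() + vec.size(),
    requiredSize,                      // VkDeviceSize, matched by 2nd overload
    VmaSuballocationItemSizeLess());
*/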
2810 
2811 VmaBlock::VmaBlock(VmaAllocator hAllocator) :
2812  m_MemoryTypeIndex(UINT32_MAX),
2813  m_BlockVectorType(VMA_BLOCK_VECTOR_TYPE_COUNT),
2814  m_hMemory(VK_NULL_HANDLE),
2815  m_Size(0),
2816  m_PersistentMap(false),
2817  m_pMappedData(VMA_NULL),
2818  m_FreeCount(0),
2819  m_SumFreeSize(0),
2820  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
2821  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
2822 {
2823 }
2824 
2825 void VmaBlock::Init(
2826  uint32_t newMemoryTypeIndex,
2827  VMA_BLOCK_VECTOR_TYPE newBlockVectorType,
2828  VkDeviceMemory newMemory,
2829  VkDeviceSize newSize,
2830  bool persistentMap,
2831  void* pMappedData)
2832 {
2833  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
2834 
2835  m_MemoryTypeIndex = newMemoryTypeIndex;
2836  m_BlockVectorType = newBlockVectorType;
2837  m_hMemory = newMemory;
2838  m_Size = newSize;
2839  m_PersistentMap = persistentMap;
2840  m_pMappedData = pMappedData;
2841  m_FreeCount = 1;
2842  m_SumFreeSize = newSize;
2843 
2844  m_Suballocations.clear();
2845  m_FreeSuballocationsBySize.clear();
2846 
2847  VmaSuballocation suballoc = {};
2848  suballoc.offset = 0;
2849  suballoc.size = newSize;
2850  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
2851 
2852  m_Suballocations.push_back(suballoc);
2853  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
2854  --suballocItem;
2855  m_FreeSuballocationsBySize.push_back(suballocItem);
2856 }
2857 
2858 void VmaBlock::Destroy(VmaAllocator allocator)
2859 {
2860  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
2861  if(m_pMappedData != VMA_NULL)
2862  {
2863  vkUnmapMemory(allocator->m_hDevice, m_hMemory);
2864  m_pMappedData = VMA_NULL;
2865  }
2866 
2867  // Callback.
2868  if(allocator->m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
2869  {
2870  (*allocator->m_DeviceMemoryCallbacks.pfnFree)(allocator, m_MemoryTypeIndex, m_hMemory, m_Size);
2871  }
2872 
2873  vkFreeMemory(allocator->m_hDevice, m_hMemory, allocator->GetAllocationCallbacks());
2874  m_hMemory = VK_NULL_HANDLE;
2875 }
2876 
2877 bool VmaBlock::Validate() const
2878 {
2879  if((m_hMemory == VK_NULL_HANDLE) ||
2880  (m_Size == 0) ||
2881  m_Suballocations.empty())
2882  {
2883  return false;
2884  }
2885 
2886  // Expected offset of each suballocation as calculated from the previous ones.
2887  VkDeviceSize calculatedOffset = 0;
2888  // Expected number of free suballocations as calculated from traversing their list.
2889  uint32_t calculatedFreeCount = 0;
2890  // Expected sum size of free suballocations as calculated from traversing their list.
2891  VkDeviceSize calculatedSumFreeSize = 0;
2892  // Expected number of free suballocations that should be registered in
2893  // m_FreeSuballocationsBySize calculated from traversing their list.
2894  size_t freeSuballocationsToRegister = 0;
2895  // True if previously visited suballocation was free.
2896  bool prevFree = false;
2897 
2898  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
2899  suballocItem != m_Suballocations.cend();
2900  ++suballocItem)
2901  {
2902  const VmaSuballocation& subAlloc = *suballocItem;
2903 
2904  // Actual offset of this suballocation doesn't match expected one.
2905  if(subAlloc.offset != calculatedOffset)
2906  {
2907  return false;
2908  }
2909 
2910  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
2911  // Two adjacent free suballocations are invalid. They should be merged.
2912  if(prevFree && currFree)
2913  {
2914  return false;
2915  }
2916  prevFree = currFree;
2917 
2918  if(currFree)
2919  {
2920  calculatedSumFreeSize += subAlloc.size;
2921  ++calculatedFreeCount;
2922  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
2923  {
2924  ++freeSuballocationsToRegister;
2925  }
2926  }
2927 
2928  calculatedOffset += subAlloc.size;
2929  }
2930 
2931  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
2932  // match expected one.
2933  if(m_FreeSuballocationsBySize.size() != freeSuballocationsToRegister)
2934  {
2935  return false;
2936  }
2937 
2938  VkDeviceSize lastSize = 0;
2939  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
2940  {
2941  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
2942 
2943  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
2944  if(suballocItem->type != VMA_SUBALLOCATION_TYPE_FREE)
2945  {
2946  return false;
2947  }
2948  // They must be sorted by size ascending.
2949  if(suballocItem->size < lastSize)
2950  {
2951  return false;
2952  }
2953 
2954  lastSize = suballocItem->size;
2955  }
2956 
2957  // Check if totals match calculated values.
2958  return
2959  (calculatedOffset == m_Size) &&
2960  (calculatedSumFreeSize == m_SumFreeSize) &&
2961  (calculatedFreeCount == m_FreeCount);
2962 }
2963 
2964 /*
2965 How many suitable free suballocations to analyze before choosing the best one.
2966 - Set to 1 to use First-Fit algorithm - first suitable free suballocation will
2967  be chosen.
2968 - Set to UINT32_MAX to use Best-Fit/Worst-Fit algorithm - all suitable free
2969  suballocations will be analyzed and the best one will be chosen.
2970 - Any other value is also acceptable.
2971 */
2972 //static const uint32_t MAX_SUITABLE_SUBALLOCATIONS_TO_CHECK = 8;
2973 
2974 bool VmaBlock::CreateAllocationRequest(
2975  VkDeviceSize bufferImageGranularity,
2976  VkDeviceSize allocSize,
2977  VkDeviceSize allocAlignment,
2978  VmaSuballocationType allocType,
2979  VmaAllocationRequest* pAllocationRequest)
2980 {
2981  VMA_ASSERT(allocSize > 0);
2982  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
2983  VMA_ASSERT(pAllocationRequest != VMA_NULL);
2984  VMA_HEAVY_ASSERT(Validate());
2985 
2986  // There is not enough total free space in this block to fulfill the request: Early return.
2987  if(m_SumFreeSize < allocSize)
2988  {
2989  return false;
2990  }
2991 
2992  // Old brute-force algorithm, linearly searching suballocations.
2993  /*
2994  uint32_t suitableSuballocationsFound = 0;
2995  for(VmaSuballocationList::iterator suballocItem = suballocations.Front();
2996  suballocItem != VMA_NULL &&
2997  suitableSuballocationsFound < MAX_SUITABLE_SUBALLOCATIONS_TO_CHECK;
2998  suballocItem = suballocItem->Next)
2999  {
3000  if(suballocItem->Value.type == VMA_SUBALLOCATION_TYPE_FREE)
3001  {
3002  VkDeviceSize offset = 0, cost = 0;
3003  if(CheckAllocation(bufferImageGranularity, allocSize, allocAlignment, allocType, suballocItem, &offset, &cost))
3004  {
3005  ++suitableSuballocationsFound;
3006  if(cost < costLimit)
3007  {
3008  pAllocationRequest->freeSuballocationItem = suballocItem;
3009  pAllocationRequest->offset = offset;
3010  pAllocationRequest->cost = cost;
3011  if(cost == 0)
3012  return true;
3013  costLimit = cost;
3014  betterSuballocationFound = true;
3015  }
3016  }
3017  }
3018  }
3019  */
3020 
3021  // New algorithm, efficiently searching freeSuballocationsBySize.
3022  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
3023  if(freeSuballocCount > 0)
3024  {
3025  if(VMA_BEST_FIT)
3026  {
3027  // Find first free suballocation with size not less than allocSize.
3028  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
3029  m_FreeSuballocationsBySize.data(),
3030  m_FreeSuballocationsBySize.data() + freeSuballocCount,
3031  allocSize,
3032  VmaSuballocationItemSizeLess());
3033  size_t index = it - m_FreeSuballocationsBySize.data();
3034  for(; index < freeSuballocCount; ++index)
3035  {
3036  VkDeviceSize offset = 0;
3037  const VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[index];
3038  if(CheckAllocation(bufferImageGranularity, allocSize, allocAlignment, allocType, suballocItem, &offset))
3039  {
3040  pAllocationRequest->freeSuballocationItem = suballocItem;
3041  pAllocationRequest->offset = offset;
3042  return true;
3043  }
3044  }
3045  }
3046  else
3047  {
3048  // Search starting from the biggest suballocations.
3049  for(size_t index = freeSuballocCount; index--; )
3050  {
3051  VkDeviceSize offset = 0;
3052  const VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[index];
3053  if(CheckAllocation(bufferImageGranularity, allocSize, allocAlignment, allocType, suballocItem, &offset))
3054  {
3055  pAllocationRequest->freeSuballocationItem = suballocItem;
3056  pAllocationRequest->offset = offset;
3057  return true;
3058  }
3059  }
3060  }
3061  }
3062 
3063  return false;
3064 }
3065 
3066 bool VmaBlock::CheckAllocation(
3067  VkDeviceSize bufferImageGranularity,
3068  VkDeviceSize allocSize,
3069  VkDeviceSize allocAlignment,
3070  VmaSuballocationType allocType,
3071  VmaSuballocationList::const_iterator freeSuballocItem,
3072  VkDeviceSize* pOffset) const
3073 {
3074  VMA_ASSERT(allocSize > 0);
3075  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
3076  VMA_ASSERT(freeSuballocItem != m_Suballocations.cend());
3077  VMA_ASSERT(pOffset != VMA_NULL);
3078 
3079  const VmaSuballocation& suballoc = *freeSuballocItem;
3080  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
3081 
3082  // Size of this suballocation is too small for this request: Early return.
3083  if(suballoc.size < allocSize)
3084  {
3085  return false;
3086  }
3087 
3088  // Start from offset equal to beginning of this suballocation.
3089  *pOffset = suballoc.offset;
3090 
3091  // Apply VMA_DEBUG_MARGIN at the beginning.
3092  if((VMA_DEBUG_MARGIN > 0) && freeSuballocItem != m_Suballocations.cbegin())
3093  {
3094  *pOffset += VMA_DEBUG_MARGIN;
3095  }
3096 
3097  // Apply alignment.
3098  const VkDeviceSize alignment = VMA_MAX(allocAlignment, static_cast<VkDeviceSize>(VMA_DEBUG_ALIGNMENT));
3099  *pOffset = VmaAlignUp(*pOffset, alignment);
3100 
3101  // Check previous suballocations for BufferImageGranularity conflicts.
3102  // Make bigger alignment if necessary.
3103  if(bufferImageGranularity > 1)
3104  {
3105  bool bufferImageGranularityConflict = false;
3106  VmaSuballocationList::const_iterator prevSuballocItem = freeSuballocItem;
3107  while(prevSuballocItem != m_Suballocations.cbegin())
3108  {
3109  --prevSuballocItem;
3110  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
3111  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
3112  {
3113  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
3114  {
3115  bufferImageGranularityConflict = true;
3116  break;
3117  }
3118  }
3119  else
3120  // Already on previous page.
3121  break;
3122  }
3123  if(bufferImageGranularityConflict)
3124  {
3125  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
3126  }
3127  }
3128 
3129  // Calculate padding at the beginning based on current offset.
3130  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
3131 
3132  // Calculate required margin at the end if this is not the last suballocation.
3133  VmaSuballocationList::const_iterator next = freeSuballocItem;
3134  ++next;
3135  const VkDeviceSize requiredEndMargin =
3136  (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;
3137 
3138  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
3139  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
3140  {
3141  return false;
3142  }
3143 
3144  // Check next suballocations for BufferImageGranularity conflicts.
3145  // If conflict exists, allocation cannot be made here.
3146  if(bufferImageGranularity > 1)
3147  {
3148  VmaSuballocationList::const_iterator nextSuballocItem = freeSuballocItem;
3149  ++nextSuballocItem;
3150  while(nextSuballocItem != m_Suballocations.cend())
3151  {
3152  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
3153  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
3154  {
3155  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
3156  {
3157  return false;
3158  }
3159  }
3160  else
3161  {
3162  // Already on next page.
3163  break;
3164  }
3165  ++nextSuballocItem;
3166  }
3167  }
3168 
3169  // All tests passed: Success. pOffset is already filled.
3170  return true;
3171 }
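// Illustrative worked example, not part of the library, with hypothetical
// numbers: a free suballocation at offset=1000, size=600; allocSize=256,
// allocAlignment=128, VMA_DEBUG_MARGIN=0, bufferImageGranularity=1.
// *pOffset starts at 1000 and is aligned up to 1024, so paddingBegin=24;
// since 24 + 256 + 0 <= 600, the check succeeds with *pOffset == 1024.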
3172 
3173 bool VmaBlock::IsEmpty() const
3174 {
3175  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
3176 }
3177 
3178 void VmaBlock::Alloc(
3179  const VmaAllocationRequest& request,
3180  VmaSuballocationType type,
3181  VkDeviceSize allocSize)
3182 {
3183  VMA_ASSERT(request.freeSuballocationItem != m_Suballocations.end());
3184  VmaSuballocation& suballoc = *request.freeSuballocationItem;
3185  // Given suballocation is a free block.
3186  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
3187  // Given offset is inside this suballocation.
3188  VMA_ASSERT(request.offset >= suballoc.offset);
3189  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
3190  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
3191  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
3192 
3193  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
3194  // it to become used.
3195  UnregisterFreeSuballocation(request.freeSuballocationItem);
3196 
3197  suballoc.offset = request.offset;
3198  suballoc.size = allocSize;
3199  suballoc.type = type;
3200 
3201  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
3202  if(paddingEnd)
3203  {
3204  VmaSuballocation paddingSuballoc = {};
3205  paddingSuballoc.offset = request.offset + allocSize;
3206  paddingSuballoc.size = paddingEnd;
3207  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
3208  VmaSuballocationList::iterator next = request.freeSuballocationItem;
3209  ++next;
3210  const VmaSuballocationList::iterator paddingEndItem =
3211  m_Suballocations.insert(next, paddingSuballoc);
3212  RegisterFreeSuballocation(paddingEndItem);
3213  }
3214 
3215  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
3216  if(paddingBegin)
3217  {
3218  VmaSuballocation paddingSuballoc = {};
3219  paddingSuballoc.offset = request.offset - paddingBegin;
3220  paddingSuballoc.size = paddingBegin;
3221  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
3222  const VmaSuballocationList::iterator paddingBeginItem =
3223  m_Suballocations.insert(request.freeSuballocationItem, paddingSuballoc);
3224  RegisterFreeSuballocation(paddingBeginItem);
3225  }
3226 
3227  // Update totals.
3228  m_FreeCount = m_FreeCount - 1;
3229  if(paddingBegin > 0)
3230  {
3231  ++m_FreeCount;
3232  }
3233  if(paddingEnd > 0)
3234  {
3235  ++m_FreeCount;
3236  }
3237  m_SumFreeSize -= allocSize;
3238 }
3239 
3240 void VmaBlock::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
3241 {
3242  // Change this suballocation to be marked as free.
3243  VmaSuballocation& suballoc = *suballocItem;
3244  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
3245 
3246  // Update totals.
3247  ++m_FreeCount;
3248  m_SumFreeSize += suballoc.size;
3249 
3250  // Merge with previous and/or next suballocation if it's also free.
3251  bool mergeWithNext = false;
3252  bool mergeWithPrev = false;
3253 
3254  VmaSuballocationList::iterator nextItem = suballocItem;
3255  ++nextItem;
3256  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
3257  {
3258  mergeWithNext = true;
3259  }
3260 
3261  VmaSuballocationList::iterator prevItem = suballocItem;
3262  if(suballocItem != m_Suballocations.begin())
3263  {
3264  --prevItem;
3265  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
3266  {
3267  mergeWithPrev = true;
3268  }
3269  }
3270 
3271  if(mergeWithNext)
3272  {
3273  UnregisterFreeSuballocation(nextItem);
3274  MergeFreeWithNext(suballocItem);
3275  }
3276 
3277  if(mergeWithPrev)
3278  {
3279  UnregisterFreeSuballocation(prevItem);
3280  MergeFreeWithNext(prevItem);
3281  RegisterFreeSuballocation(prevItem);
3282  }
3283  else
3284  RegisterFreeSuballocation(suballocItem);
3285 }
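// Illustrative worked example, not part of the library: when the freed
// suballocation (256 bytes) sits between a FREE 128 and a FREE 64, both
// merges fire and a single FREE suballocation of 448 bytes remains.
// m_FreeCount goes 2 -> 3 -> 1 across the call, and m_SumFreeSize grows by
// exactly the freed 256 bytes, because merging only concatenates sizes.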
3286 
3287 void VmaBlock::Free(const VmaAllocation allocation)
3288 {
3289  const VkDeviceSize allocationOffset = allocation->GetOffset();
3290  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
3291  suballocItem != m_Suballocations.end();
3292  ++suballocItem)
3293  {
3294  VmaSuballocation& suballoc = *suballocItem;
3295  if(suballoc.offset == allocationOffset)
3296  {
3297  FreeSuballocation(suballocItem);
3298  VMA_HEAVY_ASSERT(Validate());
3299  return;
3300  }
3301  }
3302  VMA_ASSERT(0 && "Not found!");
3303 }
3304 
3305 #if VMA_STATS_STRING_ENABLED
3306 
3307 void VmaBlock::PrintDetailedMap(class VmaStringBuilder& sb) const
3308 {
3309  sb.Add("{\n\t\t\t\"Bytes\": ");
3310  sb.AddNumber(m_Size);
3311  sb.Add(",\n\t\t\t\"FreeBytes\": ");
3312  sb.AddNumber(m_SumFreeSize);
3313  sb.Add(",\n\t\t\t\"Suballocations\": ");
3314  sb.AddNumber(m_Suballocations.size());
3315  sb.Add(",\n\t\t\t\"FreeSuballocations\": ");
3316  sb.AddNumber(m_FreeCount);
3317  sb.Add(",\n\t\t\t\"SuballocationList\": [");
3318 
3319  size_t i = 0;
3320  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
3321  suballocItem != m_Suballocations.cend();
3322  ++suballocItem, ++i)
3323  {
3324  if(i > 0)
3325  {
3326  sb.Add(",\n\t\t\t\t{ \"Type\": ");
3327  }
3328  else
3329  {
3330  sb.Add("\n\t\t\t\t{ \"Type\": ");
3331  }
3332  sb.AddString(VMA_SUBALLOCATION_TYPE_NAMES[suballocItem->type]);
3333  sb.Add(", \"Size\": ");
3334  sb.AddNumber(suballocItem->size);
3335  sb.Add(", \"Offset\": ");
3336  sb.AddNumber(suballocItem->offset);
3337  sb.Add(" }");
3338  }
3339 
3340  sb.Add("\n\t\t\t]\n\t\t}");
3341 }
3342 
3343 #endif // #if VMA_STATS_STRING_ENABLED
3344 
3345 void VmaBlock::MergeFreeWithNext(VmaSuballocationList::iterator item)
3346 {
3347  VMA_ASSERT(item != m_Suballocations.end());
3348  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
3349 
3350  VmaSuballocationList::iterator nextItem = item;
3351  ++nextItem;
3352  VMA_ASSERT(nextItem != m_Suballocations.end());
3353  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
3354 
3355  item->size += nextItem->size;
3356  --m_FreeCount;
3357  m_Suballocations.erase(nextItem);
3358 }
3359 
3360 void VmaBlock::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
3361 {
3362  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
3363  VMA_ASSERT(item->size > 0);
3364 
3365  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
3366  {
3367  if(m_FreeSuballocationsBySize.empty())
3368  {
3369  m_FreeSuballocationsBySize.push_back(item);
3370  }
3371  else
3372  {
3373  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
3374  m_FreeSuballocationsBySize.data(),
3375  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
3376  item,
3377  VmaSuballocationItemSizeLess());
3378  size_t index = it - m_FreeSuballocationsBySize.data();
3379  VectorInsert(m_FreeSuballocationsBySize, index, item);
3380  }
3381  }
3382 }
3383 
3384 void VmaBlock::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
3385 {
3386  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
3387  VMA_ASSERT(item->size > 0);
3388 
3389  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
3390  {
3391  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
3392  m_FreeSuballocationsBySize.data(),
3393  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
3394  item,
3395  VmaSuballocationItemSizeLess());
3396  for(size_t index = it - m_FreeSuballocationsBySize.data();
3397  index < m_FreeSuballocationsBySize.size();
3398  ++index)
3399  {
3400  if(m_FreeSuballocationsBySize[index] == item)
3401  {
3402  VectorRemove(m_FreeSuballocationsBySize, index);
3403  return;
3404  }
3405  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
3406  }
3407  VMA_ASSERT(0 && "Not found.");
3408  }
3409 }
3410 
3411 static void InitStatInfo(VmaStatInfo& outInfo)
3412 {
3413  memset(&outInfo, 0, sizeof(outInfo));
3414  outInfo.SuballocationSizeMin = UINT64_MAX;
3415  outInfo.UnusedRangeSizeMin = UINT64_MAX;
3416 }
3417 
3418 static void CalcAllocationStatInfo(VmaStatInfo& outInfo, const VmaBlock& alloc)
3419 {
3420  outInfo.AllocationCount = 1;
3421 
3422  const uint32_t rangeCount = (uint32_t)alloc.m_Suballocations.size();
3423  outInfo.SuballocationCount = rangeCount - alloc.m_FreeCount;
3424  outInfo.UnusedRangeCount = alloc.m_FreeCount;
3425 
3426  outInfo.UnusedBytes = alloc.m_SumFreeSize;
3427  outInfo.UsedBytes = alloc.m_Size - outInfo.UnusedBytes;
3428 
3429  outInfo.SuballocationSizeMin = UINT64_MAX;
3430  outInfo.SuballocationSizeMax = 0;
3431  outInfo.UnusedRangeSizeMin = UINT64_MAX;
3432  outInfo.UnusedRangeSizeMax = 0;
3433 
3434  for(VmaSuballocationList::const_iterator suballocItem = alloc.m_Suballocations.cbegin();
3435  suballocItem != alloc.m_Suballocations.cend();
3436  ++suballocItem)
3437  {
3438  const VmaSuballocation& suballoc = *suballocItem;
3439  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
3440  {
3441  outInfo.SuballocationSizeMin = VMA_MIN(outInfo.SuballocationSizeMin, suballoc.size);
3442  outInfo.SuballocationSizeMax = VMA_MAX(outInfo.SuballocationSizeMax, suballoc.size);
3443  }
3444  else
3445  {
3446  outInfo.UnusedRangeSizeMin = VMA_MIN(outInfo.UnusedRangeSizeMin, suballoc.size);
3447  outInfo.UnusedRangeSizeMax = VMA_MAX(outInfo.UnusedRangeSizeMax, suballoc.size);
3448  }
3449  }
3450 }
3451 
3452 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
3453 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
3454 {
3455  inoutInfo.AllocationCount += srcInfo.AllocationCount;
3456  inoutInfo.SuballocationCount += srcInfo.SuballocationCount;
3457  inoutInfo.UnusedRangeCount += srcInfo.UnusedRangeCount;
3458  inoutInfo.UsedBytes += srcInfo.UsedBytes;
3459  inoutInfo.UnusedBytes += srcInfo.UnusedBytes;
3460  inoutInfo.SuballocationSizeMin = VMA_MIN(inoutInfo.SuballocationSizeMin, srcInfo.SuballocationSizeMin);
3461  inoutInfo.SuballocationSizeMax = VMA_MAX(inoutInfo.SuballocationSizeMax, srcInfo.SuballocationSizeMax);
3462  inoutInfo.UnusedRangeSizeMin = VMA_MIN(inoutInfo.UnusedRangeSizeMin, srcInfo.UnusedRangeSizeMin);
3463  inoutInfo.UnusedRangeSizeMax = VMA_MAX(inoutInfo.UnusedRangeSizeMax, srcInfo.UnusedRangeSizeMax);
3464 }
3465 
3466 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
3467 {
3468  inoutInfo.SuballocationSizeAvg = (inoutInfo.SuballocationCount > 0) ?
3469  VmaRoundDiv<VkDeviceSize>(inoutInfo.UsedBytes, inoutInfo.SuballocationCount) : 0;
3470  inoutInfo.UnusedRangeSizeAvg = (inoutInfo.UnusedRangeCount > 0) ?
3471  VmaRoundDiv<VkDeviceSize>(inoutInfo.UnusedBytes, inoutInfo.UnusedRangeCount) : 0;
3472 }
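// Illustrative note, not part of the library, assuming VmaRoundDiv rounds to
// nearest as its name suggests: SuballocationSizeAvg = UsedBytes divided by
// SuballocationCount with rounding, e.g. 768 bytes over 2 suballocations
// yields 384, and 100 over 3 yields 33.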
3473 
3474 VmaBlockVector::VmaBlockVector(VmaAllocator hAllocator) :
3475  m_hAllocator(hAllocator),
3476  m_Blocks(VmaStlAllocator<VmaBlock*>(hAllocator->GetAllocationCallbacks()))
3477 {
3478 }
3479 
3480 VmaBlockVector::~VmaBlockVector()
3481 {
3482  for(size_t i = m_Blocks.size(); i--; )
3483  {
3484  m_Blocks[i]->Destroy(m_hAllocator);
3485  vma_delete(m_hAllocator, m_Blocks[i]);
3486  }
3487 }
3488 
3489 void VmaBlockVector::Remove(VmaBlock* pBlock)
3490 {
3491  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
3492  {
3493  if(m_Blocks[blockIndex] == pBlock)
3494  {
3495  VectorRemove(m_Blocks, blockIndex);
3496  return;
3497  }
3498  }
3499  VMA_ASSERT(0);
3500 }
3501 
3502 void VmaBlockVector::IncrementallySortBlocks()
3503 {
3504  // Bubble sort only until first swap.
3505  for(size_t i = 1; i < m_Blocks.size(); ++i)
3506  {
3507  if(m_Blocks[i - 1]->m_SumFreeSize > m_Blocks[i]->m_SumFreeSize)
3508  {
3509  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
3510  return;
3511  }
3512  }
3513 }
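// Illustrative note, not part of the library: performing at most one swap per
// call keeps each allocation cheap, while repeated calls make m_Blocks
// converge toward ascending m_SumFreeSize, so the forward search in
// AllocateMemoryOfType tends to hit the fullest blocks first.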
3514 
3515 #if VMA_STATS_STRING_ENABLED
3516 
3517 void VmaBlockVector::PrintDetailedMap(class VmaStringBuilder& sb) const
3518 {
3519  for(size_t i = 0; i < m_Blocks.size(); ++i)
3520  {
3521  if(i > 0)
3522  {
3523  sb.Add(",\n\t\t");
3524  }
3525  else
3526  {
3527  sb.Add("\n\t\t");
3528  }
3529  m_Blocks[i]->PrintDetailedMap(sb);
3530  }
3531 }
3532 
3533 #endif // #if VMA_STATS_STRING_ENABLED
3534 
3535 void VmaBlockVector::UnmapPersistentlyMappedMemory()
3536 {
3537  for(size_t i = m_Blocks.size(); i--; )
3538  {
3539  VmaBlock* pBlock = m_Blocks[i];
3540  if(pBlock->m_pMappedData != VMA_NULL)
3541  {
3542  VMA_ASSERT(pBlock->m_PersistentMap);
3543  vkUnmapMemory(m_hAllocator->m_hDevice, pBlock->m_hMemory);
3544  pBlock->m_pMappedData = VMA_NULL;
3545  }
3546  }
3547 }
3548 
3549 VkResult VmaBlockVector::MapPersistentlyMappedMemory()
3550 {
3551  VkResult finalResult = VK_SUCCESS;
3552  for(size_t i = 0, count = m_Blocks.size(); i < count; ++i)
3553  {
3554  VmaBlock* pBlock = m_Blocks[i];
3555  if(pBlock->m_PersistentMap)
3556  {
3557  VMA_ASSERT(pBlock->m_pMappedData == VMA_NULL);
3558  VkResult localResult = vkMapMemory(m_hAllocator->m_hDevice, pBlock->m_hMemory, 0, VK_WHOLE_SIZE, 0, &pBlock->m_pMappedData);
3559  if(localResult != VK_SUCCESS)
3560  {
3561  finalResult = localResult;
3562  }
3563  }
3564  }
3565  return finalResult;
3566 }
3567 
3568 void VmaBlockVector::AddStats(VmaStats* pStats, uint32_t memTypeIndex, uint32_t memHeapIndex) const
3569 {
3570  for(uint32_t allocIndex = 0; allocIndex < m_Blocks.size(); ++allocIndex)
3571  {
3572  const VmaBlock* const pBlock = m_Blocks[allocIndex];
3573  VMA_ASSERT(pBlock);
3574  VMA_HEAVY_ASSERT(pBlock->Validate());
3575  VmaStatInfo allocationStatInfo;
3576  CalcAllocationStatInfo(allocationStatInfo, *pBlock);
3577  VmaAddStatInfo(pStats->total, allocationStatInfo);
3578  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
3579  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
3580  }
3581 }
3582 
3583 ////////////////////////////////////////////////////////////////////////////////
3584 // VmaDefragmentator
3585 
3586 class VmaDefragmentator
3587 {
3588  VkDevice m_hDevice;
3589  const VkAllocationCallbacks* m_pAllocationCallbacks;
3590  VkDeviceSize m_BufferImageGranularity;
3591  uint32_t m_MemTypeIndex;
3592  VMA_BLOCK_VECTOR_TYPE m_BlockVectorType;
3593  VkDeviceSize m_BytesMoved;
3594  uint32_t m_AllocationsMoved;
3595 
3596  struct AllocationInfo
3597  {
3598  VmaAllocation m_hAllocation;
3599  VkBool32* m_pChanged;
3600 
3601  AllocationInfo() :
3602  m_hAllocation(VK_NULL_HANDLE),
3603  m_pChanged(VMA_NULL)
3604  {
3605  }
3606  };
3607 
3608  struct AllocationInfoSizeGreater
3609  {
3610  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
3611  {
3612  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
3613  }
3614  };
3615 
3616  // Used between AddAllocation and Defragment.
3617  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
3618 
3619  struct BlockInfo
3620  {
3621  VmaBlock* m_pBlock;
3622  bool m_HasNonMovableAllocations;
3623  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
3624 
3625  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
3626  m_pBlock(VMA_NULL),
3627  m_HasNonMovableAllocations(true),
3628  m_Allocations(pAllocationCallbacks),
3629  m_pMappedDataForDefragmentation(VMA_NULL)
3630  {
3631  }
3632 
3633  void CalcHasNonMovableAllocations()
3634  {
3635  const size_t blockAllocCount =
3636  m_pBlock->m_Suballocations.size() - m_pBlock->m_FreeCount;
3637  const size_t defragmentAllocCount = m_Allocations.size();
3638  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
3639  }
3640 
3641  void SortAllocationsBySizeDescending()
3642  {
3643  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
3644  }
3645 
3646  VkResult EnsureMapping(VkDevice hDevice, void** ppMappedData)
3647  {
3648  // It has already been mapped for defragmentation.
3649  if(m_pMappedDataForDefragmentation)
3650  {
3651  *ppMappedData = m_pMappedDataForDefragmentation;
3652  return VK_SUCCESS;
3653  }
3654 
3655  // It is persistently mapped.
3656  if(m_pBlock->m_PersistentMap)
3657  {
3658  VMA_ASSERT(m_pBlock->m_pMappedData != VMA_NULL);
3659  *ppMappedData = m_pBlock->m_pMappedData;
3660  return VK_SUCCESS;
3661  }
3662 
3663  // Map on first usage.
3664  VkResult res = vkMapMemory(hDevice, m_pBlock->m_hMemory, 0, VK_WHOLE_SIZE, 0, &m_pMappedDataForDefragmentation);
3665  *ppMappedData = m_pMappedDataForDefragmentation;
3666  return res;
3667  }
3668 
3669  void Unmap(VkDevice hDevice)
3670  {
3671  if(m_pMappedDataForDefragmentation != VMA_NULL)
3672  {
3673  vkUnmapMemory(hDevice, m_pBlock->m_hMemory);
3674  }
3675  }
3676 
3677  private:
3678  // Not null if mapped for defragmentation only, not persistently mapped.
3679  void* m_pMappedDataForDefragmentation;
3680  };
3681 
3682  struct BlockPointerLess
3683  {
3684  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaBlock* pRhsBlock) const
3685  {
3686  return pLhsBlockInfo->m_pBlock < pRhsBlock;
3687  }
3688  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
3689  {
3690  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
3691  }
3692  };
3693 
3694  // 1. Blocks with some non-movable allocations go first.
3695  // 2. Blocks with smaller sumFreeSize go first.
3696  struct BlockInfoCompareMoveDestination
3697  {
3698  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
3699  {
3700  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
3701  {
3702  return true;
3703  }
3704  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
3705  {
3706  return false;
3707  }
3708  if(pLhsBlockInfo->m_pBlock->m_SumFreeSize < pRhsBlockInfo->m_pBlock->m_SumFreeSize)
3709  {
3710  return true;
3711  }
3712  return false;
3713  }
3714  };
3715 
3716  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
3717  BlockInfoVector m_Blocks;
3718 
3719  VkResult DefragmentRound(
3720  VkDeviceSize maxBytesToMove,
3721  uint32_t maxAllocationsToMove);
3722 
3723  static bool MoveMakesSense(
3724  size_t dstBlockIndex, VkDeviceSize dstOffset,
3725  size_t srcBlockIndex, VkDeviceSize srcOffset);
3726 
3727 public:
3728  VmaDefragmentator(
3729  VkDevice hDevice,
3730  const VkAllocationCallbacks* pAllocationCallbacks,
3731  VkDeviceSize bufferImageGranularity,
3732  uint32_t memTypeIndex,
3733  VMA_BLOCK_VECTOR_TYPE blockVectorType);
3734 
3735  ~VmaDefragmentator();
3736 
3737  VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
3738  uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
3739 
3740  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
3741 
3742  VkResult Defragment(
3743  VmaBlockVector* pBlockVector,
3744  VkDeviceSize maxBytesToMove,
3745  uint32_t maxAllocationsToMove);
3746 };
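// Illustrative sketch, not part of the library: the intended call sequence,
// with hypothetical locals:
/*
VmaDefragmentator defrag(hDevice, pAllocCallbacks, bufferImageGranularity,
    memTypeIndex, blockVectorType);
for(size_t i = 0; i < allocationCount; ++i)
{
    defrag.AddAllocation(pAllocations[i],
        (pAllocationsChanged != VMA_NULL) ? pAllocationsChanged + i : VMA_NULL);
}
VkResult res = defrag.Defragment(pBlockVector, maxBytesToMove, maxAllocationsToMove);
// GetBytesMoved() / GetAllocationsMoved() then feed VmaDefragmentationStats.
*/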
3747 
3748 VmaDefragmentator::VmaDefragmentator(
3749  VkDevice hDevice,
3750  const VkAllocationCallbacks* pAllocationCallbacks,
3751  VkDeviceSize bufferImageGranularity,
3752  uint32_t memTypeIndex,
3753  VMA_BLOCK_VECTOR_TYPE blockVectorType) :
3754  m_hDevice(hDevice),
3755  m_pAllocationCallbacks(pAllocationCallbacks),
3756  m_BufferImageGranularity(bufferImageGranularity),
3757  m_MemTypeIndex(memTypeIndex),
3758  m_BlockVectorType(blockVectorType),
3759  m_BytesMoved(0),
3760  m_AllocationsMoved(0),
3761  m_Allocations(VmaStlAllocator<AllocationInfo>(pAllocationCallbacks)),
3762  m_Blocks(VmaStlAllocator<BlockInfo*>(pAllocationCallbacks))
3763 {
3764 }
3765 
3766 VmaDefragmentator::~VmaDefragmentator()
3767 {
3768  for(size_t i = m_Blocks.size(); i--; )
3769  {
3770  vma_delete(m_pAllocationCallbacks, m_Blocks[i]);
3771  }
3772 }
3773 
3774 void VmaDefragmentator::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
3775 {
3776  AllocationInfo allocInfo;
3777  allocInfo.m_hAllocation = hAlloc;
3778  allocInfo.m_pChanged = pChanged;
3779  m_Allocations.push_back(allocInfo);
3780 }
3781 
3782 VkResult VmaDefragmentator::DefragmentRound(
3783  VkDeviceSize maxBytesToMove,
3784  uint32_t maxAllocationsToMove)
3785 {
3786  if(m_Blocks.empty())
3787  {
3788  return VK_SUCCESS;
3789  }
3790 
3791  size_t srcBlockIndex = m_Blocks.size() - 1;
3792  size_t srcAllocIndex = SIZE_MAX;
3793  for(;;)
3794  {
3795  // 1. Find next allocation to move.
3796  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
3797  // 1.2. Then start from last to first m_Allocations - they are sorted from largest to smallest.
3798  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
3799  {
3800  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
3801  {
3802  // Finished: no more allocations to process.
3803  if(srcBlockIndex == 0)
3804  {
3805  return VK_SUCCESS;
3806  }
3807  else
3808  {
3809  --srcBlockIndex;
3810  srcAllocIndex = SIZE_MAX;
3811  }
3812  }
3813  else
3814  {
3815  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
3816  }
3817  }
3818 
3819  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
3820  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
3821 
3822  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
3823  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
3824  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
3825  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
3826 
3827  // 2. Try to find new place for this allocation in preceding or current block.
3828  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
3829  {
3830  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
3831  VmaAllocationRequest dstAllocRequest;
3832  if(pDstBlockInfo->m_pBlock->CreateAllocationRequest(
3833  m_BufferImageGranularity,
3834  size,
3835  alignment,
3836  suballocType,
3837  &dstAllocRequest) &&
3838  MoveMakesSense(
3839  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
3840  {
3841  // Reached limit on number of allocations or bytes to move.
3842  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
3843  (m_BytesMoved + size > maxBytesToMove))
3844  {
3845  return VK_INCOMPLETE;
3846  }
3847 
3848  void* pDstMappedData = VMA_NULL;
3849  VkResult res = pDstBlockInfo->EnsureMapping(m_hDevice, &pDstMappedData);
3850  if(res != VK_SUCCESS)
3851  {
3852  return res;
3853  }
3854 
3855  void* pSrcMappedData = VMA_NULL;
3856  res = pSrcBlockInfo->EnsureMapping(m_hDevice, &pSrcMappedData);
3857  if(res != VK_SUCCESS)
3858  {
3859  return res;
3860  }
3861 
3862  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
3863  memcpy(
3864  reinterpret_cast<char*>(pDstMappedData) + dstAllocRequest.offset,
3865  reinterpret_cast<char*>(pSrcMappedData) + srcOffset,
3866  size);
3867 
3868  pDstBlockInfo->m_pBlock->Alloc(dstAllocRequest, suballocType, size);
3869  pSrcBlockInfo->m_pBlock->Free(allocInfo.m_hAllocation);
3870 
3871  allocInfo.m_hAllocation->ChangeBlockAllocation(pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
3872 
3873  if(allocInfo.m_pChanged != VMA_NULL)
3874  {
3875  *allocInfo.m_pChanged = VK_TRUE;
3876  }
3877 
3878  ++m_AllocationsMoved;
3879  m_BytesMoved += size;
3880 
3881  VectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
3882 
3883  break;
3884  }
3885  }
3886 
3887  // If not processed, this allocInfo remains in pSrcBlockInfo->m_Allocations for next round.
3888 
3889  if(srcAllocIndex > 0)
3890  {
3891  --srcAllocIndex;
3892  }
3893  else
3894  {
3895  if(srcBlockIndex > 0)
3896  {
3897  --srcBlockIndex;
3898  srcAllocIndex = SIZE_MAX;
3899  }
3900  else
3901  {
3902  return VK_SUCCESS;
3903  }
3904  }
3905  }
3906 }
3907 
3908 VkResult VmaDefragmentator::Defragment(
3909  VmaBlockVector* pBlockVector,
3910  VkDeviceSize maxBytesToMove,
3911  uint32_t maxAllocationsToMove)
3912 {
3913  if(m_Allocations.empty())
3914  {
3915  return VK_SUCCESS;
3916  }
3917 
3918  // Create block info for each block.
3919  const size_t blockCount = pBlockVector->m_Blocks.size();
3920  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
3921  {
3922  BlockInfo* pBlockInfo = vma_new(m_pAllocationCallbacks, BlockInfo)(m_pAllocationCallbacks);
3923  pBlockInfo->m_pBlock = pBlockVector->m_Blocks[blockIndex];
3924  m_Blocks.push_back(pBlockInfo);
3925  }
3926 
3927  // Sort them by m_pBlock pointer value.
3928  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
3929 
3930  // Move allocation infos from m_Allocations to appropriate m_Blocks[i].m_Allocations.
3931  for(size_t allocIndex = 0, allocCount = m_Allocations.size(); allocIndex < allocCount; ++allocIndex)
3932  {
3933  AllocationInfo& allocInfo = m_Allocations[allocIndex];
3934  VmaBlock* pBlock = allocInfo.m_hAllocation->GetBlock();
3935  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
3936  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
3937  {
3938  (*it)->m_Allocations.push_back(allocInfo);
3939  }
3940  else
3941  {
3942  VMA_ASSERT(0);
3943  }
3944  }
3945  m_Allocations.clear();
3946 
3947  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
3948  {
3949  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
3950  pBlockInfo->CalcHasNonMovableAllocations();
3951  pBlockInfo->SortAllocationsBySizeDescending();
3952  }
3953 
3954  // Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
3955  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
3956 
3957  // Execute defragmentation rounds (the main part).
3958  VkResult result = VK_SUCCESS;
3959  for(size_t round = 0; (round < 2) && (result == VK_SUCCESS); ++round)
3960  {
3961  result = DefragmentRound(maxBytesToMove, maxAllocationsToMove);
3962  }
3963 
3964  // Unmap blocks that were mapped for defragmentation.
3965  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
3966  {
3967  m_Blocks[blockIndex]->Unmap(m_hDevice);
3968  }
3969 
3970  return result;
3971 }
3972 
3973 bool VmaDefragmentator::MoveMakesSense(
3974  size_t dstBlockIndex, VkDeviceSize dstOffset,
3975  size_t srcBlockIndex, VkDeviceSize srcOffset)
3976 {
3977  if(dstBlockIndex < srcBlockIndex)
3978  {
3979  return true;
3980  }
3981  if(dstBlockIndex > srcBlockIndex)
3982  {
3983  return false;
3984  }
3985  if(dstOffset < srcOffset)
3986  {
3987  return true;
3988  }
3989  return false;
3990 }
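// Illustrative examples, not part of the library: moving from block 2 to
// block 0 makes sense regardless of offsets (earlier block wins); within the
// same block, moving from offset 8192 down to offset 0 makes sense (smaller
// offset wins); moving toward a later block, or to an equal or higher offset
// in the same block, is rejected.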
3991 
3992 ////////////////////////////////////////////////////////////////////////////////
3993 // VmaAllocator_T
3994 
3995 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
3996  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
3997  m_PhysicalDevice(pCreateInfo->physicalDevice),
3998  m_hDevice(pCreateInfo->device),
3999  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
4000  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
4001  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
4002  m_PreferredLargeHeapBlockSize(0),
4003  m_PreferredSmallHeapBlockSize(0),
4004  m_UnmapPersistentlyMappedMemoryCounter(0)
4005 {
4006  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
4007 
4008  memset(&m_DeviceMemoryCallbacks, 0, sizeof(m_DeviceMemoryCallbacks));
4009  memset(&m_MemProps, 0, sizeof(m_MemProps));
4010  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
4011 
4012  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
4013  memset(&m_HasEmptyBlock, 0, sizeof(m_HasEmptyBlock));
4014  memset(&m_pOwnAllocations, 0, sizeof(m_pOwnAllocations));
4015 
4016  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
4017  {
4018  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
4019  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
4020  }
4021 
4022  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
4023  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
4024  m_PreferredSmallHeapBlockSize = (pCreateInfo->preferredSmallHeapBlockSize != 0) ?
4025  pCreateInfo->preferredSmallHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_SMALL_HEAP_BLOCK_SIZE);
4026 
4027  vkGetPhysicalDeviceProperties(m_PhysicalDevice, &m_PhysicalDeviceProperties);
4028  vkGetPhysicalDeviceMemoryProperties(m_PhysicalDevice, &m_MemProps);
4029 
4030  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
4031  {
4032  for(size_t j = 0; j < VMA_BLOCK_VECTOR_TYPE_COUNT; ++j)
4033  {
4034  m_pBlockVectors[i][j] = vma_new(this, VmaBlockVector)(this);
4035  m_pOwnAllocations[i][j] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
4036  }
4037  }
4038 }
4039 
4040 VmaAllocator_T::~VmaAllocator_T()
4041 {
4042  for(size_t i = GetMemoryTypeCount(); i--; )
4043  {
4044  for(size_t j = VMA_BLOCK_VECTOR_TYPE_COUNT; j--; )
4045  {
4046  vma_delete(this, m_pOwnAllocations[i][j]);
4047  vma_delete(this, m_pBlockVectors[i][j]);
4048  }
4049  }
4050 }
4051 
4052 VkDeviceSize VmaAllocator_T::GetPreferredBlockSize(uint32_t memTypeIndex) const
4053 {
4054  VkDeviceSize heapSize = m_MemProps.memoryHeaps[m_MemProps.memoryTypes[memTypeIndex].heapIndex].size;
4055  return (heapSize <= VMA_SMALL_HEAP_MAX_SIZE) ?
4056  m_PreferredSmallHeapBlockSize : m_PreferredLargeHeapBlockSize;
4057 }
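// Illustrative example, not part of the library: a multi-GiB device-local
// heap exceeds VMA_SMALL_HEAP_MAX_SIZE, so new blocks there use
// m_PreferredLargeHeapBlockSize; a heap at or below the threshold gets
// m_PreferredSmallHeapBlockSize to avoid claiming too large a fraction of it.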
4058 
4059 VkResult VmaAllocator_T::AllocateMemoryOfType(
4060  const VkMemoryRequirements& vkMemReq,
4061  const VmaMemoryRequirements& vmaMemReq,
4062  uint32_t memTypeIndex,
4063  VmaSuballocationType suballocType,
4064  VmaAllocation* pAllocation)
4065 {
4066  VMA_ASSERT(pAllocation != VMA_NULL);
4067  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, vkMemReq.size);
4068 
4069  const VkDeviceSize preferredBlockSize = GetPreferredBlockSize(memTypeIndex);
4070  // Heuristics: Allocate own memory if requested size is greater than half of preferred block size.
4071  const bool ownMemory =
4072  (vmaMemReq.flags & VMA_MEMORY_REQUIREMENT_OWN_MEMORY_BIT) != 0 ||
4073  VMA_DEBUG_ALWAYS_OWN_MEMORY ||
4074  ((vmaMemReq.flags & VMA_MEMORY_REQUIREMENT_NEVER_ALLOCATE_BIT) == 0 &&
4075  vkMemReq.size > preferredBlockSize / 2);
4076 
4077  if(ownMemory)
4078  {
4079  if((vmaMemReq.flags & VMA_MEMORY_REQUIREMENT_NEVER_ALLOCATE_BIT) != 0)
4080  {
4081  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
4082  }
4083  else
4084  {
4085  return AllocateOwnMemory(
4086  vkMemReq.size,
4087  suballocType,
4088  memTypeIndex,
4089  (vmaMemReq.flags & VMA_MEMORY_REQUIREMENT_PERSISTENT_MAP_BIT) != 0, // map
4090  vmaMemReq.pUserData,
4091  pAllocation);
4092  }
4093  }
4094  else
4095  {
4096  uint32_t blockVectorType = VmaMemoryRequirementFlagsToBlockVectorType(vmaMemReq.flags);
4097 
4098  VmaMutexLock lock(m_BlocksMutex[memTypeIndex], m_UseMutex);
4099  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex][blockVectorType];
4100  VMA_ASSERT(blockVector);
4101 
4102  // 1. Search existing blocks.
4103  // Forward order - prefer blocks with smallest amount of free space.
4104  for(size_t allocIndex = 0; allocIndex < blockVector->m_Blocks.size(); ++allocIndex )
4105  {
4106  VmaBlock* const pBlock = blockVector->m_Blocks[allocIndex];
4107  VMA_ASSERT(pBlock);
4108  VmaAllocationRequest allocRequest = {};
4109  // Check if can allocate from pBlock.
4110  if(pBlock->CreateAllocationRequest(
4111  GetBufferImageGranularity(),
4112  vkMemReq.size,
4113  vkMemReq.alignment,
4114  suballocType,
4115  &allocRequest))
4116  {
4117  // We no longer have an empty block.
4118  if(pBlock->IsEmpty())
4119  {
4120  m_HasEmptyBlock[memTypeIndex] = false;
4121  }
4122  // Allocate from this pBlock.
4123  pBlock->Alloc(allocRequest, suballocType, vkMemReq.size);
4124  *pAllocation = vma_new(this, VmaAllocation_T)();
4125  (*pAllocation)->InitBlockAllocation(
4126  pBlock,
4127  allocRequest.offset,
4128  vkMemReq.alignment,
4129  vkMemReq.size,
4130  suballocType,
4131  vmaMemReq.pUserData);
4132  VMA_HEAVY_ASSERT(pBlock->Validate());
4133  VMA_DEBUG_LOG(" Returned from existing allocation #%u", (uint32_t)allocIndex);
4134  return VK_SUCCESS;
4135  }
4136  }
4137 
4138  // 2. Create new block.
4139  if((vmaMemReq.flags & VMA_MEMORY_REQUIREMENT_NEVER_ALLOCATE_BIT) != 0)
4140  {
4141  VMA_DEBUG_LOG(" FAILED due to VMA_MEMORY_REQUIREMENT_NEVER_ALLOCATE_BIT");
4142  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
4143  }
4144  else
4145  {
4146  // Start with full preferredBlockSize.
4147  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
4148  allocInfo.memoryTypeIndex = memTypeIndex;
4149  allocInfo.allocationSize = preferredBlockSize;
4150  VkDeviceMemory mem = VK_NULL_HANDLE;
4151  VkResult res = vkAllocateMemory(m_hDevice, &allocInfo, GetAllocationCallbacks(), &mem);
4152  if(res < 0)
4153  {
4154  // 3. Try half the size.
4155  allocInfo.allocationSize /= 2;
4156  if(allocInfo.allocationSize >= vkMemReq.size)
4157  {
4158  res = vkAllocateMemory(m_hDevice, &allocInfo, GetAllocationCallbacks(), &mem);
4159  if(res < 0)
4160  {
4161  // 4. Try quarter the size.
4162  allocInfo.allocationSize /= 2;
4163  if(allocInfo.allocationSize >= vkMemReq.size)
4164  {
4165  res = vkAllocateMemory(m_hDevice, &allocInfo, GetAllocationCallbacks(), &mem);
4166  }
4167  }
4168  }
4169  }
4170  if(res < 0)
4171  {
4172  // 5. Try OwnAlloc.
4173  res = AllocateOwnMemory(
4174  vkMemReq.size,
4175  suballocType,
4176  memTypeIndex,
4177  (vmaMemReq.flags & VMA_MEMORY_REQUIREMENT_PERSISTENT_MAP_BIT) != 0, // map
4178  vmaMemReq.pUserData,
4179  pAllocation);
4180  if(res == VK_SUCCESS)
4181  {
4182  // Succeeded: AllocateOwnMemory function already filled *pAllocation, nothing more to do here.
4183  VMA_DEBUG_LOG(" Allocated as OwnMemory");
4184  return VK_SUCCESS;
4185  }
4186  else
4187  {
4188  // Everything failed: Return error code.
4189  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
4190  return res;
4191  }
4192  }
4193 
4194  // New VkDeviceMemory successfully created.
4195 
4196  // Map memory if needed.
4197  void* pMappedData = VMA_NULL;
4198  const bool persistentMap = (vmaMemReq.flags & VMA_MEMORY_REQUIREMENT_PERSISTENT_MAP_BIT) != 0;
4199  if(persistentMap && m_UnmapPersistentlyMappedMemoryCounter == 0)
4200  {
4201  res = vkMapMemory(m_hDevice, mem, 0, VK_WHOLE_SIZE, 0, &pMappedData);
4202  if(res < 0)
4203  {
4204  VMA_DEBUG_LOG(" vkMapMemory FAILED");
4205  vkFreeMemory(m_hDevice, mem, GetAllocationCallbacks());
4206  return res;
4207  }
4208  }
4209 
4210  // Callback.
4211  if(m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
4212  {
4213  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, memTypeIndex, mem, allocInfo.allocationSize);
4214  }
4215 
4216  // Create new block for it.
4217  VmaBlock* const pBlock = vma_new(this, VmaBlock)(this);
4218  pBlock->Init(
4219  memTypeIndex,
4220  (VMA_BLOCK_VECTOR_TYPE)blockVectorType,
4221  mem,
4222  allocInfo.allocationSize,
4223  persistentMap,
4224  pMappedData);
4225 
4226  blockVector->m_Blocks.push_back(pBlock);
4227 
4228  // Allocate from pBlock. Because it is empty, allocRequest can be trivially filled.
4229  VmaAllocationRequest allocRequest = {};
4230  allocRequest.freeSuballocationItem = pBlock->m_Suballocations.begin();
4231  allocRequest.offset = 0;
4232  pBlock->Alloc(allocRequest, suballocType, vkMemReq.size);
4233  *pAllocation = vma_new(this, VmaAllocation_T)();
4234  (*pAllocation)->InitBlockAllocation(
4235  pBlock,
4236  allocRequest.offset,
4237  vkMemReq.alignment,
4238  vkMemReq.size,
4239  suballocType,
4240  vmaMemReq.pUserData);
4241  VMA_HEAVY_ASSERT(pBlock->Validate());
4242  VMA_DEBUG_LOG(" Created new allocation Size=%llu", allocInfo.allocationSize);
4243  return VK_SUCCESS;
4244  }
4245  }
4246 }
4247 
4248 VkResult VmaAllocator_T::AllocateOwnMemory(
4249  VkDeviceSize size,
4250  VmaSuballocationType suballocType,
4251  uint32_t memTypeIndex,
4252  bool map,
4253  void* pUserData,
4254  VmaAllocation* pAllocation)
4255 {
4256  VMA_ASSERT(pAllocation);
4257 
4258  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
4259  allocInfo.memoryTypeIndex = memTypeIndex;
4260  allocInfo.allocationSize = size;
4261 
4262  // Allocate VkDeviceMemory.
4263  VkDeviceMemory hMemory = VK_NULL_HANDLE;
4264  VkResult res = vkAllocateMemory(m_hDevice, &allocInfo, GetAllocationCallbacks(), &hMemory);
4265  if(res < 0)
4266  {
4267  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
4268  return res;
4269  }
4270 
4271  void* pMappedData = nullptr;
4272  if(map)
4273  {
4274  if(m_UnmapPersistentlyMappedMemoryCounter == 0)
4275  {
4276  res = vkMapMemory(m_hDevice, hMemory, 0, VK_WHOLE_SIZE, 0, &pMappedData);
4277  if(res < 0)
4278  {
4279  VMA_DEBUG_LOG(" vkMapMemory FAILED");
4280  vkFreeMemory(m_hDevice, hMemory, GetAllocationCallbacks());
4281  return res;
4282  }
4283  }
4284  }
4285 
4286  // Callback.
4287  if(m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
4288  {
4289  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, memTypeIndex, hMemory, size);
4290  }
4291 
4292  *pAllocation = vma_new(this, VmaAllocation_T)();
4293  (*pAllocation)->InitOwnAllocation(memTypeIndex, hMemory, suballocType, map, pMappedData, size, pUserData);
4294 
4295  // Register it in m_pOwnAllocations.
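// The per-type vector is kept sorted by allocation pointer (VmaPointerLess),
// so FreeOwnMemory() can locate the entry again with a binary search.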
4296  {
4297  VmaMutexLock lock(m_OwnAllocationsMutex[memTypeIndex], m_UseMutex);
4298  AllocationVectorType* pOwnAllocations = m_pOwnAllocations[memTypeIndex][map ? VMA_BLOCK_VECTOR_TYPE_MAPPED : VMA_BLOCK_VECTOR_TYPE_UNMAPPED];
4299  VMA_ASSERT(pOwnAllocations);
4300  VmaAllocation* const pOwnAllocationsBeg = pOwnAllocations->data();
4301  VmaAllocation* const pOwnAllocationsEnd = pOwnAllocationsBeg + pOwnAllocations->size();
4302  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
4303  pOwnAllocationsBeg,
4304  pOwnAllocationsEnd,
4305  *pAllocation,
4306  VmaPointerLess()) - pOwnAllocationsBeg;
4307  VectorInsert(*pOwnAllocations, indexToInsert, *pAllocation);
4308  }
4309 
4310  VMA_DEBUG_LOG(" Allocated OwnMemory MemoryTypeIndex=#%u", memTypeIndex);
4311 
4312  return VK_SUCCESS;
4313 }
4314 
4315 VkResult VmaAllocator_T::AllocateMemory(
4316  const VkMemoryRequirements& vkMemReq,
4317  const VmaMemoryRequirements& vmaMemReq,
4318  VmaSuballocationType suballocType,
4319  VmaAllocation* pAllocation)
4320 {
4321  if((vmaMemReq.flags & VMA_MEMORY_REQUIREMENT_OWN_MEMORY_BIT) != 0 &&
4322  (vmaMemReq.flags & VMA_MEMORY_REQUIREMENT_NEVER_ALLOCATE_BIT) != 0)
4323  {
4324  VMA_ASSERT(0 && "Specifying VMA_MEMORY_REQUIREMENT_OWN_MEMORY_BIT together with VMA_MEMORY_REQUIREMENT_NEVER_ALLOCATE_BIT makes no sense.");
4325  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
4326  }
4327 
4328  // Bit mask of memory Vulkan types acceptable for this allocation.
4329  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
4330  uint32_t memTypeIndex = UINT32_MAX;
4331  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &vmaMemReq, &memTypeIndex);
4332  if(res == VK_SUCCESS)
4333  {
4334  res = AllocateMemoryOfType(vkMemReq, vmaMemReq, memTypeIndex, suballocType, pAllocation);
4335  // Succeeded on first try.
4336  if(res == VK_SUCCESS)
4337  {
4338  return res;
4339  }
4340  // Allocation from this memory type failed. Try other compatible memory types.
4341  else
4342  {
4343  for(;;)
4344  {
4345  // Remove old memTypeIndex from list of possibilities.
4346  memoryTypeBits &= ~(1u << memTypeIndex);
4347  // Find alternative memTypeIndex.
4348  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &vmaMemReq, &memTypeIndex);
4349  if(res == VK_SUCCESS)
4350  {
4351  res = AllocateMemoryOfType(vkMemReq, vmaMemReq, memTypeIndex, suballocType, pAllocation);
4352  // Allocation from this alternative memory type succeeded.
4353  if(res == VK_SUCCESS)
4354  {
4355  return res;
4356  }
4357  // else: Allocation from this memory type failed. Try next one - next loop iteration.
4358  }
4359  // No other matching memory type index could be found.
4360  else
4361  {
4362  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
4363  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
4364  }
4365  }
4366  }
4367  }
4368  // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
4369  else
4370  return res;
4371 }
4372 
4373 void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
4374 {
4375  VMA_ASSERT(allocation);
4376 
4377  if(allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK)
4378  {
4379  VmaBlock* pBlockToDelete = VMA_NULL;
4380 
4381  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
4382  const VMA_BLOCK_VECTOR_TYPE blockVectorType = allocation->GetBlockVectorType();
4383  {
4384  VmaMutexLock lock(m_BlocksMutex[memTypeIndex], m_UseMutex);
4385 
4386  VmaBlockVector* pBlockVector = m_pBlockVectors[memTypeIndex][blockVectorType];
4387  VmaBlock* pBlock = allocation->GetBlock();
4388 
4389  pBlock->Free(allocation);
4390  VMA_HEAVY_ASSERT(pBlock->Validate());
4391 
4392  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", memTypeIndex);
4393 
4394  // pBlock became empty after this deallocation.
4395  if(pBlock->IsEmpty())
4396  {
4397  // Already have an empty block. We don't want two, so delete this one.
4398  if(m_HasEmptyBlock[memTypeIndex])
4399  {
4400  pBlockToDelete = pBlock;
4401  pBlockVector->Remove(pBlock);
4402  }
4403  // This is now the first empty block.
4404  else
4405  {
4406  m_HasEmptyBlock[memTypeIndex] = true;
4407  }
4408  }
4409  // Must be called last, because sorting may reorder m_Blocks and invalidate any saved block indices.
4410  pBlockVector->IncrementallySortBlocks();
4411  }
4412  // Destruction of the empty block is deferred until this point, outside of the mutex
4413  // lock, for performance reasons.
4414  if(pBlockToDelete != VMA_NULL)
4415  {
4416  VMA_DEBUG_LOG(" Deleted empty allocation");
4417  pBlockToDelete->Destroy(this);
4418  vma_delete(this, pBlockToDelete);
4419  }
4420 
4421  vma_delete(this, allocation);
4422  }
4423  else // VmaAllocation_T::ALLOCATION_TYPE_OWN
4424  {
4425  FreeOwnMemory(allocation);
4426  }
4427 }
4428 
4429 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
4430 {
4431  InitStatInfo(pStats->total);
4432  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
4433  InitStatInfo(pStats->memoryType[i]);
4434  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
4435  InitStatInfo(pStats->memoryHeap[i]);
4436 
4437  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
4438  {
4439  VmaMutexLock allocationsLock(m_BlocksMutex[memTypeIndex], m_UseMutex);
4440  const uint32_t heapIndex = m_MemProps.memoryTypes[memTypeIndex].heapIndex;
4441  for(uint32_t blockVectorType = 0; blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorType)
4442  {
4443  const VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex][blockVectorType];
4444  VMA_ASSERT(pBlockVector);
4445  pBlockVector->AddStats(pStats, memTypeIndex, heapIndex);
4446  }
4447  }
4448 
4449  VmaPostprocessCalcStatInfo(pStats->total);
4450  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
4451  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
4452  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
4453  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
4454 }
4455 
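// 4098 == 0x1002 - the PCI vendor ID of AMD.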
4456 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
4457 
4458 void VmaAllocator_T::UnmapPersistentlyMappedMemory()
4459 {
4460  if(m_UnmapPersistentlyMappedMemoryCounter++ == 0)
4461  {
4462  if(m_PhysicalDeviceProperties.vendorID == VMA_VENDOR_ID_AMD)
4463  {
4464  for(size_t memTypeIndex = m_MemProps.memoryTypeCount; memTypeIndex--; )
4465  {
4466  const VkMemoryPropertyFlags memFlags = m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
4467  if((memFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0 &&
4468  (memFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
4469  {
4470  // Process OwnAllocations.
4471  {
4472  VmaMutexLock lock(m_OwnAllocationsMutex[memTypeIndex], m_UseMutex);
4473  AllocationVectorType* pOwnAllocationsVector = m_pOwnAllocations[memTypeIndex][VMA_BLOCK_VECTOR_TYPE_MAPPED];
4474  for(size_t ownAllocIndex = pOwnAllocationsVector->size(); ownAllocIndex--; )
4475  {
4476  VmaAllocation hAlloc = (*pOwnAllocationsVector)[ownAllocIndex];
4477  hAlloc->OwnAllocUnmapPersistentlyMappedMemory(m_hDevice);
4478  }
4479  }
4480 
4481  // Process normal Allocations.
4482  {
4483  VmaMutexLock lock(m_BlocksMutex[memTypeIndex], m_UseMutex);
4484  VmaBlockVector* pBlockVector = m_pBlockVectors[memTypeIndex][VMA_BLOCK_VECTOR_TYPE_MAPPED];
4485  pBlockVector->UnmapPersistentlyMappedMemory();
4486  }
4487  }
4488  }
4489  }
4490  }
4491 }
4492 
4493 VkResult VmaAllocator_T::MapPersistentlyMappedMemory()
4494 {
4495  VMA_ASSERT(m_UnmapPersistentlyMappedMemoryCounter > 0);
4496  if(--m_UnmapPersistentlyMappedMemoryCounter == 0)
4497  {
4498  VkResult finalResult = VK_SUCCESS;
4499  if(m_PhysicalDeviceProperties.vendorID == VMA_VENDOR_ID_AMD)
4500  {
4501  for(size_t memTypeIndex = 0; memTypeIndex < m_MemProps.memoryTypeCount; ++memTypeIndex)
4502  {
4503  const VkMemoryPropertyFlags memFlags = m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
4504  if((memFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0 &&
4505  (memFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
4506  {
4507  // Process OwnAllocations.
4508  {
4509  VmaMutexLock lock(m_OwnAllocationsMutex[memTypeIndex], m_UseMutex);
4510  AllocationVectorType* pAllocationsVector = m_pOwnAllocations[memTypeIndex][VMA_BLOCK_VECTOR_TYPE_MAPPED];
4511  for(size_t ownAllocIndex = 0, ownAllocCount = pAllocationsVector->size(); ownAllocIndex < ownAllocCount; ++ownAllocIndex)
4512  {
4513  VmaAllocation hAlloc = (*pAllocationsVector)[ownAllocIndex];
4514  hAlloc->OwnAllocMapPersistentlyMappedMemory(m_hDevice);
4515  }
4516  }
4517 
4518  // Process normal Allocations.
4519  {
4520  VmaMutexLock lock(m_BlocksMutex[memTypeIndex], m_UseMutex);
4521  VmaBlockVector* pBlockVector = m_pBlockVectors[memTypeIndex][VMA_BLOCK_VECTOR_TYPE_MAPPED];
4522  VkResult localResult = pBlockVector->MapPersistentlyMappedMemory();
4523  if(localResult != VK_SUCCESS)
4524  {
4525  finalResult = localResult;
4526  }
4527  }
4528  }
4529  }
4530  }
4531  return finalResult;
4532  }
4533  else
4534  return VK_SUCCESS;
4535 }
4536 
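// Illustrative usage of the Unmap/Map pair above - a sketch, not part of the
// original header. The counter makes the calls nestable; the actual unmap and
// remap work happens only on AMD hardware (vendorID 0x1002), and only for
// HOST_VISIBLE | DEVICE_LOCAL memory types, as the code above shows.
//
//     vmaUnmapPersistentlyMappedMemory(allocator);
//     // ... a period during which persistently mapped pointers must not be used ...
//     VkResult res = vmaMapPersistentlyMappedMemory(allocator);
//     // Mappings may land at new addresses: pMappedData pointers previously
//     // fetched via vmaGetAllocationInfo() should be queried again.
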
4537 VkResult VmaAllocator_T::Defragment(
4538  VmaAllocation* pAllocations,
4539  size_t allocationCount,
4540  VkBool32* pAllocationsChanged,
4541  const VmaDefragmentationInfo* pDefragmentationInfo,
4542  VmaDefragmentationStats* pDefragmentationStats)
4543 {
4544  if(pAllocationsChanged != VMA_NULL)
4545  {
4546  memset(pAllocationsChanged, 0, allocationCount * sizeof(*pAllocationsChanged));
4547  }
4548  if(pDefragmentationStats != VMA_NULL)
4549  {
4550  memset(pDefragmentationStats, 0, sizeof(*pDefragmentationStats));
4551  }
4552 
4553  if(m_UnmapPersistentlyMappedMemoryCounter > 0)
4554  {
4555  VMA_DEBUG_LOG("ERROR: Cannot defragment when inside vmaUnmapPersistentlyMappedMemory.");
4556  return VK_ERROR_MEMORY_MAP_FAILED;
4557  }
4558 
4559  // Initialize defragmentators per memory type.
4560  const VkDeviceSize bufferImageGranularity = GetBufferImageGranularity();
4561  VmaDefragmentator* pDefragmentators[VK_MAX_MEMORY_TYPES][VMA_BLOCK_VECTOR_TYPE_COUNT];
4562  memset(pDefragmentators, 0, sizeof(pDefragmentators));
4563  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
4564  {
4565  // Only HOST_VISIBLE memory types can be defragmented.
4566  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
4567  {
4568  for(uint32_t blockVectorType = 0; blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorType)
4569  {
4570  pDefragmentators[memTypeIndex][blockVectorType] = vma_new(this, VmaDefragmentator)(
4571  m_hDevice,
4572  GetAllocationCallbacks(),
4573  bufferImageGranularity,
4574  memTypeIndex,
4575  (VMA_BLOCK_VECTOR_TYPE)blockVectorType);
4576  }
4577  }
4578  }
4579 
4580  // Dispatch pAllocations among defragmentators.
4581  for(size_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
4582  {
4583  VmaAllocation hAlloc = pAllocations[allocIndex];
4584  VMA_ASSERT(hAlloc);
4585  if(hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK)
4586  {
4587  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
4588  // Only HOST_VISIBLE memory types can be defragmented.
4589  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
4590  {
4591  const VMA_BLOCK_VECTOR_TYPE blockVectorType = hAlloc->GetBlockVectorType();
4592  VkBool32* pChanged = (pAllocationsChanged != VMA_NULL) ?
4593  &pAllocationsChanged[allocIndex] : VMA_NULL;
4594  pDefragmentators[memTypeIndex][blockVectorType]->AddAllocation(hAlloc, pChanged);
4595  }
4596  // else: skip this allocation, cannot move it.
4597  }
4598  // else ALLOCATION_TYPE_OWN: skip this allocation, nothing to defragment.
4599  }
4600 
4601  VkResult result = VK_SUCCESS;
4602 
4603  // Main processing.
4604  VkDeviceSize maxBytesToMove = SIZE_MAX;
4605  uint32_t maxAllocationsToMove = UINT32_MAX;
4606  if(pDefragmentationInfo != VMA_NULL)
4607  {
4608  maxBytesToMove = pDefragmentationInfo->maxBytesToMove;
4609  maxAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
4610  }
4611  for(uint32_t memTypeIndex = 0;
4612  (memTypeIndex < GetMemoryTypeCount()) && (result == VK_SUCCESS);
4613  ++memTypeIndex)
4614  {
4615  // Only HOST_VISIBLE memory types can be defragmented.
4616  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
4617  {
4618  VmaMutexLock lock(m_BlocksMutex[memTypeIndex], m_UseMutex);
4619 
4620  for(uint32_t blockVectorType = 0;
4621  (blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT) && (result == VK_SUCCESS);
4622  ++blockVectorType)
4623  {
4624  VmaBlockVector* pBlockVector = m_pBlockVectors[memTypeIndex][blockVectorType];
4625 
4626  // Defragment.
4627  result = pDefragmentators[memTypeIndex][blockVectorType]->Defragment(pBlockVector, maxBytesToMove, maxAllocationsToMove);
4628 
4629  // Accumulate statistics.
4630  if(pDefragmentationStats != VMA_NULL)
4631  {
4632  const VkDeviceSize bytesMoved = pDefragmentators[memTypeIndex][blockVectorType]->GetBytesMoved();
4633  const uint32_t allocationsMoved = pDefragmentators[memTypeIndex][blockVectorType]->GetAllocationsMoved();
4634  pDefragmentationStats->bytesMoved += bytesMoved;
4635  pDefragmentationStats->allocationsMoved += allocationsMoved;
4636  VMA_ASSERT(bytesMoved <= maxBytesToMove);
4637  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
4638  maxBytesToMove -= bytesMoved;
4639  maxAllocationsToMove -= allocationsMoved;
4640  }
4641 
4642  // Free empty blocks.
4643  for(size_t blockIndex = pBlockVector->m_Blocks.size(); blockIndex--; )
4644  {
4645  VmaBlock* pBlock = pBlockVector->m_Blocks[blockIndex];
4646  if(pBlock->IsEmpty())
4647  {
4648  if(pDefragmentationStats != VMA_NULL)
4649  {
4650  ++pDefragmentationStats->deviceMemoryBlocksFreed;
4651  pDefragmentationStats->bytesFreed += pBlock->m_Size;
4652  }
4653 
4654  VectorRemove(pBlockVector->m_Blocks, blockIndex);
4655  pBlock->Destroy(this);
4656  vma_delete(this, pBlock);
4657  }
4658  }
4659 
4660  // All block vector types processed: we can be sure that all empty blocks have been freed.
4661  if(blockVectorType == VMA_BLOCK_VECTOR_TYPE_COUNT - 1)
4662  {
4663  m_HasEmptyBlock[memTypeIndex] = false;
4664  }
4665  }
4666  }
4667  }
4668 
4669  // Destroy defragmentators.
4670  for(uint32_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
4671  {
4672  for(size_t blockVectorType = VMA_BLOCK_VECTOR_TYPE_COUNT; blockVectorType--; )
4673  {
4674  vma_delete(this, pDefragmentators[memTypeIndex][blockVectorType]);
4675  }
4676  }
4677 
4678  return result;
4679 }
4680 
4681 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
4682 {
4683  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
4684  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
4685  pAllocationInfo->offset = hAllocation->GetOffset();
4686  pAllocationInfo->size = hAllocation->GetSize();
4687  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
4688  pAllocationInfo->pUserData = hAllocation->GetUserData();
4689 }
4690 
4691 void VmaAllocator_T::FreeOwnMemory(VmaAllocation allocation)
4692 {
4693  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_OWN);
4694 
4695  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
4696  {
4697  VmaMutexLock lock(m_OwnAllocationsMutex[memTypeIndex], m_UseMutex);
4698  AllocationVectorType* const pOwnAllocations = m_pOwnAllocations[memTypeIndex][allocation->GetBlockVectorType()];
4699  VMA_ASSERT(pOwnAllocations);
4700  VmaAllocation* const pOwnAllocationsBeg = pOwnAllocations->data();
4701  VmaAllocation* const pOwnAllocationsEnd = pOwnAllocationsBeg + pOwnAllocations->size();
4702  VmaAllocation* const pOwnAllocationIt = VmaBinaryFindFirstNotLess(
4703  pOwnAllocationsBeg,
4704  pOwnAllocationsEnd,
4705  allocation,
4706  VmaPointerLess());
4707  if(pOwnAllocationIt != pOwnAllocationsEnd)
4708  {
4709  const size_t ownAllocationIndex = pOwnAllocationIt - pOwnAllocationsBeg;
4710  VectorRemove(*pOwnAllocations, ownAllocationIndex);
4711  }
4712  else
4713  {
4714  VMA_ASSERT(0);
4715  }
4716  }
4717 
4718  VkDeviceMemory hMemory = allocation->GetMemory();
4719 
4720  // Callback.
4721  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
4722  {
4723  (*m_DeviceMemoryCallbacks.pfnFree)(this, memTypeIndex, hMemory, allocation->GetSize());
4724  }
4725 
4726  if(allocation->GetMappedData() != VMA_NULL)
4727  {
4728  vkUnmapMemory(m_hDevice, hMemory);
4729  }
4730 
4731  vkFreeMemory(m_hDevice, hMemory, GetAllocationCallbacks());
4732 
4733  VMA_DEBUG_LOG(" Freed OwnMemory MemoryTypeIndex=%u", memTypeIndex);
4734 
4735  vma_delete(this, allocation);
4736 }
4737 
4738 #if VMA_STATS_STRING_ENABLED
4739 
4740 void VmaAllocator_T::PrintDetailedMap(VmaStringBuilder& sb)
4741 {
4742  bool ownAllocationsStarted = false;
4743  for(size_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
4744  {
4745  VmaMutexLock ownAllocationsLock(m_OwnAllocationsMutex[memTypeIndex], m_UseMutex);
4746  for(uint32_t blockVectorType = 0; blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorType)
4747  {
4748  AllocationVectorType* const pOwnAllocVector = m_pOwnAllocations[memTypeIndex][blockVectorType];
4749  VMA_ASSERT(pOwnAllocVector);
4750  if(pOwnAllocVector->empty() == false)
4751  {
4752  if(ownAllocationsStarted)
4753  {
4754  sb.Add(",\n\t\"Type ");
4755  }
4756  else
4757  {
4758  sb.Add(",\n\"OwnAllocations\": {\n\t\"Type ");
4759  ownAllocationsStarted = true;
4760  }
4761  sb.AddNumber(memTypeIndex);
4762  if(blockVectorType == VMA_BLOCK_VECTOR_TYPE_MAPPED)
4763  {
4764  sb.Add(" Mapped");
4765  }
4766  sb.Add("\": [");
4767 
4768  for(size_t i = 0; i < pOwnAllocVector->size(); ++i)
4769  {
4770  const VmaAllocation hAlloc = (*pOwnAllocVector)[i];
4771  if(i > 0)
4772  {
4773  sb.Add(",\n\t\t{ \"Size\": ");
4774  }
4775  else
4776  {
4777  sb.Add("\n\t\t{ \"Size\": ");
4778  }
4779  sb.AddNumber(hAlloc->GetSize());
4780  sb.Add(", \"Type\": ");
4781  sb.AddString(VMA_SUBALLOCATION_TYPE_NAMES[hAlloc->GetSuballocationType()]);
4782  sb.Add(" }");
4783  }
4784 
4785  sb.Add("\n\t]");
4786  }
4787  }
4788  }
4789  if(ownAllocationsStarted)
4790  {
4791  sb.Add("\n}");
4792  }
4793 
4794  {
4795  bool allocationsStarted = false;
4796  for(size_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
4797  {
4798  VmaMutexLock globalAllocationsLock(m_BlocksMutex[memTypeIndex], m_UseMutex);
4799  for(uint32_t blockVectorType = 0; blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorType)
4800  {
4801  if(m_pBlockVectors[memTypeIndex][blockVectorType]->IsEmpty() == false)
4802  {
4803  if(allocationsStarted)
4804  {
4805  sb.Add(",\n\t\"Type ");
4806  }
4807  else
4808  {
4809  sb.Add(",\n\"Allocations\": {\n\t\"Type ");
4810  allocationsStarted = true;
4811  }
4812  sb.AddNumber(memTypeIndex);
4813  if(blockVectorType == VMA_BLOCK_VECTOR_TYPE_MAPPED)
4814  {
4815  sb.Add(" Mapped");
4816  }
4817  sb.Add("\": [");
4818 
4819  m_pBlockVectors[memTypeIndex][blockVectorType]->PrintDetailedMap(sb);
4820 
4821  sb.Add("\n\t]");
4822  }
4823  }
4824  }
4825  if(allocationsStarted)
4826  {
4827  sb.Add("\n}");
4828  }
4829  }
4830 }
4831 
4832 #endif // #if VMA_STATS_STRING_ENABLED
4833 
4834 static VkResult AllocateMemoryForImage(
4835  VmaAllocator allocator,
4836  VkImage image,
4837  const VmaMemoryRequirements* pMemoryRequirements,
4838  VmaSuballocationType suballocType,
4839  VmaAllocation* pAllocation)
4840 {
4841  VMA_ASSERT(allocator && (image != VK_NULL_HANDLE) && pMemoryRequirements && pAllocation);
4842 
4843  VkMemoryRequirements vkMemReq = {};
4844  vkGetImageMemoryRequirements(allocator->m_hDevice, image, &vkMemReq);
4845 
4846  return allocator->AllocateMemory(
4847  vkMemReq,
4848  *pMemoryRequirements,
4849  suballocType,
4850  pAllocation);
4851 }
4852 
4853 ////////////////////////////////////////////////////////////////////////////////
4854 // Public interface
4855 
4856 VkResult vmaCreateAllocator(
4857  const VmaAllocatorCreateInfo* pCreateInfo,
4858  VmaAllocator* pAllocator)
4859 {
4860  VMA_ASSERT(pCreateInfo && pAllocator);
4861  VMA_DEBUG_LOG("vmaCreateAllocator");
4862  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
4863  return VK_SUCCESS;
4864 }
4865 
4866 void vmaDestroyAllocator(
4867  VmaAllocator allocator)
4868 {
4869  if(allocator != VK_NULL_HANDLE)
4870  {
4871  VMA_DEBUG_LOG("vmaDestroyAllocator");
4872  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
4873  vma_delete(&allocationCallbacks, allocator);
4874  }
4875 }
4876 
4877 void vmaGetPhysicalDeviceProperties(
4878  VmaAllocator allocator,
4879  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
4880 {
4881  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
4882  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
4883 }
4884 
4885 void vmaGetMemoryProperties(
4886  VmaAllocator allocator,
4887  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
4888 {
4889  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
4890  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
4891 }
4892 
4893 void vmaGetMemoryTypeProperties(
4894  VmaAllocator allocator,
4895  uint32_t memoryTypeIndex,
4896  VkMemoryPropertyFlags* pFlags)
4897 {
4898  VMA_ASSERT(allocator && pFlags);
4899  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
4900  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
4901 }
4902 
4903 void vmaCalculateStats(
4904  VmaAllocator allocator,
4905  VmaStats* pStats)
4906 {
4907  VMA_ASSERT(allocator && pStats);
4908  VMA_DEBUG_GLOBAL_MUTEX_LOCK
4909  allocator->CalculateStats(pStats);
4910 }
4911 
4912 #if VMA_STATS_STRING_ENABLED
4913 
4914 void vmaBuildStatsString(
4915  VmaAllocator allocator,
4916  char** ppStatsString,
4917  VkBool32 detailedMap)
4918 {
4919  VMA_ASSERT(allocator && ppStatsString);
4920  VMA_DEBUG_GLOBAL_MUTEX_LOCK
4921 
4922  VmaStringBuilder sb(allocator);
4923  {
4924  VmaStats stats;
4925  allocator->CalculateStats(&stats);
4926 
4927  sb.Add("{\n\"Total\": ");
4928  VmaPrintStatInfo(sb, stats.total);
4929 
4930  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
4931  {
4932  sb.Add(",\n\"Heap ");
4933  sb.AddNumber(heapIndex);
4934  sb.Add("\": {\n\t\"Size\": ");
4935  sb.AddNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
4936  sb.Add(",\n\t\"Flags\": ");
4937  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
4938  {
4939  sb.AddString("DEVICE_LOCAL");
4940  }
4941  else
4942  {
4943  sb.AddString("");
4944  }
4945  if(stats.memoryHeap[heapIndex].AllocationCount > 0)
4946  {
4947  sb.Add(",\n\t\"Stats\": ");
4948  VmaPrintStatInfo(sb, stats.memoryHeap[heapIndex]);
4949  }
4950 
4951  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
4952  {
4953  if(allocator->m_MemProps.memoryTypes[typeIndex].heapIndex == heapIndex)
4954  {
4955  sb.Add(",\n\t\"Type ");
4956  sb.AddNumber(typeIndex);
4957  sb.Add("\": {\n\t\t\"Flags\": \"");
4958  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
4959  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
4960  {
4961  sb.Add(" DEVICE_LOCAL");
4962  }
4963  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
4964  {
4965  sb.Add(" HOST_VISIBLE");
4966  }
4967  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
4968  {
4969  sb.Add(" HOST_COHERENT");
4970  }
4971  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
4972  {
4973  sb.Add(" HOST_CACHED");
4974  }
4975  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
4976  {
4977  sb.Add(" LAZILY_ALLOCATED");
4978  }
4979  sb.Add("\"");
4980  if(stats.memoryType[typeIndex].AllocationCount > 0)
4981  {
4982  sb.Add(",\n\t\t\"Stats\": ");
4983  VmaPrintStatInfo(sb, stats.memoryType[typeIndex]);
4984  }
4985  sb.Add("\n\t}");
4986  }
4987  }
4988  sb.Add("\n}");
4989  }
4990  if(detailedMap == VK_TRUE)
4991  {
4992  allocator->PrintDetailedMap(sb);
4993  }
4994  sb.Add("\n}\n");
4995  }
4996 
4997  const size_t len = sb.GetLength();
4998  char* const pChars = vma_new_array(allocator, char, len + 1);
4999  if(len > 0)
5000  {
5001  memcpy(pChars, sb.GetData(), len);
5002  }
5003  pChars[len] = '\0';
5004  *ppStatsString = pChars;
5005 }
5006 
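// Approximate shape of the string built above, reconstructed from the sb.Add()
// calls (illustrative only; actual numbers and heap/type counts depend on the
// device):
//
//     {
//     "Total": { ... },
//     "Heap 0": {
//         "Size": 4294967296,
//         "Flags": "DEVICE_LOCAL",
//         "Type 0": { "Flags": " DEVICE_LOCAL HOST_VISIBLE", "Stats": { ... } }
//     }
//     }
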
5007 void vmaFreeStatsString(
5008  VmaAllocator allocator,
5009  char* pStatsString)
5010 {
5011  if(pStatsString != VMA_NULL)
5012  {
5013  VMA_ASSERT(allocator);
5014  size_t len = strlen(pStatsString);
5015  vma_delete_array(allocator, pStatsString, len + 1);
5016  }
5017 }
5018 
5019 #endif // #if VMA_STATS_STRING_ENABLED
5020 
5021 /** This function is not protected by any mutex because it just reads immutable data.
5022 */
5023 VkResult vmaFindMemoryTypeIndex(
5024  VmaAllocator allocator,
5025  uint32_t memoryTypeBits,
5026  const VmaMemoryRequirements* pMemoryRequirements,
5027  uint32_t* pMemoryTypeIndex)
5028 {
5029  VMA_ASSERT(allocator != VK_NULL_HANDLE);
5030  VMA_ASSERT(pMemoryRequirements != VMA_NULL);
5031  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
5032 
5033  uint32_t requiredFlags = pMemoryRequirements->requiredFlags;
5034  uint32_t preferredFlags = pMemoryRequirements->preferredFlags;
5035  if(preferredFlags == 0)
5036  {
5037  preferredFlags = requiredFlags;
5038  }
5039  // preferredFlags, if not 0, must be a superset of requiredFlags.
5040  VMA_ASSERT((requiredFlags & ~preferredFlags) == 0);
5041 
5042  // Convert usage to requiredFlags and preferredFlags.
5043  switch(pMemoryRequirements->usage)
5044  {
5045  case VMA_MEMORY_USAGE_UNKNOWN:
5046  break;
5047  case VMA_MEMORY_USAGE_GPU_ONLY:
5048  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
5049  break;
5050  case VMA_MEMORY_USAGE_CPU_ONLY:
5051  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
5052  break;
5053  case VMA_MEMORY_USAGE_CPU_TO_GPU:
5054  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
5055  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
5056  break;
5057  case VMA_MEMORY_USAGE_GPU_TO_CPU:
5058  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
5059  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
5060  break;
5061  default:
5062  break;
5063  }
5064 
5065  if((pMemoryRequirements->flags & VMA_MEMORY_REQUIREMENT_PERSISTENT_MAP_BIT) != 0)
5066  {
5067  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
5068  }
5069 
5070  *pMemoryTypeIndex = UINT32_MAX;
5071  uint32_t minCost = UINT32_MAX;
5072  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
5073  memTypeIndex < allocator->GetMemoryTypeCount();
5074  ++memTypeIndex, memTypeBit <<= 1)
5075  {
5076  // This memory type is acceptable according to memoryTypeBits bitmask.
5077  if((memTypeBit & memoryTypeBits) != 0)
5078  {
5079  const VkMemoryPropertyFlags currFlags =
5080  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
5081  // This memory type contains requiredFlags.
5082  if((requiredFlags & ~currFlags) == 0)
5083  {
5084  // Calculate cost as number of bits from preferredFlags not present in this memory type.
5085  uint32_t currCost = CountBitsSet(preferredFlags & ~currFlags);
5086  // Remember memory type with lowest cost.
5087  if(currCost < minCost)
5088  {
5089  *pMemoryTypeIndex = memTypeIndex;
5090  if(currCost == 0)
5091  {
5092  return VK_SUCCESS;
5093  }
5094  minCost = currCost;
5095  }
5096  }
5097  }
5098  }
5099  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
5100 }
5101 
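// A worked example of the cost computation above (illustrative values, not from
// the original file). Suppose requiredFlags = HOST_VISIBLE and preferredFlags =
// HOST_VISIBLE | DEVICE_LOCAL, and memoryTypeBits admits these types:
//
//     type 0: DEVICE_LOCAL                 -> rejected: missing required HOST_VISIBLE
//     type 1: HOST_VISIBLE | HOST_COHERENT -> cost = CountBitsSet(DEVICE_LOCAL) = 1
//     type 2: HOST_VISIBLE | DEVICE_LOCAL  -> cost = 0, returned immediately
//
// Type 2 wins; type 1 remains the fallback if type 2 is later masked out of
// memoryTypeBits, as AllocateMemory() does on failure.
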
5102 VkResult vmaAllocateMemory(
5103  VmaAllocator allocator,
5104  const VkMemoryRequirements* pVkMemoryRequirements,
5105  const VmaMemoryRequirements* pVmaMemoryRequirements,
5106  VmaAllocation* pAllocation,
5107  VmaAllocationInfo* pAllocationInfo)
5108 {
5109  VMA_ASSERT(allocator && pVkMemoryRequirements && pVmaMemoryRequirements && pAllocation);
5110 
5111  VMA_DEBUG_LOG("vmaAllocateMemory");
5112 
5113  VMA_DEBUG_GLOBAL_MUTEX_LOCK
5114 
5115  VkResult result = allocator->AllocateMemory(
5116  *pVkMemoryRequirements,
5117  *pVmaMemoryRequirements,
5118  VMA_SUBALLOCATION_TYPE_UNKNOWN,
5119  pAllocation);
5120  if(result == VK_SUCCESS && pAllocationInfo != VMA_NULL)
5121  {
5122  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
5123  }
5124  return result;
5125 }
5126 
5127 VkResult vmaAllocateMemoryForBuffer(
5128  VmaAllocator allocator,
5129  VkBuffer buffer,
5130  const VmaMemoryRequirements* pMemoryRequirements,
5131  VmaAllocation* pAllocation,
5132  VmaAllocationInfo* pAllocationInfo)
5133 {
5134  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pMemoryRequirements && pAllocation);
5135 
5136  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
5137 
5138  VMA_DEBUG_GLOBAL_MUTEX_LOCK
5139 
5140  VkMemoryRequirements vkMemReq = {};
5141  vkGetBufferMemoryRequirements(allocator->m_hDevice, buffer, &vkMemReq);
5142 
5143  VkResult result = allocator->AllocateMemory(
5144  vkMemReq,
5145  *pMemoryRequirements,
5146  VMA_SUBALLOCATION_TYPE_BUFFER,
5147  pAllocation);
5148  if(result == VK_SUCCESS && pAllocationInfo != VMA_NULL)
5149  {
5150  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
5151  }
5152  return result;
5153 }
5154 
5155 VkResult vmaAllocateMemoryForImage(
5156  VmaAllocator allocator,
5157  VkImage image,
5158  const VmaMemoryRequirements* pMemoryRequirements,
5159  VmaAllocation* pAllocation,
5160  VmaAllocationInfo* pAllocationInfo)
5161 {
5162  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pMemoryRequirements && pAllocation);
5163 
5164  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
5165 
5166  VMA_DEBUG_GLOBAL_MUTEX_LOCK
5167 
5168  VkResult result = AllocateMemoryForImage(
5169  allocator,
5170  image,
5171  pMemoryRequirements,
5172  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
5173  pAllocation);
5174  if(result == VK_SUCCESS && pAllocationInfo != VMA_NULL)
5175  {
5176  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
5177  }
5178  return result;
5179 }
5180 
5181 void vmaFreeMemory(
5182  VmaAllocator allocator,
5183  VmaAllocation allocation)
5184 {
5185  VMA_ASSERT(allocator && allocation);
5186 
5187  VMA_DEBUG_LOG("vmaFreeMemory");
5188 
5189  VMA_DEBUG_GLOBAL_MUTEX_LOCK
5190 
5191  allocator->FreeMemory(allocation);
5192 }
5193 
5194 void vmaGetAllocationInfo(
5195  VmaAllocator allocator,
5196  VmaAllocation allocation,
5197  VmaAllocationInfo* pAllocationInfo)
5198 {
5199  VMA_ASSERT(allocator && allocation && pAllocationInfo);
5200 
5201  VMA_DEBUG_GLOBAL_MUTEX_LOCK
5202 
5203  allocator->GetAllocationInfo(allocation, pAllocationInfo);
5204 }
5205 
5206 void vmaSetAllocationUserData(
5207  VmaAllocator allocator,
5208  VmaAllocation allocation,
5209  void* pUserData)
5210 {
5211  VMA_ASSERT(allocator && allocation);
5212 
5213  VMA_DEBUG_GLOBAL_MUTEX_LOCK
5214 
5215  allocation->SetUserData(pUserData);
5216 }
5217 
5218 VkResult vmaMapMemory(
5219  VmaAllocator allocator,
5220  VmaAllocation allocation,
5221  void** ppData)
5222 {
5223  VMA_ASSERT(allocator && allocation && ppData);
5224 
5225  VMA_DEBUG_GLOBAL_MUTEX_LOCK
5226 
5227  return vkMapMemory(allocator->m_hDevice, allocation->GetMemory(),
5228  allocation->GetOffset(), allocation->GetSize(), 0, ppData);
5229 }
5230 
5231 void vmaUnmapMemory(
5232  VmaAllocator allocator,
5233  VmaAllocation allocation)
5234 {
5235  VMA_ASSERT(allocator && allocation);
5236 
5237  VMA_DEBUG_GLOBAL_MUTEX_LOCK
5238 
5239  vkUnmapMemory(allocator->m_hDevice, allocation->GetMemory());
5240 }
5241 
5242 void vmaUnmapPersistentlyMappedMemory(VmaAllocator allocator)
5243 {
5244  VMA_ASSERT(allocator);
5245 
5246  VMA_DEBUG_GLOBAL_MUTEX_LOCK
5247 
5248  allocator->UnmapPersistentlyMappedMemory();
5249 }
5250 
5251 VkResult vmaMapPersistentlyMappedMemory(VmaAllocator allocator)
5252 {
5253  VMA_ASSERT(allocator);
5254 
5255  VMA_DEBUG_GLOBAL_MUTEX_LOCK
5256 
5257  return allocator->MapPersistentlyMappedMemory();
5258 }
5259 
5260 VkResult vmaDefragment(
5261  VmaAllocator allocator,
5262  VmaAllocation* pAllocations,
5263  size_t allocationCount,
5264  VkBool32* pAllocationsChanged,
5265  const VmaDefragmentationInfo *pDefragmentationInfo,
5266  VmaDefragmentationStats* pDefragmentationStats)
5267 {
5268  VMA_ASSERT(allocator && pAllocations);
5269 
5270  VMA_DEBUG_LOG("vmaDefragment");
5271 
5272  VMA_DEBUG_GLOBAL_MUTEX_LOCK
5273 
5274  return allocator->Defragment(pAllocations, allocationCount, pAllocationsChanged, pDefragmentationInfo, pDefragmentationStats);
5275 }
5276 
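// A minimal usage sketch for vmaDefragment - illustrative, not part of the
// original header. Assumes `allocator` is a valid VmaAllocator and `allocs`
// holds `allocCount` block allocations in HOST_VISIBLE memory created earlier.
//
//     std::vector<VkBool32> changed(allocCount);
//     VmaDefragmentationStats stats = {};
//     VkResult res = vmaDefragment(
//         allocator, allocs, allocCount, changed.data(),
//         nullptr, // pDefragmentationInfo == null: no limits on moved bytes/allocations.
//         &stats);
//     // For each i with changed[i] == VK_TRUE, the allocation now lives at a new
//     // VkDeviceMemory/offset, so buffers or images bound to it have to be
//     // recreated and rebound by the caller.
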
5277 VkResult vmaCreateBuffer(
5278  VmaAllocator allocator,
5279  const VkBufferCreateInfo* pCreateInfo,
5280  const VmaMemoryRequirements* pMemoryRequirements,
5281  VkBuffer* pBuffer,
5282  VmaAllocation* pAllocation,
5283  VmaAllocationInfo* pAllocationInfo)
5284 {
5285  VMA_ASSERT(allocator && pCreateInfo && pMemoryRequirements && pBuffer && pAllocation);
5286 
5287  VMA_DEBUG_LOG("vmaCreateBuffer");
5288 
5289  VMA_DEBUG_GLOBAL_MUTEX_LOCK
5290 
5291  *pBuffer = VK_NULL_HANDLE;
5292  *pAllocation = VK_NULL_HANDLE;
5293 
5294  // 1. Create VkBuffer.
5295  VkResult res = vkCreateBuffer(allocator->m_hDevice, pCreateInfo, allocator->GetAllocationCallbacks(), pBuffer);
5296  if(res >= 0)
5297  {
5298  // 2. vkGetBufferMemoryRequirements.
5299  VkMemoryRequirements vkMemReq = {};
5300  vkGetBufferMemoryRequirements(allocator->m_hDevice, *pBuffer, &vkMemReq);
5301 
5302  // 3. Allocate memory using allocator.
5303  res = allocator->AllocateMemory(
5304  vkMemReq,
5305  *pMemoryRequirements,
5306  VMA_SUBALLOCATION_TYPE_BUFFER,
5307  pAllocation);
5308  if(res >= 0)
5309  {
5310  // 4. Bind buffer with memory.
5311  res = vkBindBufferMemory(allocator->m_hDevice, *pBuffer, (*pAllocation)->GetMemory(), (*pAllocation)->GetOffset());
5312  if(res >= 0)
5313  {
5314  // All steps succeeded.
5315  if(pAllocationInfo != VMA_NULL)
5316  {
5317  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
5318  }
5319  return VK_SUCCESS;
5320  }
5321  allocator->FreeMemory(*pAllocation);
5322  *pAllocation = VK_NULL_HANDLE;
5323  return res;
5324  }
5325  vkDestroyBuffer(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
5326  *pBuffer = VK_NULL_HANDLE;
5327  return res;
5328  }
5329  return res;
5330 }
5331 
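// A minimal usage sketch showing the contract this patch establishes -
// illustrative, not part of the original header; size and usage flags are
// arbitrary example values, `allocator` is assumed valid.
//
//     VkBufferCreateInfo bufInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
//     bufInfo.size = 65536;
//     bufInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
//
//     VmaMemoryRequirements memReq = {};
//     memReq.usage = VMA_MEMORY_USAGE_GPU_ONLY;
//
//     VkBuffer buf; VmaAllocation alloc;
//     VkResult res = vmaCreateBuffer(allocator, &bufInfo, &memReq, &buf, &alloc, nullptr);
//     // On failure: res < 0 and, after this fix, buf == VK_NULL_HANDLE and
//     // alloc == VK_NULL_HANDLE, so there is nothing to clean up.
//     // On success, destroy both together with vmaDestroyBuffer(allocator, buf, alloc).
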
5332 void vmaDestroyBuffer(
5333  VmaAllocator allocator,
5334  VkBuffer buffer,
5335  VmaAllocation allocation)
5336 {
5337  if(buffer != VK_NULL_HANDLE)
5338  {
5339  VMA_ASSERT(allocator);
5340 
5341  VMA_DEBUG_LOG("vmaDestroyBuffer");
5342 
5343  VMA_DEBUG_GLOBAL_MUTEX_LOCK
5344 
5345  vkDestroyBuffer(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
5346 
5347  allocator->FreeMemory(allocation);
5348  }
5349 }
5350 
5351 VkResult vmaCreateImage(
5352  VmaAllocator allocator,
5353  const VkImageCreateInfo* pCreateInfo,
5354  const VmaMemoryRequirements* pMemoryRequirements,
5355  VkImage* pImage,
5356  VmaAllocation* pAllocation,
5357  VmaAllocationInfo* pAllocationInfo)
5358 {
5359  VMA_ASSERT(allocator && pCreateInfo && pMemoryRequirements && pImage && pAllocation);
5360 
5361  VMA_DEBUG_LOG("vmaCreateImage");
5362 
5363  VMA_DEBUG_GLOBAL_MUTEX_LOCK
5364 
5365  // 1. Create VkImage.
5366  VkResult res = vkCreateImage(allocator->m_hDevice, pCreateInfo, allocator->GetAllocationCallbacks(), pImage);
5367  if(res >= 0)
5368  {
5369  VkMappedMemoryRange mem = {};
5370  VmaSuballocationType suballocType = pCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
5371  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
5372  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
5373 
5374  // 2. Allocate memory using allocator.
5375  res = AllocateMemoryForImage(allocator, *pImage, pMemoryRequirements, suballocType, pAllocation);
5376  if(res >= 0)
5377  {
5378  // 3. Bind image with memory.
5379  res = vkBindImageMemory(allocator->m_hDevice, *pImage, (*pAllocation)->GetMemory(), (*pAllocation)->GetOffset());
5380  if(res >= 0)
5381  {
5382  // All steps succeeded.
5383  if(pAllocationInfo != VMA_NULL)
5384  {
5385  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
5386  }
5387  return VK_SUCCESS;
5388  }
5389  allocator->FreeMemory(*pAllocation);
5390  *pAllocation = VK_NULL_HANDLE;
5391  return res;
5392  }
5393  vkDestroyImage(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
5394  *pImage = VK_NULL_HANDLE;
5395  return res;
5396  }
5397  return res;
5398 }
5399 
5400 void vmaDestroyImage(
5401  VmaAllocator allocator,
5402  VkImage image,
5403  VmaAllocation allocation)
5404 {
5405  if(image != VK_NULL_HANDLE)
5406  {
5407  VMA_ASSERT(allocator);
5408 
5409  VMA_DEBUG_LOG("vmaDestroyImage");
5410 
5411  VMA_DEBUG_GLOBAL_MUTEX_LOCK
5412 
5413  vkDestroyImage(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
5414 
5415  allocator->FreeMemory(allocation);
5416  }
5417 }
5418 
5419 #endif // #ifdef VMA_IMPLEMENTATION
diff --git a/src/vk_mem_alloc.h b/src/vk_mem_alloc.h
index eb5a471..74cbc22 100644
--- a/src/vk_mem_alloc.h
+++ b/src/vk_mem_alloc.h
@@ -667,17 +667,20 @@ VkResult vmaDefragment(
 /**
 @param[out] pBuffer Buffer that was created.
 @param[out] pAllocation Allocation that was created.
-@param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function VmaGetAllocationInfo().
+@param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo().
 
 This function automatically:
 
--# Creates buffer/image.
+-# Creates buffer.
 -# Allocates appropriate memory for it.
--# Binds the buffer/image with the memory.
+-# Binds the buffer with the memory.
 
-You do not (and should not) pass returned pMemory to vmaFreeMemory. Only calling
-vmaDestroyBuffer() / vmaDestroyImage() is required for objects created using
-vmaCreateBuffer() / vmaCreateImage().
+If any of these operations fail, buffer and allocation are not created,
+returned value is negative error code, *pBuffer and *pAllocation are null.
+
+If the function succeeded, you must destroy both buffer and allocation when you
+no longer need them using either convenience function vmaDestroyBuffer() or
+separately, using vkDestroyBuffer() and vmaFreeMemory().
 */
 VkResult vmaCreateBuffer(
     VmaAllocator allocator,
@@ -5285,6 +5288,9 @@ VkResult vmaCreateBuffer(
 
     VMA_DEBUG_GLOBAL_MUTEX_LOCK
 
+    *pBuffer = VK_NULL_HANDLE;
+    *pAllocation = VK_NULL_HANDLE;
+
     // 1. Create VkBuffer.
     VkResult res = vkCreateBuffer(allocator->m_hDevice, pCreateInfo, allocator->GetAllocationCallbacks(), pBuffer);
     if(res >= 0)
@@ -5313,9 +5319,11 @@ VkResult vmaCreateBuffer(
             return VK_SUCCESS;
         }
         allocator->FreeMemory(*pAllocation);
+        *pAllocation = VK_NULL_HANDLE;
         return res;
     }
     vkDestroyBuffer(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
+    *pBuffer = VK_NULL_HANDLE;
     return res;
 }
 return res;
@@ -5354,6 +5362,9 @@ VkResult vmaCreateImage(
 
     VMA_DEBUG_GLOBAL_MUTEX_LOCK
 
+    *pImage = VK_NULL_HANDLE;
+    *pAllocation = VK_NULL_HANDLE;
+
     // 1. Create VkImage.
     VkResult res = vkCreateImage(allocator->m_hDevice, pCreateInfo, allocator->GetAllocationCallbacks(), pImage);
     if(res >= 0)
@@ -5379,9 +5390,11 @@ VkResult vmaCreateImage(
             return VK_SUCCESS;
         }
         allocator->FreeMemory(*pAllocation);
+        *pAllocation = VK_NULL_HANDLE;
         return res;
     }
     vkDestroyImage(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
+    *pImage = VK_NULL_HANDLE;
     return res;
 }
 return res;