1+ #include "sys/mutex.h"
12#define KL_LOG KL_VM
23#include <sys/klog.h>
34#include <sys/malloc.h>
67#include <sys/vm_map.h>
78#include <sys/vm_amap.h>
89#include <sys/vm_physmem.h>
9- #include <sys/refcnt.h>
1010#include <sys/errno.h>
1111
1212/*
2424 * (adjustable with EXTRA_AMAP_SLOTS) to allow resizing of small amounts.
2525 * - Amap is managing a simple array of referenced pages so it may not be the
2626 * most effective implementation.
27+ *
28+ * LOCKING
 * There are two kinds of locks implemented here: the amap lock and the anon
 * lock. Sometimes both mutexes must be held at once (e.g. when replacing an
 * existing anon in an amap with a new one). To avoid deadlocks a consistent
 * lock ordering must be preserved: the amap is always locked first.
33+ *
 * A few observations about locking:
 * 1. No function here requires the caller to hold any lock on entry.
 * 2. The only situation in which two locks are held simultaneously is in
 *    `vm_amap_replace_anon`.
2738 */
2839
2940/* Amap size will be increased by this number of slots to easier resizing. */
3950struct vm_amap {
4051 mtx_t mtx ; /* Amap lock. */
4152 size_t slots ; /* (!) maximum number of slots */
42- refcnt_t ref_cnt ; /* (a) number of references */
53+ uint32_t ref_cnt ; /* (a) number of references */
4354 vm_anon_t * * anon_list ; /* (@) anon list */
4455 bitstr_t * anon_bitmap ; /* (@) anon bitmap */
4556};
@@ -76,9 +87,16 @@ vm_aref_t vm_amap_needs_copy(vm_aref_t aref, size_t slots) {
7687 if (!amap )
7788 return (vm_aref_t ){.amap = NULL , .offset = 0 };
7889
79- vm_amap_t * new = vm_amap_alloc (slots );
80-
8190 SCOPED_MTX_LOCK (& amap -> mtx );
91+
  /* If we hold the only reference, the amap doesn't have to be copied. */
93+ if (amap -> ref_cnt == 1 ) {
94+ return aref ;
95+ }
96+
97+ amap -> ref_cnt -- ;
98+
99+ vm_amap_t * new = vm_amap_alloc (slots );
82100 for (size_t slot = 0 ; slot < slots ; slot ++ ) {
83101 size_t old_slot = aref .offset + slot ;
84102
@@ -125,7 +143,7 @@ void vm_amap_insert_anon(vm_aref_t aref, vm_anon_t *anon, size_t offset) {
125143 bit_set (amap -> anon_bitmap , offset );
126144}
127145
128- void vm_amap_replace_anon (vm_aref_t aref , vm_anon_t * anon , size_t offset ) {
146+ bool vm_amap_replace_anon (vm_aref_t aref , vm_anon_t * anon , size_t offset ) {
129147 vm_amap_t * amap = aref .amap ;
130148 assert (amap != NULL && anon != NULL );
131149
@@ -138,12 +156,21 @@ void vm_amap_replace_anon(vm_aref_t aref, vm_anon_t *anon, size_t offset) {
138156 /* Anon must be already there. */
139157 assert (bit_test (amap -> anon_bitmap , offset ));
140158
141- /* Drop the old anon, because we won't use it any more. */
142159 vm_anon_t * old = amap -> anon_list [offset ];
143- vm_anon_drop (old );
144160
145- /* Insert new one. */
161+ /* If old anon was transferred to another amap there is no need to
162+ * replace it here. */
163+ WITH_MTX_LOCK (& old -> mtx ) {
164+ if (old -> ref_cnt == 1 ) {
165+ vm_anon_drop (anon );
166+ return false;
167+ }
168+ /* We are actually replacing it. */
169+ old -> ref_cnt -- ;
170+ }
171+
146172 amap -> anon_list [offset ] = anon ;
173+ return true;
147174}
148175
149176static void vm_amap_remove_pages_unlocked (vm_amap_t * amap , size_t start ,
@@ -169,48 +196,60 @@ void vm_amap_remove_pages(vm_aref_t aref, size_t start, size_t nslots) {
169196}
170197
171198void vm_amap_hold (vm_amap_t * amap ) {
172- refcnt_acquire (& amap -> ref_cnt );
199+ SCOPED_MTX_LOCK (& amap -> mtx );
200+ amap -> ref_cnt ++ ;
173201}
174202
175203void vm_amap_drop (vm_amap_t * amap ) {
176- if (refcnt_release (& amap -> ref_cnt )) {
177- vm_amap_remove_pages_unlocked (amap , 0 , amap -> slots );
178- kfree (M_AMAP , amap -> anon_list );
179- kfree (M_AMAP , amap -> anon_bitmap );
180- pool_free (P_VM_AMAP_STRUCT , amap );
204+ WITH_MTX_LOCK (& amap -> mtx ) {
205+ amap -> ref_cnt -- ;
206+ if (amap -> ref_cnt >= 1 )
207+ return ;
181208 }
209+ vm_amap_remove_pages_unlocked (amap , 0 , amap -> slots );
210+ kfree (M_AMAP , amap -> anon_list );
211+ kfree (M_AMAP , amap -> anon_bitmap );
212+ pool_free (P_VM_AMAP_STRUCT , amap );
182213}
183214
184- vm_anon_t * vm_anon_alloc (void ) {
215+ static vm_anon_t * alloc_empty_anon (void ) {
185216 vm_anon_t * anon = pool_alloc (P_VM_ANON_STRUCT , M_WAITOK );
186- if (!anon )
187- return NULL ;
188-
189217 anon -> ref_cnt = 1 ;
190218 anon -> page = NULL ;
219+ mtx_init (& anon -> mtx , MTX_SLEEP );
220+ return anon ;
221+ }
222+
223+ vm_anon_t * vm_anon_alloc (void ) {
224+ vm_anon_t * anon = alloc_empty_anon ();
225+ vm_page_t * page = vm_page_alloc (1 );
226+ if (!page ) {
227+ vm_anon_drop (anon );
228+ return NULL ;
229+ }
230+ pmap_zero_page (page );
231+ anon -> page = page ;
191232 return anon ;
192233}
193234
194235void vm_anon_hold (vm_anon_t * anon ) {
195- refcnt_acquire (& anon -> ref_cnt );
236+ SCOPED_MTX_LOCK (& anon -> mtx );
237+ anon -> ref_cnt ++ ;
196238}
197239
198240void vm_anon_drop (vm_anon_t * anon ) {
199- if (refcnt_release (& anon -> ref_cnt )) {
200- vm_page_free (anon -> page );
201- pool_free (P_VM_ANON_STRUCT , anon );
241+ WITH_MTX_LOCK (& anon -> mtx ) {
242+ anon -> ref_cnt -- ;
243+ if (anon -> ref_cnt >= 1 )
244+ return ;
202245 }
246+ vm_page_free (anon -> page );
247+ pool_free (P_VM_ANON_STRUCT , anon );
203248}
204249
205- vm_anon_t * vm_anon_transfer (vm_anon_t * src ) {
206- vm_anon_t * new = vm_anon_alloc ();
207- if (!new )
208- return NULL ;
209-
250+ vm_anon_t * vm_anon_copy (vm_anon_t * src ) {
251+ vm_anon_t * new = alloc_empty_anon ();
210252 new -> page = vm_page_alloc (1 );
211253 pmap_copy_page (src -> page , new -> page );
212-
213- /* We don't drop the src anon because it is still used inside the src amap. */
214-
215254 return new ;
216255}
0 commit comments