@@ -88,7 +88,7 @@ class CachedNMTInformation : public VirtualMemoryWalker {
88
88
// of them fit into a cache line.
89
89
Range* _ranges;
90
90
MEMFLAGS* _flags;
91
- uintx _count, _capacity;
91
+ size_t _count, _capacity;
92
92
public:
93
93
// Start with an empty cache; the parallel _ranges/_flags arrays are
// allocated lazily (via raw ::realloc) when the first entry is added.
CachedNMTInformation () : _ranges(nullptr ), _flags(nullptr ), _count(0 ), _capacity(0 ) {}
94
94
@@ -107,12 +107,12 @@ class CachedNMTInformation : public VirtualMemoryWalker {
107
107
}
108
108
if (_count == _capacity) {
109
109
// Enlarge if needed
110
- const uintx new_capacity = MAX2 ((uintx )4096 , 2 * _capacity);
110
+ const size_t new_capacity = MAX2 ((size_t )4096 , 2 * _capacity);
111
111
// Unfortunately, we need to allocate manually, raw, since we must prevent NMT deadlocks (ThreadCritical).
112
112
ALLOW_C_FUNCTION (realloc, _ranges = (Range*)::realloc (_ranges, new_capacity * sizeof (Range));)
113
113
ALLOW_C_FUNCTION (realloc, _flags = (MEMFLAGS*)::realloc (_flags, new_capacity * sizeof (MEMFLAGS));)
114
114
if (_ranges == nullptr || _flags == nullptr ) {
115
- // In case of OOM lets make no fuzz . Just return.
115
+ // In case of OOM lets make no fuss . Just return.
116
116
return false ;
117
117
}
118
118
_capacity = new_capacity;
@@ -127,11 +127,21 @@ class CachedNMTInformation : public VirtualMemoryWalker {
127
127
// Given a vma [from, to), find all regions that intersect with this vma and
128
128
// return their collective flags.
129
129
MemFlagBitmap lookup (const void * from, const void * to) const {
130
+ assert (from <= to, " Sanity" );
131
+ // We optimize for sequential lookups, since this class is used when a list
132
+ // of OS mappings is scanned (VirtualQuery, /proc/pid/maps), and these lists
133
+ // are usually sorted in order of addresses, ascending.
134
+ static uintx last = 0 ;
135
+ if (to <= _ranges[last].from ) {
136
+ // the cached range is to the right of the given section; re-start the search
137
+ last = 0 ;
138
+ }
130
139
MemFlagBitmap bm;
131
- for (uintx i = 0 ; i < _count; i++) {
140
+ for (uintx i = last ; i < _count; i++) {
132
141
if (range_intersects (from, to, _ranges[i].from , _ranges[i].to )) {
133
142
bm.set_flag (_flags[i]);
134
- } else if (from < _ranges[i].to ) {
143
+ } else if (to <= _ranges[i].from ) {
144
+ last = i;
135
145
break ;
136
146
}
137
147
}
0 commit comments