upgrade: implement partial upgrade for upgrading persistent-nodemap...
Pulkit Goyal
r47199:98e39f04 default
@@ -1,647 +1,652 @@
# nodemap.py - nodemap related code and utilities
#
# Copyright 2019 Pierre-Yves David <pierre-yves.david@octobus.net>
# Copyright 2019 George Racinet <georges.racinet@octobus.net>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import os
import re
import struct

from ..node import hex

from .. import (
    error,
    util,
)


class NodeMap(dict):
    def __missing__(self, x):
        raise error.RevlogError(b'unknown node: %s' % x)


def persisted_data(revlog):
    """read the nodemap for a revlog from disk"""
    if revlog.nodemap_file is None:
        return None
    pdata = revlog.opener.tryread(revlog.nodemap_file)
    if not pdata:
        return None
    offset = 0
    (version,) = S_VERSION.unpack(pdata[offset : offset + S_VERSION.size])
    if version != ONDISK_VERSION:
        return None
    offset += S_VERSION.size
    headers = S_HEADER.unpack(pdata[offset : offset + S_HEADER.size])
    uid_size, tip_rev, data_length, data_unused, tip_node_size = headers
    offset += S_HEADER.size
    docket = NodeMapDocket(pdata[offset : offset + uid_size])
    offset += uid_size
    docket.tip_rev = tip_rev
    docket.tip_node = pdata[offset : offset + tip_node_size]
    docket.data_length = data_length
    docket.data_unused = data_unused

    filename = _rawdata_filepath(revlog, docket)
    use_mmap = revlog.opener.options.get(b"persistent-nodemap.mmap")
    try:
        with revlog.opener(filename) as fd:
            if use_mmap:
                data = util.buffer(util.mmapread(fd, data_length))
            else:
                data = fd.read(data_length)
    except (IOError, OSError) as e:
        if e.errno == errno.ENOENT:
            return None
        else:
            raise
    if len(data) < data_length:
        return None
    return docket, data


def setup_persistent_nodemap(tr, revlog):
    """Install whatever is needed transaction side to persist a nodemap on disk

    (only actually persist the nodemap if this is relevant for this revlog)
    """
    if revlog._inline:
        return  # inlined revlog are too small for this to be relevant
    if revlog.nodemap_file is None:
        return  # we do not use persistent_nodemap on this revlog

    # we need to happen after the changelog finalization, in that use "cl-"
    callback_id = b"nm-revlog-persistent-nodemap-%s" % revlog.nodemap_file
    if tr.hasfinalize(callback_id):
        return  # no need to register again
    tr.addpending(
        callback_id, lambda tr: persist_nodemap(tr, revlog, pending=True)
    )
    tr.addfinalize(callback_id, lambda tr: persist_nodemap(tr, revlog))


class _NoTransaction(object):
    """transaction like object to update the nodemap outside a transaction"""

    def __init__(self):
        self._postclose = {}

    def addpostclose(self, callback_id, callback_func):
        self._postclose[callback_id] = callback_func

    def registertmp(self, *args, **kwargs):
        pass

    def addbackup(self, *args, **kwargs):
        pass

    def add(self, *args, **kwargs):
        pass

    def addabort(self, *args, **kwargs):
        pass

    def _report(self, *args):
        pass


def update_persistent_nodemap(revlog):
    """update the persistent nodemap right now

    To be used for updating the nodemap on disk outside of a normal transaction
    setup (eg, `debugupdatecache`).
    """
    if revlog._inline:
        return  # inlined revlog are too small for this to be relevant
    if revlog.nodemap_file is None:
        return  # we do not use persistent_nodemap on this revlog

    notr = _NoTransaction()
    persist_nodemap(notr, revlog)
    for k in sorted(notr._postclose):
        notr._postclose[k](None)


-def persist_nodemap(tr, revlog, pending=False):
+def persist_nodemap(tr, revlog, pending=False, force=False):
    """Write nodemap data on disk for a given revlog"""
    if getattr(revlog, 'filteredrevs', ()):
        raise error.ProgrammingError(
            "cannot persist nodemap of a filtered changelog"
        )
    if revlog.nodemap_file is None:
-        msg = "calling persist nodemap on a revlog without the feature enabled"
-        raise error.ProgrammingError(msg)
+        if force:
+            revlog.nodemap_file = get_nodemap_file(
+                revlog.opener, revlog.indexfile
+            )
+        else:
+            msg = "calling persist nodemap on a revlog without the feature enabled"
+            raise error.ProgrammingError(msg)

    can_incremental = util.safehasattr(revlog.index, "nodemap_data_incremental")
    ondisk_docket = revlog._nodemap_docket
    feed_data = util.safehasattr(revlog.index, "update_nodemap_data")
    use_mmap = revlog.opener.options.get(b"persistent-nodemap.mmap")

    data = None
    # first attemp an incremental update of the data
    if can_incremental and ondisk_docket is not None:
        target_docket = revlog._nodemap_docket.copy()
        (
            src_docket,
            data_changed_count,
            data,
        ) = revlog.index.nodemap_data_incremental()
        new_length = target_docket.data_length + len(data)
        new_unused = target_docket.data_unused + data_changed_count
        if src_docket != target_docket:
            data = None
        elif new_length <= (new_unused * 10):  # under 10% of unused data
            data = None
        else:
            datafile = _rawdata_filepath(revlog, target_docket)
            # EXP-TODO: if this is a cache, this should use a cache vfs, not a
            # store vfs
            tr.add(datafile, target_docket.data_length)
            with revlog.opener(datafile, b'r+') as fd:
                fd.seek(target_docket.data_length)
                fd.write(data)
                if feed_data:
                    if use_mmap:
                        fd.seek(0)
                        new_data = fd.read(new_length)
                    else:
                        fd.flush()
                        new_data = util.buffer(util.mmapread(fd, new_length))
            target_docket.data_length = new_length
            target_docket.data_unused = new_unused

    if data is None:
        # otherwise fallback to a full new export
        target_docket = NodeMapDocket()
        datafile = _rawdata_filepath(revlog, target_docket)
        if util.safehasattr(revlog.index, "nodemap_data_all"):
            data = revlog.index.nodemap_data_all()
        else:
            data = persistent_data(revlog.index)
        # EXP-TODO: if this is a cache, this should use a cache vfs, not a
        # store vfs

        tryunlink = revlog.opener.tryunlink

        def abortck(tr):
            tryunlink(datafile)

        callback_id = b"delete-%s" % datafile

        # some flavor of the transaction abort does not cleanup new file, it
        # simply empty them.
        tr.addabort(callback_id, abortck)
        with revlog.opener(datafile, b'w+') as fd:
            fd.write(data)
            if feed_data:
                if use_mmap:
                    new_data = data
                else:
                    fd.flush()
                    new_data = util.buffer(util.mmapread(fd, len(data)))
        target_docket.data_length = len(data)
    target_docket.tip_rev = revlog.tiprev()
    target_docket.tip_node = revlog.node(target_docket.tip_rev)
    # EXP-TODO: if this is a cache, this should use a cache vfs, not a
    # store vfs
    file_path = revlog.nodemap_file
    if pending:
        file_path += b'.a'
        tr.registertmp(file_path)
    else:
        tr.addbackup(file_path)

    with revlog.opener(file_path, b'w', atomictemp=True) as fp:
        fp.write(target_docket.serialize())
    revlog._nodemap_docket = target_docket
    if feed_data:
        revlog.index.update_nodemap_data(target_docket, new_data)

    # search for old index file in all cases, some older process might have
    # left one behind.
    olds = _other_rawdata_filepath(revlog, target_docket)
    if olds:
        realvfs = getattr(revlog, '_realopener', revlog.opener)

        def cleanup(tr):
            for oldfile in olds:
                realvfs.tryunlink(oldfile)

        callback_id = b"revlog-cleanup-nodemap-%s" % revlog.nodemap_file
        tr.addpostclose(callback_id, cleanup)


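[Editor's example, not part of the changeset] The new `force` flag above is what lets a caller persist a nodemap for a revlog that was opened without the persistent-nodemap feature: instead of raising, persist_nodemap() derives the docket name through get_nodemap_file(). A minimal sketch of exercising that path by hand against a local repository; the lock/transaction wiring here is illustrative only and is not how core invokes it:

    from mercurial import hg, ui as uimod
    from mercurial.revlogutils import nodemap as nodemaputil

    repo = hg.repository(uimod.ui.load(), path=b'.')
    unfi = repo.unfiltered()  # persist_nodemap refuses a filtered changelog
    with unfi.lock(), unfi.transaction(b'nodemap-sketch') as tr:
        # force=True: derive `00changelog.n` via get_nodemap_file() and write
        # the docket plus raw data even though the feature is not enabled.
        nodemaputil.persist_nodemap(tr, unfi.changelog, force=True)
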
### Nodemap docket file
#
# The nodemap data are stored on disk using 2 files:
#
# * a raw data files containing a persistent nodemap
#   (see `Nodemap Trie` section)
#
# * a small "docket" file containing medatadata
#
# While the nodemap data can be multiple tens of megabytes, the "docket" is
# small, it is easy to update it automatically or to duplicated its content
# during a transaction.
#
# Multiple raw data can exist at the same time (The currently valid one and a
# new one beind used by an in progress transaction). To accomodate this, the
# filename hosting the raw data has a variable parts. The exact filename is
# specified inside the "docket" file.
#
# The docket file contains information to find, qualify and validate the raw
# data. Its content is currently very light, but it will expand as the on disk
# nodemap gains the necessary features to be used in production.

ONDISK_VERSION = 1
S_VERSION = struct.Struct(">B")
S_HEADER = struct.Struct(">BQQQQ")

ID_SIZE = 8


def _make_uid():
    """return a new unique identifier.

    The identifier is random and composed of ascii characters."""
    return hex(os.urandom(ID_SIZE))


class NodeMapDocket(object):
    """metadata associated with persistent nodemap data

    The persistent data may come from disk or be on their way to disk.
    """

    def __init__(self, uid=None):
        if uid is None:
            uid = _make_uid()
        # a unique identifier for the data file:
        #   - When new data are appended, it is preserved.
        #   - When a new data file is created, a new identifier is generated.
        self.uid = uid
        # the tipmost revision stored in the data file. This revision and all
        # revision before it are expected to be encoded in the data file.
        self.tip_rev = None
        # the node of that tipmost revision, if it mismatch the current index
        # data the docket is not valid for the current index and should be
        # discarded.
        #
        # note: this method is not perfect as some destructive operation could
        # preserve the same tip_rev + tip_node while altering lower revision.
        # However this multiple other caches have the same vulnerability (eg:
        # brancmap cache).
        self.tip_node = None
        # the size (in bytes) of the persisted data to encode the nodemap valid
        # for `tip_rev`.
        #   - data file shorter than this are corrupted,
        #   - any extra data should be ignored.
        self.data_length = None
        # the amount (in bytes) of "dead" data, still in the data file but no
        # longer used for the nodemap.
        self.data_unused = 0

    def copy(self):
        new = NodeMapDocket(uid=self.uid)
        new.tip_rev = self.tip_rev
        new.tip_node = self.tip_node
        new.data_length = self.data_length
        new.data_unused = self.data_unused
        return new

    def __cmp__(self, other):
        if self.uid < other.uid:
            return -1
        if self.uid > other.uid:
            return 1
        elif self.data_length < other.data_length:
            return -1
        elif self.data_length > other.data_length:
            return 1
        return 0

    def __eq__(self, other):
        return self.uid == other.uid and self.data_length == other.data_length

    def serialize(self):
        """return serialized bytes for a docket using the passed uid"""
        data = []
        data.append(S_VERSION.pack(ONDISK_VERSION))
        headers = (
            len(self.uid),
            self.tip_rev,
            self.data_length,
            self.data_unused,
            len(self.tip_node),
        )
        data.append(S_HEADER.pack(*headers))
        data.append(self.uid)
        data.append(self.tip_node)
        return b''.join(data)


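[Editor's example, not part of the changeset] The docket serialized above is just S_VERSION + S_HEADER followed by the uid and the tip node. A standalone sketch of that byte layout, mirroring serialize()/persisted_data(); the concrete values are made up:

    import struct

    S_VERSION = struct.Struct(">B")     # on-disk format version
    S_HEADER = struct.Struct(">BQQQQ")  # uid_size, tip_rev, data_length, data_unused, tip_node_size

    uid = b"00112233deadbeef"           # hex(os.urandom(8)) in the real code
    tip_node = b"\x12" * 20
    raw = b"".join([
        S_VERSION.pack(1),              # ONDISK_VERSION
        S_HEADER.pack(len(uid), 42, 4096, 128, len(tip_node)),
        uid,
        tip_node,
    ])

    # parse it back the way persisted_data() does
    offset = S_VERSION.size
    uid_size, tip_rev, data_length, data_unused, tip_node_size = S_HEADER.unpack_from(raw, offset)
    offset += S_HEADER.size
    assert raw[offset : offset + uid_size] == uid
    assert (tip_rev, data_length, data_unused) == (42, 4096, 128)
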
def _rawdata_filepath(revlog, docket):
    """The (vfs relative) nodemap's rawdata file for a given uid"""
    if revlog.nodemap_file.endswith(b'.n.a'):
        prefix = revlog.nodemap_file[:-4]
    else:
        prefix = revlog.nodemap_file[:-2]
    return b"%s-%s.nd" % (prefix, docket.uid)


def _other_rawdata_filepath(revlog, docket):
    prefix = revlog.nodemap_file[:-2]
    pattern = re.compile(br"(^|/)%s-[0-9a-f]+\.nd$" % prefix)
    new_file_path = _rawdata_filepath(revlog, docket)
    new_file_name = revlog.opener.basename(new_file_path)
    dirpath = revlog.opener.dirname(new_file_path)
    others = []
    for f in revlog.opener.listdir(dirpath):
        if pattern.match(f) and f != new_file_name:
            others.append(f)
    return others


### Nodemap Trie
#
# This is a simple reference implementation to compute and persist a nodemap
# trie. This reference implementation is write only. The python version of this
# is not expected to be actually used, since it wont provide performance
# improvement over existing non-persistent C implementation.
#
# The nodemap is persisted as Trie using 4bits-address/16-entries block. each
# revision can be adressed using its node shortest prefix.
#
# The trie is stored as a sequence of block. Each block contains 16 entries
# (signed 64bit integer, big endian). Each entry can be one of the following:
#
#  * value >= 0 -> index of sub-block
#  * value == -1 -> no value
#  * value < -1 -> encoded revision: rev = -(value+2)
#
# See REV_OFFSET and _transform_rev below.
#
# The implementation focus on simplicity, not on performance. A Rust
# implementation should provide a efficient version of the same binary
# persistence. This reference python implementation is never meant to be
# extensively use in production.


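[Editor's example, not part of the changeset] A worked example of the entry encoding described in the comment above, restating the constants this module defines below (NO_ENTRY, REV_OFFSET, S_BLOCK) so it runs on its own:

    import struct

    S_BLOCK = struct.Struct(">" + ("l" * 16))
    NO_ENTRY = -1
    REV_OFFSET = 2

    def _transform_rev(rev):
        # involution: the same function encodes a revision and decodes it again
        return -(rev + REV_OFFSET)

    assert S_BLOCK.size == 64                    # one packed block of 16 entries
    assert _transform_rev(0) == -2               # rev 0 cannot be 0 (block index) or -1 (NO_ENTRY)
    assert _transform_rev(_transform_rev(5)) == 5

    entries = [NO_ENTRY] * 16
    entries[0xA] = 3                             # value >= 0: points at sub-block #3
    entries[0xF] = _transform_rev(7)             # value < -1: stores revision 7
    assert len(S_BLOCK.pack(*entries)) == S_BLOCK.size
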
def persistent_data(index):
    """return the persistent binary form for a nodemap for a given index"""
    trie = _build_trie(index)
    return _persist_trie(trie)


def update_persistent_data(index, root, max_idx, last_rev):
    """return the incremental update for persistent nodemap from a given index"""
    changed_block, trie = _update_trie(index, root, last_rev)
    return (
        changed_block * S_BLOCK.size,
        _persist_trie(trie, existing_idx=max_idx),
    )


S_BLOCK = struct.Struct(">" + ("l" * 16))

NO_ENTRY = -1
# rev 0 need to be -2 because 0 is used by block, -1 is a special value.
REV_OFFSET = 2


def _transform_rev(rev):
    """Return the number used to represent the rev in the tree.

    (or retrieve a rev number from such representation)

    Note that this is an involution, a function equal to its inverse (i.e.
    which gives the identity when applied to itself).
    """
    return -(rev + REV_OFFSET)


def _to_int(hex_digit):
    """turn an hexadecimal digit into a proper integer"""
    return int(hex_digit, 16)


class Block(dict):
    """represent a block of the Trie

    contains up to 16 entry indexed from 0 to 15"""

    def __init__(self):
        super(Block, self).__init__()
        # If this block exist on disk, here is its ID
        self.ondisk_id = None

    def __iter__(self):
        return iter(self.get(i) for i in range(16))


def _build_trie(index):
    """build a nodemap trie

    The nodemap stores revision number for each unique prefix.

    Each block is a dictionary with keys in `[0, 15]`. Values are either
    another block or a revision number.
    """
    root = Block()
    for rev in range(len(index)):
        current_hex = hex(index[rev][7])
        _insert_into_block(index, 0, root, rev, current_hex)
    return root


def _update_trie(index, root, last_rev):
    """consume"""
    changed = 0
    for rev in range(last_rev + 1, len(index)):
        current_hex = hex(index[rev][7])
        changed += _insert_into_block(index, 0, root, rev, current_hex)
    return changed, root


def _insert_into_block(index, level, block, current_rev, current_hex):
    """insert a new revision in a block

    index: the index we are adding revision for
    level: the depth of the current block in the trie
    block: the block currently being considered
    current_rev: the revision number we are adding
    current_hex: the hexadecimal representation of the of that revision
    """
    changed = 1
    if block.ondisk_id is not None:
        block.ondisk_id = None
    hex_digit = _to_int(current_hex[level : level + 1])
    entry = block.get(hex_digit)
    if entry is None:
        # no entry, simply store the revision number
        block[hex_digit] = current_rev
    elif isinstance(entry, dict):
        # need to recurse to an underlying block
        changed += _insert_into_block(
            index, level + 1, entry, current_rev, current_hex
        )
    else:
        # collision with a previously unique prefix, inserting new
        # vertices to fit both entry.
        other_hex = hex(index[entry][7])
        other_rev = entry
        new = Block()
        block[hex_digit] = new
        _insert_into_block(index, level + 1, new, other_rev, other_hex)
        _insert_into_block(index, level + 1, new, current_rev, current_hex)
    return changed


def _persist_trie(root, existing_idx=None):
    """turn a nodemap trie into persistent binary data

    See `_build_trie` for nodemap trie structure"""
    block_map = {}
    if existing_idx is not None:
        base_idx = existing_idx + 1
    else:
        base_idx = 0
    chunks = []
    for tn in _walk_trie(root):
        if tn.ondisk_id is not None:
            block_map[id(tn)] = tn.ondisk_id
        else:
            block_map[id(tn)] = len(chunks) + base_idx
            chunks.append(_persist_block(tn, block_map))
    return b''.join(chunks)


def _walk_trie(block):
    """yield all the block in a trie

    Children blocks are always yield before their parent block.
    """
    for (__, item) in sorted(block.items()):
        if isinstance(item, dict):
            for sub_block in _walk_trie(item):
                yield sub_block
    yield block


def _persist_block(block_node, block_map):
    """produce persistent binary data for a single block

    Children block are assumed to be already persisted and present in
    block_map.
    """
    data = tuple(_to_value(v, block_map) for v in block_node)
    return S_BLOCK.pack(*data)


def _to_value(item, block_map):
    """persist any value as an integer"""
    if item is None:
        return NO_ENTRY
    elif isinstance(item, dict):
        return block_map[id(item)]
    else:
        return _transform_rev(item)


def parse_data(data):
    """parse parse nodemap data into a nodemap Trie"""
    if (len(data) % S_BLOCK.size) != 0:
        msg = "nodemap data size is not a multiple of block size (%d): %d"
        raise error.Abort(msg % (S_BLOCK.size, len(data)))
    if not data:
        return Block(), None
    block_map = {}
    new_blocks = []
    for i in range(0, len(data), S_BLOCK.size):
        block = Block()
        block.ondisk_id = len(block_map)
        block_map[block.ondisk_id] = block
        block_data = data[i : i + S_BLOCK.size]
        values = S_BLOCK.unpack(block_data)
        new_blocks.append((block, values))
    for b, values in new_blocks:
        for idx, v in enumerate(values):
            if v == NO_ENTRY:
                continue
            elif v >= 0:
                b[idx] = block_map[v]
            else:
                b[idx] = _transform_rev(v)
    return block, i // S_BLOCK.size


# debug utility


def check_data(ui, index, data):
    """verify that the provided nodemap data are valid for the given idex"""
    ret = 0
    ui.status((b"revision in index: %d\n") % len(index))
    root, __ = parse_data(data)
    all_revs = set(_all_revisions(root))
    ui.status((b"revision in nodemap: %d\n") % len(all_revs))
    for r in range(len(index)):
        if r not in all_revs:
            msg = b" revision missing from nodemap: %d\n" % r
            ui.write_err(msg)
            ret = 1
        else:
            all_revs.remove(r)
            nm_rev = _find_node(root, hex(index[r][7]))
            if nm_rev is None:
                msg = b" revision node does not match any entries: %d\n" % r
                ui.write_err(msg)
                ret = 1
            elif nm_rev != r:
                msg = (
                    b" revision node does not match the expected revision: "
                    b"%d != %d\n" % (r, nm_rev)
                )
                ui.write_err(msg)
                ret = 1

    if all_revs:
        for r in sorted(all_revs):
            msg = b" extra revision in nodemap: %d\n" % r
            ui.write_err(msg)
            ret = 1
    return ret


def _all_revisions(root):
    """return all revisions stored in a Trie"""
    for block in _walk_trie(root):
        for v in block:
            if v is None or isinstance(v, Block):
                continue
            yield v


def _find_node(block, node):
    """find the revision associated with a given node"""
    entry = block.get(_to_int(node[0:1]))
    if isinstance(entry, dict):
        return _find_node(entry, node[1:])
    return entry


def get_nodemap_file(opener, indexfile):
    if indexfile.endswith(b'.a'):
        pending_path = indexfile[:-4] + b".n.a"
        if opener.exists(pending_path):
            return pending_path
        else:
            return indexfile[:-4] + b".n"
    else:
        return indexfile[:-2] + b".n"
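
[Editor's example, not part of the changeset] How the helpers above name things on disk for the changelog: get_nodemap_file() maps `00changelog.i` to the docket `00changelog.n`, and _rawdata_filepath() appends the docket uid to form the raw-data file. A small sketch; the uid and the fake revlog object are made up for illustration:

    from mercurial.revlogutils import nodemap as nodemaputil

    # the opener is only consulted for pending ('.a') index files
    assert nodemaputil.get_nodemap_file(None, b'00changelog.i') == b'00changelog.n'

    class _fakerevlog(object):          # only `nodemap_file` is read here
        nodemap_file = b'00changelog.n'

    docket = nodemaputil.NodeMapDocket(uid=b'c93b4d10aabbccdd')
    assert (
        nodemaputil._rawdata_filepath(_fakerevlog(), docket)
        == b'00changelog-c93b4d10aabbccdd.nd'
    )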
@@ -1,539 +1,556 @@
# upgrade.py - functions for in place upgrade of Mercurial repository
#
# Copyright (c) 2016-present, Gregory Szorc
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import stat

from ..i18n import _
from ..pycompat import getattr
from .. import (
    changelog,
    error,
    filelog,
    manifest,
    metadata,
    pycompat,
    requirements,
    revlog,
    scmutil,
    util,
    vfs as vfsmod,
)
+from ..revlogutils import nodemap


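[Editor's note, not part of the changeset] The added import is the hook-up point of this commit: after cloning a revlog into the upgraded store, the engine can (re)generate its persistent nodemap through the `force=True` path added in nodemap.py. The actual call site lies outside the hunk shown here; a hedged sketch of what such wiring looks like, where the helper name and its boolean parameter are hypothetical:

    def _maybe_persist_nodemap(tr, newrl, wants_persistent_nodemap):  # hypothetical helper
        if wants_persistent_nodemap:
            # force=True lets persist_nodemap derive the `.n`/`-<uid>.nd`
            # file names even though the cloned revlog was opened without
            # the persistent-nodemap feature enabled.
            nodemap.persist_nodemap(tr, newrl, force=True)
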
def _revlogfrompath(repo, path):
    """Obtain a revlog from a repo path.

    An instance of the appropriate class is returned.
    """
    if path == b'00changelog.i':
        return changelog.changelog(repo.svfs)
    elif path.endswith(b'00manifest.i'):
        mandir = path[: -len(b'00manifest.i')]
        return manifest.manifestrevlog(repo.svfs, tree=mandir)
    else:
        # reverse of "/".join(("data", path + ".i"))
        return filelog.filelog(repo.svfs, path[5:-2])


def _copyrevlog(tr, destrepo, oldrl, unencodedname):
    """copy all relevant files for `oldrl` into `destrepo` store

    Files are copied "as is" without any transformation. The copy is performed
    without extra checks. Callers are responsible for making sure the copied
    content is compatible with format of the destination repository.
    """
    oldrl = getattr(oldrl, '_revlog', oldrl)
    newrl = _revlogfrompath(destrepo, unencodedname)
    newrl = getattr(newrl, '_revlog', newrl)

    oldvfs = oldrl.opener
    newvfs = newrl.opener
    oldindex = oldvfs.join(oldrl.indexfile)
    newindex = newvfs.join(newrl.indexfile)
    olddata = oldvfs.join(oldrl.datafile)
    newdata = newvfs.join(newrl.datafile)

    with newvfs(newrl.indexfile, b'w'):
        pass  # create all the directories

    util.copyfile(oldindex, newindex)
    copydata = oldrl.opener.exists(oldrl.datafile)
    if copydata:
        util.copyfile(olddata, newdata)

    if not (
        unencodedname.endswith(b'00changelog.i')
        or unencodedname.endswith(b'00manifest.i')
    ):
        destrepo.svfs.fncache.add(unencodedname)
        if copydata:
            destrepo.svfs.fncache.add(unencodedname[:-2] + b'.d')


UPGRADE_CHANGELOG = b"changelog"
UPGRADE_MANIFEST = b"manifest"
UPGRADE_FILELOGS = b"all-filelogs"

UPGRADE_ALL_REVLOGS = frozenset(
    [UPGRADE_CHANGELOG, UPGRADE_MANIFEST, UPGRADE_FILELOGS]
)


def getsidedatacompanion(srcrepo, dstrepo):
    sidedatacompanion = None
    removedreqs = srcrepo.requirements - dstrepo.requirements
    addedreqs = dstrepo.requirements - srcrepo.requirements
    if requirements.SIDEDATA_REQUIREMENT in removedreqs:

        def sidedatacompanion(rl, rev):
            rl = getattr(rl, '_revlog', rl)
            if rl.flags(rev) & revlog.REVIDX_SIDEDATA:
                return True, (), {}, 0, 0
            return False, (), {}, 0, 0

    elif requirements.COPIESSDC_REQUIREMENT in addedreqs:
        sidedatacompanion = metadata.getsidedataadder(srcrepo, dstrepo)
    elif requirements.COPIESSDC_REQUIREMENT in removedreqs:
        sidedatacompanion = metadata.getsidedataremover(srcrepo, dstrepo)
    return sidedatacompanion


def matchrevlog(revlogfilter, entry):
    """check if a revlog is selected for cloning.

    In other words, are there any updates which need to be done on revlog
    or it can be blindly copied.

    The store entry is checked against the passed filter"""
    if entry.endswith(b'00changelog.i'):
        return UPGRADE_CHANGELOG in revlogfilter
    elif entry.endswith(b'00manifest.i'):
        return UPGRADE_MANIFEST in revlogfilter
    return UPGRADE_FILELOGS in revlogfilter


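[Editor's example, not part of the changeset] A quick illustration of the selection logic matchrevlog() documents above: with the full filter every entry is re-cloned, while a narrower filter sends the changelog down the blind-copy path in _perform_clone(). The import path of this module is assumed:

    from mercurial.upgrade_utils import engine

    assert engine.matchrevlog(engine.UPGRADE_ALL_REVLOGS, b'00changelog.i')
    assert engine.matchrevlog(engine.UPGRADE_ALL_REVLOGS, b'data/foo.py.i')

    only_filelogs = {engine.UPGRADE_FILELOGS}
    assert not engine.matchrevlog(only_filelogs, b'00changelog.i')
    assert engine.matchrevlog(only_filelogs, b'data/foo.py.i')
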
def _perform_clone(
    ui,
    dstrepo,
    tr,
    old_revlog,
    unencoded,
    upgrade_op,
    sidedatacompanion,
    oncopiedrevision,
):
    """ returns the new revlog object created"""
    newrl = None
    if matchrevlog(upgrade_op.revlogs_to_process, unencoded):
        ui.note(
            _(b'cloning %d revisions from %s\n') % (len(old_revlog), unencoded)
        )
        newrl = _revlogfrompath(dstrepo, unencoded)
        old_revlog.clone(
            tr,
            newrl,
            addrevisioncb=oncopiedrevision,
            deltareuse=upgrade_op.delta_reuse_mode,
            forcedeltabothparents=upgrade_op.force_re_delta_both_parents,
            sidedatacompanion=sidedatacompanion,
        )
    else:
        msg = _(b'blindly copying %s containing %i revisions\n')
        ui.note(msg % (unencoded, len(old_revlog)))
        _copyrevlog(tr, dstrepo, old_revlog, unencoded)

        newrl = _revlogfrompath(dstrepo, unencoded)
    return newrl


def _clonerevlogs(
    ui,
    srcrepo,
    dstrepo,
    tr,
    upgrade_op,
):
    """Copy revlogs between 2 repos."""
    revcount = 0
    srcsize = 0
    srcrawsize = 0
    dstsize = 0
    fcount = 0
    frevcount = 0
    fsrcsize = 0
    frawsize = 0
    fdstsize = 0
    mcount = 0
    mrevcount = 0
    msrcsize = 0
    mrawsize = 0
    mdstsize = 0
    crevcount = 0
    csrcsize = 0
    crawsize = 0
    cdstsize = 0

    alldatafiles = list(srcrepo.store.walk())
    # mapping of data files which needs to be cloned
    # key is unencoded filename
    # value is revlog_object_from_srcrepo
    manifests = {}
    changelogs = {}
    filelogs = {}

    # Perform a pass to collect metadata. This validates we can open all
    # source files and allows a unified progress bar to be displayed.
    for unencoded, encoded, size in alldatafiles:
        if not unencoded.endswith(b'.i'):
            continue

        rl = _revlogfrompath(srcrepo, unencoded)

        info = rl.storageinfo(
            exclusivefiles=True,
            revisionscount=True,
            trackedsize=True,
            storedsize=True,
        )

        revcount += info[b'revisionscount'] or 0
        datasize = info[b'storedsize'] or 0
        rawsize = info[b'trackedsize'] or 0

        srcsize += datasize
        srcrawsize += rawsize

        # This is for the separate progress bars.
        if isinstance(rl, changelog.changelog):
            changelogs[unencoded] = rl
            crevcount += len(rl)
            csrcsize += datasize
            crawsize += rawsize
        elif isinstance(rl, manifest.manifestrevlog):
            manifests[unencoded] = rl
            mcount += 1
            mrevcount += len(rl)
            msrcsize += datasize
            mrawsize += rawsize
        elif isinstance(rl, filelog.filelog):
            filelogs[unencoded] = rl
            fcount += 1
            frevcount += len(rl)
            fsrcsize += datasize
            frawsize += rawsize
        else:
            error.ProgrammingError(b'unknown revlog type')

    if not revcount:
        return

    ui.status(
        _(
            b'migrating %d total revisions (%d in filelogs, %d in manifests, '
            b'%d in changelog)\n'
        )
        % (revcount, frevcount, mrevcount, crevcount)
    )
    ui.status(
        _(b'migrating %s in store; %s tracked data\n')
        % ((util.bytecount(srcsize), util.bytecount(srcrawsize)))
    )

    # Used to keep track of progress.
    progress = None

    def oncopiedrevision(rl, rev, node):
        progress.increment()

    sidedatacompanion = getsidedatacompanion(srcrepo, dstrepo)

    # Migrating filelogs
    ui.status(
        _(
            b'migrating %d filelogs containing %d revisions '
            b'(%s in store; %s tracked data)\n'
        )
        % (
            fcount,
            frevcount,
            util.bytecount(fsrcsize),
            util.bytecount(frawsize),
        )
    )
    progress = srcrepo.ui.makeprogress(_(b'file revisions'), total=frevcount)
    for unencoded, oldrl in sorted(filelogs.items()):
        newrl = _perform_clone(
            ui,
            dstrepo,
            tr,
            oldrl,
            unencoded,
            upgrade_op,
            sidedatacompanion,
            oncopiedrevision,
        )
        info = newrl.storageinfo(storedsize=True)
        fdstsize += info[b'storedsize'] or 0
    ui.status(
        _(
            b'finished migrating %d filelog revisions across %d '
            b'filelogs; change in size: %s\n'
        )
        % (frevcount, fcount, util.bytecount(fdstsize - fsrcsize))
    )

    # Migrating manifests
    ui.status(
        _(
            b'migrating %d manifests containing %d revisions '
            b'(%s in store; %s tracked data)\n'
        )
        % (
            mcount,
            mrevcount,
            util.bytecount(msrcsize),
            util.bytecount(mrawsize),
        )
    )
    if progress:
        progress.complete()
    progress = srcrepo.ui.makeprogress(
        _(b'manifest revisions'), total=mrevcount
    )
    for unencoded, oldrl in sorted(manifests.items()):
        newrl = _perform_clone(
            ui,
            dstrepo,
            tr,
            oldrl,
            unencoded,
            upgrade_op,
            sidedatacompanion,
            oncopiedrevision,
        )
        info = newrl.storageinfo(storedsize=True)
        mdstsize += info[b'storedsize'] or 0
    ui.status(
        _(
            b'finished migrating %d manifest revisions across %d '
            b'manifests; change in size: %s\n'
        )
        % (mrevcount, mcount, util.bytecount(mdstsize - msrcsize))
    )

    # Migrating changelog
    ui.status(
        _(
            b'migrating changelog containing %d revisions '
            b'(%s in store; %s tracked data)\n'
        )
        % (
            crevcount,
            util.bytecount(csrcsize),
            util.bytecount(crawsize),
        )
    )
    if progress:
        progress.complete()
    progress = srcrepo.ui.makeprogress(
        _(b'changelog revisions'), total=crevcount
    )
    for unencoded, oldrl in sorted(changelogs.items()):
        newrl = _perform_clone(
            ui,
            dstrepo,
            tr,
            oldrl,
            unencoded,
            upgrade_op,
            sidedatacompanion,
            oncopiedrevision,
        )
        info = newrl.storageinfo(storedsize=True)
        cdstsize += info[b'storedsize'] or 0
    progress.complete()
    ui.status(
        _(
            b'finished migrating %d changelog revisions; change in size: '
            b'%s\n'
        )
        % (crevcount, util.bytecount(cdstsize - csrcsize))
    )

    dstsize = fdstsize + mdstsize + cdstsize
    ui.status(
        _(
            b'finished migrating %d total revisions; total change in store '
            b'size: %s\n'
        )
        % (revcount, util.bytecount(dstsize - srcsize))
    )


def _files_to_copy_post_revlog_clone(srcrepo):
380 """yields files which should be copied to destination after revlogs
381 """yields files which should be copied to destination after revlogs
381 are cloned"""
382 are cloned"""
382 for path, kind, st in sorted(srcrepo.store.vfs.readdir(b'', stat=True)):
383 for path, kind, st in sorted(srcrepo.store.vfs.readdir(b'', stat=True)):
383 # don't copy revlogs as they are already cloned
384 # don't copy revlogs as they are already cloned
384 if path.endswith((b'.i', b'.d', b'.n', b'.nd')):
385 if path.endswith((b'.i', b'.d', b'.n', b'.nd')):
385 continue
386 continue
386 # Skip transaction related files.
387 # Skip transaction related files.
387 if path.startswith(b'undo'):
388 if path.startswith(b'undo'):
388 continue
389 continue
389 # Only copy regular files.
390 # Only copy regular files.
390 if kind != stat.S_IFREG:
391 if kind != stat.S_IFREG:
391 continue
392 continue
392 # Skip other skipped files.
393 # Skip other skipped files.
393 if path in (b'lock', b'fncache'):
394 if path in (b'lock', b'fncache'):
394 continue
395 continue
395 # TODO: should we skip cache too?
396 # TODO: should we skip cache too?
396
397
397 yield path
398 yield path
398
399
399
400
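
For reference, a standalone restatement of the selection rule above as an illustrative sketch; the should_copy name is made up, but the skip rules are exactly the ones listed in the generator:

import stat

def should_copy(path, kind):
    # Revlog files (.i/.d/.n/.nd) are skipped: they were already cloned.
    if path.endswith((b'.i', b'.d', b'.n', b'.nd')):
        return False
    # Transaction leftovers are skipped.
    if path.startswith(b'undo'):
        return False
    # Only regular files are copied.
    if kind != stat.S_IFREG:
        return False
    # 'lock' and 'fncache' are handled separately.
    if path in (b'lock', b'fncache'):
        return False
    return True

print(should_copy(b'phaseroots', stat.S_IFREG))      # True -> copied
print(should_copy(b'00changelog.i', stat.S_IFREG))   # False -> skipped
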
400 def _replacestores(currentrepo, upgradedrepo, backupvfs, upgrade_op):
401 def _replacestores(currentrepo, upgradedrepo, backupvfs, upgrade_op):
401 """Replace the stores after current repository is upgraded
402 """Replace the stores after current repository is upgraded
402
403
403 Creates a backup of current repository store at backup path
404 Creates a backup of current repository store at backup path
404 Replaces the store files in the current repo with those from the upgraded one
405 Replaces the store files in the current repo with those from the upgraded one
405
406
406 Arguments:
407 Arguments:
407 currentrepo: repo object of current repository
408 currentrepo: repo object of current repository
408 upgradedrepo: repo object of the upgraded data
409 upgradedrepo: repo object of the upgraded data
409 backupvfs: vfs object for the backup path
410 backupvfs: vfs object for the backup path
410 upgrade_op: upgrade operation object
411 upgrade_op: upgrade operation object
411 to be used to decide what is upgraded
412 to be used to decide what is upgraded
412 """
413 """
413 # TODO: don't blindly rename everything in store
414 # TODO: don't blindly rename everything in store
414 # There can be upgrades where store is not touched at all
415 # There can be upgrades where store is not touched at all
415 if upgrade_op.backup_store:
416 if upgrade_op.backup_store:
416 util.rename(currentrepo.spath, backupvfs.join(b'store'))
417 util.rename(currentrepo.spath, backupvfs.join(b'store'))
417 else:
418 else:
418 currentrepo.vfs.rmtree(b'store', forcibly=True)
419 currentrepo.vfs.rmtree(b'store', forcibly=True)
419 util.rename(upgradedrepo.spath, currentrepo.spath)
420 util.rename(upgradedrepo.spath, currentrepo.spath)
420
421
421
422
422 def finishdatamigration(ui, srcrepo, dstrepo, requirements):
423 def finishdatamigration(ui, srcrepo, dstrepo, requirements):
423 """Hook point for extensions to perform additional actions during upgrade.
424 """Hook point for extensions to perform additional actions during upgrade.
424
425
425 This function is called after revlogs and store files have been copied but
426 This function is called after revlogs and store files have been copied but
426 before the new store is swapped into the original location.
427 before the new store is swapped into the original location.
427 """
428 """
428
429
429
430
430 def upgrade(ui, srcrepo, dstrepo, upgrade_op):
431 def upgrade(ui, srcrepo, dstrepo, upgrade_op):
431 """Do the low-level work of upgrading a repository.
432 """Do the low-level work of upgrading a repository.
432
433
433 The upgrade is effectively performed as a copy between a source
434 The upgrade is effectively performed as a copy between a source
434 repository and a temporary destination repository.
435 repository and a temporary destination repository.
435
436
436 The source repository is unmodified for as long as possible so the
437 The source repository is unmodified for as long as possible so the
437 upgrade can abort at any time without causing loss of service for
438 upgrade can abort at any time without causing loss of service for
438 readers and without corrupting the source repository.
439 readers and without corrupting the source repository.
439 """
440 """
440 assert srcrepo.currentwlock()
441 assert srcrepo.currentwlock()
441 assert dstrepo.currentwlock()
442 assert dstrepo.currentwlock()
442 backuppath = None
443 backuppath = None
443 backupvfs = None
444 backupvfs = None
444
445
445 ui.status(
446 ui.status(
446 _(
447 _(
447 b'(it is safe to interrupt this process any time before '
448 b'(it is safe to interrupt this process any time before '
448 b'data migration completes)\n'
449 b'data migration completes)\n'
449 )
450 )
450 )
451 )
451
452
452 if upgrade_op.requirements_only:
453 if upgrade_op.requirements_only:
453 ui.status(_(b'upgrading repository requirements\n'))
454 ui.status(_(b'upgrading repository requirements\n'))
454 scmutil.writereporequirements(srcrepo, upgrade_op.new_requirements)
455 scmutil.writereporequirements(srcrepo, upgrade_op.new_requirements)
456 # if there is only one action and that is persistent nodemap upgrade
457 # directly write the nodemap file and update requirements instead of going
458 # through the whole cloning process
459 elif (
460 len(upgrade_op.upgrade_actions) == 1
461 and b'persistent-nodemap' in upgrade_op._upgrade_actions_names
462 and not upgrade_op.removed_actions
463 ):
464 ui.status(
465 _(b'upgrading repository to use persistent nodemap feature\n')
466 )
467 with srcrepo.transaction(b'upgrade') as tr:
468 unfi = srcrepo.unfiltered()
469 cl = unfi.changelog
470 nodemap.persist_nodemap(tr, cl, force=True)
471 scmutil.writereporequirements(srcrepo, upgrade_op.new_requirements)
455 else:
472 else:
456 with dstrepo.transaction(b'upgrade') as tr:
473 with dstrepo.transaction(b'upgrade') as tr:
457 _clonerevlogs(
474 _clonerevlogs(
458 ui,
475 ui,
459 srcrepo,
476 srcrepo,
460 dstrepo,
477 dstrepo,
461 tr,
478 tr,
462 upgrade_op,
479 upgrade_op,
463 )
480 )
464
481
465 # Now copy other files in the store directory.
482 # Now copy other files in the store directory.
466 for p in _files_to_copy_post_revlog_clone(srcrepo):
483 for p in _files_to_copy_post_revlog_clone(srcrepo):
467 srcrepo.ui.status(_(b'copying %s\n') % p)
484 srcrepo.ui.status(_(b'copying %s\n') % p)
468 src = srcrepo.store.rawvfs.join(p)
485 src = srcrepo.store.rawvfs.join(p)
469 dst = dstrepo.store.rawvfs.join(p)
486 dst = dstrepo.store.rawvfs.join(p)
470 util.copyfile(src, dst, copystat=True)
487 util.copyfile(src, dst, copystat=True)
471
488
472 finishdatamigration(ui, srcrepo, dstrepo, requirements)
489 finishdatamigration(ui, srcrepo, dstrepo, requirements)
473
490
474 ui.status(_(b'data fully upgraded in a temporary repository\n'))
491 ui.status(_(b'data fully upgraded in a temporary repository\n'))
475
492
476 if upgrade_op.backup_store:
493 if upgrade_op.backup_store:
477 backuppath = pycompat.mkdtemp(
494 backuppath = pycompat.mkdtemp(
478 prefix=b'upgradebackup.', dir=srcrepo.path
495 prefix=b'upgradebackup.', dir=srcrepo.path
479 )
496 )
480 backupvfs = vfsmod.vfs(backuppath)
497 backupvfs = vfsmod.vfs(backuppath)
481
498
482 # Make a backup of requires file first, as it is the first to be modified.
499 # Make a backup of requires file first, as it is the first to be modified.
483 util.copyfile(
500 util.copyfile(
484 srcrepo.vfs.join(b'requires'), backupvfs.join(b'requires')
501 srcrepo.vfs.join(b'requires'), backupvfs.join(b'requires')
485 )
502 )
486
503
487 # We install an arbitrary requirement that clients must not support
504 # We install an arbitrary requirement that clients must not support
488 # as a mechanism to lock out new clients during the data swap. This is
505 # as a mechanism to lock out new clients during the data swap. This is
489 # better than allowing a client to continue while the repository is in
506 # better than allowing a client to continue while the repository is in
490 # an inconsistent state.
507 # an inconsistent state.
491 ui.status(
508 ui.status(
492 _(
509 _(
493 b'marking source repository as being upgraded; clients will be '
510 b'marking source repository as being upgraded; clients will be '
494 b'unable to read from repository\n'
511 b'unable to read from repository\n'
495 )
512 )
496 )
513 )
497 scmutil.writereporequirements(
514 scmutil.writereporequirements(
498 srcrepo, srcrepo.requirements | {b'upgradeinprogress'}
515 srcrepo, srcrepo.requirements | {b'upgradeinprogress'}
499 )
516 )
500
517
501 ui.status(_(b'starting in-place swap of repository data\n'))
518 ui.status(_(b'starting in-place swap of repository data\n'))
502 if upgrade_op.backup_store:
519 if upgrade_op.backup_store:
503 ui.status(
520 ui.status(
504 _(b'replaced files will be backed up at %s\n') % backuppath
521 _(b'replaced files will be backed up at %s\n') % backuppath
505 )
522 )
506
523
507 # Now swap in the new store directory. Doing it as a rename should make
524 # Now swap in the new store directory. Doing it as a rename should make
508 # the operation nearly instantaneous and atomic (at least in well-behaved
525 # the operation nearly instantaneous and atomic (at least in well-behaved
509 # environments).
526 # environments).
510 ui.status(_(b'replacing store...\n'))
527 ui.status(_(b'replacing store...\n'))
511 tstart = util.timer()
528 tstart = util.timer()
512 _replacestores(srcrepo, dstrepo, backupvfs, upgrade_op)
529 _replacestores(srcrepo, dstrepo, backupvfs, upgrade_op)
513 elapsed = util.timer() - tstart
530 elapsed = util.timer() - tstart
514 ui.status(
531 ui.status(
515 _(
532 _(
516 b'store replacement complete; repository was inconsistent for '
533 b'store replacement complete; repository was inconsistent for '
517 b'%0.1fs\n'
534 b'%0.1fs\n'
518 )
535 )
519 % elapsed
536 % elapsed
520 )
537 )
521
538
522 # We first write the requirements file. Any new requirements will lock
539 # We first write the requirements file. Any new requirements will lock
523 # out legacy clients.
540 # out legacy clients.
524 ui.status(
541 ui.status(
525 _(
542 _(
526 b'finalizing requirements file and making repository readable '
543 b'finalizing requirements file and making repository readable '
527 b'again\n'
544 b'again\n'
528 )
545 )
529 )
546 )
530 scmutil.writereporequirements(srcrepo, upgrade_op.new_requirements)
547 scmutil.writereporequirements(srcrepo, upgrade_op.new_requirements)
531
548
532 if upgrade_op.backup_store:
549 if upgrade_op.backup_store:
533 # The lock file from the old store won't be removed because nothing has a
550 # The lock file from the old store won't be removed because nothing has a
534 # reference to its new location. So clean it up manually. Alternatively, we
551 # reference to its new location. So clean it up manually. Alternatively, we
535 # could update srcrepo.svfs and other variables to point to the new
552 # could update srcrepo.svfs and other variables to point to the new
536 # location. This is simpler.
553 # location. This is simpler.
537 backupvfs.unlink(b'store/lock')
554 backupvfs.unlink(b'store/lock')
538
555
539 return backuppath
556 return backuppath
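
The persistent-nodemap-only branch added in upgrade() above boils down to a single guard: when that is the sole requested action, the nodemap docket is written in place instead of cloning every revlog. A minimal sketch of that guard, reusing the attribute names that appear in the diff; the helper name and the _FakeOp object are made up for illustration:

def nodemap_only_upgrade(upgrade_op):
    # True when the only requested action is 'persistent-nodemap' and
    # nothing is being removed; the docket can then be written directly.
    return (
        len(upgrade_op.upgrade_actions) == 1
        and b'persistent-nodemap' in upgrade_op._upgrade_actions_names
        and not upgrade_op.removed_actions
    )

class _FakeOp:
    upgrade_actions = [None]
    _upgrade_actions_names = {b'persistent-nodemap'}
    removed_actions = set()

assert nodemap_only_upgrade(_FakeOp())
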
@@ -1,768 +1,751 b''
1 ===================================
1 ===================================
2 Test the persistent on-disk nodemap
2 Test the persistent on-disk nodemap
3 ===================================
3 ===================================
4
4
5 $ cat << EOF >> $HGRCPATH
5 $ cat << EOF >> $HGRCPATH
6 > [format]
6 > [format]
7 > use-persistent-nodemap=yes
7 > use-persistent-nodemap=yes
8 > [devel]
8 > [devel]
9 > persistent-nodemap=yes
9 > persistent-nodemap=yes
10 > EOF
10 > EOF
11
11
12 $ hg init test-repo --config storage.revlog.persistent-nodemap.slow-path=allow
12 $ hg init test-repo --config storage.revlog.persistent-nodemap.slow-path=allow
13 $ cd test-repo
13 $ cd test-repo
14
14
15 Check handling of the default slow-path value
15 Check handling of the default slow-path value
16
16
17 #if no-pure no-rust
17 #if no-pure no-rust
18
18
19 $ hg id
19 $ hg id
20 abort: accessing `persistent-nodemap` repository without associated fast implementation.
20 abort: accessing `persistent-nodemap` repository without associated fast implementation.
21 (check `hg help config.format.use-persistent-nodemap` for details)
21 (check `hg help config.format.use-persistent-nodemap` for details)
22 [255]
22 [255]
23
23
24 Unlock further checks (we are here to test the feature)
24 Unlock further checks (we are here to test the feature)
25
25
26 $ cat << EOF >> $HGRCPATH
26 $ cat << EOF >> $HGRCPATH
27 > [storage]
27 > [storage]
28 > # to avoid spamming the test
28 > # to avoid spamming the test
29 > revlog.persistent-nodemap.slow-path=allow
29 > revlog.persistent-nodemap.slow-path=allow
30 > EOF
30 > EOF
31
31
32 #endif
32 #endif
33
33
34 #if rust
34 #if rust
35
35
36 Regression test for a previous bug in Rust/C FFI for the `Revlog_CAPI` capsule:
36 Regression test for a previous bug in Rust/C FFI for the `Revlog_CAPI` capsule:
37 in places where `mercurial/cext/revlog.c` function signatures use `Py_ssize_t`
37 in places where `mercurial/cext/revlog.c` function signatures use `Py_ssize_t`
38 (64 bits on Linux x86_64), corresponding declarations in `rust/hg-cpython/src/cindex.rs`
38 (64 bits on Linux x86_64), corresponding declarations in `rust/hg-cpython/src/cindex.rs`
39 incorrectly used `libc::c_int` (32 bits).
39 incorrectly used `libc::c_int` (32 bits).
40 As a result, -1 passed from Rust for the null revision became 4294967295 in C.
40 As a result, -1 passed from Rust for the null revision became 4294967295 in C.
41
41
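
A quick arithmetic illustration of that reinterpretation (not part of the test itself):

# A 64-bit -1 (the null revision) read back through 32 unsigned bits:
assert (-1) & 0xFFFFFFFF == 4294967295
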
42 $ hg log -r 00000000
42 $ hg log -r 00000000
43 changeset: -1:000000000000
43 changeset: -1:000000000000
44 tag: tip
44 tag: tip
45 user:
45 user:
46 date: Thu Jan 01 00:00:00 1970 +0000
46 date: Thu Jan 01 00:00:00 1970 +0000
47
47
48
48
49 #endif
49 #endif
50
50
51
51
52 $ hg debugformat
52 $ hg debugformat
53 format-variant repo
53 format-variant repo
54 fncache: yes
54 fncache: yes
55 dotencode: yes
55 dotencode: yes
56 generaldelta: yes
56 generaldelta: yes
57 share-safe: no
57 share-safe: no
58 sparserevlog: yes
58 sparserevlog: yes
59 sidedata: no
59 sidedata: no
60 persistent-nodemap: yes
60 persistent-nodemap: yes
61 copies-sdc: no
61 copies-sdc: no
62 plain-cl-delta: yes
62 plain-cl-delta: yes
63 compression: zlib
63 compression: zlib
64 compression-level: default
64 compression-level: default
65 $ hg debugbuilddag .+5000 --new-file
65 $ hg debugbuilddag .+5000 --new-file
66
66
67 $ hg debugnodemap --metadata
67 $ hg debugnodemap --metadata
68 uid: ???????????????? (glob)
68 uid: ???????????????? (glob)
69 tip-rev: 5000
69 tip-rev: 5000
70 tip-node: 6b02b8c7b96654c25e86ba69eda198d7e6ad8b3c
70 tip-node: 6b02b8c7b96654c25e86ba69eda198d7e6ad8b3c
71 data-length: 121088
71 data-length: 121088
72 data-unused: 0
72 data-unused: 0
73 data-unused: 0.000%
73 data-unused: 0.000%
74 $ f --size .hg/store/00changelog.n
74 $ f --size .hg/store/00changelog.n
75 .hg/store/00changelog.n: size=70
75 .hg/store/00changelog.n: size=70
76
76
77 Simple lookup works
77 Simple lookup works
78
78
79 $ ANYNODE=`hg log --template '{node|short}\n' --rev tip`
79 $ ANYNODE=`hg log --template '{node|short}\n' --rev tip`
80 $ hg log -r "$ANYNODE" --template '{rev}\n'
80 $ hg log -r "$ANYNODE" --template '{rev}\n'
81 5000
81 5000
82
82
83
83
84 #if rust
84 #if rust
85
85
86 $ f --sha256 .hg/store/00changelog-*.nd
86 $ f --sha256 .hg/store/00changelog-*.nd
87 .hg/store/00changelog-????????????????.nd: sha256=2e029d3200bd1a986b32784fc2ef1a3bd60dc331f025718bcf5ff44d93f026fd (glob)
87 .hg/store/00changelog-????????????????.nd: sha256=2e029d3200bd1a986b32784fc2ef1a3bd60dc331f025718bcf5ff44d93f026fd (glob)
88
88
89 $ f --sha256 .hg/store/00manifest-*.nd
89 $ f --sha256 .hg/store/00manifest-*.nd
90 .hg/store/00manifest-????????????????.nd: sha256=97117b1c064ea2f86664a124589e47db0e254e8d34739b5c5cc5bf31c9da2b51 (glob)
90 .hg/store/00manifest-????????????????.nd: sha256=97117b1c064ea2f86664a124589e47db0e254e8d34739b5c5cc5bf31c9da2b51 (glob)
91 $ hg debugnodemap --dump-new | f --sha256 --size
91 $ hg debugnodemap --dump-new | f --sha256 --size
92 size=121088, sha256=2e029d3200bd1a986b32784fc2ef1a3bd60dc331f025718bcf5ff44d93f026fd
92 size=121088, sha256=2e029d3200bd1a986b32784fc2ef1a3bd60dc331f025718bcf5ff44d93f026fd
93 $ hg debugnodemap --dump-disk | f --sha256 --bytes=256 --hexdump --size
93 $ hg debugnodemap --dump-disk | f --sha256 --bytes=256 --hexdump --size
94 size=121088, sha256=2e029d3200bd1a986b32784fc2ef1a3bd60dc331f025718bcf5ff44d93f026fd
94 size=121088, sha256=2e029d3200bd1a986b32784fc2ef1a3bd60dc331f025718bcf5ff44d93f026fd
95 0000: 00 00 00 91 00 00 00 20 00 00 00 bb 00 00 00 e7 |....... ........|
95 0000: 00 00 00 91 00 00 00 20 00 00 00 bb 00 00 00 e7 |....... ........|
96 0010: 00 00 00 66 00 00 00 a1 00 00 01 13 00 00 01 22 |...f..........."|
96 0010: 00 00 00 66 00 00 00 a1 00 00 01 13 00 00 01 22 |...f..........."|
97 0020: 00 00 00 23 00 00 00 fc 00 00 00 ba 00 00 00 5e |...#...........^|
97 0020: 00 00 00 23 00 00 00 fc 00 00 00 ba 00 00 00 5e |...#...........^|
98 0030: 00 00 00 df 00 00 01 4e 00 00 01 65 00 00 00 ab |.......N...e....|
98 0030: 00 00 00 df 00 00 01 4e 00 00 01 65 00 00 00 ab |.......N...e....|
99 0040: 00 00 00 a9 00 00 00 95 00 00 00 73 00 00 00 38 |...........s...8|
99 0040: 00 00 00 a9 00 00 00 95 00 00 00 73 00 00 00 38 |...........s...8|
100 0050: 00 00 00 cc 00 00 00 92 00 00 00 90 00 00 00 69 |...............i|
100 0050: 00 00 00 cc 00 00 00 92 00 00 00 90 00 00 00 69 |...............i|
101 0060: 00 00 00 ec 00 00 00 8d 00 00 01 4f 00 00 00 12 |...........O....|
101 0060: 00 00 00 ec 00 00 00 8d 00 00 01 4f 00 00 00 12 |...........O....|
102 0070: 00 00 02 0c 00 00 00 77 00 00 00 9c 00 00 00 8f |.......w........|
102 0070: 00 00 02 0c 00 00 00 77 00 00 00 9c 00 00 00 8f |.......w........|
103 0080: 00 00 00 d5 00 00 00 6b 00 00 00 48 00 00 00 b3 |.......k...H....|
103 0080: 00 00 00 d5 00 00 00 6b 00 00 00 48 00 00 00 b3 |.......k...H....|
104 0090: 00 00 00 e5 00 00 00 b5 00 00 00 8e 00 00 00 ad |................|
104 0090: 00 00 00 e5 00 00 00 b5 00 00 00 8e 00 00 00 ad |................|
105 00a0: 00 00 00 7b 00 00 00 7c 00 00 00 0b 00 00 00 2b |...{...|.......+|
105 00a0: 00 00 00 7b 00 00 00 7c 00 00 00 0b 00 00 00 2b |...{...|.......+|
106 00b0: 00 00 00 c6 00 00 00 1e 00 00 01 08 00 00 00 11 |................|
106 00b0: 00 00 00 c6 00 00 00 1e 00 00 01 08 00 00 00 11 |................|
107 00c0: 00 00 01 30 00 00 00 26 00 00 01 9c 00 00 00 35 |...0...&.......5|
107 00c0: 00 00 01 30 00 00 00 26 00 00 01 9c 00 00 00 35 |...0...&.......5|
108 00d0: 00 00 00 b8 00 00 01 31 00 00 00 2c 00 00 00 55 |.......1...,...U|
108 00d0: 00 00 00 b8 00 00 01 31 00 00 00 2c 00 00 00 55 |.......1...,...U|
109 00e0: 00 00 00 8a 00 00 00 9a 00 00 00 0c 00 00 01 1e |................|
109 00e0: 00 00 00 8a 00 00 00 9a 00 00 00 0c 00 00 01 1e |................|
110 00f0: 00 00 00 a4 00 00 00 83 00 00 00 c9 00 00 00 8c |................|
110 00f0: 00 00 00 a4 00 00 00 83 00 00 00 c9 00 00 00 8c |................|
111
111
112
112
113 #else
113 #else
114
114
115 $ f --sha256 .hg/store/00changelog-*.nd
115 $ f --sha256 .hg/store/00changelog-*.nd
116 .hg/store/00changelog-????????????????.nd: sha256=f544f5462ff46097432caf6d764091f6d8c46d6121be315ead8576d548c9dd79 (glob)
116 .hg/store/00changelog-????????????????.nd: sha256=f544f5462ff46097432caf6d764091f6d8c46d6121be315ead8576d548c9dd79 (glob)
117 $ hg debugnodemap --dump-new | f --sha256 --size
117 $ hg debugnodemap --dump-new | f --sha256 --size
118 size=121088, sha256=f544f5462ff46097432caf6d764091f6d8c46d6121be315ead8576d548c9dd79
118 size=121088, sha256=f544f5462ff46097432caf6d764091f6d8c46d6121be315ead8576d548c9dd79
119 $ hg debugnodemap --dump-disk | f --sha256 --bytes=256 --hexdump --size
119 $ hg debugnodemap --dump-disk | f --sha256 --bytes=256 --hexdump --size
120 size=121088, sha256=f544f5462ff46097432caf6d764091f6d8c46d6121be315ead8576d548c9dd79
120 size=121088, sha256=f544f5462ff46097432caf6d764091f6d8c46d6121be315ead8576d548c9dd79
121 0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
121 0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
122 0010: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
122 0010: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
123 0020: ff ff ff ff ff ff f5 06 ff ff ff ff ff ff f3 e7 |................|
123 0020: ff ff ff ff ff ff f5 06 ff ff ff ff ff ff f3 e7 |................|
124 0030: ff ff ef ca ff ff ff ff ff ff ff ff ff ff ff ff |................|
124 0030: ff ff ef ca ff ff ff ff ff ff ff ff ff ff ff ff |................|
125 0040: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
125 0040: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
126 0050: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ed 08 |................|
126 0050: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ed 08 |................|
127 0060: ff ff ed 66 ff ff ff ff ff ff ff ff ff ff ff ff |...f............|
127 0060: ff ff ed 66 ff ff ff ff ff ff ff ff ff ff ff ff |...f............|
128 0070: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
128 0070: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
129 0080: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
129 0080: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
130 0090: ff ff ff ff ff ff ff ff ff ff ff ff ff ff f6 ed |................|
130 0090: ff ff ff ff ff ff ff ff ff ff ff ff ff ff f6 ed |................|
131 00a0: ff ff ff ff ff ff fe 61 ff ff ff ff ff ff ff ff |.......a........|
131 00a0: ff ff ff ff ff ff fe 61 ff ff ff ff ff ff ff ff |.......a........|
132 00b0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
132 00b0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
133 00c0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
133 00c0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
134 00d0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
134 00d0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
135 00e0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff f1 02 |................|
135 00e0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff f1 02 |................|
136 00f0: ff ff ff ff ff ff ed 1b ff ff ff ff ff ff ff ff |................|
136 00f0: ff ff ff ff ff ff ed 1b ff ff ff ff ff ff ff ff |................|
137
137
138 #endif
138 #endif
139
139
140 $ hg debugnodemap --check
140 $ hg debugnodemap --check
141 revision in index: 5001
141 revision in index: 5001
142 revision in nodemap: 5001
142 revision in nodemap: 5001
143
143
144 add a new commit
144 add a new commit
145
145
146 $ hg up
146 $ hg up
147 5001 files updated, 0 files merged, 0 files removed, 0 files unresolved
147 5001 files updated, 0 files merged, 0 files removed, 0 files unresolved
148 $ echo foo > foo
148 $ echo foo > foo
149 $ hg add foo
149 $ hg add foo
150
150
151
151
152 Check slow-path config value handling
152 Check slow-path config value handling
153 -------------------------------------
153 -------------------------------------
154
154
155 #if no-pure no-rust
155 #if no-pure no-rust
156
156
157 $ hg id --config "storage.revlog.persistent-nodemap.slow-path=invalid-value"
157 $ hg id --config "storage.revlog.persistent-nodemap.slow-path=invalid-value"
158 unknown value for config "storage.revlog.persistent-nodemap.slow-path": "invalid-value"
158 unknown value for config "storage.revlog.persistent-nodemap.slow-path": "invalid-value"
159 falling back to default value: abort
159 falling back to default value: abort
160 abort: accessing `persistent-nodemap` repository without associated fast implementation.
160 abort: accessing `persistent-nodemap` repository without associated fast implementation.
161 (check `hg help config.format.use-persistent-nodemap` for details)
161 (check `hg help config.format.use-persistent-nodemap` for details)
162 [255]
162 [255]
163
163
164 $ hg log -r . --config "storage.revlog.persistent-nodemap.slow-path=warn"
164 $ hg log -r . --config "storage.revlog.persistent-nodemap.slow-path=warn"
165 warning: accessing `persistent-nodemap` repository without associated fast implementation.
165 warning: accessing `persistent-nodemap` repository without associated fast implementation.
166 (check `hg help config.format.use-persistent-nodemap` for details)
166 (check `hg help config.format.use-persistent-nodemap` for details)
167 changeset: 5000:6b02b8c7b966
167 changeset: 5000:6b02b8c7b966
168 tag: tip
168 tag: tip
169 user: debugbuilddag
169 user: debugbuilddag
170 date: Thu Jan 01 01:23:20 1970 +0000
170 date: Thu Jan 01 01:23:20 1970 +0000
171 summary: r5000
171 summary: r5000
172
172
173 $ hg ci -m 'foo' --config "storage.revlog.persistent-nodemap.slow-path=abort"
173 $ hg ci -m 'foo' --config "storage.revlog.persistent-nodemap.slow-path=abort"
174 abort: accessing `persistent-nodemap` repository without associated fast implementation.
174 abort: accessing `persistent-nodemap` repository without associated fast implementation.
175 (check `hg help config.format.use-persistent-nodemap` for details)
175 (check `hg help config.format.use-persistent-nodemap` for details)
176 [255]
176 [255]
177
177
178 #else
178 #else
179
179
180 $ hg id --config "storage.revlog.persistent-nodemap.slow-path=invalid-value"
180 $ hg id --config "storage.revlog.persistent-nodemap.slow-path=invalid-value"
181 unknown value for config "storage.revlog.persistent-nodemap.slow-path": "invalid-value"
181 unknown value for config "storage.revlog.persistent-nodemap.slow-path": "invalid-value"
182 falling back to default value: abort
182 falling back to default value: abort
183 6b02b8c7b966+ tip
183 6b02b8c7b966+ tip
184
184
185 #endif
185 #endif
186
186
187 $ hg ci -m 'foo'
187 $ hg ci -m 'foo'
188
188
189 #if no-pure no-rust
189 #if no-pure no-rust
190 $ hg debugnodemap --metadata
190 $ hg debugnodemap --metadata
191 uid: ???????????????? (glob)
191 uid: ???????????????? (glob)
192 tip-rev: 5001
192 tip-rev: 5001
193 tip-node: 16395c3cf7e231394735e6b1717823ada303fb0c
193 tip-node: 16395c3cf7e231394735e6b1717823ada303fb0c
194 data-length: 121088
194 data-length: 121088
195 data-unused: 0
195 data-unused: 0
196 data-unused: 0.000%
196 data-unused: 0.000%
197 #else
197 #else
198 $ hg debugnodemap --metadata
198 $ hg debugnodemap --metadata
199 uid: ???????????????? (glob)
199 uid: ???????????????? (glob)
200 tip-rev: 5001
200 tip-rev: 5001
201 tip-node: 16395c3cf7e231394735e6b1717823ada303fb0c
201 tip-node: 16395c3cf7e231394735e6b1717823ada303fb0c
202 data-length: 121344
202 data-length: 121344
203 data-unused: 256
203 data-unused: 256
204 data-unused: 0.211%
204 data-unused: 0.211%
205 #endif
205 #endif
206
206
207 $ f --size .hg/store/00changelog.n
207 $ f --size .hg/store/00changelog.n
208 .hg/store/00changelog.n: size=70
208 .hg/store/00changelog.n: size=70
209
209
210 (The pure code uses the debug code that performs an incremental update; the C code re-encodes from scratch)
210 (The pure code uses the debug code that performs an incremental update; the C code re-encodes from scratch)
211
211
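
A rough bookkeeping sketch of why the incremental path reports unused bytes, using the figures shown above; this models only the accounting, not the real encoder:

# Full rewrite: every byte of the data file is live.
full_length, full_unused = 121088, 0
# Incremental update: updated trie blocks are appended and the blocks they
# supersede stay behind as dead bytes until the next full rewrite.
appended, superseded = 256, 256
incr_length = full_length + appended
incr_unused = full_unused + superseded
print(incr_length, incr_unused)   # 121344 256, as reported for pure/rust above
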
212 #if pure
212 #if pure
213 $ f --sha256 .hg/store/00changelog-*.nd --size
213 $ f --sha256 .hg/store/00changelog-*.nd --size
214 .hg/store/00changelog-????????????????.nd: size=121344, sha256=cce54c5da5bde3ad72a4938673ed4064c86231b9c64376b082b163fdb20f8f66 (glob)
214 .hg/store/00changelog-????????????????.nd: size=121344, sha256=cce54c5da5bde3ad72a4938673ed4064c86231b9c64376b082b163fdb20f8f66 (glob)
215 #endif
215 #endif
216
216
217 #if rust
217 #if rust
218 $ f --sha256 .hg/store/00changelog-*.nd --size
218 $ f --sha256 .hg/store/00changelog-*.nd --size
219 .hg/store/00changelog-????????????????.nd: size=121344, sha256=952b042fcf614ceb37b542b1b723e04f18f83efe99bee4e0f5ccd232ef470e58 (glob)
219 .hg/store/00changelog-????????????????.nd: size=121344, sha256=952b042fcf614ceb37b542b1b723e04f18f83efe99bee4e0f5ccd232ef470e58 (glob)
220 #endif
220 #endif
221
221
222 #if no-pure no-rust
222 #if no-pure no-rust
223 $ f --sha256 .hg/store/00changelog-*.nd --size
223 $ f --sha256 .hg/store/00changelog-*.nd --size
224 .hg/store/00changelog-????????????????.nd: size=121088, sha256=df7c06a035b96cb28c7287d349d603baef43240be7736fe34eea419a49702e17 (glob)
224 .hg/store/00changelog-????????????????.nd: size=121088, sha256=df7c06a035b96cb28c7287d349d603baef43240be7736fe34eea419a49702e17 (glob)
225 #endif
225 #endif
226
226
227 $ hg debugnodemap --check
227 $ hg debugnodemap --check
228 revision in index: 5002
228 revision in index: 5002
229 revision in nodemap: 5002
229 revision in nodemap: 5002
230
230
231 Test code path without mmap
231 Test code path without mmap
232 ---------------------------
232 ---------------------------
233
233
234 $ echo bar > bar
234 $ echo bar > bar
235 $ hg add bar
235 $ hg add bar
236 $ hg ci -m 'bar' --config storage.revlog.persistent-nodemap.mmap=no
236 $ hg ci -m 'bar' --config storage.revlog.persistent-nodemap.mmap=no
237
237
238 $ hg debugnodemap --check --config storage.revlog.persistent-nodemap.mmap=yes
238 $ hg debugnodemap --check --config storage.revlog.persistent-nodemap.mmap=yes
239 revision in index: 5003
239 revision in index: 5003
240 revision in nodemap: 5003
240 revision in nodemap: 5003
241 $ hg debugnodemap --check --config storage.revlog.persistent-nodemap.mmap=no
241 $ hg debugnodemap --check --config storage.revlog.persistent-nodemap.mmap=no
242 revision in index: 5003
242 revision in index: 5003
243 revision in nodemap: 5003
243 revision in nodemap: 5003
244
244
245
245
246 #if pure
246 #if pure
247 $ hg debugnodemap --metadata
247 $ hg debugnodemap --metadata
248 uid: ???????????????? (glob)
248 uid: ???????????????? (glob)
249 tip-rev: 5002
249 tip-rev: 5002
250 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
250 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
251 data-length: 121600
251 data-length: 121600
252 data-unused: 512
252 data-unused: 512
253 data-unused: 0.421%
253 data-unused: 0.421%
254 $ f --sha256 .hg/store/00changelog-*.nd --size
254 $ f --sha256 .hg/store/00changelog-*.nd --size
255 .hg/store/00changelog-????????????????.nd: size=121600, sha256=def52503d049ccb823974af313a98a935319ba61f40f3aa06a8be4d35c215054 (glob)
255 .hg/store/00changelog-????????????????.nd: size=121600, sha256=def52503d049ccb823974af313a98a935319ba61f40f3aa06a8be4d35c215054 (glob)
256 #endif
256 #endif
257 #if rust
257 #if rust
258 $ hg debugnodemap --metadata
258 $ hg debugnodemap --metadata
259 uid: ???????????????? (glob)
259 uid: ???????????????? (glob)
260 tip-rev: 5002
260 tip-rev: 5002
261 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
261 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
262 data-length: 121600
262 data-length: 121600
263 data-unused: 512
263 data-unused: 512
264 data-unused: 0.421%
264 data-unused: 0.421%
265 $ f --sha256 .hg/store/00changelog-*.nd --size
265 $ f --sha256 .hg/store/00changelog-*.nd --size
266 .hg/store/00changelog-????????????????.nd: size=121600, sha256=dacf5b5f1d4585fee7527d0e67cad5b1ba0930e6a0928f650f779aefb04ce3fb (glob)
266 .hg/store/00changelog-????????????????.nd: size=121600, sha256=dacf5b5f1d4585fee7527d0e67cad5b1ba0930e6a0928f650f779aefb04ce3fb (glob)
267 #endif
267 #endif
268 #if no-pure no-rust
268 #if no-pure no-rust
269 $ hg debugnodemap --metadata
269 $ hg debugnodemap --metadata
270 uid: ???????????????? (glob)
270 uid: ???????????????? (glob)
271 tip-rev: 5002
271 tip-rev: 5002
272 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
272 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
273 data-length: 121088
273 data-length: 121088
274 data-unused: 0
274 data-unused: 0
275 data-unused: 0.000%
275 data-unused: 0.000%
276 $ f --sha256 .hg/store/00changelog-*.nd --size
276 $ f --sha256 .hg/store/00changelog-*.nd --size
277 .hg/store/00changelog-????????????????.nd: size=121088, sha256=59fcede3e3cc587755916ceed29e3c33748cd1aa7d2f91828ac83e7979d935e8 (glob)
277 .hg/store/00changelog-????????????????.nd: size=121088, sha256=59fcede3e3cc587755916ceed29e3c33748cd1aa7d2f91828ac83e7979d935e8 (glob)
278 #endif
278 #endif
279
279
280 Test force warming the cache
280 Test force warming the cache
281
281
282 $ rm .hg/store/00changelog.n
282 $ rm .hg/store/00changelog.n
283 $ hg debugnodemap --metadata
283 $ hg debugnodemap --metadata
284 $ hg debugupdatecache
284 $ hg debugupdatecache
285 #if pure
285 #if pure
286 $ hg debugnodemap --metadata
286 $ hg debugnodemap --metadata
287 uid: ???????????????? (glob)
287 uid: ???????????????? (glob)
288 tip-rev: 5002
288 tip-rev: 5002
289 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
289 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
290 data-length: 121088
290 data-length: 121088
291 data-unused: 0
291 data-unused: 0
292 data-unused: 0.000%
292 data-unused: 0.000%
293 #else
293 #else
294 $ hg debugnodemap --metadata
294 $ hg debugnodemap --metadata
295 uid: ???????????????? (glob)
295 uid: ???????????????? (glob)
296 tip-rev: 5002
296 tip-rev: 5002
297 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
297 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
298 data-length: 121088
298 data-length: 121088
299 data-unused: 0
299 data-unused: 0
300 data-unused: 0.000%
300 data-unused: 0.000%
301 #endif
301 #endif
302
302
303 Check out of sync nodemap
303 Check out of sync nodemap
304 =========================
304 =========================
305
305
306 First copy old data on the side.
306 First copy old data on the side.
307
307
308 $ mkdir ../tmp-copies
308 $ mkdir ../tmp-copies
309 $ cp .hg/store/00changelog-????????????????.nd .hg/store/00changelog.n ../tmp-copies
309 $ cp .hg/store/00changelog-????????????????.nd .hg/store/00changelog.n ../tmp-copies
310
310
311 Nodemap lagging behind
311 Nodemap lagging behind
312 ----------------------
312 ----------------------
313
313
314 make a new commit
314 make a new commit
315
315
316 $ echo bar2 > bar
316 $ echo bar2 > bar
317 $ hg ci -m 'bar2'
317 $ hg ci -m 'bar2'
318 $ NODE=`hg log -r tip -T '{node}\n'`
318 $ NODE=`hg log -r tip -T '{node}\n'`
319 $ hg log -r "$NODE" -T '{rev}\n'
319 $ hg log -r "$NODE" -T '{rev}\n'
320 5003
320 5003
321
321
322 If the nodemap is lagging behind, it can catch up fine
322 If the nodemap is lagging behind, it can catch up fine
323
323
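
As an illustration, catching up amounts to indexing only the revisions the docket does not yet know about. This is a hypothetical helper, not the real implementation:

def missing_revisions(changelog, docket):
    # Assumes the docket is still valid (its tip node is unchanged); the
    # returned revisions are the only ones the persistent nodemap lacks.
    assert changelog.node(docket.tip_rev) == docket.tip_node
    return range(docket.tip_rev + 1, len(changelog))
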
324 $ hg debugnodemap --metadata
324 $ hg debugnodemap --metadata
325 uid: ???????????????? (glob)
325 uid: ???????????????? (glob)
326 tip-rev: 5003
326 tip-rev: 5003
327 tip-node: c9329770f979ade2d16912267c38ba5f82fd37b3
327 tip-node: c9329770f979ade2d16912267c38ba5f82fd37b3
328 data-length: 121344 (pure !)
328 data-length: 121344 (pure !)
329 data-length: 121344 (rust !)
329 data-length: 121344 (rust !)
330 data-length: 121152 (no-rust no-pure !)
330 data-length: 121152 (no-rust no-pure !)
331 data-unused: 192 (pure !)
331 data-unused: 192 (pure !)
332 data-unused: 192 (rust !)
332 data-unused: 192 (rust !)
333 data-unused: 0 (no-rust no-pure !)
333 data-unused: 0 (no-rust no-pure !)
334 data-unused: 0.158% (pure !)
334 data-unused: 0.158% (pure !)
335 data-unused: 0.158% (rust !)
335 data-unused: 0.158% (rust !)
336 data-unused: 0.000% (no-rust no-pure !)
336 data-unused: 0.000% (no-rust no-pure !)
337 $ cp -f ../tmp-copies/* .hg/store/
337 $ cp -f ../tmp-copies/* .hg/store/
338 $ hg debugnodemap --metadata
338 $ hg debugnodemap --metadata
339 uid: ???????????????? (glob)
339 uid: ???????????????? (glob)
340 tip-rev: 5002
340 tip-rev: 5002
341 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
341 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
342 data-length: 121088
342 data-length: 121088
343 data-unused: 0
343 data-unused: 0
344 data-unused: 0.000%
344 data-unused: 0.000%
345 $ hg log -r "$NODE" -T '{rev}\n'
345 $ hg log -r "$NODE" -T '{rev}\n'
346 5003
346 5003
347
347
348 changelog altered
348 changelog altered
349 -----------------
349 -----------------
350
350
351 If the nodemap is not gated behind a requirement, an unaware client can alter
351 If the nodemap is not gated behind a requirement, an unaware client can alter
352 the repository so that the revlog used to generate the nodemap is no longer
352 the repository so that the revlog used to generate the nodemap is no longer
353 compatible with the persistent nodemap. We need to detect that.
353 compatible with the persistent nodemap. We need to detect that.
354
354
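
A hypothetical sketch of the kind of check this exercises (names and structure are illustrative, not the actual implementation): the docket's recorded tip must still resolve to the same node, otherwise the on-disk data is thrown away and rebuilt.

def docket_matches(changelog, docket):
    # After a strip or any other history rewrite, the recorded tip either
    # no longer exists or points at a different node; reject the docket.
    return (
        docket.tip_rev < len(changelog)
        and changelog.node(docket.tip_rev) == docket.tip_node
    )

class _FakeChangelog:
    def __init__(self, nodes):
        self._nodes = nodes
    def __len__(self):
        return len(self._nodes)
    def node(self, rev):
        return self._nodes[rev]

class _FakeDocket:
    tip_rev = 1
    tip_node = b'bbb'

# revision 1 now points at a different node -> the docket is rejected
print(docket_matches(_FakeChangelog([b'aaa', b'ccc']), _FakeDocket()))  # False
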
355 $ hg up "$NODE~5"
355 $ hg up "$NODE~5"
356 0 files updated, 0 files merged, 4 files removed, 0 files unresolved
356 0 files updated, 0 files merged, 4 files removed, 0 files unresolved
357 $ echo bar > babar
357 $ echo bar > babar
358 $ hg add babar
358 $ hg add babar
359 $ hg ci -m 'babar'
359 $ hg ci -m 'babar'
360 created new head
360 created new head
361 $ OTHERNODE=`hg log -r tip -T '{node}\n'`
361 $ OTHERNODE=`hg log -r tip -T '{node}\n'`
362 $ hg log -r "$OTHERNODE" -T '{rev}\n'
362 $ hg log -r "$OTHERNODE" -T '{rev}\n'
363 5004
363 5004
364
364
365 $ hg --config extensions.strip= strip --rev "$NODE~1" --no-backup
365 $ hg --config extensions.strip= strip --rev "$NODE~1" --no-backup
366
366
367 the nodemap should detect that the changelog has been tampered with and recover.
367 the nodemap should detect that the changelog has been tampered with and recover.
368
368
369 $ hg debugnodemap --metadata
369 $ hg debugnodemap --metadata
370 uid: ???????????????? (glob)
370 uid: ???????????????? (glob)
371 tip-rev: 5002
371 tip-rev: 5002
372 tip-node: b355ef8adce0949b8bdf6afc72ca853740d65944
372 tip-node: b355ef8adce0949b8bdf6afc72ca853740d65944
373 data-length: 121536 (pure !)
373 data-length: 121536 (pure !)
374 data-length: 121088 (rust !)
374 data-length: 121088 (rust !)
375 data-length: 121088 (no-pure no-rust !)
375 data-length: 121088 (no-pure no-rust !)
376 data-unused: 448 (pure !)
376 data-unused: 448 (pure !)
377 data-unused: 0 (rust !)
377 data-unused: 0 (rust !)
378 data-unused: 0 (no-pure no-rust !)
378 data-unused: 0 (no-pure no-rust !)
379 data-unused: 0.000% (rust !)
379 data-unused: 0.000% (rust !)
380 data-unused: 0.369% (pure !)
380 data-unused: 0.369% (pure !)
381 data-unused: 0.000% (no-pure no-rust !)
381 data-unused: 0.000% (no-pure no-rust !)
382
382
383 $ cp -f ../tmp-copies/* .hg/store/
383 $ cp -f ../tmp-copies/* .hg/store/
384 $ hg debugnodemap --metadata
384 $ hg debugnodemap --metadata
385 uid: ???????????????? (glob)
385 uid: ???????????????? (glob)
386 tip-rev: 5002
386 tip-rev: 5002
387 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
387 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
388 data-length: 121088
388 data-length: 121088
389 data-unused: 0
389 data-unused: 0
390 data-unused: 0.000%
390 data-unused: 0.000%
391 $ hg log -r "$OTHERNODE" -T '{rev}\n'
391 $ hg log -r "$OTHERNODE" -T '{rev}\n'
392 5002
392 5002
393
393
394 missing data file
394 missing data file
395 -----------------
395 -----------------
396
396
397 $ UUID=`hg debugnodemap --metadata| grep 'uid:' | \
397 $ UUID=`hg debugnodemap --metadata| grep 'uid:' | \
398 > sed 's/uid: //'`
398 > sed 's/uid: //'`
399 $ FILE=.hg/store/00changelog-"${UUID}".nd
399 $ FILE=.hg/store/00changelog-"${UUID}".nd
400 $ mv $FILE ../tmp-data-file
400 $ mv $FILE ../tmp-data-file
401 $ cp .hg/store/00changelog.n ../tmp-docket
401 $ cp .hg/store/00changelog.n ../tmp-docket
402
402
403 mercurial does not crash
403 mercurial does not crash
404
404
405 $ hg log -r .
405 $ hg log -r .
406 changeset: 5002:b355ef8adce0
406 changeset: 5002:b355ef8adce0
407 tag: tip
407 tag: tip
408 parent: 4998:d918ad6d18d3
408 parent: 4998:d918ad6d18d3
409 user: test
409 user: test
410 date: Thu Jan 01 00:00:00 1970 +0000
410 date: Thu Jan 01 00:00:00 1970 +0000
411 summary: babar
411 summary: babar
412
412
413 $ hg debugnodemap --metadata
413 $ hg debugnodemap --metadata
414
414
415 $ hg debugupdatecache
415 $ hg debugupdatecache
416 $ hg debugnodemap --metadata
416 $ hg debugnodemap --metadata
417 uid: * (glob)
417 uid: * (glob)
418 tip-rev: 5002
418 tip-rev: 5002
419 tip-node: b355ef8adce0949b8bdf6afc72ca853740d65944
419 tip-node: b355ef8adce0949b8bdf6afc72ca853740d65944
420 data-length: 121088
420 data-length: 121088
421 data-unused: 0
421 data-unused: 0
422 data-unused: 0.000%
422 data-unused: 0.000%
423 $ mv ../tmp-data-file $FILE
423 $ mv ../tmp-data-file $FILE
424 $ mv ../tmp-docket .hg/store/00changelog.n
424 $ mv ../tmp-docket .hg/store/00changelog.n
425
425
426 Check transaction related property
426 Check transaction related property
427 ==================================
427 ==================================
428
428
429 An up-to-date nodemap should be available to shell hooks.
429 An up-to-date nodemap should be available to shell hooks.
430
430
431 $ echo dsljfl > a
431 $ echo dsljfl > a
432 $ hg add a
432 $ hg add a
433 $ hg ci -m a
433 $ hg ci -m a
434 $ hg debugnodemap --metadata
434 $ hg debugnodemap --metadata
435 uid: ???????????????? (glob)
435 uid: ???????????????? (glob)
436 tip-rev: 5003
436 tip-rev: 5003
437 tip-node: a52c5079765b5865d97b993b303a18740113bbb2
437 tip-node: a52c5079765b5865d97b993b303a18740113bbb2
438 data-length: 121088
438 data-length: 121088
439 data-unused: 0
439 data-unused: 0
440 data-unused: 0.000%
440 data-unused: 0.000%
441 $ echo babar2 > babar
441 $ echo babar2 > babar
442 $ hg ci -m 'babar2' --config "hooks.pretxnclose.nodemap-test=hg debugnodemap --metadata"
442 $ hg ci -m 'babar2' --config "hooks.pretxnclose.nodemap-test=hg debugnodemap --metadata"
443 uid: ???????????????? (glob)
443 uid: ???????????????? (glob)
444 tip-rev: 5004
444 tip-rev: 5004
445 tip-node: 2f5fb1c06a16834c5679d672e90da7c5f3b1a984
445 tip-node: 2f5fb1c06a16834c5679d672e90da7c5f3b1a984
446 data-length: 121280 (pure !)
446 data-length: 121280 (pure !)
447 data-length: 121280 (rust !)
447 data-length: 121280 (rust !)
448 data-length: 121088 (no-pure no-rust !)
448 data-length: 121088 (no-pure no-rust !)
449 data-unused: 192 (pure !)
449 data-unused: 192 (pure !)
450 data-unused: 192 (rust !)
450 data-unused: 192 (rust !)
451 data-unused: 0 (no-pure no-rust !)
451 data-unused: 0 (no-pure no-rust !)
452 data-unused: 0.158% (pure !)
452 data-unused: 0.158% (pure !)
453 data-unused: 0.158% (rust !)
453 data-unused: 0.158% (rust !)
454 data-unused: 0.000% (no-pure no-rust !)
454 data-unused: 0.000% (no-pure no-rust !)
455 $ hg debugnodemap --metadata
455 $ hg debugnodemap --metadata
456 uid: ???????????????? (glob)
456 uid: ???????????????? (glob)
457 tip-rev: 5004
457 tip-rev: 5004
458 tip-node: 2f5fb1c06a16834c5679d672e90da7c5f3b1a984
458 tip-node: 2f5fb1c06a16834c5679d672e90da7c5f3b1a984
459 data-length: 121280 (pure !)
459 data-length: 121280 (pure !)
460 data-length: 121280 (rust !)
460 data-length: 121280 (rust !)
461 data-length: 121088 (no-pure no-rust !)
461 data-length: 121088 (no-pure no-rust !)
462 data-unused: 192 (pure !)
462 data-unused: 192 (pure !)
463 data-unused: 192 (rust !)
463 data-unused: 192 (rust !)
464 data-unused: 0 (no-pure no-rust !)
464 data-unused: 0 (no-pure no-rust !)
465 data-unused: 0.158% (pure !)
465 data-unused: 0.158% (pure !)
466 data-unused: 0.158% (rust !)
466 data-unused: 0.158% (rust !)
467 data-unused: 0.000% (no-pure no-rust !)
467 data-unused: 0.000% (no-pure no-rust !)
468
468
469 Another process does not see the pending nodemap content during run.
469 Another process does not see the pending nodemap content during run.
470
470
471 $ PATH=$RUNTESTDIR/testlib/:$PATH
471 $ PATH=$RUNTESTDIR/testlib/:$PATH
472 $ echo qpoasp > a
472 $ echo qpoasp > a
473 $ hg ci -m a2 \
473 $ hg ci -m a2 \
474 > --config "hooks.pretxnclose=wait-on-file 20 sync-repo-read sync-txn-pending" \
474 > --config "hooks.pretxnclose=wait-on-file 20 sync-repo-read sync-txn-pending" \
475 > --config "hooks.txnclose=touch sync-txn-close" > output.txt 2>&1 &
475 > --config "hooks.txnclose=touch sync-txn-close" > output.txt 2>&1 &
476
476
477 (read the repository while the commit transaction is pending)
477 (read the repository while the commit transaction is pending)
478
478
479 $ wait-on-file 20 sync-txn-pending && \
479 $ wait-on-file 20 sync-txn-pending && \
480 > hg debugnodemap --metadata && \
480 > hg debugnodemap --metadata && \
481 > wait-on-file 20 sync-txn-close sync-repo-read
481 > wait-on-file 20 sync-txn-close sync-repo-read
482 uid: ???????????????? (glob)
482 uid: ???????????????? (glob)
483 tip-rev: 5004
483 tip-rev: 5004
484 tip-node: 2f5fb1c06a16834c5679d672e90da7c5f3b1a984
484 tip-node: 2f5fb1c06a16834c5679d672e90da7c5f3b1a984
485 data-length: 121280 (pure !)
485 data-length: 121280 (pure !)
486 data-length: 121280 (rust !)
486 data-length: 121280 (rust !)
487 data-length: 121088 (no-pure no-rust !)
487 data-length: 121088 (no-pure no-rust !)
488 data-unused: 192 (pure !)
488 data-unused: 192 (pure !)
489 data-unused: 192 (rust !)
489 data-unused: 192 (rust !)
490 data-unused: 0 (no-pure no-rust !)
490 data-unused: 0 (no-pure no-rust !)
491 data-unused: 0.158% (pure !)
491 data-unused: 0.158% (pure !)
492 data-unused: 0.158% (rust !)
492 data-unused: 0.158% (rust !)
493 data-unused: 0.000% (no-pure no-rust !)
493 data-unused: 0.000% (no-pure no-rust !)
494 $ hg debugnodemap --metadata
494 $ hg debugnodemap --metadata
495 uid: ???????????????? (glob)
495 uid: ???????????????? (glob)
496 tip-rev: 5005
496 tip-rev: 5005
497 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
497 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
498 data-length: 121536 (pure !)
498 data-length: 121536 (pure !)
499 data-length: 121536 (rust !)
499 data-length: 121536 (rust !)
500 data-length: 121088 (no-pure no-rust !)
500 data-length: 121088 (no-pure no-rust !)
501 data-unused: 448 (pure !)
501 data-unused: 448 (pure !)
502 data-unused: 448 (rust !)
502 data-unused: 448 (rust !)
503 data-unused: 0 (no-pure no-rust !)
503 data-unused: 0 (no-pure no-rust !)
504 data-unused: 0.369% (pure !)
504 data-unused: 0.369% (pure !)
505 data-unused: 0.369% (rust !)
505 data-unused: 0.369% (rust !)
506 data-unused: 0.000% (no-pure no-rust !)
506 data-unused: 0.000% (no-pure no-rust !)
507
507
508 $ cat output.txt
508 $ cat output.txt
509
509
510 Check that a failing transaction will properly revert the data
510 Check that a failing transaction will properly revert the data
511
511
512 $ echo plakfe > a
512 $ echo plakfe > a
513 $ f --size --sha256 .hg/store/00changelog-*.nd
513 $ f --size --sha256 .hg/store/00changelog-*.nd
514 .hg/store/00changelog-????????????????.nd: size=121536, sha256=bb414468d225cf52d69132e1237afba34d4346ee2eb81b505027e6197b107f03 (glob) (pure !)
514 .hg/store/00changelog-????????????????.nd: size=121536, sha256=bb414468d225cf52d69132e1237afba34d4346ee2eb81b505027e6197b107f03 (glob) (pure !)
515 .hg/store/00changelog-????????????????.nd: size=121536, sha256=909ac727bc4d1c0fda5f7bff3c620c98bd4a2967c143405a1503439e33b377da (glob) (rust !)
515 .hg/store/00changelog-????????????????.nd: size=121536, sha256=909ac727bc4d1c0fda5f7bff3c620c98bd4a2967c143405a1503439e33b377da (glob) (rust !)
516 .hg/store/00changelog-????????????????.nd: size=121088, sha256=342d36d30d86dde67d3cb6c002606c4a75bcad665595d941493845066d9c8ee0 (glob) (no-pure no-rust !)
516 .hg/store/00changelog-????????????????.nd: size=121088, sha256=342d36d30d86dde67d3cb6c002606c4a75bcad665595d941493845066d9c8ee0 (glob) (no-pure no-rust !)
517 $ hg ci -m a3 --config "extensions.abort=$RUNTESTDIR/testlib/crash_transaction_late.py"
517 $ hg ci -m a3 --config "extensions.abort=$RUNTESTDIR/testlib/crash_transaction_late.py"
518 transaction abort!
518 transaction abort!
519 rollback completed
519 rollback completed
520 abort: This is a late abort
520 abort: This is a late abort
521 [255]
521 [255]
522 $ hg debugnodemap --metadata
522 $ hg debugnodemap --metadata
523 uid: ???????????????? (glob)
523 uid: ???????????????? (glob)
524 tip-rev: 5005
524 tip-rev: 5005
525 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
525 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
526 data-length: 121536 (pure !)
526 data-length: 121536 (pure !)
527 data-length: 121536 (rust !)
527 data-length: 121536 (rust !)
528 data-length: 121088 (no-pure no-rust !)
528 data-length: 121088 (no-pure no-rust !)
529 data-unused: 448 (pure !)
529 data-unused: 448 (pure !)
530 data-unused: 448 (rust !)
530 data-unused: 448 (rust !)
531 data-unused: 0 (no-pure no-rust !)
531 data-unused: 0 (no-pure no-rust !)
532 data-unused: 0.369% (pure !)
532 data-unused: 0.369% (pure !)
533 data-unused: 0.369% (rust !)
533 data-unused: 0.369% (rust !)
534 data-unused: 0.000% (no-pure no-rust !)
534 data-unused: 0.000% (no-pure no-rust !)
535 $ f --size --sha256 .hg/store/00changelog-*.nd
535 $ f --size --sha256 .hg/store/00changelog-*.nd
536 .hg/store/00changelog-????????????????.nd: size=121536, sha256=bb414468d225cf52d69132e1237afba34d4346ee2eb81b505027e6197b107f03 (glob) (pure !)
536 .hg/store/00changelog-????????????????.nd: size=121536, sha256=bb414468d225cf52d69132e1237afba34d4346ee2eb81b505027e6197b107f03 (glob) (pure !)
537 .hg/store/00changelog-????????????????.nd: size=121536, sha256=909ac727bc4d1c0fda5f7bff3c620c98bd4a2967c143405a1503439e33b377da (glob) (rust !)
537 .hg/store/00changelog-????????????????.nd: size=121536, sha256=909ac727bc4d1c0fda5f7bff3c620c98bd4a2967c143405a1503439e33b377da (glob) (rust !)
538 .hg/store/00changelog-????????????????.nd: size=121088, sha256=342d36d30d86dde67d3cb6c002606c4a75bcad665595d941493845066d9c8ee0 (glob) (no-pure no-rust !)
538 .hg/store/00changelog-????????????????.nd: size=121088, sha256=342d36d30d86dde67d3cb6c002606c4a75bcad665595d941493845066d9c8ee0 (glob) (no-pure no-rust !)
539
539
540 Check that removing content does not confuse the nodemap
540 Check that removing content does not confuse the nodemap
541 --------------------------------------------------------
541 --------------------------------------------------------
542
542
543 removing data with rollback
543 removing data with rollback
544
544
545 $ echo aso > a
545 $ echo aso > a
546 $ hg ci -m a4
546 $ hg ci -m a4
547 $ hg rollback
547 $ hg rollback
548 repository tip rolled back to revision 5005 (undo commit)
548 repository tip rolled back to revision 5005 (undo commit)
549 working directory now based on revision 5005
549 working directory now based on revision 5005
550 $ hg id -r .
550 $ hg id -r .
551 90d5d3ba2fc4 tip
551 90d5d3ba2fc4 tip
552
552
553 removing data with strip
553 removing data with strip
554
554
555 $ echo aso > a
555 $ echo aso > a
556 $ hg ci -m a4
556 $ hg ci -m a4
557 $ hg --config extensions.strip= strip -r . --no-backup
557 $ hg --config extensions.strip= strip -r . --no-backup
558 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
558 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
559 $ hg id -r . --traceback
559 $ hg id -r . --traceback
560 90d5d3ba2fc4 tip
560 90d5d3ba2fc4 tip
561
561
562 Test upgrade / downgrade
562 Test upgrade / downgrade
563 ========================
563 ========================
564
564
565 downgrading
565 downgrading
566
566
567 $ cat << EOF >> .hg/hgrc
567 $ cat << EOF >> .hg/hgrc
568 > [format]
568 > [format]
569 > use-persistent-nodemap=no
569 > use-persistent-nodemap=no
570 > EOF
570 > EOF
571 $ hg debugformat -v
571 $ hg debugformat -v
572 format-variant repo config default
572 format-variant repo config default
573 fncache: yes yes yes
573 fncache: yes yes yes
574 dotencode: yes yes yes
574 dotencode: yes yes yes
575 generaldelta: yes yes yes
575 generaldelta: yes yes yes
576 share-safe: no no no
576 share-safe: no no no
577 sparserevlog: yes yes yes
577 sparserevlog: yes yes yes
578 sidedata: no no no
578 sidedata: no no no
579 persistent-nodemap: yes no no
579 persistent-nodemap: yes no no
580 copies-sdc: no no no
580 copies-sdc: no no no
581 plain-cl-delta: yes yes yes
581 plain-cl-delta: yes yes yes
582 compression: zlib zlib zlib
582 compression: zlib zlib zlib
583 compression-level: default default default
583 compression-level: default default default
584 $ hg debugupgraderepo --run --no-backup --quiet
584 $ hg debugupgraderepo --run --no-backup --quiet
585 upgrade will perform the following actions:
585 upgrade will perform the following actions:
586
586
587 requirements
587 requirements
588 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
588 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
589 removed: persistent-nodemap
589 removed: persistent-nodemap
590
590
591 processed revlogs:
591 processed revlogs:
592 - all-filelogs
592 - all-filelogs
593 - changelog
593 - changelog
594 - manifest
594 - manifest
595
595
596 $ ls -1 .hg/store/ | egrep '00(changelog|manifest)(\.n|-.*\.nd)'
596 $ ls -1 .hg/store/ | egrep '00(changelog|manifest)(\.n|-.*\.nd)'
597 [1]
597 [1]
598 $ hg debugnodemap --metadata
598 $ hg debugnodemap --metadata
599
599
600
600
601 upgrading
601 upgrading
602
602
603 $ cat << EOF >> .hg/hgrc
603 $ cat << EOF >> .hg/hgrc
604 > [format]
604 > [format]
605 > use-persistent-nodemap=yes
605 > use-persistent-nodemap=yes
606 > EOF
606 > EOF
607 $ hg debugformat -v
607 $ hg debugformat -v
608 format-variant repo config default
608 format-variant repo config default
609 fncache: yes yes yes
609 fncache: yes yes yes
610 dotencode: yes yes yes
610 dotencode: yes yes yes
611 generaldelta: yes yes yes
611 generaldelta: yes yes yes
612 share-safe: no no no
612 share-safe: no no no
613 sparserevlog: yes yes yes
613 sparserevlog: yes yes yes
614 sidedata: no no no
614 sidedata: no no no
615 persistent-nodemap: no yes no
615 persistent-nodemap: no yes no
616 copies-sdc: no no no
616 copies-sdc: no no no
617 plain-cl-delta: yes yes yes
617 plain-cl-delta: yes yes yes
618 compression: zlib zlib zlib
618 compression: zlib zlib zlib
619 compression-level: default default default
619 compression-level: default default default
620 $ hg debugupgraderepo --run --no-backup
620 $ hg debugupgraderepo --run --no-backup
621 upgrade will perform the following actions:
621 upgrade will perform the following actions:
622
622
623 requirements
623 requirements
624 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
624 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
625 added: persistent-nodemap
625 added: persistent-nodemap
626
626
627 persistent-nodemap
627 persistent-nodemap
628 Speedup revision lookup by node id.
628 Speedup revision lookup by node id.
629
629
630 processed revlogs:
630 processed revlogs:
631 - all-filelogs
631 - all-filelogs
632 - changelog
632 - changelog
633 - manifest
633 - manifest
634
634
635 beginning upgrade...
635 beginning upgrade...
636 repository locked and read-only
636 repository locked and read-only
637 creating temporary repository to stage upgraded data: $TESTTMP/test-repo/.hg/upgrade.* (glob)
637 creating temporary repository to stage upgraded data: $TESTTMP/test-repo/.hg/upgrade.* (glob)
638 (it is safe to interrupt this process any time before data migration completes)
638 (it is safe to interrupt this process any time before data migration completes)
639 migrating 15018 total revisions (5006 in filelogs, 5006 in manifests, 5006 in changelog)
639 upgrading repository to use persistent nodemap feature
640 migrating 1.74 MB in store; 569 MB tracked data
641 migrating 5004 filelogs containing 5006 revisions (346 KB in store; 28.2 KB tracked data)
642 finished migrating 5006 filelog revisions across 5004 filelogs; change in size: 0 bytes
643 migrating 1 manifests containing 5006 revisions (765 KB in store; 569 MB tracked data)
644 finished migrating 5006 manifest revisions across 1 manifests; change in size: 0 bytes
645 migrating changelog containing 5006 revisions (673 KB in store; 363 KB tracked data)
646 finished migrating 5006 changelog revisions; change in size: 0 bytes
647 finished migrating 15018 total revisions; total change in store size: 0 bytes
648 copying phaseroots
649 data fully upgraded in a temporary repository
650 marking source repository as being upgraded; clients will be unable to read from repository
651 starting in-place swap of repository data
652 replacing store...
653 store replacement complete; repository was inconsistent for *s (glob)
654 finalizing requirements file and making repository readable again
655 removing temporary repository $TESTTMP/test-repo/.hg/upgrade.* (glob)
640 removing temporary repository $TESTTMP/test-repo/.hg/upgrade.* (glob)
656 $ ls -1 .hg/store/ | egrep '00(changelog|manifest)(\.n|-.*\.nd)'
641 $ ls -1 .hg/store/ | egrep '00(changelog|manifest)(\.n|-.*\.nd)'
657 00changelog-*.nd (glob)
642 00changelog-*.nd (glob)
658 00changelog.n
643 00changelog.n
659 00manifest-*.nd (glob)
660 00manifest.n
661
644
662 $ hg debugnodemap --metadata
645 $ hg debugnodemap --metadata
663 uid: * (glob)
646 uid: * (glob)
664 tip-rev: 5005
647 tip-rev: 5005
665 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
648 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
666 data-length: 121088
649 data-length: 121088
667 data-unused: 0
650 data-unused: 0
668 data-unused: 0.000%
651 data-unused: 0.000%
669
652
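The diff above is the point of this change: when the only requirement being added is persistent-nodemap, the upgrade still stages a temporary repository but skips the revlog migration entirely, reporting "upgrading repository to use persistent nodemap feature" and only taking care of the nodemap files. A much-simplified sketch of that decision follows; the helper names (write_nodemap_files, rewrite_all_revlogs) are invented for illustration and are not Mercurial's real internals:

    def write_nodemap_files(repo_path):
        # placeholder fast path: only the 00changelog.n / 00changelog-<uid>.nd
        # style files are (re)generated, revlog content is untouched
        print("upgrading repository to use persistent nodemap feature")

    def rewrite_all_revlogs(repo_path):
        # placeholder slow path: every filelog, the manifest and the changelog
        # are migrated into a temporary repository, then the stores are swapped
        print("migrating all revisions")

    def run_upgrade(repo_path, old_reqs, new_reqs, optimisations=()):
        added = set(new_reqs) - set(old_reqs)
        removed = set(old_reqs) - set(new_reqs)
        if added == {"persistent-nodemap"} and not removed and not optimisations:
            write_nodemap_files(repo_path)
        else:
            rewrite_all_revlogs(repo_path)

    run_upgrade("test-repo",
                old_reqs={"revlogv1", "store"},
                new_reqs={"revlogv1", "store", "persistent-nodemap"})

Presumably a revlog-rewriting optimisation such as re-delta-all still forces the slow path, which is why it is modelled as a separate argument here.
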
670 Running unrelated upgrade
653 Running unrelated upgrade
671
654
672 $ hg debugupgraderepo --run --no-backup --quiet --optimize re-delta-all
655 $ hg debugupgraderepo --run --no-backup --quiet --optimize re-delta-all
673 upgrade will perform the following actions:
656 upgrade will perform the following actions:
674
657
675 requirements
658 requirements
676 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, sparserevlog, store
659 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, sparserevlog, store
677
660
678 optimisations: re-delta-all
661 optimisations: re-delta-all
679
662
680 processed revlogs:
663 processed revlogs:
681 - all-filelogs
664 - all-filelogs
682 - changelog
665 - changelog
683 - manifest
666 - manifest
684
667
685 $ ls -1 .hg/store/ | egrep '00(changelog|manifest)(\.n|-.*\.nd)'
668 $ ls -1 .hg/store/ | egrep '00(changelog|manifest)(\.n|-.*\.nd)'
686 00changelog-*.nd (glob)
669 00changelog-*.nd (glob)
687 00changelog.n
670 00changelog.n
688 00manifest-*.nd (glob)
671 00manifest-*.nd (glob)
689 00manifest.n
672 00manifest.n
690
673
691 $ hg debugnodemap --metadata
674 $ hg debugnodemap --metadata
692 uid: * (glob)
675 uid: * (glob)
693 tip-rev: 5005
676 tip-rev: 5005
694 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
677 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
695 data-length: 121088
678 data-length: 121088
696 data-unused: 0
679 data-unused: 0
697 data-unused: 0.000%
680 data-unused: 0.000%
698
681
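An unrelated re-delta-all pass rewrites every revlog, yet persistent-nodemap stays in the preserved requirement list and the metadata afterwards is identical (tip-rev 5005, data-length 121088). If one wanted to script the requirement check, the sketch below assumes the classic layout, used here since share-safe is off, where requirements are listed one per line in `.hg/requires`:

    def read_requirements(repo):
        # one requirement per line; share-safe repositories keep some of them
        # under .hg/store/requires instead, which is not handled here
        with open(repo + "/.hg/requires") as fp:
            return {line.strip() for line in fp if line.strip()}

    reqs = read_requirements("test-repo")  # illustrative path
    assert "persistent-nodemap" in reqs, sorted(reqs)
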
699 Persistent nodemap and local/streaming clone
682 Persistent nodemap and local/streaming clone
700 ============================================
683 ============================================
701
684
702 $ cd ..
685 $ cd ..
703
686
704 standard clone
687 standard clone
705 --------------
688 --------------
706
689
707 The persistent nodemap should exist after a standard clone
690 The persistent nodemap should exist after a standard clone
708
691
709 $ hg clone --pull --quiet -U test-repo standard-clone
692 $ hg clone --pull --quiet -U test-repo standard-clone
710 $ ls -1 standard-clone/.hg/store/ | egrep '00(changelog|manifest)(\.n|-.*\.nd)'
693 $ ls -1 standard-clone/.hg/store/ | egrep '00(changelog|manifest)(\.n|-.*\.nd)'
711 00changelog-*.nd (glob)
694 00changelog-*.nd (glob)
712 00changelog.n
695 00changelog.n
713 00manifest-*.nd (glob)
696 00manifest-*.nd (glob)
714 00manifest.n
697 00manifest.n
715 $ hg -R standard-clone debugnodemap --metadata
698 $ hg -R standard-clone debugnodemap --metadata
716 uid: * (glob)
699 uid: * (glob)
717 tip-rev: 5005
700 tip-rev: 5005
718 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
701 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
719 data-length: 121088
702 data-length: 121088
720 data-unused: 0
703 data-unused: 0
721 data-unused: 0.000%
704 data-unused: 0.000%
722
705
723
706
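A pull clone builds its own nodemap locally rather than copying the source's store files, but the history it describes is the same. The sketch below, which assumes only the `hg` executable and the repository names used above, compares the recorded tip-node of the two repositories (both should report 90d5d3ba2fc47db50f712570487cb261a68c8ffe, as shown):

    import subprocess

    def metadata(repo):
        # turn `hg debugnodemap --metadata` output into a dict
        out = subprocess.run(["hg", "-R", repo, "debugnodemap", "--metadata"],
                             check=True, capture_output=True).stdout.decode()
        return dict(line.split(": ", 1) for line in out.splitlines() if ": " in line)

    src, clone = metadata("test-repo"), metadata("standard-clone")
    assert src["tip-node"] == clone["tip-node"], (src, clone)
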
724 local clone
707 local clone
725 ------------
708 ------------
726
709
727 The persistent nodemap should exist after a local clone
710 The persistent nodemap should exist after a local clone
728
711
729 $ hg clone -U test-repo local-clone
712 $ hg clone -U test-repo local-clone
730 $ ls -1 local-clone/.hg/store/ | egrep '00(changelog|manifest)(\.n|-.*\.nd)'
713 $ ls -1 local-clone/.hg/store/ | egrep '00(changelog|manifest)(\.n|-.*\.nd)'
731 00changelog-*.nd (glob)
714 00changelog-*.nd (glob)
732 00changelog.n
715 00changelog.n
733 00manifest-*.nd (glob)
716 00manifest-*.nd (glob)
734 00manifest.n
717 00manifest.n
735 $ hg -R local-clone debugnodemap --metadata
718 $ hg -R local-clone debugnodemap --metadata
736 uid: * (glob)
719 uid: * (glob)
737 tip-rev: 5005
720 tip-rev: 5005
738 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
721 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
739 data-length: 121088
722 data-length: 121088
740 data-unused: 0
723 data-unused: 0
741 data-unused: 0.000%
724 data-unused: 0.000%
742
725
743 stream clone
726 stream clone
744 ------------
727 ------------
745
728
746 The persistent nodemap should exist after a streaming clone
729 The persistent nodemap should exist after a streaming clone
747
730
748 $ hg clone -U --stream --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/test-repo stream-clone --debug | egrep '00(changelog|manifest)'
731 $ hg clone -U --stream --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/test-repo stream-clone --debug | egrep '00(changelog|manifest)'
749 adding [s] 00manifest.n (70 bytes)
732 adding [s] 00manifest.n (70 bytes)
750 adding [s] 00manifest.i (313 KB)
733 adding [s] 00manifest.i (313 KB)
751 adding [s] 00manifest.d (452 KB)
734 adding [s] 00manifest.d (452 KB)
752 adding [s] 00manifest-*.nd (118 KB) (glob)
735 adding [s] 00manifest-*.nd (118 KB) (glob)
753 adding [s] 00changelog.n (70 bytes)
736 adding [s] 00changelog.n (70 bytes)
754 adding [s] 00changelog.i (313 KB)
737 adding [s] 00changelog.i (313 KB)
755 adding [s] 00changelog.d (360 KB)
738 adding [s] 00changelog.d (360 KB)
756 adding [s] 00changelog-*.nd (118 KB) (glob)
739 adding [s] 00changelog-*.nd (118 KB) (glob)
757 $ ls -1 stream-clone/.hg/store/ | egrep '00(changelog|manifest)(\.n|-.*\.nd)'
740 $ ls -1 stream-clone/.hg/store/ | egrep '00(changelog|manifest)(\.n|-.*\.nd)'
758 00changelog-*.nd (glob)
741 00changelog-*.nd (glob)
759 00changelog.n
742 00changelog.n
760 00manifest-*.nd (glob)
743 00manifest-*.nd (glob)
761 00manifest.n
744 00manifest.n
762 $ hg -R stream-clone debugnodemap --metadata
745 $ hg -R stream-clone debugnodemap --metadata
763 uid: * (glob)
746 uid: * (glob)
764 tip-rev: 5005
747 tip-rev: 5005
765 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
748 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
766 data-length: 121088
749 data-length: 121088
767 data-unused: 0
750 data-unused: 0
768 data-unused: 0.000%
751 data-unused: 0.000%
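
Unlike the pull clone, the stream clone ships the store files directly, so the 70-byte `.n` docket and the raw nodemap data are transmitted as part of the stream. The 118 KB reported for `00changelog-*.nd` above is just the docket's data-length converted to kilobytes; a one-line check of that arithmetic:

    data_length = 121088                     # from `hg debugnodemap --metadata`
    print("%.0f KB" % (data_length / 1024))  # prints "118 KB", matching the stream output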