obsutil: move 'exclusivemarkers' to the new modules...
marmoute
r33144:d09ae850 default
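
The function moves to the 'obsutil' module; inside obsolete.py the only
call site is updated accordingly. A minimal sketch of the change visible in
the diff below:

    # before
    rawmarkers = exclusivemarkers(repo, nodes)
    # after
    rawmarkers = obsutil.exclusivemarkers(repo, nodes)
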
@@ -1,1267 +1,1147 @@
# obsolete.py - obsolete markers handling
#
# Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
#                Logilab SA        <contact@logilab.fr>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""Obsolete marker handling

An obsolete marker maps an old changeset to a list of new
changesets. If the list of new changesets is empty, the old changeset
is said to be "killed". Otherwise, the old changeset is being
"replaced" by the new changesets.

Obsolete markers can be used to record and distribute changeset graph
transformations performed by history rewrite operations, and help
building new tools to reconcile conflicting rewrite actions. To
facilitate conflict resolution, markers include various annotations
besides old and new changeset identifiers, such as creation date or
author name.

The old obsoleted changeset is called a "precursor" and possible
replacements are called "successors". Markers that used changeset X as
a precursor are called "successor markers of X" because they hold
information about the successors of X. Markers that use changeset Y as
a successor are called "precursor markers of Y" because they hold
information about the precursors of Y.

Examples:

- When changeset A is replaced by changeset A', one marker is stored:

    (A, (A',))

- When changesets A and B are folded into a new changeset C, two markers are
  stored:

    (A, (C,)) and (B, (C,))

- When changeset A is simply "pruned" from the graph, a marker is created:

    (A, ())

- When changeset A is split into B and C, a single marker is used:

    (A, (B, C))

  We use a single marker to distinguish the "split" case from the "divergence"
  case. If two independent operations rewrite the same changeset A into A' and
  A'', we have an error case: divergent rewriting. We can detect it because
  two markers will be created independently:

    (A, (B,)) and (A, (C,))

Format
------

Markers are stored in an append-only file stored in
'.hg/store/obsstore'.

The file starts with a version header:

- 1 unsigned byte: version number, starting at zero.

The header is followed by the markers. Marker formats depend on the version.
See the comment associated with each format for details.

"""
from __future__ import absolute_import

import errno
import struct

from .i18n import _
from . import (
    error,
    node,
    obsutil,
    phases,
    policy,
    util,
)

parsers = policy.importmod(r'parsers')

_pack = struct.pack
_unpack = struct.unpack
_calcsize = struct.calcsize
propertycache = util.propertycache

# the obsolete feature is not mature enough to be enabled by default.
# you have to rely on third party extensions to enable this.
_enabled = False

# Options for obsolescence
createmarkersopt = 'createmarkers'
allowunstableopt = 'allowunstable'
exchangeopt = 'exchange'

def isenabled(repo, option):
    """Returns True if the given repository has the given obsolete option
    enabled.
    """
    result = set(repo.ui.configlist('experimental', 'evolution'))
    if 'all' in result:
        return True

    # For migration purposes, temporarily return true if the config hasn't
    # been set but _enabled is true.
    if len(result) == 0 and _enabled:
        return True

    # createmarkers must be enabled if other options are enabled
    if ((allowunstableopt in result or exchangeopt in result) and
        createmarkersopt not in result):
        raise error.Abort(_("'createmarkers' obsolete option must be enabled "
                            "if other obsolete options are enabled"))

    return option in result
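
# A minimal configuration sketch: the options above are read from the
# 'experimental.evolution' config list, so a repository would enable them
# through its hgrc, for example:
#
#   [experimental]
#   evolution = createmarkers, allowunstable, exchange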

### obsolescence marker flag

## bumpedfix flag
#
# When a changeset A' succeeds a changeset A which became public, we call A'
# "bumped" because it is a successor of a public changeset
#
#     o A' (bumped)
#     |`:
#     | o A
#     |/
#     o Z
#
# The way to solve this situation is to create a new changeset Ad as a child
# of A. This changeset has the same content as A'. So the diff from A to A'
# is the same as the diff from A to Ad. Ad is marked as a successor of A'
#
#     o Ad
#     |`:
#     | x A'
#     |'|
#     o | A
#     |/
#     o Z
#
# But by transitivity Ad is also a successor of A. To avoid having Ad marked
# as bumped too, we add the `bumpedfix` flag to the marker, <A', (Ad,)>.
# This flag means that the successor expresses the changes between the public
# and bumped versions and fixes the situation, breaking the transitivity of
# "bumped" here.
bumpedfix = 1
usingsha256 = 2
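
# A minimal sketch: flags form a bit field, so independent flags combine
# and test with bitwise operators.
#
#   flags = bumpedfix | usingsha256
#   assert flags & bumpedfix and flags & usingsha256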

## Parsing and writing of version "0"
#
# The header is followed by the markers. Each marker is made of:
#
# - 1 uint8 : number of new changesets "N", can be zero.
#
# - 1 uint32: metadata size "M" in bytes.
#
# - 1 byte: a bit field. It is reserved for flags used in common
#   obsolete marker operations, to avoid repeated decoding of metadata
#   entries.
#
# - 20 bytes: obsoleted changeset identifier.
#
# - N*20 bytes: new changesets identifiers.
#
# - M bytes: metadata as a sequence of nul-terminated strings. Each
#   string contains a key and a value, separated by a colon ':', without
#   additional encoding. Keys cannot contain '\0' or ':' and values
#   cannot contain '\0'.
_fm0version = 0
_fm0fixed = '>BIB20s'
_fm0node = '20s'
_fm0fsize = _calcsize(_fm0fixed)
_fm0fnodesize = _calcsize(_fm0node)
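
# A quick size check for illustration: with big-endian packing ('>'), the
# fixed v0 part is uint8 + uint32 + uint8 + a 20-byte node.
#
#   assert _fm0fsize == 26  # 1 + 4 + 1 + 20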

def _fm0readmarkers(data, off):
    # Loop on markers
    l = len(data)
    while off + _fm0fsize <= l:
        # read fixed part
        cur = data[off:off + _fm0fsize]
        off += _fm0fsize
        numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
        # read replacement
        sucs = ()
        if numsuc:
            s = (_fm0fnodesize * numsuc)
            cur = data[off:off + s]
            sucs = _unpack(_fm0node * numsuc, cur)
            off += s
        # read metadata
        # (metadata will be decoded on demand)
        metadata = data[off:off + mdsize]
        if len(metadata) != mdsize:
            raise error.Abort(_('parsing obsolete marker: metadata is too '
                                'short, %d bytes expected, got %d')
                              % (mdsize, len(metadata)))
        off += mdsize
        metadata = _fm0decodemeta(metadata)
        try:
            when, offset = metadata.pop('date', '0 0').split(' ')
            date = float(when), int(offset)
        except ValueError:
            date = (0., 0)
        parents = None
        if 'p2' in metadata:
            parents = (metadata.pop('p1', None), metadata.pop('p2', None))
        elif 'p1' in metadata:
            parents = (metadata.pop('p1', None),)
        elif 'p0' in metadata:
            parents = ()
        if parents is not None:
            try:
                parents = tuple(node.bin(p) for p in parents)
                # if parent content is not a nodeid, drop the data
                for p in parents:
                    if len(p) != 20:
                        parents = None
                        break
            except TypeError:
                # if content cannot be translated to nodeid, drop the data.
                parents = None

        metadata = tuple(sorted(metadata.iteritems()))

        yield (pre, sucs, flags, metadata, date, parents)

def _fm0encodeonemarker(marker):
    pre, sucs, flags, metadata, date, parents = marker
    if flags & usingsha256:
        raise error.Abort(_('cannot handle sha256 with old obsstore format'))
    metadata = dict(metadata)
    time, tz = date
    metadata['date'] = '%r %i' % (time, tz)
    if parents is not None:
        if not parents:
            # mark that we explicitly recorded no parents
            metadata['p0'] = ''
        for i, p in enumerate(parents, 1):
            metadata['p%i' % i] = node.hex(p)
    metadata = _fm0encodemeta(metadata)
    numsuc = len(sucs)
    format = _fm0fixed + (_fm0node * numsuc)
    data = [numsuc, len(metadata), flags, pre]
    data.extend(sucs)
    return _pack(format, *data) + metadata

def _fm0encodemeta(meta):
    """Return encoded metadata string to string mapping.

    Assume no ':' in key and no '\0' in both key and value."""
    for key, value in meta.iteritems():
        if ':' in key or '\0' in key:
            raise ValueError("':' and '\0' are forbidden in metadata key")
        if '\0' in value:
            raise ValueError("'\0' is forbidden in metadata value")
    return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])

def _fm0decodemeta(data):
    """Return string to string dictionary from encoded version."""
    d = {}
    for l in data.split('\0'):
        if l:
            key, value = l.split(':')
            d[key] = value
    return d
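
# An illustrative roundtrip: the two helpers above are inverses for any
# mapping that respects the forbidden characters.
#
#   meta = {'user': 'alice', 'note': 'amended'}
#   assert _fm0decodemeta(_fm0encodemeta(meta)) == meta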

## Parsing and writing of version "1"
#
# The header is followed by the markers. Each marker is made of:
#
# - uint32: total size of the marker (including this field)
#
# - float64: date in seconds since epoch
#
# - int16: timezone offset in minutes
#
# - uint16: a bit field. It is reserved for flags used in common
#   obsolete marker operations, to avoid repeated decoding of metadata
#   entries.
#
# - uint8: number of successors "N", can be zero.
#
# - uint8: number of parents "P", can be zero.
#
#     0: parents data stored but no parent,
#     1: one parent stored,
#     2: two parents stored,
#     3: no parent data stored
#
# - uint8: number of metadata entries M
#
# - 20 or 32 bytes: precursor changeset identifier.
#
# - N*(20 or 32) bytes: successors changesets identifiers.
#
# - P*(20 or 32) bytes: parents of the precursors changesets.
#
# - M*(uint8, uint8): size of all metadata entries (key and value)
#
# - remaining bytes: the metadata, each (key, value) pair after the other.
_fm1version = 1
_fm1fixed = '>IdhHBBB20s'
_fm1nodesha1 = '20s'
_fm1nodesha256 = '32s'
_fm1nodesha1size = _calcsize(_fm1nodesha1)
_fm1nodesha256size = _calcsize(_fm1nodesha256)
_fm1fsize = _calcsize(_fm1fixed)
_fm1parentnone = 3
_fm1parentshift = 14
_fm1parentmask = (_fm1parentnone << _fm1parentshift)
_fm1metapair = 'BB'
_fm1metapairsize = _calcsize('BB')
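
# A quick size check for illustration: the v1 fixed part is uint32 +
# float64 + int16 + uint16 + three uint8 + a 20-byte node.
#
#   assert _fm1fsize == 39  # 4 + 8 + 2 + 2 + 3 + 20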

def _fm1purereadmarkers(data, off):
    # make some global constants local for performance
    noneflag = _fm1parentnone
    sha2flag = usingsha256
    sha1size = _fm1nodesha1size
    sha2size = _fm1nodesha256size
    sha1fmt = _fm1nodesha1
    sha2fmt = _fm1nodesha256
    metasize = _fm1metapairsize
    metafmt = _fm1metapair
    fsize = _fm1fsize
    unpack = _unpack

    # Loop on markers
    stop = len(data) - _fm1fsize
    ufixed = struct.Struct(_fm1fixed).unpack

    while off <= stop:
        # read fixed part
        o1 = off + fsize
        t, secs, tz, flags, numsuc, numpar, nummeta, prec = ufixed(data[off:o1])

        if flags & sha2flag:
            # FIXME: prec was read as a SHA1, needs to be amended

            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha2size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha2size * numsuc
                sucs = unpack(sha2fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha2size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha2size * numpar
                parents = unpack(sha2fmt * numpar, data[o2:o3])
        else:
            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha1size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha1size * numsuc
                sucs = unpack(sha1fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha1size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha1size * numpar
                parents = unpack(sha1fmt * numpar, data[o2:o3])

        # read metadata
        off = o3 + metasize * nummeta
        metapairsize = unpack('>' + (metafmt * nummeta), data[o3:off])
        metadata = []
        for idx in xrange(0, len(metapairsize), 2):
            o1 = off + metapairsize[idx]
            o2 = o1 + metapairsize[idx + 1]
            metadata.append((data[off:o1], data[o1:o2]))
            off = o2

        yield (prec, sucs, flags, tuple(metadata), (secs, tz * 60), parents)

def _fm1encodeonemarker(marker):
    pre, sucs, flags, metadata, date, parents = marker
    # determine node size
    _fm1node = _fm1nodesha1
    if flags & usingsha256:
        _fm1node = _fm1nodesha256
    numsuc = len(sucs)
    numextranodes = numsuc
    if parents is None:
        numpar = _fm1parentnone
    else:
        numpar = len(parents)
        numextranodes += numpar
    formatnodes = _fm1node * numextranodes
    formatmeta = _fm1metapair * len(metadata)
    format = _fm1fixed + formatnodes + formatmeta
    # tz is stored in minutes so we divide by 60
    tz = date[1]//60
    data = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre]
    data.extend(sucs)
    if parents is not None:
        data.extend(parents)
    totalsize = _calcsize(format)
    for key, value in metadata:
        lk = len(key)
        lv = len(value)
        data.append(lk)
        data.append(lv)
        totalsize += lk + lv
    data[0] = totalsize
    data = [_pack(format, *data)]
    for key, value in metadata:
        data.append(key)
        data.append(value)
    return ''.join(data)

def _fm1readmarkers(data, off):
    native = getattr(parsers, 'fm1readmarkers', None)
    if not native:
        return _fm1purereadmarkers(data, off)
    stop = len(data) - _fm1fsize
    return native(data, off, stop)

# mapping to read/write various marker formats
# <version> -> (decoder, encoder)
formats = {_fm0version: (_fm0readmarkers, _fm0encodeonemarker),
           _fm1version: (_fm1readmarkers, _fm1encodeonemarker)}

def _readmarkerversion(data):
    return _unpack('>B', data[0:1])[0]

@util.nogc
def _readmarkers(data):
    """Read and enumerate markers from raw data"""
    diskversion = _readmarkerversion(data)
    off = 1
    if diskversion not in formats:
        msg = _('parsing obsolete marker: unknown version %r') % diskversion
        raise error.UnknownVersion(msg, version=diskversion)
    return diskversion, formats[diskversion][0](data, off)

def encodeheader(version=_fm0version):
    return _pack('>B', version)

def encodemarkers(markers, addheader=False, version=_fm0version):
    # Kept separate from flushmarkers(), it will be reused for
    # markers exchange.
    encodeone = formats[version][1]
    if addheader:
        yield encodeheader(version)
    for marker in markers:
        yield encodeone(marker)


class marker(object):
    """Wrap obsolete marker raw data"""

    def __init__(self, repo, data):
        # the repo argument will be used to create changectx in later version
        self._repo = repo
        self._data = data
        self._decodedmeta = None

    def __hash__(self):
        return hash(self._data)

    def __eq__(self, other):
        if type(other) != type(self):
            return False
        return self._data == other._data

    def precnode(self):
        """Precursor changeset node identifier"""
        return self._data[0]

    def succnodes(self):
        """List of successor changesets node identifiers"""
        return self._data[1]

    def parentnodes(self):
        """Parents of the precursors (None if not recorded)"""
        return self._data[5]

    def metadata(self):
        """Decoded metadata dictionary"""
        return dict(self._data[3])

    def date(self):
        """Creation date as (unixtime, offset)"""
        return self._data[4]

    def flags(self):
        """The flags field of the marker"""
        return self._data[2]
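
# A minimal usage sketch, assuming an existing repository object `repo` and
# a raw marker tuple `rawdata` coming from its obsstore:
#
#   m = marker(repo, rawdata)
#   m.precnode()   # the obsoleted node
#   m.succnodes()  # tuple of replacement nodes, empty for a prune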

@util.nogc
def _addsuccessors(successors, markers):
    for mark in markers:
        successors.setdefault(mark[0], set()).add(mark)

@util.nogc
def _addprecursors(precursors, markers):
    for mark in markers:
        for suc in mark[1]:
            precursors.setdefault(suc, set()).add(mark)

@util.nogc
def _addchildren(children, markers):
    for mark in markers:
        parents = mark[5]
        if parents is not None:
            for p in parents:
                children.setdefault(p, set()).add(mark)

def _checkinvalidmarkers(markers):
    """search for markers with invalid data and raise an error if needed

    Exists as a separate function to allow the evolve extension a more
    subtle handling.
    """
    for mark in markers:
        if node.nullid in mark[1]:
            raise error.Abort(_('bad obsolescence marker detected: '
                                'invalid successors nullid'))

class obsstore(object):
    """Store obsolete markers

    Markers can be accessed with three mappings:
    - precursors[x] -> set(markers on precursors edges of x)
    - successors[x] -> set(markers on successors edges of x)
    - children[x]   -> set(markers on precursors edges of children(x))
    """

    fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
    # prec:    nodeid, precursor changesets
    # succs:   tuple of nodeid, successor changesets (0-N length)
    # flag:    integer, flag field carrying modifier for the markers (see doc)
    # meta:    binary blob, encoded metadata dictionary
    # date:    (float, int) tuple, date of marker creation
    # parents: (tuple of nodeid) or None, parents of precursors
    #          None is used when no data has been recorded
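
    # An illustrative sketch of the three mappings, with one-letter stand-ins
    # for real 20-byte nodes: a marker m = (A, (B,), ...) is indexed as
    #
    #   successors[A] -> set including m   # A is the marker's precursor
    #   precursors[B] -> set including m   # B is one of its successors
    #   children[P]   -> set including m   # for each recorded parent P of A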

    def __init__(self, svfs, defaultformat=_fm1version, readonly=False):
        # caches for various obsolescence related data
        self.caches = {}
        self.svfs = svfs
        self._defaultformat = defaultformat
        self._readonly = readonly

    def __iter__(self):
        return iter(self._all)

    def __len__(self):
        return len(self._all)

    def __nonzero__(self):
        if not self._cached('_all'):
            try:
                return self.svfs.stat('obsstore').st_size > 1
            except OSError as inst:
                if inst.errno != errno.ENOENT:
                    raise
                # just build an empty _all list if no obsstore exists, which
                # avoids further stat() syscalls
                pass
        return bool(self._all)

    __bool__ = __nonzero__

    @property
    def readonly(self):
        """True if marker creation is disabled

        Remove me in the future when obsolete marker is always on."""
        return self._readonly

    def create(self, transaction, prec, succs=(), flag=0, parents=None,
               date=None, metadata=None, ui=None):
        """obsolete: add a new obsolete marker

        * ensuring it is hashable
        * check mandatory metadata
        * encode metadata

        If you are a human writing code creating markers you want to use the
        `createmarkers` function in this module instead.

        Return True if a new marker has been added, False if the marker
        already existed (no op).
        """
        if metadata is None:
            metadata = {}
        if date is None:
            if 'date' in metadata:
                # as a courtesy for out-of-tree extensions
                date = util.parsedate(metadata.pop('date'))
            elif ui is not None:
                date = ui.configdate('devel', 'default-date')
                if date is None:
                    date = util.makedate()
            else:
                date = util.makedate()
        if len(prec) != 20:
            raise ValueError(prec)
        for succ in succs:
            if len(succ) != 20:
                raise ValueError(succ)
        if prec in succs:
            raise ValueError(_('in-marker cycle with %s') % node.hex(prec))

        metadata = tuple(sorted(metadata.iteritems()))

        marker = (str(prec), tuple(succs), int(flag), metadata, date, parents)
        return bool(self.add(transaction, [marker]))

    def add(self, transaction, markers):
        """Add new markers to the store

        Take care of filtering duplicates.
        Return the number of new markers."""
        if self._readonly:
            raise error.Abort(_('creating obsolete markers is not enabled on '
                                'this repo'))
        known = set()
        getsuccessors = self.successors.get
        new = []
        for m in markers:
            if m not in getsuccessors(m[0], ()) and m not in known:
                known.add(m)
                new.append(m)
        if new:
            f = self.svfs('obsstore', 'ab')
            try:
                offset = f.tell()
                transaction.add('obsstore', offset)
                # offset == 0: new file - add the version header
                for bytes in encodemarkers(new, offset == 0, self._version):
                    f.write(bytes)
            finally:
                # XXX: f.close() == filecache invalidation == obsstore rebuilt.
                # call 'filecacheentry.refresh()' here
                f.close()
            self._addmarkers(new)
            # new markers *may* have changed several sets. invalidate the cache.
            self.caches.clear()
        # records the number of new markers for the transaction hooks
        previous = int(transaction.hookargs.get('new_obsmarkers', '0'))
        transaction.hookargs['new_obsmarkers'] = str(previous + len(new))
        return len(new)

    def mergemarkers(self, transaction, data):
        """merge a binary stream of markers inside the obsstore

        Returns the number of new markers added."""
        version, markers = _readmarkers(data)
        return self.add(transaction, markers)

    @propertycache
    def _data(self):
        return self.svfs.tryread('obsstore')

    @propertycache
    def _version(self):
        if len(self._data) >= 1:
            return _readmarkerversion(self._data)
        else:
            return self._defaultformat

    @propertycache
    def _all(self):
        data = self._data
        if not data:
            return []
        self._version, markers = _readmarkers(data)
        markers = list(markers)
        _checkinvalidmarkers(markers)
        return markers

    @propertycache
    def successors(self):
        successors = {}
        _addsuccessors(successors, self._all)
        return successors

    @propertycache
    def precursors(self):
        precursors = {}
        _addprecursors(precursors, self._all)
        return precursors

    @propertycache
    def children(self):
        children = {}
        _addchildren(children, self._all)
        return children

    def _cached(self, attr):
        return attr in self.__dict__

    def _addmarkers(self, markers):
        markers = list(markers)  # to allow repeated iteration
        self._all.extend(markers)
        if self._cached('successors'):
            _addsuccessors(self.successors, markers)
        if self._cached('precursors'):
            _addprecursors(self.precursors, markers)
        if self._cached('children'):
            _addchildren(self.children, markers)
        _checkinvalidmarkers(markers)

    def relevantmarkers(self, nodes):
        """return a set of all obsolescence markers relevant to a set of nodes.

        "relevant" to a set of nodes means:

        - markers that use these changesets as successors
        - prune markers of direct children of these changesets
        - recursive application of the two rules on the precursors of these
          markers

        It is a set so you cannot rely on order."""

        pendingnodes = set(nodes)
        seenmarkers = set()
        seennodes = set(pendingnodes)
        precursorsmarkers = self.precursors
        succsmarkers = self.successors
        children = self.children
        while pendingnodes:
            direct = set()
            for current in pendingnodes:
                direct.update(precursorsmarkers.get(current, ()))
                pruned = [m for m in children.get(current, ()) if not m[1]]
                direct.update(pruned)
                pruned = [m for m in succsmarkers.get(current, ()) if not m[1]]
                direct.update(pruned)
            direct -= seenmarkers
            pendingnodes = set([m[0] for m in direct])
            seenmarkers |= direct
            pendingnodes -= seennodes
            seennodes |= pendingnodes
        return seenmarkers
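
    # A minimal usage sketch, assuming a repository `repo` with obsolescence
    # data and a changeset context `ctx`:
    #
    #   markers = repo.obsstore.relevantmarkers([ctx.node()])
    #   # an unordered set of raw marker tuples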

def makestore(ui, repo):
    """Create an obsstore instance from a repo."""
    # read default format for new obsstore.
    # developer config: format.obsstore-version
    defaultformat = ui.configint('format', 'obsstore-version', None)
    # rely on obsstore class default when possible.
    kwargs = {}
    if defaultformat is not None:
        kwargs['defaultformat'] = defaultformat
    readonly = not isenabled(repo, createmarkersopt)
    store = obsstore(repo.svfs, readonly=readonly, **kwargs)
    if store and readonly:
        ui.warn(_('obsolete feature not enabled but %i markers found!\n')
                % len(list(store)))
    return store

-def _filterprunes(markers):
-    """return a set with no prune markers"""
-    return set(m for m in markers if m[1])
-
-def exclusivemarkers(repo, nodes):
-    """set of markers relevant to "nodes" but no other locally-known nodes
-
-    This function computes the set of markers "exclusive" to a locally-known
-    node. This means we walk the markers starting from <nodes> until we reach
-    a locally-known precursor outside of <nodes>. Elements of <nodes> with
-    locally-known successors outside of <nodes> are ignored (since their
-    precursors markers are also relevant to these successors).
-
-    For example:
-
-    # (A0 rewritten as A1)
-    #
-    # A0 <-1- A1 # Marker "1" is exclusive to A1
-
-    or
-
-    # (A0 rewritten as AX; AX rewritten as A1; AX is unknown locally)
-    #
-    # <-1- A0 <-2- AX <-3- A1 # Markers "2,3" are exclusive to A1
-
-    or
-
-    # (A0 has unknown precursors, A0 rewritten as A1 and A2 (divergence))
-    #
-    #          <-2- A1 # Marker "2" is exclusive to A0,A1
-    #         /
-    # <-1- A0
-    #         \
-    #          <-3- A2 # Marker "3" is exclusive to A0,A2
-    #
-    # in addition:
-    #
-    #  Markers "2,3" are exclusive to A1,A2
-    #  Markers "1,2,3" are exclusive to A0,A1,A2
-
-    See test/test-obsolete-bundle-strip.t for more examples.
-
-    An example usage is strip. When stripping a changeset, we also want to
-    strip the markers exclusive to this changeset. Otherwise we would have
-    "dangling" obsolescence markers from its precursors: obsolescence markers
-    marking a node as obsolete without any successors available locally.
-
-    As for relevant markers, the prune markers for children will be followed.
-    Of course, they will only be followed if the pruned children are
-    locally-known, since the prune markers are relevant to the pruned node.
-    However, while prune markers are considered relevant to the parent of the
-    pruned changesets, prune markers for locally-known changesets (with no
-    successors) are considered exclusive to the pruned nodes. This allows
-    to strip the prune markers (with the rest of the exclusive chain)
-    alongside the pruned changesets.
-    """
-    # running on a filtered repository would be dangerous as markers could be
-    # reported as exclusive when they are relevant for other filtered nodes.
-    unfi = repo.unfiltered()
-
-    # shortcut to various useful items
-    nm = unfi.changelog.nodemap
-    precursorsmarkers = unfi.obsstore.precursors
-    successormarkers = unfi.obsstore.successors
-    childrenmarkers = unfi.obsstore.children
-
-    # exclusive markers (return of the function)
-    exclmarkers = set()
-    # we need fast membership testing
-    nodes = set(nodes)
-    # looking for head in the obshistory
-    #
-    # XXX we are ignoring all issues in regard to cycles for now.
-    stack = [n for n in nodes if not _filterprunes(successormarkers.get(n, ()))]
-    stack.sort()
-    # nodes already stacked
-    seennodes = set(stack)
-    while stack:
-        current = stack.pop()
-        # fetch precursors markers
-        markers = list(precursorsmarkers.get(current, ()))
-        # extend the list with prune markers
-        for mark in successormarkers.get(current, ()):
-            if not mark[1]:
-                markers.append(mark)
-        # and markers from children (looking for prune)
-        for mark in childrenmarkers.get(current, ()):
-            if not mark[1]:
-                markers.append(mark)
-        # traverse the markers
-        for mark in markers:
-            if mark in exclmarkers:
-                # markers already selected
-                continue
-
-            # If the marker is about the current node, select it
-            #
-            # (this delays the addition of markers from children)
-            if mark[1] or mark[0] == current:
-                exclmarkers.add(mark)
-
-            # should we keep traversing through the precursors?
-            prec = mark[0]
-
-            # nodes in the stack or already processed
-            if prec in seennodes:
-                continue
-
-            # is this a locally known node ?
-            known = prec in nm
-            # if locally-known and not in the <nodes> set, the traversal
-            # stops here.
-            if known and prec not in nodes:
-                continue
-
-            # do not keep going if there are unselected markers pointing to
-            # this node. If we end up traversing these unselected markers
-            # later the node will be taken care of at that point.
-            precmarkers = _filterprunes(successormarkers.get(prec))
-            if precmarkers.issubset(exclmarkers):
-                seennodes.add(prec)
-                stack.append(prec)
-
-    return exclmarkers
-
def commonversion(versions):
    """Return the newest version listed in both versions and our local formats.

    Returns None if no common version exists.
    """
    versions.sort(reverse=True)
    # search for highest version known on both sides
    for v in versions:
        if v in formats:
            return v
    return None

# arbitrarily picked to fit into the 8K limit from the HTTP server
# you have to take into account:
# - the version header
# - the base85 encoding
_maxpayload = 5300

def _pushkeyescape(markers):
    """encode markers into a dict suitable for pushkey exchange

    - binary data is base85 encoded
    - split in chunks smaller than 5300 bytes"""
    keys = {}
    parts = []
    currentlen = _maxpayload * 2  # ensure we create a new part
    for marker in markers:
        nextdata = _fm0encodeonemarker(marker)
        if (len(nextdata) + currentlen > _maxpayload):
            currentpart = []
            currentlen = 0
            parts.append(currentpart)
        currentpart.append(nextdata)
        currentlen += len(nextdata)
    for idx, part in enumerate(reversed(parts)):
        data = ''.join([_pack('>B', _fm0version)] + part)
        keys['dump%i' % idx] = util.b85encode(data)
    return keys
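
# An illustrative shape of the result: each value is a standalone base85
# encoded version-0 stream, so a large marker set yields several keys.
#
#   {'dump0': '<base85 data>', 'dump1': '<base85 data>', ...}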

def listmarkers(repo):
    """List markers over pushkey"""
    if not repo.obsstore:
        return {}
    return _pushkeyescape(sorted(repo.obsstore))

def pushmarker(repo, key, old, new):
    """Push markers over pushkey"""
    if not key.startswith('dump'):
        repo.ui.warn(_('unknown key: %r') % key)
        return False
    if old:
        repo.ui.warn(_('unexpected old value for %r') % key)
        return False
    data = util.b85decode(new)
    lock = repo.lock()
    try:
        tr = repo.transaction('pushkey: obsolete markers')
        try:
            repo.obsstore.mergemarkers(tr, data)
            repo.invalidatevolatilesets()
            tr.close()
            return True
        finally:
            tr.release()
    finally:
        lock.release()

def getmarkers(repo, nodes=None, exclusive=False):
    """returns markers known in a repository

    If <nodes> is specified, only markers "relevant" to those nodes are
    returned"""
    if nodes is None:
        rawmarkers = repo.obsstore
    elif exclusive:
-        rawmarkers = exclusivemarkers(repo, nodes)
+        rawmarkers = obsutil.exclusivemarkers(repo, nodes)
    else:
        rawmarkers = repo.obsstore.relevantmarkers(nodes)

    for markerdata in rawmarkers:
        yield marker(repo, markerdata)

981 def relevantmarkers(repo, node):
856 def relevantmarkers(repo, node):
982 """all obsolete markers relevant to some revision"""
857 """all obsolete markers relevant to some revision"""
983 for markerdata in repo.obsstore.relevantmarkers(node):
858 for markerdata in repo.obsstore.relevantmarkers(node):
984 yield marker(repo, markerdata)
859 yield marker(repo, markerdata)
985
860
986
861
987 def precursormarkers(ctx):
862 def precursormarkers(ctx):
988 """obsolete marker marking this changeset as a successors"""
863 """obsolete marker marking this changeset as a successors"""
989 for data in ctx.repo().obsstore.precursors.get(ctx.node(), ()):
864 for data in ctx.repo().obsstore.precursors.get(ctx.node(), ()):
990 yield marker(ctx.repo(), data)
865 yield marker(ctx.repo(), data)
991
866
992 def successormarkers(ctx):
867 def successormarkers(ctx):
993 """obsolete marker making this changeset obsolete"""
868 """obsolete marker making this changeset obsolete"""
994 for data in ctx.repo().obsstore.successors.get(ctx.node(), ()):
869 for data in ctx.repo().obsstore.successors.get(ctx.node(), ()):
995 yield marker(ctx.repo(), data)
870 yield marker(ctx.repo(), data)
996
871
997 def allsuccessors(obsstore, nodes, ignoreflags=0):
872 def allsuccessors(obsstore, nodes, ignoreflags=0):
998 """Yield node for every successor of <nodes>.
873 """Yield node for every successor of <nodes>.
999
874
1000 Some successors may be unknown locally.
875 Some successors may be unknown locally.
1001
876
1002 This is a linear yield unsuited to detecting split changesets. It includes
877 This is a linear yield unsuited to detecting split changesets. It includes
1003 initial nodes too."""
878 initial nodes too."""
1004 remaining = set(nodes)
879 remaining = set(nodes)
1005 seen = set(remaining)
880 seen = set(remaining)
1006 while remaining:
881 while remaining:
1007 current = remaining.pop()
882 current = remaining.pop()
1008 yield current
883 yield current
1009 for mark in obsstore.successors.get(current, ()):
884 for mark in obsstore.successors.get(current, ()):
1010 # ignore marker flagged with specified flag
885 # ignore marker flagged with specified flag
1011 if mark[2] & ignoreflags:
886 if mark[2] & ignoreflags:
1012 continue
887 continue
1013 for suc in mark[1]:
888 for suc in mark[1]:
1014 if suc not in seen:
889 if suc not in seen:
1015 seen.add(suc)
890 seen.add(suc)
1016 remaining.add(suc)
891 remaining.add(suc)
1017
892
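# Walk sketch: 'A' was rewritten as 'B', which was split into 'C' and 'D'.
# The stub below mimics only the 'successors' mapping the function reads
# (marker = (precursor, successors, flags, ...)); it is not a real obsstore:
class _stubstore(object):
    def __init__(self, successors):
        self.successors = successors

_store = _stubstore({
    'A': [('A', ('B',), 0)],
    'B': [('B', ('C', 'D'), 0)],  # split
})
# the yield includes the initial node and is transitive:
assert set(allsuccessors(_store, ['A'])) == set(['A', 'B', 'C', 'D'])
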
def allprecursors(obsstore, nodes, ignoreflags=0):
    """Yield node for every precursor of <nodes>.

    Some precursors may be unknown locally.

    This is a linear yield unsuited to detecting folded changesets. It includes
    the initial nodes too."""

    remaining = set(nodes)
    seen = set(remaining)
    while remaining:
        current = remaining.pop()
        yield current
        for mark in obsstore.precursors.get(current, ()):
            # ignore markers flagged with the specified flags
            if mark[2] & ignoreflags:
                continue
            suc = mark[0]
            if suc not in seen:
                seen.add(suc)
                remaining.add(suc)

def foreground(repo, nodes):
    """return all nodes in the "foreground" of other nodes

    The foreground of a revision is anything reachable using the
    parent -> children or precursor -> successor relations. It is very similar
    to "descendant" but augmented with obsolescence information.

    Beware that obsolescence cycles may produce unexpected results in complex
    situations.
    """
    repo = repo.unfiltered()
    foreground = set(repo.set('%ln::', nodes))
    if repo.obsstore:
        # We only need this complicated logic if there is obsolescence
        # XXX will probably deserve an optimised revset.
        nm = repo.changelog.nodemap
        plen = -1
        # compute the whole set of successors or descendants
        while len(foreground) != plen:
            plen = len(foreground)
            succs = set(c.node() for c in foreground)
            mutable = [c.node() for c in foreground if c.mutable()]
            succs.update(allsuccessors(repo.obsstore, mutable))
            known = (n for n in succs if n in nm)
            foreground = set(repo.set('%ln::', known))
    return set(c.node() for c in foreground)

+def exclusivemarkers(repo, nodes):
+    movemsg = 'obsolete.exclusivemarkers moved to obsutil.exclusivemarkers'
+    repo.ui.deprecwarn(movemsg, '4.3')
+    return obsutil.exclusivemarkers(repo, nodes)
+
def successorssets(repo, initialnode, cache=None):
    movemsg = 'obsolete.successorssets moved to obsutil.successorssets'
    repo.ui.deprecwarn(movemsg, '4.3')
    return obsutil.successorssets(repo, initialnode, cache=cache)

# mapping of 'set-name' -> <function to compute this set>
cachefuncs = {}
def cachefor(name):
    """Decorator to register a function as computing the cache for a set"""
    def decorator(func):
        if name in cachefuncs:
            msg = "duplicated registration for volatileset '%s' (existing: %r)"
            raise error.ProgrammingError(msg % (name, cachefuncs[name]))
        cachefuncs[name] = func
        return func
    return decorator

def getrevs(repo, name):
    """Return the set of revisions that belong to the <name> set

    Such access may compute the set and cache it for future use"""
    repo = repo.unfiltered()
    if not repo.obsstore:
        return frozenset()
    if name not in repo.obsstore.caches:
        repo.obsstore.caches[name] = cachefuncs[name](repo)
    return repo.obsstore.caches[name]

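# Registering an extra volatile set is a decorator away. A hedged sketch with
# a hypothetical 'obsheads' set (the name and function are illustrative, not
# part of Mercurial):
@cachefor('obsheads')
def _computeobsheadsset(repo):
    """obsolete revisions without obsolete children"""
    obs = getrevs(repo, 'obsolete')
    pfunc = repo.changelog.parentrevs
    withobschild = set(p for r in obs for p in pfunc(r) if p in obs)
    return obs - withobschild

# afterwards, getrevs(repo, 'obsheads') computes the set once and caches it
# on repo.obsstore.caches until the caches are cleared.
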
# To be simple we need to invalidate obsolescence cache when:
#
# - a new changeset is added
# - the public phase is changed
# - obsolescence markers are added
# - strip is used on a repo
def clearobscaches(repo):
    """Remove all obsolescence related caches from a repo

    This removes all caches in the obsstore if the obsstore already exists on
    the repo.

    (We could be smarter here given the exact event that triggered the cache
    clearing)"""
    # only clear caches if there is obsstore data in this repo
    if 'obsstore' in repo._filecache:
        repo.obsstore.caches.clear()

def _mutablerevs(repo):
    """the set of mutable revisions in the repository"""
    return repo._phasecache.getrevset(repo, (phases.draft, phases.secret))

@cachefor('obsolete')
def _computeobsoleteset(repo):
    """the set of obsolete revisions"""
    getnode = repo.changelog.node
    notpublic = _mutablerevs(repo)
    isobs = repo.obsstore.successors.__contains__
    obs = set(r for r in notpublic if isobs(getnode(r)))
    return obs

@cachefor('unstable')
def _computeunstableset(repo):
    """the set of non obsolete revisions with obsolete parents"""
    pfunc = repo.changelog.parentrevs
    mutable = _mutablerevs(repo)
    obsolete = getrevs(repo, 'obsolete')
    others = mutable - obsolete
    unstable = set()
    for r in sorted(others):
        # A rev is unstable if one of its parents is obsolete or unstable;
        # this works since we traverse in growing rev order
        for p in pfunc(r):
            if p in obsolete or p in unstable:
                unstable.add(r)
                break
    return unstable

@cachefor('suspended')
def _computesuspendedset(repo):
    """the set of obsolete parents with non obsolete descendants"""
    suspended = repo.changelog.ancestors(getrevs(repo, 'unstable'))
    return set(r for r in getrevs(repo, 'obsolete') if r in suspended)

@cachefor('extinct')
def _computeextinctset(repo):
    """the set of obsolete parents without non obsolete descendants"""
    return getrevs(repo, 'obsolete') - getrevs(repo, 'suspended')


@cachefor('bumped')
def _computebumpedset(repo):
    """the set of revs trying to obsolete public revisions"""
    bumped = set()
    # util function (avoid attribute lookup in the loop)
    phase = repo._phasecache.phase # would be faster to grab the full list
    public = phases.public
    cl = repo.changelog
    torev = cl.nodemap.get
    for ctx in repo.set('(not public()) and (not obsolete())'):
        rev = ctx.rev()
        # We only evaluate mutable, non-obsolete revisions
        node = ctx.node()
        # (future) A cache of precursors may be worth it if split is very
        # common
        for pnode in allprecursors(repo.obsstore, [node],
                                   ignoreflags=bumpedfix):
            prev = torev(pnode) # unfiltered! but so is phasecache
            if (prev is not None) and (phase(repo, prev) <= public):
                # we have a public precursor
                bumped.add(rev)
                break # Next draft!
    return bumped

@cachefor('divergent')
def _computedivergentset(repo):
    """the set of revs that compete to be the final successors of some
    revision.
    """
    divergent = set()
    obsstore = repo.obsstore
    newermap = {}
    for ctx in repo.set('(not public()) - obsolete()'):
        mark = obsstore.precursors.get(ctx.node(), ())
        toprocess = set(mark)
        seen = set()
        while toprocess:
            prec = toprocess.pop()[0]
            if prec in seen:
                continue # emergency cycle hanging prevention
            seen.add(prec)
            if prec not in newermap:
                obsutil.successorssets(repo, prec, newermap)
            newer = [n for n in newermap[prec] if n]
            if len(newer) > 1:
                divergent.add(ctx.rev())
                break
            toprocess.update(obsstore.precursors.get(prec, ()))
    return divergent


def createmarkers(repo, relations, flag=0, date=None, metadata=None,
                  operation=None):
    """Add obsolete markers between changesets in a repo

    <relations> must be an iterable of (<old>, (<new>, ...)[, {metadata}])
    tuples. `old` and `new` are changectx. metadata is an optional dictionary
    containing metadata for this marker only. It is merged with the global
    metadata specified through the `metadata` argument of this function.

    Trying to obsolete a public changeset will raise an exception.

    Current user and date are used except if specified otherwise in the
    metadata attribute.

    This function operates within a transaction of its own, but does
    not take any lock on the repo.
    """
    # prepare metadata
    if metadata is None:
        metadata = {}
    if 'user' not in metadata:
        metadata['user'] = repo.ui.username()
    useoperation = repo.ui.configbool('experimental',
                                      'evolution.track-operation',
                                      False)
    if useoperation and operation:
        metadata['operation'] = operation
    tr = repo.transaction('add-obsolescence-marker')
    try:
        markerargs = []
        for rel in relations:
            prec = rel[0]
            sucs = rel[1]
            localmetadata = metadata.copy()
            if 2 < len(rel):
                localmetadata.update(rel[2])

            if not prec.mutable():
                raise error.Abort(_("cannot obsolete public changeset: %s")
                                  % prec,
                                  hint="see 'hg help phases' for details")
            nprec = prec.node()
            nsucs = tuple(s.node() for s in sucs)
            npare = None
            if not nsucs:
                npare = tuple(p.node() for p in prec.parents())
            if nprec in nsucs:
                raise error.Abort(_("changeset %s cannot obsolete itself")
                                  % prec)

            # Creating the marker causes the hidden cache to become invalid,
            # which causes recomputation when we ask for prec.parents() above.
            # Resulting in n^2 behavior. So let's prepare all of the args
            # first, then create the markers.
            markerargs.append((nprec, nsucs, npare, localmetadata))

        for args in markerargs:
            nprec, nsucs, npare, localmetadata = args
            repo.obsstore.create(tr, nprec, nsucs, flag, parents=npare,
                                 date=date, metadata=localmetadata,
                                 ui=repo.ui)
        repo.filteredrevcache.clear()
        tr.close()
    finally:
        tr.release()
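
# Call-shape sketch (hedged; 'old' and 'new' stand for changectx objects, and
# 'amend' is only recorded when experimental.evolution.track-operation is on):
#
#     createmarkers(repo, [(old, (new,))], operation='amend')
#
# pruning is the empty-successors case, optionally with per-marker metadata
# (the 'reason' key is made up for illustration):
#
#     createmarkers(repo, [(old, (), {'reason': 'abandoned'})])
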
@@ -1,241 +1,366 @@
# obsutil.py - utility functions for obsolescence
#
# Copyright 2017 Boris Feld <boris.feld@octobus.net>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

def closestpredecessors(repo, nodeid):
    """yield the list of next predecessors pointing on visible changectx nodes

    This function respects the repoview filtering: filtered revisions will be
    considered missing.
    """

    precursors = repo.obsstore.precursors
    stack = [nodeid]
    seen = set(stack)

    while stack:
        current = stack.pop()
        currentpreccs = precursors.get(current, ())

        for prec in currentpreccs:
            precnodeid = prec[0]

            # Basic cycle protection
            if precnodeid in seen:
                continue
            seen.add(precnodeid)

            if precnodeid in repo:
                yield precnodeid
            else:
                stack.append(precnodeid)

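# A runnable sketch with minimal stand-ins: B1 rewrote B0 and B0 rewrote A,
# but B0 is filtered/unknown locally, so the closest visible predecessor of
# B1 is A (the stub classes are illustrative, not real repo objects):
class _stubobsstore(object):
    def __init__(self, precursors):
        self.precursors = precursors

class _stubrepo(object):
    def __init__(self, visible, precursors):
        self._visible = set(visible)
        self.obsstore = _stubobsstore(precursors)
    def __contains__(self, node):
        return node in self._visible

_repo = _stubrepo(['A', 'B1'],
                  {'B1': [('B0', ('B1',), 0)],
                   'B0': [('A', ('B0',), 0)]})
assert list(closestpredecessors(_repo, 'B1')) == ['A']
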
+def _filterprunes(markers):
+    """return a set with no prune markers"""
+    return set(m for m in markers if m[1])
+
+def exclusivemarkers(repo, nodes):
+    """set of markers relevant to "nodes" but no other locally-known nodes
+
+    This function computes the set of markers "exclusive" to a locally-known
+    node. This means we walk the markers starting from <nodes> until we reach
+    a locally-known precursor outside of <nodes>. Elements of <nodes> with
+    locally-known successors outside of <nodes> are ignored (since their
+    precursors markers are also relevant to these successors).
+
+    For example:
+
+    # (A0 rewritten as A1)
+    #
+    # A0 <-1- A1 # Marker "1" is exclusive to A1
+
+    or
+
+    # (A0 rewritten as AX; AX rewritten as A1; AX is unknown locally)
+    #
+    # <-1- A0 <-2- AX <-3- A1 # Markers "2,3" are exclusive to A1
+
+    or
+
+    # (A0 has unknown precursors, A0 rewritten as A1 and A2 (divergence))
+    #
+    #          <-2- A1 # Marker "2" is exclusive to A0,A1
+    #        /
+    # <-1- A0
+    #        \
+    #          <-3- A2 # Marker "3" is exclusive to A0,A2
+    #
+    # in addition:
+    #
+    #  Markers "2,3" are exclusive to A1,A2
+    #  Markers "1,2,3" are exclusive to A0,A1,A2
+
+    See test/test-obsolete-bundle-strip.t for more examples.
+
+    An example usage is strip. When stripping a changeset, we also want to
+    strip the markers exclusive to this changeset. Otherwise we would have
+    "dangling" obsolescence markers from its precursors: obsolescence markers
+    marking a node as obsolete without any successors available locally.
+
+    As for relevant markers, the prune markers for children will be followed.
+    Of course, they will only be followed if the pruned children are
+    locally-known, since the prune markers are relevant to the pruned node.
+    However, while prune markers are considered relevant to the parent of the
+    pruned changesets, prune markers for locally-known changesets (with no
+    successors) are considered exclusive to the pruned nodes. This allows
+    stripping the prune markers (with the rest of the exclusive chain)
+    alongside the pruned changesets.
+    """
+    # running on a filtered repository would be dangerous as markers could be
+    # reported as exclusive when they are relevant for other filtered nodes.
+    unfi = repo.unfiltered()
+
+    # shortcut to various useful items
+    nm = unfi.changelog.nodemap
+    precursorsmarkers = unfi.obsstore.precursors
+    successormarkers = unfi.obsstore.successors
+    childrenmarkers = unfi.obsstore.children
+
+    # exclusive markers (return of the function)
+    exclmarkers = set()
+    # we need fast membership testing
+    nodes = set(nodes)
+    # looking for heads in the obshistory
+    #
+    # XXX we are ignoring all issues in regard to cycles for now.
+    stack = [n for n in nodes if not _filterprunes(successormarkers.get(n, ()))]
+    stack.sort()
+    # nodes already stacked
+    seennodes = set(stack)
+    while stack:
+        current = stack.pop()
+        # fetch precursors markers
+        markers = list(precursorsmarkers.get(current, ()))
+        # extend the list with prune markers
+        for mark in successormarkers.get(current, ()):
+            if not mark[1]:
+                markers.append(mark)
+        # and markers from children (looking for prune)
+        for mark in childrenmarkers.get(current, ()):
+            if not mark[1]:
+                markers.append(mark)
+        # traverse the markers
+        for mark in markers:
+            if mark in exclmarkers:
+                # marker already selected
+                continue
+
+            # If the marker is about the current node, select it
+            #
+            # (this delays the addition of markers from children)
+            if mark[1] or mark[0] == current:
+                exclmarkers.add(mark)
+
+            # should we keep traversing through the precursors?
+            prec = mark[0]
+
+            # nodes in the stack or already processed
+            if prec in seennodes:
+                continue
+
+            # is this a locally known node ?
+            known = prec in nm
+            # if locally-known and not in the <nodes> set, the traversal
+            # stops here.
+            if known and prec not in nodes:
+                continue
+
+            # do not keep going if there are unselected markers pointing to
+            # this node. If we end up traversing these unselected markers
+            # later the node will be taken care of at that point.
+            precmarkers = _filterprunes(successormarkers.get(prec))
+            if precmarkers.issubset(exclmarkers):
+                seennodes.add(prec)
+                stack.append(prec)
+
+    return exclmarkers
+

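# A runnable check of the second docstring example above (A0 <-2- AX <-3- A1,
# with AX unknown locally): markers 2 and 3 come out as exclusive to A1. The
# stubs mirror only the attributes the function touches:
class _markstore(object):
    def __init__(self, precursors, successors, children):
        self.precursors = precursors
        self.successors = successors
        self.children = children

class _markrepo(object):
    def __init__(self, nodemap, obsstore):
        self.changelog = type('cl', (object,), {'nodemap': nodemap})()
        self.obsstore = obsstore
    def unfiltered(self):
        return self

m1 = ('U', ('A0',), 0)   # unknown precursor rewritten as A0
m2 = ('A0', ('AX',), 0)  # A0 rewritten as AX (AX unknown locally)
m3 = ('AX', ('A1',), 0)  # AX rewritten as A1
_repo = _markrepo({'A0': 0, 'A1': 1},
                  _markstore({'A0': [m1], 'AX': [m2], 'A1': [m3]},
                             {'U': [m1], 'A0': [m2], 'AX': [m3]},
                             {}))
assert exclusivemarkers(_repo, ['A1']) == set([m2, m3])
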
def successorssets(repo, initialnode, cache=None):
    """Return set of all latest successors of initial nodes

    The successors set of a changeset A is the group of revisions that
    succeed A. It succeeds A as a consistent whole, each revision being only a
    partial replacement. The successors set contains non-obsolete changesets
    only.

    This function returns the full list of successor sets which is why it
    returns a list of tuples and not just a single tuple. Each tuple is a
    valid successors set. Note that (A,) may be a valid successors set for
    changeset A (see below).

    In most cases, a changeset A will have a single element (e.g. the
    changeset A is replaced by A') in its successors set. Though, it is also
    common for a changeset A to have no elements in its successor set (e.g.
    the changeset has been pruned). Therefore, the returned list of successors
    sets will be [(A',)] or [], respectively.

    When a changeset A is split into A' and B', however, it will result in a
    successors set containing more than a single element, i.e. [(A',B')].
    Divergent changesets will result in multiple successors sets, i.e. [(A',),
    (A'')].

    If a changeset A is not obsolete, then it will conceptually have no
    successors set. To distinguish this from a pruned changeset, the successor
    set will contain itself only, i.e. [(A,)].

    Finally, successors unknown locally are considered to be pruned (obsoleted
    without any successors).

    The optional `cache` parameter is a dictionary that may contain
    precomputed successors sets. It is meant to reuse the computation of a
    previous call to `successorssets` when multiple calls are made at the same
    time. The cache dictionary is updated in place. The caller is responsible
    for its life span. Code that makes multiple calls to `successorssets`
    *must* use this cache mechanism or suffer terrible performance.
    """

    succmarkers = repo.obsstore.successors

    # Stack of nodes we search successors sets for
    toproceed = [initialnode]
    # set version of above list for fast loop detection
    # element added to "toproceed" must be added here
    stackedset = set(toproceed)
    if cache is None:
        cache = {}

    # This while loop is the flattened version of a recursive search for
    # successors sets
    #
    # def successorssets(x):
    #    successors = directsuccessors(x)
    #    ss = [[]]
    #    for succ in directsuccessors(x):
    #        # product as in itertools cartesian product
    #        ss = product(ss, successorssets(succ))
    #    return ss
    #
    # But we can not use plain recursive calls here:
    # - that would blow the python call stack
    # - obsolescence markers may have cycles, we need to handle them.
    #
    # The `toproceed` list acts as our call stack. Every node we search
    # successors sets for is stacked there.
    #
    # The `stackedset` is a set version of this stack used to check if a node
    # is already stacked. This check is used to detect cycles and prevent
    # infinite loops.
    #
    # successors sets of all nodes are stored in the `cache` dictionary.
    #
    # After this while loop ends we use the cache to return the successors
    # sets for the node requested by the caller.
    while toproceed:
        # Every iteration tries to compute the successors sets of the topmost
        # node of the stack: CURRENT.
        #
        # There are four possible outcomes:
        #
        # 1) We already know the successors sets of CURRENT:
        #    -> mission accomplished, pop it from the stack.
        # 2) Node is not obsolete:
        #    -> the node is its own successors sets. Add it to the cache.
        # 3) We do not know successors set of direct successors of CURRENT:
        #    -> We add those successors to the stack.
        # 4) We know successors sets of all direct successors of CURRENT:
        #    -> We can compute CURRENT successors set and add it to the
        #       cache.
        #
        current = toproceed[-1]
        if current in cache:
            # case (1): We already know the successors sets
            stackedset.remove(toproceed.pop())
        elif current not in succmarkers:
            # case (2): The node is not obsolete.
            if current in repo:
                # We have a valid last successors.
                cache[current] = [(current,)]
            else:
                # Final obsolete version is unknown locally.
                # Do not count that as a valid successors
                cache[current] = []
        else:
            # cases (3) and (4)
            #
            # We proceed in two phases. Phase 1 aims to distinguish case (3)
            # from case (4):
            #
            #     For each direct successors of CURRENT, we check whether its
            #     successors sets are known. If they are not, we stack the
            #     unknown node and proceed to the next iteration of the while
            #     loop. (case 3)
            #
            #     During this step, we may detect obsolescence cycles: a node
            #     with unknown successors sets but already in the call stack.
            #     In such a situation, we arbitrarily set the successors sets
            #     of the node to nothing (node pruned) to break the cycle.
            #
            #     If no break was encountered we proceed to phase 2.
            #
            # Phase 2 computes successors sets of CURRENT (case 4); see
            # details in phase 2 itself.
            #
            # Note the two levels of iteration in each phase.
            # - The first one handles obsolescence markers using CURRENT as
            #   precursor (successors markers of CURRENT).
            #
            #   Having multiple entries here means divergence.
            #
            # - The second one handles successors defined in each marker.
            #
            #   Having none means a pruned node, multiple successors mean a
            #   split, a single successor is a standard replacement.
            #
            for mark in sorted(succmarkers[current]):
                for suc in mark[1]:
                    if suc not in cache:
                        if suc in stackedset:
                            # cycle breaking
                            cache[suc] = []
                        else:
                            # case (3) If we have not computed successors sets
                            # of one of those successors we add it to the
                            # `toproceed` stack and stop all work for this
                            # iteration.
                            toproceed.append(suc)
                            stackedset.add(suc)
                            break
                else:
                    continue
                break
            else:
                # case (4): we know all successors sets of all direct
                # successors
                #
                # Successors set contributed by each marker depends on the
                # successors sets of all its "successors" node.
                #
                # Each different marker is a divergence in the obsolescence
                # history. It contributes successors sets distinct from other
                # markers.
                #
                # Within a marker, a successor may have divergent successors
                # sets. In such a case, the marker will contribute multiple
                # divergent successors sets. If multiple successors have
                # divergent successors sets, a Cartesian product is used.
                #
                # At the end we post-process successors sets to remove
                # duplicated entries and successors sets that are strict
                # subsets of another one.
                succssets = []
                for mark in sorted(succmarkers[current]):
                    # successors sets contributed by this marker
                    markss = [[]]
                    for suc in mark[1]:
                        # cartesian product with previous successors
                        productresult = []
                        for prefix in markss:
                            for suffix in cache[suc]:
                                newss = list(prefix)
                                for part in suffix:
                                    # do not duplicate entries in successors
                                    # set; first entry wins.
                                    if part not in newss:
                                        newss.append(part)
                                productresult.append(newss)
                        markss = productresult
                    succssets.extend(markss)
                # remove duplicated and subset successors sets
                seen = []
                final = []
                candidate = sorted(((set(s), s) for s in succssets if s),
                                   key=lambda x: len(x[1]), reverse=True)
                for setversion, listversion in candidate:
                    for seenset in seen:
                        if setversion.issubset(seenset):
                            break
                    else:
                        final.append(listversion)
                        seen.append(setversion)
                final.reverse() # put small successors set first
                cache[current] = final
    return cache[initialnode]
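
# A runnable sketch of the split case: A was split into B and C, so A has the
# single successors set [B, C]. The stubs cover just what the function reads,
# and the 'cache' dict can be shared across calls:
class _splitstore(object):
    def __init__(self, successors):
        self.successors = successors

class _splitrepo(object):
    def __init__(self, visible, successors):
        self._visible = set(visible)
        self.obsstore = _splitstore(successors)
    def __contains__(self, node):
        return node in self._visible

_repo = _splitrepo(['B', 'C'], {'A': [('A', ('B', 'C'), 0)]})
_cache = {}
assert successorssets(_repo, 'A', cache=_cache) == [['B', 'C']]
# reuse: B's (trivial) successors set is now cached
assert _cache['B'] == [('B',)]
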
@@ -1,433 +1,434 @@
# repair.py - functions for repository repair for mercurial
#
# Copyright 2005, 2006 Chris Mason <mason@suse.com>
# Copyright 2007 Matt Mackall
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import hashlib

from .i18n import _
from .node import short
from . import (
    bundle2,
    changegroup,
    discovery,
    error,
    exchange,
    obsolete,
+    obsutil,
    util,
)

def _bundle(repo, bases, heads, node, suffix, compress=True, obsolescence=True):
    """create a bundle with the specified revisions as a backup"""

    backupdir = "strip-backup"
    vfs = repo.vfs
    if not vfs.isdir(backupdir):
        vfs.mkdir(backupdir)

    # Include a hash of all the nodes in the filename for uniqueness
    allcommits = repo.set('%ln::%ln', bases, heads)
    allhashes = sorted(c.hex() for c in allcommits)
    totalhash = hashlib.sha1(''.join(allhashes)).hexdigest()
    name = "%s/%s-%s-%s.hg" % (backupdir, short(node), totalhash[:8], suffix)

    cgversion = changegroup.safeversion(repo)
    comp = None
    if cgversion != '01':
        bundletype = "HG20"
        if compress:
            comp = 'BZ'
    elif compress:
        bundletype = "HG10BZ"
    else:
        bundletype = "HG10UN"

    outgoing = discovery.outgoing(repo, missingroots=bases, missingheads=heads)
    contentopts = {
        'cg.version': cgversion,
        'obsolescence': obsolescence,
        'phases': True,
    }
    return bundle2.writenewbundle(repo.ui, repo, 'strip', name, bundletype,
                                  outgoing, contentopts, vfs, compression=comp)

60 def _collectfiles(repo, striprev):
61 def _collectfiles(repo, striprev):
61 """find out the filelogs affected by the strip"""
62 """find out the filelogs affected by the strip"""
62 files = set()
63 files = set()
63
64
64 for x in xrange(striprev, len(repo)):
65 for x in xrange(striprev, len(repo)):
65 files.update(repo[x].files())
66 files.update(repo[x].files())
66
67
67 return sorted(files)
68 return sorted(files)
68
69
69 def _collectbrokencsets(repo, files, striprev):
70 def _collectbrokencsets(repo, files, striprev):
70 """return the changesets which will be broken by the truncation"""
71 """return the changesets which will be broken by the truncation"""
71 s = set()
72 s = set()
72 def collectone(revlog):
73 def collectone(revlog):
73 _, brokenset = revlog.getstrippoint(striprev)
74 _, brokenset = revlog.getstrippoint(striprev)
74 s.update([revlog.linkrev(r) for r in brokenset])
75 s.update([revlog.linkrev(r) for r in brokenset])
75
76
76 collectone(repo.manifestlog._revlog)
77 collectone(repo.manifestlog._revlog)
77 for fname in files:
78 for fname in files:
78 collectone(repo.file(fname))
79 collectone(repo.file(fname))
79
80
80 return s
81 return s
81
82
82 def strip(ui, repo, nodelist, backup=True, topic='backup'):
83 def strip(ui, repo, nodelist, backup=True, topic='backup'):
83 # This function requires the caller to lock the repo, but it operates
84 # This function requires the caller to lock the repo, but it operates
84 # within a transaction of its own, and thus requires there to be no current
85 # within a transaction of its own, and thus requires there to be no current
85 # transaction when it is called.
86 # transaction when it is called.
86 if repo.currenttransaction() is not None:
87 if repo.currenttransaction() is not None:
87 raise error.ProgrammingError('cannot strip from inside a transaction')
88 raise error.ProgrammingError('cannot strip from inside a transaction')
88
89
89 # Simple way to maintain backwards compatibility for this
90 # Simple way to maintain backwards compatibility for this
90 # argument.
91 # argument.
91 if backup in ['none', 'strip']:
92 if backup in ['none', 'strip']:
92 backup = False
93 backup = False
93
94
94 repo = repo.unfiltered()
95 repo = repo.unfiltered()
95 repo.destroying()
96 repo.destroying()
96
97
97 cl = repo.changelog
98 cl = repo.changelog
98 # TODO handle undo of merge sets
99 # TODO handle undo of merge sets
99 if isinstance(nodelist, str):
100 if isinstance(nodelist, str):
100 nodelist = [nodelist]
101 nodelist = [nodelist]
101 striplist = [cl.rev(node) for node in nodelist]
102 striplist = [cl.rev(node) for node in nodelist]
102 striprev = min(striplist)
103 striprev = min(striplist)
103
104
104 files = _collectfiles(repo, striprev)
105 files = _collectfiles(repo, striprev)
105 saverevs = _collectbrokencsets(repo, files, striprev)
106 saverevs = _collectbrokencsets(repo, files, striprev)
106
107
107 # Some revisions with rev > striprev may not be descendants of striprev.
108 # Some revisions with rev > striprev may not be descendants of striprev.
108 # We have to find these revisions and put them in a bundle, so that
109 # We have to find these revisions and put them in a bundle, so that
109 # we can restore them after the truncations.
110 # we can restore them after the truncations.
110 # To create the bundle we use repo.changegroupsubset which requires
111 # To create the bundle we use repo.changegroupsubset which requires
111 # the list of heads and bases of the set of interesting revisions.
112 # the list of heads and bases of the set of interesting revisions.
112 # (head = revision in the set that has no descendant in the set;
113 # (head = revision in the set that has no descendant in the set;
113 # base = revision in the set that has no ancestor in the set)
114 # base = revision in the set that has no ancestor in the set)
114 tostrip = set(striplist)
115 tostrip = set(striplist)
115 saveheads = set(saverevs)
116 saveheads = set(saverevs)
116 for r in cl.revs(start=striprev + 1):
117 for r in cl.revs(start=striprev + 1):
117 if any(p in tostrip for p in cl.parentrevs(r)):
118 if any(p in tostrip for p in cl.parentrevs(r)):
118 tostrip.add(r)
119 tostrip.add(r)
119
120
120 if r not in tostrip:
121 if r not in tostrip:
121 saverevs.add(r)
122 saverevs.add(r)
122 saveheads.difference_update(cl.parentrevs(r))
123 saveheads.difference_update(cl.parentrevs(r))
123 saveheads.add(r)
124 saveheads.add(r)
124 saveheads = [cl.node(r) for r in saveheads]
125 saveheads = [cl.node(r) for r in saveheads]
125
126
126 # compute base nodes
127 # compute base nodes
127 if saverevs:
128 if saverevs:
128 descendants = set(cl.descendants(saverevs))
129 descendants = set(cl.descendants(saverevs))
129 saverevs.difference_update(descendants)
130 saverevs.difference_update(descendants)
130 savebases = [cl.node(r) for r in saverevs]
131 savebases = [cl.node(r) for r in saverevs]
131 stripbases = [cl.node(r) for r in tostrip]
132 stripbases = [cl.node(r) for r in tostrip]
132
133
133 stripobsidx = obsmarkers = ()
134 stripobsidx = obsmarkers = ()
134 if repo.ui.configbool('devel', 'strip-obsmarkers', True):
135 if repo.ui.configbool('devel', 'strip-obsmarkers', True):
135 obsmarkers = obsolete.exclusivemarkers(repo, stripbases)
136 obsmarkers = obsutil.exclusivemarkers(repo, stripbases)
136 if obsmarkers:
137 if obsmarkers:
137 stripobsidx = [i for i, m in enumerate(repo.obsstore)
138 stripobsidx = [i for i, m in enumerate(repo.obsstore)
138 if m in obsmarkers]
139 if m in obsmarkers]
139
140
140 # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)), but
141 # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)), but
141 # is much faster
142 # is much faster
142 newbmtarget = repo.revs('max(parents(%ld) - (%ld))', tostrip, tostrip)
143 newbmtarget = repo.revs('max(parents(%ld) - (%ld))', tostrip, tostrip)
143 if newbmtarget:
144 if newbmtarget:
144 newbmtarget = repo[newbmtarget.first()].node()
145 newbmtarget = repo[newbmtarget.first()].node()
145 else:
146 else:
146 newbmtarget = '.'
147 newbmtarget = '.'
147
148
148 bm = repo._bookmarks
149 bm = repo._bookmarks
149 updatebm = []
150 updatebm = []
150 for m in bm:
151 for m in bm:
151 rev = repo[bm[m]].rev()
152 rev = repo[bm[m]].rev()
152 if rev in tostrip:
153 if rev in tostrip:
153 updatebm.append(m)
154 updatebm.append(m)
154
155
155 # create a changegroup for all the branches we need to keep
156 # create a changegroup for all the branches we need to keep
156 backupfile = None
157 backupfile = None
157 vfs = repo.vfs
158 vfs = repo.vfs
158 node = nodelist[-1]
159 node = nodelist[-1]
159 if backup:
160 if backup:
160 backupfile = _bundle(repo, stripbases, cl.heads(), node, topic)
161 backupfile = _bundle(repo, stripbases, cl.heads(), node, topic)
161 repo.ui.status(_("saved backup bundle to %s\n") %
162 repo.ui.status(_("saved backup bundle to %s\n") %
162 vfs.join(backupfile))
163 vfs.join(backupfile))
163 repo.ui.log("backupbundle", "saved backup bundle to %s\n",
164 repo.ui.log("backupbundle", "saved backup bundle to %s\n",
164 vfs.join(backupfile))
165 vfs.join(backupfile))
165 tmpbundlefile = None
166 tmpbundlefile = None
166 if saveheads:
167 if saveheads:
167 # do not compress temporary bundle if we remove it from disk later
168 # do not compress temporary bundle if we remove it from disk later
168 #
169 #
169 # We do not include obsolescence, it might re-introduce prune markers
170 # We do not include obsolescence, it might re-introduce prune markers
170 # we are trying to strip. This is harmless since the stripped markers
171 # we are trying to strip. This is harmless since the stripped markers
171 # are already backed up and we did not touched the markers for the
172 # are already backed up and we did not touched the markers for the
172 # saved changesets.
173 # saved changesets.
173 tmpbundlefile = _bundle(repo, savebases, saveheads, node, 'temp',
174 tmpbundlefile = _bundle(repo, savebases, saveheads, node, 'temp',
174 compress=False, obsolescence=False)
175 compress=False, obsolescence=False)

    mfst = repo.manifestlog._revlog

    try:
        with repo.transaction("strip") as tr:
            offset = len(tr.entries)

            tr.startgroup()
            cl.strip(striprev, tr)
            mfst.strip(striprev, tr)
            striptrees(repo, tr, striprev, files)

            for fn in files:
                repo.file(fn).strip(striprev, tr)
            tr.endgroup()

            for i in xrange(offset, len(tr.entries)):
                file, troffset, ignore = tr.entries[i]
                with repo.svfs(file, 'a', checkambig=True) as fp:
                    fp.truncate(troffset)
                if troffset == 0:
                    repo.store.markremoved(file)

        deleteobsmarkers(repo.obsstore, stripobsidx)
        del repo.obsstore

        repo._phasecache.filterunknown(repo)
        if tmpbundlefile:
            ui.note(_("adding branch\n"))
            f = vfs.open(tmpbundlefile, "rb")
            gen = exchange.readbundle(ui, f, tmpbundlefile, vfs)
            if not repo.ui.verbose:
                # silence internal shuffling chatter
                repo.ui.pushbuffer()
            tmpbundleurl = 'bundle:' + vfs.join(tmpbundlefile)
            txnname = 'strip'
            if not isinstance(gen, bundle2.unbundle20):
                txnname = "strip\n%s" % util.hidepassword(tmpbundleurl)
            with repo.transaction(txnname) as tr:
                bundle2.applybundle(repo, gen, tr, source='strip',
                                    url=tmpbundleurl, emptyok=True)
            if not repo.ui.verbose:
                repo.ui.popbuffer()
            f.close()
        repo._phasecache.invalidate()

        for m in updatebm:
            bm[m] = repo[newbmtarget].node()

        with repo.transaction('repair') as tr:
            bm.recordchange(tr)

        # remove undo files
        for undovfs, undofile in repo.undofiles():
            try:
                undovfs.unlink(undofile)
            except OSError as e:
                if e.errno != errno.ENOENT:
                    ui.warn(_('error removing %s: %s\n') %
                            (undovfs.join(undofile), str(e)))

    except: # re-raises
        if backupfile:
            ui.warn(_("strip failed, backup bundle stored in '%s'\n")
                    % vfs.join(backupfile))
        if tmpbundlefile:
            ui.warn(_("strip failed, unrecovered changes stored in '%s'\n")
                    % vfs.join(tmpbundlefile))
            ui.warn(_("(fix the problem, then recover the changesets with "
                      "\"hg unbundle '%s'\")\n") % vfs.join(tmpbundlefile))
        raise
    else:
        if tmpbundlefile:
            # Remove temporary bundle only if there were no exceptions
            vfs.unlink(tmpbundlefile)

    repo.destroyed()
    # return the backup file path (or None if 'backup' was False) so
    # extensions can use it
    return backupfile

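# Illustrative sketch (not part of this file): an extension consuming the
# backup path returned above. The 'trim' name and 'nodes' argument are
# hypothetical; the (ui, repo, nodelist, backup, topic) signature matches
# the call made by stripcallback below.
from mercurial import repair

def trim(ui, repo, nodes):
    backupfile = repair.strip(ui, repo, nodes, True, 'trim')
    if backupfile:
        ui.status('history saved in %s\n' % backupfile)
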
def safestriproots(ui, repo, nodes):
    """return list of roots of nodes where descendants are covered by nodes"""
    torev = repo.unfiltered().changelog.rev
    revs = set(torev(n) for n in nodes)
    # tostrip = wanted - unsafe = wanted - ancestors(orphaned)
    # orphaned = affected - wanted
    # affected = descendants(roots(wanted))
    # wanted = revs
    tostrip = set(repo.revs('%ld-(::((roots(%ld)::)-%ld))', revs, revs, revs))
    notstrip = revs - tostrip
    if notstrip:
        nodestr = ', '.join(sorted(short(repo[n].node()) for n in notstrip))
        ui.warn(_('warning: orphaned descendants detected, '
                  'not stripping %s\n') % nodestr)
    return [c.node() for c in repo.set('roots(%ld)', tostrip)]

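# Illustrative sketch (not part of this file): the same set algebra as the
# revset above, spelled out over a toy DAG. All names here are hypothetical;
# 'parents' maps each rev to its parent revs.
def _safetostrip(parents, wanted):
    children = {}
    for r, ps in parents.items():
        for p in ps:
            children.setdefault(p, set()).add(r)

    def _closure(revs, edges):
        # transitive closure, including the starting revs themselves
        seen, stack = set(revs), list(revs)
        while stack:
            for nxt in edges.get(stack.pop(), ()):
                if nxt not in seen:
                    seen.add(nxt)
                    stack.append(nxt)
        return seen

    parentmap = {r: set(ps) for r, ps in parents.items()}
    # roots(wanted): members of the set with no parent inside the set
    roots = {r for r in wanted if not (parentmap.get(r, set()) & wanted)}
    affected = _closure(roots, children)    # descendants(roots(wanted))
    orphaned = affected - wanted            # would be deleted, never requested
    unsafe = _closure(orphaned, parentmap)  # ancestors(orphaned)
    return wanted - unsafe                  # tostrip

# e.g. for the linear graph 0 <- 1 <- 2 <- 3 and wanted = {1, 3}, stripping 1
# would also delete the unrequested 2, so only {3} is safe to strip:
assert _safetostrip({1: [0], 2: [1], 3: [2]}, {1, 3}) == {3}
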
class stripcallback(object):
    """used as a transaction postclose callback"""

    def __init__(self, ui, repo, backup, topic):
        self.ui = ui
        self.repo = repo
        self.backup = backup
        self.topic = topic or 'backup'
        self.nodelist = []

    def addnodes(self, nodes):
        self.nodelist.extend(nodes)

    def __call__(self, tr):
        roots = safestriproots(self.ui, self.repo, self.nodelist)
        if roots:
            strip(self.ui, self.repo, roots, self.backup, self.topic)

def delayedstrip(ui, repo, nodelist, topic=None):
    """like strip, but works inside a transaction and won't strip irrelevant
    revs

    nodelist must explicitly contain all descendants. Otherwise a warning will
    be printed that some nodes are not stripped.

    Always do a backup. The last non-None "topic" will be used as the backup
    topic name. The default backup topic name is "backup".
    """
    tr = repo.currenttransaction()
    if not tr:
        nodes = safestriproots(ui, repo, nodelist)
        return strip(ui, repo, nodes, True, topic)
    # transaction postclose callbacks are called in alphabetical order.
    # use '\xff' as prefix so we are likely to be called last.
    callback = tr.getpostclose('\xffstrip')
    if callback is None:
        callback = stripcallback(ui, repo, True, topic)
        tr.addpostclose('\xffstrip', callback)
    if topic:
        callback.topic = topic
    callback.addnodes(nodelist)

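# Illustrative sketch (not part of this file): calling delayedstrip from
# inside an open transaction. The 'replacechangesets' name and 'oldnodes'
# argument are hypothetical.
from mercurial import repair

def replacechangesets(ui, repo, oldnodes):
    with repo.transaction('rewrite') as tr:
        # ... create the replacement changesets here ...
        # queue the old nodes; the actual strip runs in a postclose
        # callback after 'tr' closes, and repeated calls within the same
        # transaction are merged into a single strip
        repair.delayedstrip(ui, repo, oldnodes, topic='rewrite')
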
def striptrees(repo, tr, striprev, files):
    if 'treemanifest' in repo.requirements: # safe but unnecessary
                                            # otherwise
        for unencoded, encoded, size in repo.store.datafiles():
            if (unencoded.startswith('meta/') and
                unencoded.endswith('00manifest.i')):
                dir = unencoded[5:-12]
                repo.manifestlog._revlog.dirlog(dir).strip(striprev, tr)

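# Illustrative note (not part of this file): the [5:-12] slice above strips
# the 'meta/' prefix and the '00manifest.i' suffix, keeping the trailing
# slash of the directory name:
assert 'meta/foo/bar/00manifest.i'[5:-12] == 'foo/bar/'
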
def rebuildfncache(ui, repo):
    """Rebuilds the fncache file from repo history.

    Missing entries will be added. Extra entries will be removed.
    """
    repo = repo.unfiltered()

    if 'fncache' not in repo.requirements:
        ui.warn(_('(not rebuilding fncache because repository does not '
                  'support fncache)\n'))
        return

    with repo.lock():
        fnc = repo.store.fncache
        # Trigger load of fncache.
        if 'irrelevant' in fnc:
            pass

        oldentries = set(fnc.entries)
        newentries = set()
        seenfiles = set()

        repolen = len(repo)
        for rev in repo:
            ui.progress(_('rebuilding'), rev, total=repolen,
                        unit=_('changesets'))

            ctx = repo[rev]
            for f in ctx.files():
                # This is to minimize I/O.
                if f in seenfiles:
                    continue
                seenfiles.add(f)

                i = 'data/%s.i' % f
                d = 'data/%s.d' % f

                if repo.store._exists(i):
                    newentries.add(i)
                if repo.store._exists(d):
                    newentries.add(d)

        ui.progress(_('rebuilding'), None)

        if 'treemanifest' in repo.requirements: # safe but unnecessary otherwise
            for dir in util.dirs(seenfiles):
                i = 'meta/%s/00manifest.i' % dir
                d = 'meta/%s/00manifest.d' % dir

                if repo.store._exists(i):
                    newentries.add(i)
                if repo.store._exists(d):
                    newentries.add(d)

        addcount = len(newentries - oldentries)
        removecount = len(oldentries - newentries)
        for p in sorted(oldentries - newentries):
            ui.write(_('removing %s\n') % p)
        for p in sorted(newentries - oldentries):
            ui.write(_('adding %s\n') % p)

        if addcount or removecount:
            ui.write(_('%d items added, %d removed from fncache\n') %
                     (addcount, removecount))
            fnc.entries = newentries
            fnc._dirty = True

            with repo.transaction('fncache') as tr:
                fnc.write(tr)
        else:
            ui.write(_('fncache already up to date\n'))

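# Illustrative sketch (not part of this file): the built-in
# 'hg debugrebuildfncache' command uses this same entry point; an extension
# could call it directly (the 'fixstore' name is hypothetical):
from mercurial import repair

def fixstore(ui, repo):
    repair.rebuildfncache(ui, repo)  # prints each added/removed entry
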
def stripbmrevset(repo, mark):
    """
    The revset to strip when strip is called with -B mark

    Needs to live here so extensions can use it and wrap it even when strip is
    not enabled or not present on a box.
    """
    return repo.revs("ancestors(bookmark(%s)) - "
                     "ancestors(head() and not bookmark(%s)) - "
                     "ancestors(bookmark() and not bookmark(%s))",
                     mark, mark, mark)

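# Illustrative sketch (not part of this file): what the revset above selects.
# Given this graph, with bookmark 'feature' on 3 and 'other' on 1:
#
#   o 3 (feature)
#   |
#   o 2
#   |
#   | o 1 (other)
#   |/
#   o 0
#
# stripbmrevset(repo, 'feature') yields {2, 3}: the ancestors of 'feature'
# minus everything still reachable from other heads or other bookmarks
# (0 is an ancestor of 'other', so it survives). A hypothetical direct call:
#
#     revs = stripbmrevset(repo, 'feature')
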
def deleteobsmarkers(obsstore, indices):
    """Delete some obsmarkers from obsstore and return how many were deleted

    'indices' is a list of ints which are the indices
    of the markers to be deleted.

    Every invocation of this function completely rewrites the obsstore file,
    skipping the markers we want removed. A new temporary file is created,
    the remaining markers are written there, and on .close() this file gets
    atomically renamed to obsstore, thus guaranteeing consistency."""
    if not indices:
        # we don't want to rewrite the obsstore with the same content
        return

    left = []
    current = obsstore._all
    n = 0
    for i, m in enumerate(current):
        if i in indices:
            n += 1
            continue
        left.append(m)

    newobsstorefile = obsstore.svfs('obsstore', 'w', atomictemp=True)
    for bytes in obsolete.encodemarkers(left, True, obsstore._version):
        newobsstorefile.write(bytes)
    newobsstorefile.close()
    return n
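
# Illustrative sketch (not part of this file): rewriting the obsstore without
# its first two markers. The '_dropmarkers' name and the indices are
# hypothetical; callers are expected to hold the repository lock, as the
# strip code above does.
def _dropmarkers(ui, repo, indices=[0, 1]):
    with repo.lock():
        n = deleteobsmarkers(repo.obsstore, indices)
        ui.note('%d obsolescence markers deleted\n' % (n or 0))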