obsolete: fix relevant-obsmarkers computation on pruned changeset...
marmoute - r32488:176d1a0c default
@@ -1,1298 +1,1301 @@
# obsolete.py - obsolete markers handling
#
# Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
#                Logilab SA        <contact@logilab.fr>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""Obsolete marker handling

An obsolete marker maps an old changeset to a list of new
changesets. If the list of new changesets is empty, the old changeset
is said to be "killed". Otherwise, the old changeset is being
"replaced" by the new changesets.

Obsolete markers can be used to record and distribute changeset graph
transformations performed by history rewrite operations, and help
building new tools to reconcile conflicting rewrite actions. To
facilitate conflict resolution, markers include various annotations
besides old and new changeset identifiers, such as creation date or
author name.

The old obsoleted changeset is called a "precursor" and possible
replacements are called "successors". Markers that use changeset X as
a precursor are called "successor markers of X" because they hold
information about the successors of X. Markers that use changeset Y as
a successor are called "precursor markers of Y" because they hold
information about the precursors of Y.

Examples:

- When changeset A is replaced by changeset A', one marker is stored:

    (A, (A',))

- When changesets A and B are folded into a new changeset C, two markers are
  stored:

    (A, (C,)) and (B, (C,))

- When changeset A is simply "pruned" from the graph, a marker is created:

    (A, ())

- When changeset A is split into B and C, a single marker is used:

    (A, (B, C))

  We use a single marker to distinguish the "split" case from the "divergence"
  case. If two independent operations rewrite the same changeset A into A' and
  A'', we have an error case: divergent rewriting. We can detect it because
  two markers will be created independently:

    (A, (B,)) and (A, (C,))

Format
------

Markers are stored in an append-only file stored in
'.hg/store/obsstore'.

The file starts with a version header:

- 1 unsigned byte: version number, starting at zero.

The header is followed by the markers. The marker format depends on the
version. See the comment associated with each format for details.

"""
from __future__ import absolute_import

import errno
import struct

from .i18n import _
from . import (
    error,
    node,
    phases,
    policy,
    util,
)

parsers = policy.importmod(r'parsers')

_pack = struct.pack
_unpack = struct.unpack
_calcsize = struct.calcsize
propertycache = util.propertycache

# the obsolete feature is not mature enough to be enabled by default.
# you have to rely on a third party extension to enable this.
_enabled = False

# Options for obsolescence
createmarkersopt = 'createmarkers'
allowunstableopt = 'allowunstable'
exchangeopt = 'exchange'

def isenabled(repo, option):
    """Returns True if the given repository has the given obsolete option
    enabled.
    """
    result = set(repo.ui.configlist('experimental', 'evolution'))
    if 'all' in result:
        return True

    # For migration purposes, temporarily return true if the config hasn't
    # been set but _enabled is true.
    if len(result) == 0 and _enabled:
        return True

    # createmarkers must be enabled if other options are enabled
    if ((allowunstableopt in result or exchangeopt in result) and
        not createmarkersopt in result):
        raise error.Abort(_("'createmarkers' obsolete option must be enabled "
                            "if other obsolete options are enabled"))

    return option in result

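# Illustrative sketch (not part of upstream obsolete.py): what the
# configuration read by isenabled() above could look like in an hgrc.
# 'all' enables everything; otherwise options are listed one by one and
# 'createmarkers' must be among them.
#
#   [experimental]
#   evolution = createmarkers, exchange
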
### obsolescence marker flag

## bumpedfix flag
#
# When a changeset A' succeeds a changeset A which became public, we call A'
# "bumped" because it's a successor of a public changeset
#
#     o  A' (bumped)
#     |`:
#     | o  A
#     |/
#     o  Z
#
# The way to solve this situation is to create a new changeset Ad as a child
# of A. This changeset has the same content as A'. So the diff from A to A'
# is the same as the diff from A to Ad. Ad is marked as a successor of A'
#
#     o  Ad
#     |`:
#     | x  A'
#     |'|
#     o |  A
#     |/
#     o  Z
#
# But by transitivity Ad is also a successor of A. To avoid having Ad marked
# as bumped too, we add the `bumpedfix` flag to the marker. <A', (Ad,)>.
# This flag means that the successors express the changes between the public
# and bumped version and fix the situation, breaking the transitivity of
# "bumped" here.
bumpedfix = 1
usingsha256 = 2

## Parsing and writing of version "0"
#
# The header is followed by the markers. Each marker is made of:
#
# - 1 uint8 : number of new changesets "N", can be zero.
#
# - 1 uint32: metadata size "M" in bytes.
#
# - 1 byte: a bit field. It is reserved for flags used in common
#   obsolete marker operations, to avoid repeated decoding of metadata
#   entries.
#
# - 20 bytes: obsoleted changeset identifier.
#
# - N*20 bytes: new changesets identifiers.
#
# - M bytes: metadata as a sequence of nul-terminated strings. Each
#   string contains a key and a value, separated by a colon ':', without
#   additional encoding. Keys cannot contain '\0' or ':' and values
#   cannot contain '\0'.
_fm0version = 0
_fm0fixed = '>BIB20s'
_fm0node = '20s'
_fm0fsize = _calcsize(_fm0fixed)
_fm0fnodesize = _calcsize(_fm0node)

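# Illustrative sketch (not part of upstream obsolete.py): packing and
# re-reading one minimal version-0 marker (a prune marker with no
# successors) by hand, following the layout documented above. The node id
# is a made-up 20-byte value.
def _fm0layoutexample():
    prec = '\x01' * 20              # obsoleted changeset identifier
    meta = 'user:alice'             # one "key:value" metadata entry
    raw = _pack(_fm0fixed, 0, len(meta), 0, prec) + meta
    numsuc, mdsize, flags, readprec = _unpack(_fm0fixed, raw[:_fm0fsize])
    assert (numsuc, mdsize, flags, readprec) == (0, len(meta), 0, prec)
    return raw
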
def _fm0readmarkers(data, off):
    # Loop on markers
    l = len(data)
    while off + _fm0fsize <= l:
        # read fixed part
        cur = data[off:off + _fm0fsize]
        off += _fm0fsize
        numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
        # read replacement
        sucs = ()
        if numsuc:
            s = (_fm0fnodesize * numsuc)
            cur = data[off:off + s]
            sucs = _unpack(_fm0node * numsuc, cur)
            off += s
        # read metadata
        # (metadata will be decoded on demand)
        metadata = data[off:off + mdsize]
        if len(metadata) != mdsize:
            raise error.Abort(_('parsing obsolete marker: metadata is too '
                                'short, %d bytes expected, got %d')
                              % (mdsize, len(metadata)))
        off += mdsize
        metadata = _fm0decodemeta(metadata)
        try:
            when, offset = metadata.pop('date', '0 0').split(' ')
            date = float(when), int(offset)
        except ValueError:
            date = (0., 0)
        parents = None
        if 'p2' in metadata:
            parents = (metadata.pop('p1', None), metadata.pop('p2', None))
        elif 'p1' in metadata:
            parents = (metadata.pop('p1', None),)
        elif 'p0' in metadata:
            parents = ()
        if parents is not None:
            try:
                parents = tuple(node.bin(p) for p in parents)
                # if parent content is not a nodeid, drop the data
                for p in parents:
                    if len(p) != 20:
                        parents = None
                        break
            except TypeError:
                # if content cannot be translated to nodeid drop the data.
                parents = None

        metadata = tuple(sorted(metadata.iteritems()))

        yield (pre, sucs, flags, metadata, date, parents)

def _fm0encodeonemarker(marker):
    pre, sucs, flags, metadata, date, parents = marker
    if flags & usingsha256:
        raise error.Abort(_('cannot handle sha256 with old obsstore format'))
    metadata = dict(metadata)
    time, tz = date
    metadata['date'] = '%r %i' % (time, tz)
    if parents is not None:
        if not parents:
            # mark that we explicitly recorded no parents
            metadata['p0'] = ''
        for i, p in enumerate(parents, 1):
            metadata['p%i' % i] = node.hex(p)
    metadata = _fm0encodemeta(metadata)
    numsuc = len(sucs)
    format = _fm0fixed + (_fm0node * numsuc)
    data = [numsuc, len(metadata), flags, pre]
    data.extend(sucs)
    return _pack(format, *data) + metadata

def _fm0encodemeta(meta):
    """Return encoded metadata string to string mapping.

    Assume no ':' in key and no '\0' in both key and value."""
    for key, value in meta.iteritems():
        if ':' in key or '\0' in key:
            raise ValueError("':' and '\0' are forbidden in metadata key")
        if '\0' in value:
            raise ValueError("'\0' is forbidden in metadata value")
    return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])

def _fm0decodemeta(data):
    """Return string to string dictionary from encoded version."""
    d = {}
    for l in data.split('\0'):
        if l:
            key, value = l.split(':')
            d[key] = value
    return d

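# Illustrative sketch (not part of upstream obsolete.py): the metadata blob
# is just "key:value" strings joined by NUL bytes with sorted keys, so
# encode and decode round-trip exactly.
def _fm0metaexample():
    meta = {'user': 'alice', 'note': 'amended'}
    blob = _fm0encodemeta(meta)      # 'note:amended\x00user:alice'
    assert _fm0decodemeta(blob) == meta
    return blob
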
## Parsing and writing of version "1"
#
# The header is followed by the markers. Each marker is made of:
#
# - uint32: total size of the marker (including this field)
#
# - float64: date in seconds since epoch
#
# - int16: timezone offset in minutes
#
# - uint16: a bit field. It is reserved for flags used in common
#   obsolete marker operations, to avoid repeated decoding of metadata
#   entries.
#
# - uint8: number of successors "N", can be zero.
#
# - uint8: number of parents "P", can be zero.
#
#     0: parents data stored but no parent,
#     1: one parent stored,
#     2: two parents stored,
#     3: no parent data stored
#
# - uint8: number of metadata entries M
#
# - 20 or 32 bytes: precursor changeset identifier.
#
# - N*(20 or 32) bytes: successors changesets identifiers.
#
# - P*(20 or 32) bytes: parents of the precursors changesets.
#
# - M*(uint8, uint8): size of all metadata entries (key and value)
#
# - remaining bytes: the metadata, each (key, value) pair after the other.
_fm1version = 1
_fm1fixed = '>IdhHBBB20s'
_fm1nodesha1 = '20s'
_fm1nodesha256 = '32s'
_fm1nodesha1size = _calcsize(_fm1nodesha1)
_fm1nodesha256size = _calcsize(_fm1nodesha256)
_fm1fsize = _calcsize(_fm1fixed)
_fm1parentnone = 3
_fm1parentshift = 14
_fm1parentmask = (_fm1parentnone << _fm1parentshift)
_fm1metapair = 'BB'
_fm1metapairsize = _calcsize('BB')

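# Illustrative sketch (not part of upstream obsolete.py): where the
# variable-length parts of a version-1 SHA-1 marker sit, for a hypothetical
# marker with two successors, one parent and one metadata entry, following
# the layout documented above.
def _fm1offsetexample():
    numsuc, numpar, nummeta = 2, 1, 1
    sucsend = _fm1fsize + _fm1nodesha1size * numsuc   # successors first
    parsend = sucsend + _fm1nodesha1size * numpar     # then parents
    sizesend = parsend + _fm1metapairsize * nummeta   # then (uint8, uint8)
    return _fm1fsize, sucsend, parsend, sizesend      # metadata bytes follow
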
def _fm1purereadmarkers(data, off):
    # make some global constants local for performance
    noneflag = _fm1parentnone
    sha2flag = usingsha256
    sha1size = _fm1nodesha1size
    sha2size = _fm1nodesha256size
    sha1fmt = _fm1nodesha1
    sha2fmt = _fm1nodesha256
    metasize = _fm1metapairsize
    metafmt = _fm1metapair
    fsize = _fm1fsize
    unpack = _unpack

    # Loop on markers
    stop = len(data) - _fm1fsize
    ufixed = struct.Struct(_fm1fixed).unpack

    while off <= stop:
        # read fixed part
        o1 = off + fsize
        t, secs, tz, flags, numsuc, numpar, nummeta, prec = ufixed(data[off:o1])

        if flags & sha2flag:
            # FIXME: prec was read as a SHA1, needs to be amended

            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha2size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha2size * numsuc
                sucs = unpack(sha2fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha2size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha2size * numpar
                parents = unpack(sha2fmt * numpar, data[o2:o3])
        else:
            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha1size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha1size * numsuc
                sucs = unpack(sha1fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha1size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha1size * numpar
                parents = unpack(sha1fmt * numpar, data[o2:o3])

        # read metadata
        off = o3 + metasize * nummeta
        metapairsize = unpack('>' + (metafmt * nummeta), data[o3:off])
        metadata = []
        for idx in xrange(0, len(metapairsize), 2):
            o1 = off + metapairsize[idx]
            o2 = o1 + metapairsize[idx + 1]
            metadata.append((data[off:o1], data[o1:o2]))
            off = o2

        yield (prec, sucs, flags, tuple(metadata), (secs, tz * 60), parents)

def _fm1encodeonemarker(marker):
    pre, sucs, flags, metadata, date, parents = marker
    # determine node size
    _fm1node = _fm1nodesha1
    if flags & usingsha256:
        _fm1node = _fm1nodesha256
    numsuc = len(sucs)
    numextranodes = numsuc
    if parents is None:
        numpar = _fm1parentnone
    else:
        numpar = len(parents)
        numextranodes += numpar
    formatnodes = _fm1node * numextranodes
    formatmeta = _fm1metapair * len(metadata)
    format = _fm1fixed + formatnodes + formatmeta
    # tz is stored in minutes so we divide by 60
    tz = date[1]//60
    data = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre]
    data.extend(sucs)
    if parents is not None:
        data.extend(parents)
    totalsize = _calcsize(format)
    for key, value in metadata:
        lk = len(key)
        lv = len(value)
        data.append(lk)
        data.append(lv)
        totalsize += lk + lv
    data[0] = totalsize
    data = [_pack(format, *data)]
    for key, value in metadata:
        data.append(key)
        data.append(value)
    return ''.join(data)

def _fm1readmarkers(data, off):
    native = getattr(parsers, 'fm1readmarkers', None)
    if not native:
        return _fm1purereadmarkers(data, off)
    stop = len(data) - _fm1fsize
    return native(data, off, stop)

# mapping to read/write various marker formats
# <version> -> (decoder, encoder)
formats = {_fm0version: (_fm0readmarkers, _fm0encodeonemarker),
           _fm1version: (_fm1readmarkers, _fm1encodeonemarker)}

@util.nogc
def _readmarkers(data):
    """Read and enumerate markers from raw data"""
    off = 0
    diskversion = _unpack('>B', data[off:off + 1])[0]
    off += 1
    if diskversion not in formats:
        raise error.Abort(_('parsing obsolete marker: unknown version %r')
                          % diskversion)
    return diskversion, formats[diskversion][0](data, off)

def encodemarkers(markers, addheader=False, version=_fm0version):
    # Kept separate from flushmarkers(), it will be reused for
    # markers exchange.
    encodeone = formats[version][1]
    if addheader:
        yield _pack('>B', version)
    for marker in markers:
        yield encodeone(marker)


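# Illustrative sketch (not part of upstream obsolete.py): a full
# serialize/deserialize round trip through the helpers above, using a
# made-up prune marker.
def _encodedecodeexample():
    m = ('\x01' * 20, (), 0, (), (0.0, 0), None)
    data = ''.join(encodemarkers([m], addheader=True, version=_fm0version))
    version, markers = _readmarkers(data)
    assert version == _fm0version and list(markers) == [m]
    return data
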
class marker(object):
    """Wrap obsolete marker raw data"""

    def __init__(self, repo, data):
        # the repo argument will be used to create changectx in later versions
        self._repo = repo
        self._data = data
        self._decodedmeta = None

    def __hash__(self):
        return hash(self._data)

    def __eq__(self, other):
        if type(other) != type(self):
            return False
        return self._data == other._data

    def precnode(self):
        """Precursor changeset node identifier"""
        return self._data[0]

    def succnodes(self):
        """List of successor changesets node identifiers"""
        return self._data[1]

    def parentnodes(self):
        """Parents of the precursors (None if not recorded)"""
        return self._data[5]

    def metadata(self):
        """Decoded metadata dictionary"""
        return dict(self._data[3])

    def date(self):
        """Creation date as (unixtime, offset)"""
        return self._data[4]

    def flags(self):
        """The flags field of the marker"""
        return self._data[2]

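# Illustrative sketch (not part of upstream obsolete.py): reading a raw
# marker tuple through the wrapper above. `repo` is passed as None here
# only because the accessors shown never dereference it.
def _markerwrapexample():
    raw = ('\x01' * 20, ('\x02' * 20,), 0, (), (0.0, 0), None)
    m = marker(None, raw)
    return m.precnode(), m.succnodes(), m.flags()
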
@util.nogc
def _addsuccessors(successors, markers):
    for mark in markers:
        successors.setdefault(mark[0], set()).add(mark)

@util.nogc
def _addprecursors(precursors, markers):
    for mark in markers:
        for suc in mark[1]:
            precursors.setdefault(suc, set()).add(mark)

@util.nogc
def _addchildren(children, markers):
    for mark in markers:
        parents = mark[5]
        if parents is not None:
            for p in parents:
                children.setdefault(p, set()).add(mark)

def _checkinvalidmarkers(markers):
    """search for markers with invalid data and raise an error if needed

    Exists as a separate function to allow the evolve extension a more
    subtle handling.
    """
    for mark in markers:
        if node.nullid in mark[1]:
            raise error.Abort(_('bad obsolescence marker detected: '
                                'invalid successors nullid'))

class obsstore(object):
    """Store obsolete markers

    Markers can be accessed with three mappings:
    - precursors[x] -> set(markers on precursors edges of x)
    - successors[x] -> set(markers on successors edges of x)
    - children[x]   -> set(markers on precursors edges of children(x))
    """

    fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
    # prec:    nodeid, precursor changesets
    # succs:   tuple of nodeid, successor changesets (0-N length)
    # flag:    integer, flag field carrying modifier for the markers (see doc)
    # meta:    binary blob, encoded metadata dictionary
    # date:    (float, int) tuple, date of marker creation
    # parents: (tuple of nodeid) or None, parents of precursors
    #          None is used when no data has been recorded

    def __init__(self, svfs, defaultformat=_fm1version, readonly=False):
        # caches for various obsolescence related data
        self.caches = {}
        self.svfs = svfs
        self._version = defaultformat
        self._readonly = readonly

    def __iter__(self):
        return iter(self._all)

    def __len__(self):
        return len(self._all)

    def __nonzero__(self):
        if not self._cached('_all'):
            try:
                return self.svfs.stat('obsstore').st_size > 1
            except OSError as inst:
                if inst.errno != errno.ENOENT:
                    raise
                # just build an empty _all list if no obsstore exists, which
                # avoids further stat() syscalls
                pass
        return bool(self._all)

    __bool__ = __nonzero__

    @property
    def readonly(self):
        """True if marker creation is disabled

        Remove me in the future when obsolete marker is always on."""
        return self._readonly

    def create(self, transaction, prec, succs=(), flag=0, parents=None,
               date=None, metadata=None, ui=None):
        """obsolete: add a new obsolete marker

        * ensure it is hashable
        * check mandatory metadata
        * encode metadata

        If you are a human writing code creating markers, you want to use the
        `createmarkers` function in this module instead.

        Return True if a new marker has been added, False if the marker
        already existed (no-op).
        """
        if metadata is None:
            metadata = {}
        if date is None:
            if 'date' in metadata:
                # as a courtesy for out-of-tree extensions
                date = util.parsedate(metadata.pop('date'))
            elif ui is not None:
                date = ui.configdate('devel', 'default-date')
                if date is None:
                    date = util.makedate()
            else:
                date = util.makedate()
        if len(prec) != 20:
            raise ValueError(prec)
        for succ in succs:
            if len(succ) != 20:
                raise ValueError(succ)
        if prec in succs:
            raise ValueError(_('in-marker cycle with %s') % node.hex(prec))

        metadata = tuple(sorted(metadata.iteritems()))

        marker = (str(prec), tuple(succs), int(flag), metadata, date, parents)
        return bool(self.add(transaction, [marker]))

    def add(self, transaction, markers):
        """Add new markers to the store

        Takes care of filtering out duplicates.
        Returns the number of new markers."""
        if self._readonly:
            raise error.Abort(_('creating obsolete markers is not enabled on '
                                'this repo'))
        known = set(self._all)
        new = []
        for m in markers:
            if m not in known:
                known.add(m)
                new.append(m)
        if new:
            f = self.svfs('obsstore', 'ab')
            try:
                offset = f.tell()
                transaction.add('obsstore', offset)
                # offset == 0: new file - add the version header
                for bytes in encodemarkers(new, offset == 0, self._version):
                    f.write(bytes)
            finally:
                # XXX: f.close() == filecache invalidation == obsstore rebuilt.
                # call 'filecacheentry.refresh()' here
                f.close()
            self._addmarkers(new)
            # new markers *may* have changed several sets. invalidate the cache.
            self.caches.clear()
        # records the number of new markers for the transaction hooks
        previous = int(transaction.hookargs.get('new_obsmarkers', '0'))
        transaction.hookargs['new_obsmarkers'] = str(previous + len(new))
        return len(new)

    def mergemarkers(self, transaction, data):
        """merge a binary stream of markers inside the obsstore

        Returns the number of new markers added."""
        version, markers = _readmarkers(data)
        return self.add(transaction, markers)

    @propertycache
    def _all(self):
        data = self.svfs.tryread('obsstore')
        if not data:
            return []
        self._version, markers = _readmarkers(data)
        markers = list(markers)
        _checkinvalidmarkers(markers)
        return markers

    @propertycache
    def successors(self):
        successors = {}
        _addsuccessors(successors, self._all)
        return successors

    @propertycache
    def precursors(self):
        precursors = {}
        _addprecursors(precursors, self._all)
        return precursors

    @propertycache
    def children(self):
        children = {}
        _addchildren(children, self._all)
        return children

    def _cached(self, attr):
        return attr in self.__dict__

    def _addmarkers(self, markers):
        markers = list(markers) # to allow repeated iteration
        self._all.extend(markers)
        if self._cached('successors'):
            _addsuccessors(self.successors, markers)
        if self._cached('precursors'):
            _addprecursors(self.precursors, markers)
        if self._cached('children'):
            _addchildren(self.children, markers)
        _checkinvalidmarkers(markers)

    def relevantmarkers(self, nodes):
        """return a set of all obsolescence markers relevant to a set of nodes.

        "relevant" to a set of nodes means:

        - markers that use one of these changesets as a successor
        - prune markers of direct children of these changesets
        - recursive application of the two rules on precursors of these markers

        It is a set so you cannot rely on order."""

        pendingnodes = set(nodes)
        seenmarkers = set()
        seennodes = set(pendingnodes)
        precursorsmarkers = self.precursors
+       succsmarkers = self.successors
        children = self.children
        while pendingnodes:
            direct = set()
            for current in pendingnodes:
                direct.update(precursorsmarkers.get(current, ()))
                pruned = [m for m in children.get(current, ()) if not m[1]]
                direct.update(pruned)
+               pruned = [m for m in succsmarkers.get(current, ()) if not m[1]]
+               direct.update(pruned)
            direct -= seenmarkers
            pendingnodes = set([m[0] for m in direct])
            seenmarkers |= direct
            pendingnodes -= seennodes
            seennodes |= pendingnodes
        return seenmarkers

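# Illustrative sketch (not part of upstream obsolete.py): why
# relevantmarkers() above needs the successors mapping for the lines marked
# '+'. A prune marker (X, ()) has no successor nodes, so the precursors
# mapping never references it; without the extra lookup it is only found
# when the pruned changeset's parents happen to be recorded.
def _prunemarkersof(obsstore, prunednode):
    return [m for m in obsstore.successors.get(prunednode, ()) if not m[1]]
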
def commonversion(versions):
    """Return the newest version listed in both versions and our local formats.

    Returns None if no common version exists.
    """
    versions.sort(reverse=True)
    # search for highest version known on both sides
    for v in versions:
        if v in formats:
            return v
    return None

# arbitrarily picked to fit into 8K limit from HTTP server
# you have to take into account:
# - the version header
# - the base85 encoding
_maxpayload = 5300

def _pushkeyescape(markers):
    """encode markers into a dict suitable for pushkey exchange

    - binary data is base85 encoded
    - split in chunks smaller than 5300 bytes"""
    keys = {}
    parts = []
    currentlen = _maxpayload * 2  # ensure we create a new part
    for marker in markers:
        nextdata = _fm0encodeonemarker(marker)
        if (len(nextdata) + currentlen > _maxpayload):
            currentpart = []
            currentlen = 0
            parts.append(currentpart)
        currentpart.append(nextdata)
        currentlen += len(nextdata)
    for idx, part in enumerate(reversed(parts)):
        data = ''.join([_pack('>B', _fm0version)] + part)
        keys['dump%i' % idx] = util.b85encode(data)
    return keys

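# Illustrative sketch (not part of upstream obsolete.py): each pushkey
# value produced above is a base85 blob starting with the version byte, so
# decoding is just the reverse pipeline.
def _pushkeyunescape(keys):
    markers = []
    for name in sorted(keys):
        if name.startswith('dump'):
            version, read = _readmarkers(util.b85decode(keys[name]))
            markers.extend(read)
    return markers
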
def listmarkers(repo):
    """List markers over pushkey"""
    if not repo.obsstore:
        return {}
    return _pushkeyescape(sorted(repo.obsstore))

def pushmarker(repo, key, old, new):
    """Push markers over pushkey"""
    if not key.startswith('dump'):
        repo.ui.warn(_('unknown key: %r') % key)
        return 0
    if old:
        repo.ui.warn(_('unexpected old value for %r') % key)
        return 0
    data = util.b85decode(new)
    lock = repo.lock()
    try:
        tr = repo.transaction('pushkey: obsolete markers')
        try:
            repo.obsstore.mergemarkers(tr, data)
            repo.invalidatevolatilesets()
            tr.close()
            return 1
        finally:
            tr.release()
    finally:
        lock.release()

def getmarkers(repo, nodes=None):
    """returns markers known in a repository

    If <nodes> is specified, only markers "relevant" to those nodes are
    returned"""
    if nodes is None:
        rawmarkers = repo.obsstore
    else:
        rawmarkers = repo.obsstore.relevantmarkers(nodes)

    for markerdata in rawmarkers:
        yield marker(repo, markerdata)

def relevantmarkers(repo, node):
    """all obsolete markers relevant to some revision"""
    for markerdata in repo.obsstore.relevantmarkers(node):
        yield marker(repo, markerdata)


def precursormarkers(ctx):
    """obsolete marker marking this changeset as a successor"""
    for data in ctx.repo().obsstore.precursors.get(ctx.node(), ()):
        yield marker(ctx.repo(), data)

def successormarkers(ctx):
    """obsolete marker making this changeset obsolete"""
    for data in ctx.repo().obsstore.successors.get(ctx.node(), ()):
        yield marker(ctx.repo(), data)

def allsuccessors(obsstore, nodes, ignoreflags=0):
    """Yield node for every successor of <nodes>.

    Some successors may be unknown locally.

    This is a linear yield unsuited to detecting split changesets. It includes
    initial nodes too."""
    remaining = set(nodes)
    seen = set(remaining)
    while remaining:
        current = remaining.pop()
        yield current
        for mark in obsstore.successors.get(current, ()):
            # ignore marker flagged with specified flag
            if mark[2] & ignoreflags:
                continue
            for suc in mark[1]:
                if suc not in seen:
                    seen.add(suc)
                    remaining.add(suc)

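# Illustrative sketch (not part of upstream obsolete.py): following a
# rewrite chain A -> A' -> A'' with allsuccessors(). The stub below only
# carries the `successors` mapping the function reads; node ids are made up.
class _stubobsstore(object):
    """Minimal stand-in exposing only the `successors` mapping."""
    def __init__(self, successors):
        self.successors = successors

def _allsuccessorsexample():
    a, b, c = 'a' * 20, 'b' * 20, 'c' * 20
    store = _stubobsstore({a: set([(a, (b,), 0, (), (0.0, 0), None)]),
                           b: set([(b, (c,), 0, (), (0.0, 0), None)])})
    return sorted(allsuccessors(store, [a]))  # a, b and c, in sorted order
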
def allprecursors(obsstore, nodes, ignoreflags=0):
    """Yield node for every precursor of <nodes>.

    Some precursors may be unknown locally.

    This is a linear yield unsuited to detecting folded changesets. It includes
    initial nodes too."""

    remaining = set(nodes)
    seen = set(remaining)
    while remaining:
        current = remaining.pop()
        yield current
        for mark in obsstore.precursors.get(current, ()):
            # ignore marker flagged with specified flag
            if mark[2] & ignoreflags:
                continue
            suc = mark[0]
            if suc not in seen:
                seen.add(suc)
                remaining.add(suc)

def foreground(repo, nodes):
    """return all nodes in the "foreground" of other nodes

    The foreground of a revision is anything reachable using parent -> children
    or precursor -> successor relation. It is very similar to "descendant" but
    augmented with obsolescence information.

    Beware that obsolescence cycles may result in complex situations.
    """
    repo = repo.unfiltered()
    foreground = set(repo.set('%ln::', nodes))
    if repo.obsstore:
        # We only need this complicated logic if there is obsolescence
        # XXX will probably deserve an optimised revset.
        nm = repo.changelog.nodemap
        plen = -1
        # compute the whole set of successors or descendants
        while len(foreground) != plen:
            plen = len(foreground)
            succs = set(c.node() for c in foreground)
            mutable = [c.node() for c in foreground if c.mutable()]
            succs.update(allsuccessors(repo.obsstore, mutable))
            known = (n for n in succs if n in nm)
            foreground = set(repo.set('%ln::', known))
    return set(c.node() for c in foreground)


def successorssets(repo, initialnode, cache=None):
    """Return the list of all the latest successors sets of the initial node

    The successors set of a changeset A is the group of revisions that succeed
    A. It succeeds A as a consistent whole, each revision being only a partial
    replacement. The successors set contains non-obsolete changesets only.

    This function returns the full list of successors sets, which is why it
    returns a list of tuples and not just a single tuple. Each tuple is a
    valid successors set. Note that (A,) may be a valid successors set for
    changeset A (see below).

    In most cases, a changeset A will have a single element (e.g. the
    changeset A is replaced by A') in its successors set. Though, it is also
    common for a changeset A to have no elements in its successors set (e.g.
    the changeset has been pruned). Therefore, the returned list of successors
    sets will be [(A',)] or [], respectively.

    When a changeset A is split into A' and B', however, it will result in a
    successors set containing more than a single element, i.e. [(A',B')].
    Divergent changesets will result in multiple successors sets, i.e.
    [(A',), (A'',)].

    If a changeset A is not obsolete, then it will conceptually have no
    successors set. To distinguish this from a pruned changeset, the
    successors set will contain itself only, i.e. [(A,)].

    Finally, successors unknown locally are considered to be pruned (obsoleted
    without any successors).

    The optional `cache` parameter is a dictionary that may contain
    precomputed successors sets. It is meant to reuse the computation of a
    previous call to `successorssets` when multiple calls are made at the same
    time. The cache dictionary is updated in place. The caller is responsible
    for its life span. Code that makes multiple calls to `successorssets`
    *must* use this cache mechanism or suffer terrible performance.
    """

    succmarkers = repo.obsstore.successors

    # Stack of nodes we search successors sets for
    toproceed = [initialnode]
    # set version of the above list for fast loop detection
    # elements added to "toproceed" must be added here
    stackedset = set(toproceed)
    if cache is None:
        cache = {}

    # This while loop is the flattened version of a recursive search for
    # successors sets
    #
    # def successorssets(x):
    #     successors = directsuccessors(x)
    #     ss = [[]]
    #     for succ in directsuccessors(x):
    #         # product as in itertools cartesian product
    #         ss = product(ss, successorssets(succ))
    #     return ss
    #
    # But we cannot use plain recursive calls here:
    # - that would blow the python call stack
    # - obsolescence markers may have cycles, we need to handle them.
    #
    # The `toproceed` list acts as our call stack. Every node whose successors
    # sets we search for is stacked there.
    #
    # The `stackedset` is the set version of this stack, used to check if a
    # node is already stacked. This check is used to detect cycles and to
    # prevent infinite loops.
    #
    # The successors sets of all nodes are stored in the `cache` dictionary.
    #
    # After this while loop ends we use the cache to return the successors
    # sets for the node requested by the caller.
    while toproceed:
        # Every iteration tries to compute the successors sets of the topmost
        # node of the stack: CURRENT.
        #
        # There are four possible outcomes:
        #
        # 1) We already know the successors sets of CURRENT:
        #    -> mission accomplished, pop it from the stack.
        # 2) Node is not obsolete:
        #    -> the node is its own successors sets. Add it to the cache.
        # 3) We do not know the successors sets of the direct successors of
        #    CURRENT:
        #    -> We add those successors to the stack.
        # 4) We know the successors sets of all direct successors of CURRENT:
        #    -> We can compute CURRENT's successors sets and add them to the
        #       cache.
        #
        current = toproceed[-1]
        if current in cache:
            # case (1): We already know the successors sets
            stackedset.remove(toproceed.pop())
        elif current not in succmarkers:
            # case (2): The node is not obsolete.
            if current in repo:
                # We have a valid set of last successors.
                cache[current] = [(current,)]
            else:
                # Final obsolete version is unknown locally.
                # Do not count that as a valid successors set.
                cache[current] = []
        else:
            # cases (3) and (4)
            #
            # We proceed in two phases. Phase 1 aims to distinguish case (3)
            # from case (4):
            #
            # For each direct successor of CURRENT, we check whether its
            # successors sets are known. If they are not, we stack the
            # unknown node and proceed to the next iteration of the while
            # loop. (case 3)
            #
            # During this step, we may detect obsolescence cycles: a node
            # with unknown successors sets but already in the call stack.
            # In such a situation, we arbitrarily set the successors sets of
            # the node to nothing (node pruned) to break the cycle.
            #
            # If no break was encountered we proceed to phase 2.
            #
            # Phase 2 computes the successors sets of CURRENT (case 4); see
            # details in phase 2 itself.
            #
            # Note the two levels of iteration in each phase.
            # - The first one handles obsolescence markers using CURRENT as
            #   a precursor (successors markers of CURRENT).
            #
            #   Having multiple entries here means divergence.
            #
            # - The second one handles successors defined in each marker.
            #
            #   Having none means a pruned node, multiple successors mean a
            #   split, and a single successor is a standard replacement.
            #
            for mark in sorted(succmarkers[current]):
                for suc in mark[1]:
                    if suc not in cache:
                        if suc in stackedset:
                            # cycle breaking
                            cache[suc] = []
                        else:
                            # case (3): If we have not computed the successors
                            # sets of one of those successors, we add it to
                            # the `toproceed` stack and stop all work for this
                            # iteration.
                            toproceed.append(suc)
                            stackedset.add(suc)
                            break
                else:
                    continue
                break
            else:
                # case (4): we know the successors sets of all direct
                # successors
                #
                # The successors set contributed by each marker depends on the
                # successors sets of all of its "successors" nodes.
                #
                # Each different marker is a divergence in the obsolescence
                # history. It contributes successors sets distinct from other
                # markers.
                #
                # Within a marker, a successor may have divergent successors
                # sets. In such a case, the marker will contribute multiple
                # divergent successors sets. If multiple successors have
                # divergent successors sets, a Cartesian product is used.
                #
                # At the end we post-process successors sets to remove
                # duplicated entries and successors sets that are strict
                # subsets of another one.
                succssets = []
                for mark in sorted(succmarkers[current]):
                    # successors sets contributed by this marker
                    markss = [[]]
                    for suc in mark[1]:
                        # Cartesian product with previous successors
                        productresult = []
                        for prefix in markss:
                            for suffix in cache[suc]:
                                newss = list(prefix)
                                for part in suffix:
                                    # do not duplicate entries in a successors
                                    # set; first entry wins.
                                    if part not in newss:
                                        newss.append(part)
                                productresult.append(newss)
                        markss = productresult
                    succssets.extend(markss)
                # remove duplicates and strict subsets
                seen = []
                final = []
                candidate = sorted(((set(s), s) for s in succssets if s),
                                   key=lambda x: len(x[1]), reverse=True)
                for setversion, listversion in candidate:
                    for seenset in seen:
                        if setversion.issubset(seenset):
                            break
                    else:
                        final.append(listversion)
                        seen.append(setversion)
                final.reverse() # put small successors sets first
                cache[current] = final
    return cache[initialnode]

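# Editor's illustration of the possible shapes of the return value
# (hypothetical nodes; see the docstring above for definitions):
#
#   >>> successorssets(repo, a)     # A replaced by A'
#   [(a1,)]
#   >>> successorssets(repo, a)     # A pruned
#   []
#   >>> successorssets(repo, a)     # A split into A' and B'
#   [(a1, b1)]
#   >>> successorssets(repo, a)     # A rewritten divergently as A' and A''
#   [(a1,), (a2,)]
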
# mapping of 'set-name' -> <function to compute this set>
cachefuncs = {}
def cachefor(name):
    """Decorator to register a function as computing the cache for a set"""
    def decorator(func):
        assert name not in cachefuncs
        cachefuncs[name] = func
        return func
    return decorator

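# Editor's sketch: a new cached set would be registered the same way as the
# built-in ones below (the 'frobbed' name is purely hypothetical):
#
#   @cachefor('frobbed')
#   def _computefrobbedset(repo):
#       return set()  # compute and return the relevant revision numbers
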
def getrevs(repo, name):
    """Return the set of revisions that belong to the <name> set

    Such access may compute the set and cache it for future use"""
    repo = repo.unfiltered()
    if not repo.obsstore:
        return frozenset()
    if name not in repo.obsstore.caches:
        repo.obsstore.caches[name] = cachefuncs[name](repo)
    return repo.obsstore.caches[name]

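# Typical access pattern (sketch): callers ask for a named set and the first
# call pays the computation cost, e.g.
#
#   >>> getrevs(repo, 'obsolete')    # computed via _computeobsoleteset
#   >>> getrevs(repo, 'obsolete')    # second call served from obsstore.caches
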
# To keep things simple we need to invalidate the obsolescence caches when:
#
# - a new changeset is added
# - the public phase is changed
# - obsolescence markers are added
# - strip is used on a repo
def clearobscaches(repo):
    """Remove all obsolescence-related caches from a repo

    This removes all caches in the obsstore if the obsstore already exists on
    the repo.

    (We could be smarter here given the exact event that triggered the cache
    clearing)"""
    # only clear the caches if there is obsstore data in this repo
    if 'obsstore' in repo._filecache:
        repo.obsstore.caches.clear()

@cachefor('obsolete')
def _computeobsoleteset(repo):
    """the set of obsolete revisions"""
    obs = set()
    getnode = repo.changelog.node
    notpublic = repo._phasecache.getrevset(repo, (phases.draft, phases.secret))
    for r in notpublic:
        if getnode(r) in repo.obsstore.successors:
            obs.add(r)
    return obs

@cachefor('unstable')
def _computeunstableset(repo):
    """the set of non-obsolete revisions with obsolete parents"""
    revs = [(ctx.rev(), ctx) for ctx in
            repo.set('(not public()) and (not obsolete())')]
    revs.sort(key=lambda x: x[0])
    unstable = set()
    for rev, ctx in revs:
        # A rev is unstable if one of its parents is obsolete or unstable;
        # this works since we traverse in increasing rev order
        if any((x.obsolete() or (x.rev() in unstable))
               for x in ctx.parents()):
            unstable.add(rev)
    return unstable

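# Editor's sketch: instability propagates down the DAG. If A is obsolete and
# has child B and grandchild C, the increasing-rev traversal above first marks
# B unstable (obsolete parent), then C (unstable parent).
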
@cachefor('suspended')
def _computesuspendedset(repo):
    """the set of obsolete revisions with non-obsolete descendants"""
    suspended = repo.changelog.ancestors(getrevs(repo, 'unstable'))
    return set(r for r in getrevs(repo, 'obsolete') if r in suspended)

@cachefor('extinct')
def _computeextinctset(repo):
    """the set of obsolete revisions without non-obsolete descendants"""
    return getrevs(repo, 'obsolete') - getrevs(repo, 'suspended')


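# Editor's note: 'suspended' and 'extinct' partition the 'obsolete' set, so
# the following invariant should hold on any repository (illustrative check):
#
#   >>> getrevs(repo, 'obsolete') == (getrevs(repo, 'suspended')
#   ...                               | getrevs(repo, 'extinct'))
#   True
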
@cachefor('bumped')
def _computebumpedset(repo):
    """the set of revs trying to obsolete public revisions"""
    bumped = set()
    # util function (avoid attribute lookup in the loop)
    phase = repo._phasecache.phase # would be faster to grab the full list
    public = phases.public
    cl = repo.changelog
    torev = cl.nodemap.get
    for ctx in repo.set('(not public()) and (not obsolete())'):
        rev = ctx.rev()
        # We only evaluate mutable, non-obsolete revisions
        node = ctx.node()
        # (future) A cache of precursors may be worth it if splits are very
        # common
        for pnode in allprecursors(repo.obsstore, [node],
                                   ignoreflags=bumpedfix):
            prev = torev(pnode) # unfiltered! but so is phasecache
            if (prev is not None) and (phase(repo, prev) <= public):
                # we have a public precursor
                bumped.add(rev)
                break # Next draft!
    return bumped

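# Editor's sketch: if a public changeset P is rewritten into a draft P'
# (marker (P, (P',))), the loop above finds the public precursor P and puts
# P' in the 'bumped' set: rewriting an immutable revision cannot take effect.
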
@cachefor('divergent')
def _computedivergentset(repo):
    """the set of revs that compete to be the final successors of some
    revision."""
    divergent = set()
    obsstore = repo.obsstore
    newermap = {}
    for ctx in repo.set('(not public()) - obsolete()'):
        mark = obsstore.precursors.get(ctx.node(), ())
        toprocess = set(mark)
        seen = set()
        while toprocess:
            prec = toprocess.pop()[0]
            if prec in seen:
                continue # emergency cycle hanging prevention
            seen.add(prec)
            if prec not in newermap:
                successorssets(repo, prec, newermap)
            newer = [n for n in newermap[prec] if n]
            if len(newer) > 1:
                divergent.add(ctx.rev())
                break
            toprocess.update(obsstore.precursors.get(prec, ()))
    return divergent


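# Editor's sketch: two independent rewrites of the same changeset A, recorded
# as markers (A, (A1,)) and (A, (A2,)), give successorssets(A) == [(A1,),
# (A2,)]; both A1 and A2 then land in the 'divergent' set.
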
def createmarkers(repo, relations, flag=0, date=None, metadata=None,
                  operation=None):
    """Add obsolete markers between changesets in a repo

    <relations> must be an iterable of (<old>, (<new>, ...)[,{metadata}])
    tuples. `old` and `news` are changectx objects. metadata is an optional
    dictionary containing metadata for this marker only. It is merged with
    the global metadata specified through the `metadata` argument of this
    function.

    Trying to obsolete a public changeset will raise an exception.

    The current user and date are used except if specified otherwise in the
    metadata attribute.

    This function operates within a transaction of its own, but does
    not take any lock on the repo.
    """
    # prepare metadata
    if metadata is None:
        metadata = {}
    if 'user' not in metadata:
        metadata['user'] = repo.ui.username()
    useoperation = repo.ui.configbool('experimental',
                                      'evolution.track-operation',
                                      False)
    if useoperation and operation:
        metadata['operation'] = operation
    tr = repo.transaction('add-obsolescence-marker')
    try:
        markerargs = []
        for rel in relations:
            prec = rel[0]
            sucs = rel[1]
            localmetadata = metadata.copy()
            if 2 < len(rel):
                localmetadata.update(rel[2])

            if not prec.mutable():
                raise error.Abort(_("cannot obsolete public changeset: %s")
                                  % prec,
                                  hint="see 'hg help phases' for details")
            nprec = prec.node()
            nsucs = tuple(s.node() for s in sucs)
            npare = None
            if not nsucs:
                npare = tuple(p.node() for p in prec.parents())
            if nprec in nsucs:
                raise error.Abort(_("changeset %s cannot obsolete itself")
                                  % prec)

            # Creating the marker causes the hidden cache to become invalid,
            # which causes recomputation when we ask for prec.parents() above.
            # Resulting in n^2 behavior. So let's prepare all of the args
            # first, then create the markers.
            markerargs.append((nprec, nsucs, npare, localmetadata))

        for args in markerargs:
            nprec, nsucs, npare, localmetadata = args
            repo.obsstore.create(tr, nprec, nsucs, flag, parents=npare,
                                 date=date, metadata=localmetadata,
                                 ui=repo.ui)
        repo.filteredrevcache.clear()
        tr.close()
    finally:
        tr.release()
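
# Editor's sketch of typical calls (contexts are placeholders): record that
# `old` was rewritten into `new`, then record a prune with no successors:
#
#   createmarkers(repo, [(old, (new,))], operation='amend')
#   createmarkers(repo, [(old, ())])   # prune; parents recorded automatically
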
@@ -1,102 +1,198 @@
==================================================
Test obsmarkers interaction with bundle and strip
==================================================

In practice, this file does not yet contain any tests for bundle and strip,
but there will be some soon (tm).

For now this test checks the logic computing markers relevant to a set of
revisions. That logic will be used by "hg bundle" to select the markers to
include, and by strip to find the markers to back up.

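(As an illustrative aside, not executed by this test: with the
"evolution.bundle-obsmarker" option enabled below, a command such as
"hg bundle --all --hidden bundle.hg" would be expected to include the
relevant markers alongside the bundled changesets.)
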
Setup a repository with various cases
======================================

Config setup
------------

  $ cat >> $HGRCPATH <<EOF
  > [ui]
  > # simpler log output
  > logtemplate = "{node|short}: {desc}\n"
  >
  > [experimental]
  > # enable evolution
  > evolution = all
  >
  > # include obsmarkers in bundle
  > evolution.bundle-obsmarker = yes
  >
  > [extensions]
  > # needed for some tests
  > strip =
  > [defaults]
  > # we'll query many hidden changesets
  > debugobsolete = --hidden
  > EOF

  $ mkcommit() {
  >    echo "$1" > "$1"
  >    hg add "$1"
  >    hg ci -m "$1"
  > }

  $ getid() {
  >    hg log --hidden --template '{node}\n' --rev "$1"
  > }

  $ mktestrepo () {
  >    [ -n "$1" ] || exit 1
  >    cd $TESTTMP
  >    hg init $1
  >    cd $1
  >    mkcommit ROOT
  > }

root setup
----------

simple chain
============

.    A0
. β‡ ΓΈβ‡ β—” A1
.    |/
.    ●

setup
-----

  $ mktestrepo simple-chain
  $ mkcommit 'C-A0'
  $ hg up 'desc("ROOT")'
  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
  $ mkcommit 'C-A1'
  created new head
  $ hg debugobsolete a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0 `getid 'desc("C-A0")'`
  $ hg debugobsolete `getid 'desc("C-A0")'` a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1
  $ hg debugobsolete a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1 `getid 'desc("C-A1")'`

  $ hg up 'desc("ROOT")'
  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
  $ hg log --hidden -G
  o  cf2c22470d67: C-A1
  |
  | x  84fcb0dfe17b: C-A0
  |/
  @  ea207398892e: ROOT

  $ hg debugobsolete
  a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0 84fcb0dfe17b256ebae52e05572993b9194c018a 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
  84fcb0dfe17b256ebae52e05572993b9194c018a a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
  a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1 cf2c22470d67233004e934a31184ac2b35389914 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}

Actual testing
--------------

  $ hg debugobsolete --rev 'desc("C-A0")'
  a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0 84fcb0dfe17b256ebae52e05572993b9194c018a 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
  $ hg debugobsolete --rev 'desc("C-A1")'
  84fcb0dfe17b256ebae52e05572993b9194c018a a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
  a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0 84fcb0dfe17b256ebae52e05572993b9194c018a 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
  a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1 cf2c22470d67233004e934a31184ac2b35389914 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}

chain with pruned children
==========================

. β‡ βŠ— B0
.    |
. β‡ ΓΈβ‡ β—” A1
.    |
.    ●

setup
-----

  $ mktestrepo prune
  $ mkcommit 'C-A0'
  $ mkcommit 'C-B0'
  $ hg up 'desc("ROOT")'
  0 files updated, 0 files merged, 2 files removed, 0 files unresolved
  $ mkcommit 'C-A1'
  created new head
  $ hg debugobsolete a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0 `getid 'desc("C-A0")'`
  $ hg debugobsolete `getid 'desc("C-A0")'` `getid 'desc("C-A1")'`
  $ hg debugobsolete --record-parents `getid 'desc("C-B0")'`
  $ hg up 'desc("ROOT")'
  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
  $ hg log --hidden -G
  o  cf2c22470d67: C-A1
  |
  | x  29f93b1df87b: C-B0
  | |
  | x  84fcb0dfe17b: C-A0
  |/
  @  ea207398892e: ROOT

  $ hg debugobsolete
  a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0 84fcb0dfe17b256ebae52e05572993b9194c018a 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
  84fcb0dfe17b256ebae52e05572993b9194c018a cf2c22470d67233004e934a31184ac2b35389914 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
  29f93b1df87baee1824e014080d8adf145f81783 0 {84fcb0dfe17b256ebae52e05572993b9194c018a} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}

Actual testing
--------------

  $ hg debugobsolete --rev 'desc("C-A0")'
  29f93b1df87baee1824e014080d8adf145f81783 0 {84fcb0dfe17b256ebae52e05572993b9194c018a} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
  a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0 84fcb0dfe17b256ebae52e05572993b9194c018a 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
  $ hg debugobsolete --rev 'desc("C-B0")'
  29f93b1df87baee1824e014080d8adf145f81783 0 {84fcb0dfe17b256ebae52e05572993b9194c018a} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
  $ hg debugobsolete --rev 'desc("C-A1")'
  29f93b1df87baee1824e014080d8adf145f81783 0 {84fcb0dfe17b256ebae52e05572993b9194c018a} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
  84fcb0dfe17b256ebae52e05572993b9194c018a cf2c22470d67233004e934a31184ac2b35389914 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
  a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0 84fcb0dfe17b256ebae52e05572993b9194c018a 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}

chain with precursors also pruned
=================================

.    A0 (also pruned)
. β‡ ΓΈβ‡ β—” A1
.    |
.    ●

setup
-----

  $ mktestrepo prune-inline
  $ mkcommit 'C-A0'
  $ hg up 'desc("ROOT")'
  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
  $ mkcommit 'C-A1'
  created new head
  $ hg debugobsolete a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0 `getid 'desc("C-A0")'`
  $ hg debugobsolete --record-parents `getid 'desc("C-A0")'`
  $ hg debugobsolete `getid 'desc("C-A0")'` `getid 'desc("C-A1")'`
  $ hg up 'desc("ROOT")'
  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
  $ hg log --hidden -G
  o  cf2c22470d67: C-A1
  |
  | x  84fcb0dfe17b: C-A0
  |/
  @  ea207398892e: ROOT

  $ hg debugobsolete
  a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0 84fcb0dfe17b256ebae52e05572993b9194c018a 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
  84fcb0dfe17b256ebae52e05572993b9194c018a 0 {ea207398892eb49e06441f10dda2a731f0450f20} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
  84fcb0dfe17b256ebae52e05572993b9194c018a cf2c22470d67233004e934a31184ac2b35389914 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}

Actual testing
--------------

  $ hg debugobsolete --rev 'desc("C-A0")'
  84fcb0dfe17b256ebae52e05572993b9194c018a 0 {ea207398892eb49e06441f10dda2a731f0450f20} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
  a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0 84fcb0dfe17b256ebae52e05572993b9194c018a 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
  $ hg debugobsolete --rev 'desc("C-A1")'
  84fcb0dfe17b256ebae52e05572993b9194c018a 0 {ea207398892eb49e06441f10dda2a731f0450f20} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
  84fcb0dfe17b256ebae52e05572993b9194c018a cf2c22470d67233004e934a31184ac2b35389914 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
  a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0 84fcb0dfe17b256ebae52e05572993b9194c018a 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}