obsolete: move marker flags to obsutil...
av6
r36971:b9bbcf9f default
@@ -1,1042 +1,1012 @@
# obsolete.py - obsolete markers handling
#
# Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
#                Logilab SA        <contact@logilab.fr>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""Obsolete marker handling

An obsolete marker maps an old changeset to a list of new
changesets. If the list of new changesets is empty, the old changeset
is said to be "killed". Otherwise, the old changeset is being
"replaced" by the new changesets.

Obsolete markers can be used to record and distribute changeset graph
transformations performed by history rewrite operations, and help
building new tools to reconcile conflicting rewrite actions. To
facilitate conflict resolution, markers include various annotations
besides old and new changeset identifiers, such as creation date or
author name.

The old obsoleted changeset is called a "predecessor" and possible
replacements are called "successors". Markers that used changeset X as
a predecessor are called "successor markers of X" because they hold
information about the successors of X. Markers that use changeset Y as
a successor are called "predecessor markers of Y" because they hold
information about the predecessors of Y.

Examples:

- When changeset A is replaced by changeset A', one marker is stored:

    (A, (A',))

- When changesets A and B are folded into a new changeset C, two markers are
  stored:

    (A, (C,)) and (B, (C,))

- When changeset A is simply "pruned" from the graph, a marker is created:

    (A, ())

- When changeset A is split into B and C, a single marker is used:

    (A, (B, C))

We use a single marker to distinguish the "split" case from the "divergence"
case. If two independent operations rewrite the same changeset A into A' and
A'', we have an error case: divergent rewriting. We can detect it because
two markers will be created independently:

(A, (B,)) and (A, (C,))

Format
------

Markers are stored in an append-only file stored in
'.hg/store/obsstore'.

The file starts with a version header:

- 1 unsigned byte: version number, starting at zero.

The header is followed by the markers. Marker format depends on the version.
See the comment associated with each format for details.

"""
from __future__ import absolute_import

import errno
import struct

from .i18n import _
from . import (
    error,
    node,
    obsutil,
    phases,
    policy,
    util,
)
from .utils import dateutil

parsers = policy.importmod(r'parsers')

_pack = struct.pack
_unpack = struct.unpack
_calcsize = struct.calcsize
propertycache = util.propertycache

# the obsolete feature is not mature enough to be enabled by default.
# you have to rely on third party extensions to enable this.
_enabled = False

# Options for obsolescence
createmarkersopt = 'createmarkers'
allowunstableopt = 'allowunstable'
exchangeopt = 'exchange'

def _getoptionvalue(repo, option):
    """Returns True if the given repository has the given obsolete option
    enabled.
    """
    configkey = 'evolution.%s' % option
    newconfig = repo.ui.configbool('experimental', configkey)

    # Return the value only if defined
    if newconfig is not None:
        return newconfig

    # Fallback on generic option
    try:
        return repo.ui.configbool('experimental', 'evolution')
    except (error.ConfigError, AttributeError):
        # Fallback on old-fashioned config
        # inconsistent config: experimental.evolution
        result = set(repo.ui.configlist('experimental', 'evolution'))

        if 'all' in result:
            return True

        # For migration purposes, temporarily return true if the config hasn't
        # been set but _enabled is true.
        if len(result) == 0 and _enabled:
            return True

        # Temporary hack for next check
        newconfig = repo.ui.config('experimental', 'evolution.createmarkers')
        if newconfig:
            result.add('createmarkers')

        return option in result

def isenabled(repo, option):
    """Returns True if the given repository has the given obsolete option
    enabled.
    """
    createmarkersvalue = _getoptionvalue(repo, createmarkersopt)
    unstablevalue = _getoptionvalue(repo, allowunstableopt)
    exchangevalue = _getoptionvalue(repo, exchangeopt)

    # createmarkers must be enabled if other options are enabled
    if ((unstablevalue or exchangevalue) and not createmarkersvalue):
        raise error.Abort(_("'createmarkers' obsolete option must be enabled "
                            "if other obsolete options are enabled"))

    return _getoptionvalue(repo, option)

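# Illustrative sketch (editorial, not in the original file): the option names
# above correspond to hgrc settings in the 'experimental' section, e.g.:
#
#     [experimental]
#     evolution.createmarkers = yes
#     evolution.exchange = yes
#
# With such a config, isenabled(repo, exchangeopt) returns True; enabling
# 'exchange' or 'allowunstable' without 'createmarkers' aborts, as enforced
# above.
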
-### obsolescence marker flag
-
-## bumpedfix flag
-#
-# When a changeset A' succeeds a changeset A which became public, we call A'
-# "bumped" because it is a successor of a public changeset.
-#
-# o A' (bumped)
-# |`:
-# | o A
-# |/
-# o Z
-#
-# The way to solve this situation is to create a new changeset Ad as a child
-# of A. This changeset has the same content as A', so the diff from A to A'
-# is the same as the diff from A to Ad. Ad is marked as a successor of A'.
-#
-# o Ad
-# |`:
-# | x A'
-# |'|
-# o | A
-# |/
-# o Z
-#
-# But by transitivity Ad is also a successor of A. To avoid having Ad marked
-# as bumped too, we add the `bumpedfix` flag to the marker <A', (Ad,)>.
-# This flag means that the successor expresses the changes between the public
-# and bumped versions and fixes the situation, breaking the transitivity of
-# "bumped" here.
-bumpedfix = 1
-usingsha256 = 2
+bumpedfix = obsutil.bumpedfix
+usingsha256 = obsutil.usingsha256

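# Note (editorial, not in the original file): the flag values themselves are
# unchanged by this move. The removed lines show bumpedfix = 1 and
# usingsha256 = 2; those definitions now live in obsutil, and this module
# keeps re-exporting them under the same names so existing callers such as
# _fm0encodeonemarker() below keep working.
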
## Parsing and writing of version "0"
#
# The header is followed by the markers. Each marker is made of:
#
# - 1 uint8 : number of new changesets "N", can be zero.
#
# - 1 uint32: metadata size "M" in bytes.
#
# - 1 byte: a bit field. It is reserved for flags used in common
#   obsolete marker operations, to avoid repeated decoding of metadata
#   entries.
#
# - 20 bytes: obsoleted changeset identifier.
#
# - N*20 bytes: new changesets identifiers.
#
# - M bytes: metadata as a sequence of nul-terminated strings. Each
#   string contains a key and a value, separated by a colon ':', without
#   additional encoding. Keys cannot contain '\0' or ':' and values
#   cannot contain '\0'.
_fm0version = 0
_fm0fixed = '>BIB20s'
_fm0node = '20s'
_fm0fsize = _calcsize(_fm0fixed)
_fm0fnodesize = _calcsize(_fm0node)

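# Illustrative sketch (editorial, not in the original file): packing the
# fixed part of a version-0 marker by hand. With one successor and no
# metadata, the record is the fixed struct followed by one 20-byte node:
#
#     pre, suc = b'\x11' * 20, b'\x22' * 20
#     raw = _pack(_fm0fixed + _fm0node, 1, 0, 0, pre, suc)
#     assert len(raw) == _fm0fsize + _fm0fnodesize
#
# The three integers are the successor count N, the metadata size M and the
# flags byte described above.
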
def _fm0readmarkers(data, off, stop):
    # Loop on markers
    while off < stop:
        # read fixed part
        cur = data[off:off + _fm0fsize]
        off += _fm0fsize
        numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
        # read replacement
        sucs = ()
        if numsuc:
            s = (_fm0fnodesize * numsuc)
            cur = data[off:off + s]
            sucs = _unpack(_fm0node * numsuc, cur)
            off += s
        # read metadata
        # (metadata will be decoded on demand)
        metadata = data[off:off + mdsize]
        if len(metadata) != mdsize:
            raise error.Abort(_('parsing obsolete marker: metadata is too '
                                'short, %d bytes expected, got %d')
                              % (mdsize, len(metadata)))
        off += mdsize
        metadata = _fm0decodemeta(metadata)
        try:
            when, offset = metadata.pop('date', '0 0').split(' ')
            date = float(when), int(offset)
        except ValueError:
            date = (0., 0)
        parents = None
        if 'p2' in metadata:
            parents = (metadata.pop('p1', None), metadata.pop('p2', None))
        elif 'p1' in metadata:
            parents = (metadata.pop('p1', None),)
        elif 'p0' in metadata:
            parents = ()
        if parents is not None:
            try:
                parents = tuple(node.bin(p) for p in parents)
                # if parent content is not a nodeid, drop the data
                for p in parents:
                    if len(p) != 20:
                        parents = None
                        break
            except TypeError:
                # if content cannot be translated to nodeid drop the data.
                parents = None

        metadata = tuple(sorted(metadata.iteritems()))

        yield (pre, sucs, flags, metadata, date, parents)

def _fm0encodeonemarker(marker):
    pre, sucs, flags, metadata, date, parents = marker
    if flags & usingsha256:
        raise error.Abort(_('cannot handle sha256 with old obsstore format'))
    metadata = dict(metadata)
    time, tz = date
    metadata['date'] = '%r %i' % (time, tz)
    if parents is not None:
        if not parents:
            # mark that we explicitly recorded no parents
            metadata['p0'] = ''
        for i, p in enumerate(parents, 1):
            metadata['p%i' % i] = node.hex(p)
    metadata = _fm0encodemeta(metadata)
    numsuc = len(sucs)
    format = _fm0fixed + (_fm0node * numsuc)
    data = [numsuc, len(metadata), flags, pre]
    data.extend(sucs)
    return _pack(format, *data) + metadata

def _fm0encodemeta(meta):
    """Return encoded metadata string to string mapping.

    Assume no ':' in key and no '\0' in both key and value."""
    for key, value in meta.iteritems():
        if ':' in key or '\0' in key:
            raise ValueError("':' and '\0' are forbidden in metadata key")
        if '\0' in value:
            raise ValueError("'\0' is forbidden in metadata value")
    return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])

def _fm0decodemeta(data):
    """Return string to string dictionary from encoded version."""
    d = {}
    for l in data.split('\0'):
        if l:
            key, value = l.split(':')
            d[key] = value
    return d

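# Illustrative sketch (editorial, not in the original file): the version-0
# metadata encoding is a nul-separated sequence of 'key:value' strings, so a
# round trip looks like:
#
#     _fm0encodemeta({'user': 'alice', 'operation': 'amend'})
#     # -> 'operation:amend\x00user:alice'    (keys are sorted)
#     _fm0decodemeta('operation:amend\x00user:alice')
#     # -> {'operation': 'amend', 'user': 'alice'}
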
## Parsing and writing of version "1"
#
# The header is followed by the markers. Each marker is made of:
#
# - uint32: total size of the marker (including this field)
#
# - float64: date in seconds since epoch
#
# - int16: timezone offset in minutes
#
# - uint16: a bit field. It is reserved for flags used in common
#   obsolete marker operations, to avoid repeated decoding of metadata
#   entries.
#
# - uint8: number of successors "N", can be zero.
#
# - uint8: number of parents "P", can be zero.
#
#     0: parents data stored but no parent,
#     1: one parent stored,
#     2: two parents stored,
#     3: no parent data stored
#
# - uint8: number of metadata entries M
#
# - 20 or 32 bytes: predecessor changeset identifier.
#
# - N*(20 or 32) bytes: successors changesets identifiers.
#
# - P*(20 or 32) bytes: parents of the predecessors changesets.
#
# - M*(uint8, uint8): size of all metadata entries (key and value)
#
# - remaining bytes: the metadata, each (key, value) pair after the other.
_fm1version = 1
_fm1fixed = '>IdhHBBB20s'
_fm1nodesha1 = '20s'
_fm1nodesha256 = '32s'
_fm1nodesha1size = _calcsize(_fm1nodesha1)
_fm1nodesha256size = _calcsize(_fm1nodesha256)
_fm1fsize = _calcsize(_fm1fixed)
_fm1parentnone = 3
_fm1parentshift = 14
_fm1parentmask = (_fm1parentnone << _fm1parentshift)
_fm1metapair = 'BB'
_fm1metapairsize = _calcsize(_fm1metapair)

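# Illustrative note (editorial, not in the original file): with the struct
# format '>IdhHBBB20s' the fixed part is 4 + 8 + 2 + 2 + 1 + 1 + 1 + 20 = 39
# bytes, so for a SHA-1 marker:
#
#     assert _fm1fsize == 39
#     assert _fm1fsize + _fm1nodesha1size * 2 == 79  # one successor, one parent
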
def _fm1purereadmarkers(data, off, stop):
    # make some global constants local for performance
    noneflag = _fm1parentnone
    sha2flag = usingsha256
    sha1size = _fm1nodesha1size
    sha2size = _fm1nodesha256size
    sha1fmt = _fm1nodesha1
    sha2fmt = _fm1nodesha256
    metasize = _fm1metapairsize
    metafmt = _fm1metapair
    fsize = _fm1fsize
    unpack = _unpack

    # Loop on markers
    ufixed = struct.Struct(_fm1fixed).unpack

    while off < stop:
        # read fixed part
        o1 = off + fsize
        t, secs, tz, flags, numsuc, numpar, nummeta, prec = ufixed(data[off:o1])

        if flags & sha2flag:
            # FIXME: prec was read as a SHA1, needs to be amended

            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha2size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha2size * numsuc
                sucs = unpack(sha2fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha2size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha2size * numpar
                parents = unpack(sha2fmt * numpar, data[o2:o3])
        else:
            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha1size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha1size * numsuc
                sucs = unpack(sha1fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha1size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha1size * numpar
                parents = unpack(sha1fmt * numpar, data[o2:o3])

        # read metadata
        off = o3 + metasize * nummeta
        metapairsize = unpack('>' + (metafmt * nummeta), data[o3:off])
        metadata = []
        for idx in xrange(0, len(metapairsize), 2):
            o1 = off + metapairsize[idx]
            o2 = o1 + metapairsize[idx + 1]
            metadata.append((data[off:o1], data[o1:o2]))
            off = o2

        yield (prec, sucs, flags, tuple(metadata), (secs, tz * 60), parents)

def _fm1encodeonemarker(marker):
    pre, sucs, flags, metadata, date, parents = marker
    # determine node size
    _fm1node = _fm1nodesha1
    if flags & usingsha256:
        _fm1node = _fm1nodesha256
    numsuc = len(sucs)
    numextranodes = numsuc
    if parents is None:
        numpar = _fm1parentnone
    else:
        numpar = len(parents)
        numextranodes += numpar
    formatnodes = _fm1node * numextranodes
    formatmeta = _fm1metapair * len(metadata)
    format = _fm1fixed + formatnodes + formatmeta
    # tz is stored in minutes so we divide by 60
    tz = date[1]//60
    data = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre]
    data.extend(sucs)
    if parents is not None:
        data.extend(parents)
    totalsize = _calcsize(format)
    for key, value in metadata:
        lk = len(key)
        lv = len(value)
        if lk > 255:
            msg = ('obsstore metadata key cannot be longer than 255 bytes'
                   ' (key "%s" is %u bytes)') % (key, lk)
            raise error.ProgrammingError(msg)
        if lv > 255:
            msg = ('obsstore metadata value cannot be longer than 255 bytes'
                   ' (value "%s" for key "%s" is %u bytes)') % (value, key, lv)
            raise error.ProgrammingError(msg)
        data.append(lk)
        data.append(lv)
        totalsize += lk + lv
    data[0] = totalsize
    data = [_pack(format, *data)]
    for key, value in metadata:
        data.append(key)
        data.append(value)
    return ''.join(data)

def _fm1readmarkers(data, off, stop):
    native = getattr(parsers, 'fm1readmarkers', None)
    if not native:
        return _fm1purereadmarkers(data, off, stop)
    return native(data, off, stop)

# mapping to read/write various marker formats
# <version> -> (decoder, encoder)
formats = {_fm0version: (_fm0readmarkers, _fm0encodeonemarker),
           _fm1version: (_fm1readmarkers, _fm1encodeonemarker)}

def _readmarkerversion(data):
    return _unpack('>B', data[0:1])[0]

@util.nogc
def _readmarkers(data, off=None, stop=None):
    """Read and enumerate markers from raw data"""
    diskversion = _readmarkerversion(data)
    if not off:
        off = 1  # skip 1 byte version number
    if stop is None:
        stop = len(data)
    if diskversion not in formats:
        msg = _('parsing obsolete marker: unknown version %r') % diskversion
        raise error.UnknownVersion(msg, version=diskversion)
    return diskversion, formats[diskversion][0](data, off, stop)

def encodeheader(version=_fm0version):
    return _pack('>B', version)

def encodemarkers(markers, addheader=False, version=_fm0version):
    # Kept separate from flushmarkers(), it will be reused for
    # markers exchange.
    encodeone = formats[version][1]
    if addheader:
        yield encodeheader(version)
    for marker in markers:
        yield encodeone(marker)

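# Illustrative sketch (editorial, not in the original file): a full
# serialization round trip using the helpers above, as obsstore.add() does
# when writing a fresh file:
#
#     data = b''.join(encodemarkers(markers, addheader=True,
#                                   version=_fm1version))
#     version, decoded = _readmarkers(data)    # -> (1, iterator of tuples)
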
@util.nogc
def _addsuccessors(successors, markers):
    for mark in markers:
        successors.setdefault(mark[0], set()).add(mark)

@util.nogc
def _addpredecessors(predecessors, markers):
    for mark in markers:
        for suc in mark[1]:
            predecessors.setdefault(suc, set()).add(mark)

@util.nogc
def _addchildren(children, markers):
    for mark in markers:
        parents = mark[5]
        if parents is not None:
            for p in parents:
                children.setdefault(p, set()).add(mark)

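# Illustrative note (editorial, not in the original file): in the index
# helpers above, mark[0] is the predecessor node, mark[1] the tuple of
# successors and mark[5] the recorded parents, matching the obsstore.fields
# order defined below. For a marker m = (A, (B,), 0, (), date, (Z,)):
#
#     successors[A]   -> {m}    # successor marker of A
#     predecessors[B] -> {m}    # predecessor marker of B
#     children[Z]     -> {m}    # marker on a child of Z
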
def _checkinvalidmarkers(markers):
    """search for markers with invalid data and raise error if needed

    Exists as a separate function to allow the evolve extension a more
    subtle handling.
    """
    for mark in markers:
        if node.nullid in mark[1]:
            raise error.Abort(_('bad obsolescence marker detected: '
                                'invalid successors nullid'))

class obsstore(object):
    """Store obsolete markers

    Markers can be accessed with three mappings:
    - predecessors[x] -> set(markers on predecessors edges of x)
    - successors[x] -> set(markers on successors edges of x)
    - children[x] -> set(markers on predecessors edges of children(x))
    """

    fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
    # prec:    nodeid, predecessor changeset
    # succs:   tuple of nodeid, successor changesets (0-N length)
    # flag:    integer, flag field carrying modifier for the markers (see doc)
    # meta:    binary blob, encoded metadata dictionary
    # date:    (float, int) tuple, date of marker creation
    # parents: (tuple of nodeid) or None, parents of predecessors
    #          None is used when no data has been recorded

    def __init__(self, svfs, defaultformat=_fm1version, readonly=False):
        # caches for various obsolescence related sets
        self.caches = {}
        self.svfs = svfs
        self._defaultformat = defaultformat
        self._readonly = readonly

    def __iter__(self):
        return iter(self._all)

    def __len__(self):
        return len(self._all)

    def __nonzero__(self):
        if not self._cached(r'_all'):
            try:
                return self.svfs.stat('obsstore').st_size > 1
            except OSError as inst:
                if inst.errno != errno.ENOENT:
                    raise
                # just build an empty _all list if no obsstore exists, which
                # avoids further stat() syscalls
        return bool(self._all)

    __bool__ = __nonzero__

    @property
    def readonly(self):
        """True if marker creation is disabled

        Remove me in the future when obsolete marker is always on."""
        return self._readonly

    def create(self, transaction, prec, succs=(), flag=0, parents=None,
               date=None, metadata=None, ui=None):
        """obsolete: add a new obsolete marker

        * ensure it is hashable
        * check mandatory metadata
        * encode metadata

        If you are a human writing code that creates markers, you want to use
        the `createmarkers` function in this module instead.

        return True if a new marker has been added, False if the markers
        already existed (no op).
        """
        if metadata is None:
            metadata = {}
        if date is None:
            if 'date' in metadata:
                # as a courtesy for out-of-tree extensions
                date = dateutil.parsedate(metadata.pop('date'))
            elif ui is not None:
                date = ui.configdate('devel', 'default-date')
                if date is None:
                    date = dateutil.makedate()
            else:
                date = dateutil.makedate()
        if len(prec) != 20:
            raise ValueError(prec)
        for succ in succs:
            if len(succ) != 20:
                raise ValueError(succ)
        if prec in succs:
            raise ValueError(_('in-marker cycle with %s') % node.hex(prec))

        metadata = tuple(sorted(metadata.iteritems()))

        marker = (bytes(prec), tuple(succs), int(flag), metadata, date, parents)
        return bool(self.add(transaction, [marker]))

    def add(self, transaction, markers):
        """Add new markers to the store

        Take care of filtering duplicates.
        Return the number of new markers."""
        if self._readonly:
            raise error.Abort(_('creating obsolete markers is not enabled on '
                                'this repo'))
        known = set()
        getsuccessors = self.successors.get
        new = []
        for m in markers:
            if m not in getsuccessors(m[0], ()) and m not in known:
                known.add(m)
                new.append(m)
        if new:
            f = self.svfs('obsstore', 'ab')
            try:
                offset = f.tell()
                transaction.add('obsstore', offset)
                # offset == 0: new file - add the version header
                data = b''.join(encodemarkers(new, offset == 0, self._version))
                f.write(data)
            finally:
                # XXX: f.close() == filecache invalidation == obsstore rebuilt.
                # call 'filecacheentry.refresh()' here
                f.close()
            addedmarkers = transaction.changes.get('obsmarkers')
            if addedmarkers is not None:
                addedmarkers.update(new)
            self._addmarkers(new, data)
            # new markers *may* have changed several sets. invalidate the
            # cache.
            self.caches.clear()
        # records the number of new markers for the transaction hooks
        previous = int(transaction.hookargs.get('new_obsmarkers', '0'))
        transaction.hookargs['new_obsmarkers'] = '%d' % (previous + len(new))
        return len(new)

    def mergemarkers(self, transaction, data):
        """merge a binary stream of markers inside the obsstore

        Returns the number of new markers added."""
        version, markers = _readmarkers(data)
        return self.add(transaction, markers)

    @propertycache
    def _data(self):
        return self.svfs.tryread('obsstore')

    @propertycache
    def _version(self):
        if len(self._data) >= 1:
            return _readmarkerversion(self._data)
        else:
            return self._defaultformat

    @propertycache
    def _all(self):
        data = self._data
        if not data:
            return []
        self._version, markers = _readmarkers(data)
        markers = list(markers)
        _checkinvalidmarkers(markers)
        return markers

    @propertycache
    def successors(self):
        successors = {}
        _addsuccessors(successors, self._all)
        return successors

    @propertycache
    def predecessors(self):
        predecessors = {}
        _addpredecessors(predecessors, self._all)
        return predecessors

    @propertycache
    def children(self):
        children = {}
        _addchildren(children, self._all)
        return children

    def _cached(self, attr):
        return attr in self.__dict__

    def _addmarkers(self, markers, rawdata):
        markers = list(markers)  # to allow repeated iteration
        self._data = self._data + rawdata
        self._all.extend(markers)
        if self._cached(r'successors'):
            _addsuccessors(self.successors, markers)
        if self._cached(r'predecessors'):
            _addpredecessors(self.predecessors, markers)
        if self._cached(r'children'):
            _addchildren(self.children, markers)
        _checkinvalidmarkers(markers)

    def relevantmarkers(self, nodes):
        """return a set of all obsolescence markers relevant to a set of nodes.

        "relevant" to a set of nodes means:

        - markers that use this changeset as successor
        - prune markers of direct children on this changeset
        - recursive application of the two rules on predecessors of these
          markers

        It is a set so you cannot rely on order."""

        pendingnodes = set(nodes)
        seenmarkers = set()
        seennodes = set(pendingnodes)
        precursorsmarkers = self.predecessors
        succsmarkers = self.successors
        children = self.children
        while pendingnodes:
            direct = set()
            for current in pendingnodes:
                direct.update(precursorsmarkers.get(current, ()))
                pruned = [m for m in children.get(current, ()) if not m[1]]
                direct.update(pruned)
                pruned = [m for m in succsmarkers.get(current, ()) if not m[1]]
                direct.update(pruned)
            direct -= seenmarkers
            pendingnodes = set([m[0] for m in direct])
            seenmarkers |= direct
            pendingnodes -= seennodes
            seennodes |= pendingnodes
        return seenmarkers

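# Illustrative note (editorial, not in the original file): relevantmarkers()
# walks the predecessor chain transitively. Assuming a rewrite marker
# m1 = (A, (B,), ...) and a prune marker m2 = (B, (), ...), calling
# obsstore.relevantmarkers([B]) would return {m1, m2}: m1 is found as a
# predecessor marker of B, m2 via its empty successors tuple, and the walk
# then continues from A (finding nothing further).
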
def makestore(ui, repo):
    """Create an obsstore instance from a repo."""
    # read default format for new obsstore.
    # developer config: format.obsstore-version
    defaultformat = ui.configint('format', 'obsstore-version')
    # rely on obsstore class default when possible.
    kwargs = {}
    if defaultformat is not None:
        kwargs[r'defaultformat'] = defaultformat
    readonly = not isenabled(repo, createmarkersopt)
    store = obsstore(repo.svfs, readonly=readonly, **kwargs)
    if store and readonly:
        ui.warn(_('obsolete feature not enabled but %i markers found!\n')
                % len(list(store)))
    return store

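# Illustrative sketch (editorial, not in the original file): forcing new
# repositories to create a version-0 obsstore via the developer config read
# above:
#
#     [format]
#     obsstore-version = 0
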
def commonversion(versions):
    """Return the newest version listed in both versions and our local formats.

    Returns None if no common version exists.
    """
    versions.sort(reverse=True)
    # search for highest version known on both sides
    for v in versions:
        if v in formats:
            return v
    return None

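# Illustrative sketch (editorial, not in the original file): with the local
# 'formats' map supporting versions 0 and 1, negotiating against a peer that
# also speaks a hypothetical version 2 picks the newest shared one:
#
#     commonversion([2, 1, 0])    # -> 1
#     commonversion([2])          # -> None
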
# arbitrarily picked to fit into the 8K limit from the HTTP server
# you have to take into account:
# - the version header
# - the base85 encoding
_maxpayload = 5300

def _pushkeyescape(markers):
    """encode markers into a dict suitable for pushkey exchange

    - binary data is base85 encoded
    - split in chunks smaller than 5300 bytes"""
    keys = {}
    parts = []
    currentlen = _maxpayload * 2  # ensure we create a new part
    for marker in markers:
        nextdata = _fm0encodeonemarker(marker)
        if (len(nextdata) + currentlen > _maxpayload):
            currentpart = []
            currentlen = 0
            parts.append(currentpart)
        currentpart.append(nextdata)
        currentlen += len(nextdata)
    for idx, part in enumerate(reversed(parts)):
        data = ''.join([_pack('>B', _fm0version)] + part)
        keys['dump%i' % idx] = util.b85encode(data)
    return keys

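# Illustrative sketch (editorial, not in the original file): the resulting
# pushkey namespace maps chunk names to base85 payloads, each chunk being a
# self-contained version-0 stream (parts are enumerated in reverse):
#
#     _pushkeyescape(markers)
#     # -> {'dump0': <base85 of version byte + fm0 markers>, 'dump1': ...}
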
def listmarkers(repo):
    """List markers over pushkey"""
    if not repo.obsstore:
        return {}
    return _pushkeyescape(sorted(repo.obsstore))

def pushmarker(repo, key, old, new):
    """Push markers over pushkey"""
    if not key.startswith('dump'):
        repo.ui.warn(_('unknown key: %r') % key)
        return False
    if old:
        repo.ui.warn(_('unexpected old value for %r') % key)
        return False
    data = util.b85decode(new)
    with repo.lock(), repo.transaction('pushkey: obsolete markers') as tr:
        repo.obsstore.mergemarkers(tr, data)
        repo.invalidatevolatilesets()
        return True

# mapping of 'set-name' -> <function to compute this set>
cachefuncs = {}
def cachefor(name):
    """Decorator to register a function as computing the cache for a set"""
    def decorator(func):
        if name in cachefuncs:
            msg = "duplicated registration for volatileset '%s' (existing: %r)"
            raise error.ProgrammingError(msg % (name, cachefuncs[name]))
        cachefuncs[name] = func
        return func
    return decorator

def getrevs(repo, name):
    """Return the set of revisions that belong to the <name> set

    Such access may compute the set and cache it for future use"""
    repo = repo.unfiltered()
    if not repo.obsstore:
        return frozenset()
    if name not in repo.obsstore.caches:
        repo.obsstore.caches[name] = cachefuncs[name](repo)
    return repo.obsstore.caches[name]

# To be simple we need to invalidate obsolescence cache when:
#
# - a new changeset is added
# - public phase is changed
# - obsolescence markers are added
# - strip is used on a repo
def clearobscaches(repo):
    """Remove all obsolescence related caches from a repo

    This removes all caches in the obsstore if the obsstore already exists on
    the repo.

    (We could be smarter here given the exact event that triggered the cache
    clearing)"""
    # only clear cache if there is obsstore data in this repo
    if 'obsstore' in repo._filecache:
        repo.obsstore.caches.clear()

def _mutablerevs(repo):
    """the set of mutable revisions in the repository"""
    return repo._phasecache.getrevset(repo, (phases.draft, phases.secret))

@cachefor('obsolete')
def _computeobsoleteset(repo):
    """the set of obsolete revisions"""
    getnode = repo.changelog.node
    notpublic = _mutablerevs(repo)
    isobs = repo.obsstore.successors.__contains__
    obs = set(r for r in notpublic if isobs(getnode(r)))
    return obs

@cachefor('orphan')
def _computeorphanset(repo):
    """the set of non obsolete revisions with obsolete parents"""
    pfunc = repo.changelog.parentrevs
    mutable = _mutablerevs(repo)
    obsolete = getrevs(repo, 'obsolete')
    others = mutable - obsolete
    unstable = set()
    for r in sorted(others):
        # A rev is unstable if one of its parents is obsolete or unstable
        # this works since we traverse in growing rev order
        for p in pfunc(r):
            if p in obsolete or p in unstable:
                unstable.add(r)
                break
    return unstable

903 @cachefor('suspended')
873 @cachefor('suspended')
904 def _computesuspendedset(repo):
874 def _computesuspendedset(repo):
905 """the set of obsolete parents with non obsolete descendants"""
875 """the set of obsolete parents with non obsolete descendants"""
906 suspended = repo.changelog.ancestors(getrevs(repo, 'orphan'))
876 suspended = repo.changelog.ancestors(getrevs(repo, 'orphan'))
907 return set(r for r in getrevs(repo, 'obsolete') if r in suspended)
877 return set(r for r in getrevs(repo, 'obsolete') if r in suspended)

@cachefor('extinct')
def _computeextinctset(repo):
    """the set of obsolete parents without non-obsolete descendants"""
    return getrevs(repo, 'obsolete') - getrevs(repo, 'suspended')

@cachefor('phasedivergent')
def _computephasedivergentset(repo):
    """the set of revs trying to obsolete public revisions"""
    bumped = set()
    # util function (avoid attribute lookup in the loop)
    phase = repo._phasecache.phase # would be faster to grab the full list
    public = phases.public
    cl = repo.changelog
    torev = cl.nodemap.get
    tonode = cl.node
    for rev in repo.revs('(not public()) and (not obsolete())'):
        # We only evaluate mutable, non-obsolete revisions
        node = tonode(rev)
        # (future) a cache of predecessors may be worth it if splits are
        # very common
        for pnode in obsutil.allpredecessors(repo.obsstore, [node],
                                             ignoreflags=bumpedfix):
            prev = torev(pnode) # unfiltered! but so is phasecache
            if (prev is not None) and (phase(repo, prev) <= public):
                # we have a public predecessor
                bumped.add(rev)
                break # Next draft!
    return bumped

@cachefor('contentdivergent')
def _computecontentdivergentset(repo):
    """the set of revs that compete to be the final successors of some
    revision.
    """
    divergent = set()
    obsstore = repo.obsstore
    newermap = {}
    tonode = repo.changelog.node
    for rev in repo.revs('(not public()) - obsolete()'):
        node = tonode(rev)
        mark = obsstore.predecessors.get(node, ())
        toprocess = set(mark)
        seen = set()
        while toprocess:
            prec = toprocess.pop()[0]
            if prec in seen:
                continue # emergency cycle hanging prevention
            seen.add(prec)
            if prec not in newermap:
                obsutil.successorssets(repo, prec, cache=newermap)
            newer = [n for n in newermap[prec] if n]
            if len(newer) > 1:
                divergent.add(rev)
                break
            toprocess.update(obsstore.predecessors.get(prec, ()))
    return divergent


def createmarkers(repo, relations, flag=0, date=None, metadata=None,
                  operation=None):
    """Add obsolete markers between changesets in a repo

    <relations> must be an iterable of (<old>, (<new>, ...)[, {metadata}])
    tuples. `old` and `news` are changectx objects. The optional per-relation
    metadata dictionary applies to that marker only; it is merged with the
    global metadata specified through the `metadata` argument of this
    function.

    Trying to obsolete a public changeset will raise an exception.

    The current user and date are used unless specified otherwise in the
    metadata attribute.

    This function operates within a transaction of its own, but does
    not take any lock on the repo.
    """
    # prepare metadata
    if metadata is None:
        metadata = {}
    if 'user' not in metadata:
        develuser = repo.ui.config('devel', 'user.obsmarker')
        if develuser:
            metadata['user'] = develuser
        else:
            metadata['user'] = repo.ui.username()

    # Operation metadata handling
    useoperation = repo.ui.configbool('experimental',
                                      'evolution.track-operation')
    if useoperation and operation:
        metadata['operation'] = operation

    # Effect flag metadata handling
    saveeffectflag = repo.ui.configbool('experimental',
                                        'evolution.effect-flags')

    with repo.transaction('add-obsolescence-marker') as tr:
        markerargs = []
        for rel in relations:
            prec = rel[0]
            sucs = rel[1]
            localmetadata = metadata.copy()
            if 2 < len(rel):
                localmetadata.update(rel[2])

            if not prec.mutable():
                raise error.Abort(_("cannot obsolete public changeset: %s")
                                  % prec,
                                  hint="see 'hg help phases' for details")
            nprec = prec.node()
            nsucs = tuple(s.node() for s in sucs)
            npare = None
            if not nsucs:
                npare = tuple(p.node() for p in prec.parents())
            if nprec in nsucs:
                raise error.Abort(_("changeset %s cannot obsolete itself")
                                  % prec)

            # The effect flag can differ per relation
            if saveeffectflag:
                # The effect flag is saved in a versioned field name for
                # future evolution
                effectflag = obsutil.geteffectflag(rel)
                localmetadata[obsutil.EFFECTFLAGFIELD] = "%d" % effectflag

            # Creating the marker causes the hidden cache to become invalid,
            # which causes recomputation when we ask for prec.parents()
            # above, resulting in n^2 behavior. So let's prepare all of the
            # args first, then create the markers.
            markerargs.append((nprec, nsucs, npare, localmetadata))

        for args in markerargs:
            nprec, nsucs, npare, localmetadata = args
            repo.obsstore.create(tr, nprec, nsucs, flag, parents=npare,
                                 date=date, metadata=localmetadata,
                                 ui=repo.ui)
        repo.filteredrevcache.clear()
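
# Illustrative sketch (not part of the original module): recording that
# changeset `old` was rewritten into `new`, assuming `old` and `new` are
# hypothetical changectx objects:
#
#     createmarkers(repo, [(old, (new,))], operation='amend')
#
# A prune would pass an empty successor tuple instead:
#
#     createmarkers(repo, [(old, ())])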

# obsutil.py - utility functions for obsolescence
#
# Copyright 2017 Boris Feld <boris.feld@octobus.net>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import re

from .i18n import _
from . import (
    node as nodemod,
    phases,
    util,
)
from .utils import dateutil

### obsolescence marker flag

## bumpedfix flag
#
# When a changeset A' succeeds a changeset A which became public, we call A'
# "bumped" because it's a successor of a public changeset.
#
# o A' (bumped)
# |`:
# | o A
# |/
# o Z
#
# The way to solve this situation is to create a new changeset Ad as a child
# of A. This changeset has the same content as A'. So the diff from A to A'
# is the same as the diff from A to Ad. Ad is marked as a successor of A'.
#
# o Ad
# |`:
# | x A'
# |'|
# o | A
# |/
# o Z
#
# But by transitivity Ad is also a successor of A. To avoid having Ad marked
# as bumped too, we add the `bumpedfix` flag to the marker: <A', (Ad,)>.
# This flag means that the successor expresses the changes between the public
# and bumped versions and fixes the situation, breaking the transitivity of
# "bumped" here.
bumpedfix = 1
usingsha256 = 2

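# Illustrative sketch (not part of the original module): flags live in
# position 2 of a raw marker tuple (see the marker class below), so readers
# test them with a bitwise AND:
#
#     if mark[2] & bumpedfix:
#         pass  # marker only fixes a phase-divergence, skip it
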
class marker(object):
    """Wrap obsolete marker raw data"""

    def __init__(self, repo, data):
        # the repo argument will be used to create changectx in a later
        # version
        self._repo = repo
        self._data = data
        self._decodedmeta = None

    def __hash__(self):
        return hash(self._data)

    def __eq__(self, other):
        if type(other) != type(self):
            return False
        return self._data == other._data

    def prednode(self):
        """Predecessor changeset node identifier"""
        return self._data[0]

    def succnodes(self):
        """List of successor changeset node identifiers"""
        return self._data[1]

    def parentnodes(self):
        """Parents of the predecessor (None if not recorded)"""
        return self._data[5]

    def metadata(self):
        """Decoded metadata dictionary"""
        return dict(self._data[3])

    def date(self):
        """Creation date as (unixtime, offset)"""
        return self._data[4]

    def flags(self):
        """The flags field of the marker"""
        return self._data[2]

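# Illustrative sketch (not part of the original module): wrapping a raw
# marker tuple gives named accessors instead of indexed fields, e.g. for a
# hypothetical raw tuple `data` taken from the obsstore:
#
#     m = marker(repo, data)
#     m.prednode()   # data[0]
#     m.succnodes()  # data[1]
#     m.flags()      # data[2]
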
def getmarkers(repo, nodes=None, exclusive=False):
    """returns markers known in a repository

    If <nodes> is specified, only markers "relevant" to those nodes are
    returned"""
    if nodes is None:
        rawmarkers = repo.obsstore
    elif exclusive:
        rawmarkers = exclusivemarkers(repo, nodes)
    else:
        rawmarkers = repo.obsstore.relevantmarkers(nodes)

    for markerdata in rawmarkers:
        yield marker(repo, markerdata)

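# Illustrative sketch (not part of the original module): listing the author
# recorded on every marker relevant to the working-directory parent:
#
#     for m in getmarkers(repo, nodes=[repo['.'].node()]):
#         print(m.metadata().get('user'), m.flags())
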
def closestpredecessors(repo, nodeid):
    """yield the list of next predecessors pointing on visible changectx nodes

    This function respects the repoview filtering; filtered revisions will be
    considered missing.
    """

    precursors = repo.obsstore.predecessors
    stack = [nodeid]
    seen = set(stack)

    while stack:
        current = stack.pop()
        currentpreccs = precursors.get(current, ())

        for prec in currentpreccs:
            precnodeid = prec[0]

            # Basic cycle protection
            if precnodeid in seen:
                continue
            seen.add(precnodeid)

            if precnodeid in repo:
                yield precnodeid
            else:
                stack.append(precnodeid)

def allpredecessors(obsstore, nodes, ignoreflags=0):
    """Yield a node for every precursor of <nodes>.

    Some precursors may be unknown locally.

    This is a linear yield unsuited to detecting folded changesets. It
    includes the initial nodes too."""

    remaining = set(nodes)
    seen = set(remaining)
    while remaining:
        current = remaining.pop()
        yield current
        for mark in obsstore.predecessors.get(current, ()):
            # ignore markers flagged with the specified flags
            if mark[2] & ignoreflags:
                continue
            suc = mark[0]
            if suc not in seen:
                seen.add(suc)
                remaining.add(suc)

def allsuccessors(obsstore, nodes, ignoreflags=0):
    """Yield a node for every successor of <nodes>.

    Some successors may be unknown locally.

    This is a linear yield unsuited to detecting split changesets. It
    includes the initial nodes too."""
    remaining = set(nodes)
    seen = set(remaining)
    while remaining:
        current = remaining.pop()
        yield current
        for mark in obsstore.successors.get(current, ()):
            # ignore markers flagged with the specified flags
            if mark[2] & ignoreflags:
                continue
            for suc in mark[1]:
                if suc not in seen:
                    seen.add(suc)
                    remaining.add(suc)

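# Illustrative sketch (not part of the original module): collecting every
# successor of a node, known locally or not, while skipping markers that
# only fix a phase-divergence:
#
#     node = repo['.'].node()
#     succs = set(allsuccessors(repo.obsstore, [node],
#                               ignoreflags=bumpedfix))
#     succs.discard(node)  # the walk yields the initial node too
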
def _filterprunes(markers):
    """return a set with no prune markers"""
    return set(m for m in markers if m[1])

def exclusivemarkers(repo, nodes):
    """set of markers relevant to "nodes" but no other locally-known nodes

    This function computes the set of markers "exclusive" to a locally-known
    node. This means we walk the markers starting from <nodes> until we reach
    a locally-known precursor outside of <nodes>. Elements of <nodes> with
    locally-known successors outside of <nodes> are ignored (since their
    precursor markers are also relevant to these successors).

    For example:

    # (A0 rewritten as A1)
    #
    # A0 <-1- A1 # Marker "1" is exclusive to A1

    or

    # (A0 rewritten as AX; AX rewritten as A1; AX is unknown locally)
    #
    # <-1- A0 <-2- AX <-3- A1 # Markers "2,3" are exclusive to A1

    or

    # (A0 has unknown precursors, A0 rewritten as A1 and A2 (divergence))
    #
    #       <-2- A1 # Marker "2" is exclusive to A0,A1
    #      /
    # <-1- A0
    #      \
    #       <-3- A2 # Marker "3" is exclusive to A0,A2
    #
    # in addition:
    #
    # Markers "2,3" are exclusive to A1,A2
    # Markers "1,2,3" are exclusive to A0,A1,A2

    See test/test-obsolete-bundle-strip.t for more examples.

    An example usage is strip. When stripping a changeset, we also want to
    strip the markers exclusive to this changeset. Otherwise we would have
    "dangling" obsolescence markers from its precursors: obsolescence markers
    marking a node as obsolete without any successors available locally.

    As for relevant markers, the prune markers for children will be followed.
    Of course, they will only be followed if the pruned child is
    locally-known, since the prune markers are relevant to the pruned node.
    However, while prune markers are considered relevant to the parent of the
    pruned changesets, prune markers for locally-known changesets (with no
    successors) are considered exclusive to the pruned nodes. This allows
    stripping the prune markers (with the rest of the exclusive chain)
    alongside the pruned changesets.
    """
    # running on a filtered repository would be dangerous as markers could be
    # reported as exclusive when they are relevant for other filtered nodes.
    unfi = repo.unfiltered()

    # shortcut to various useful items
    nm = unfi.changelog.nodemap
    precursorsmarkers = unfi.obsstore.predecessors
    successormarkers = unfi.obsstore.successors
    childrenmarkers = unfi.obsstore.children

    # exclusive markers (return of the function)
    exclmarkers = set()
    # we need fast membership testing
    nodes = set(nodes)
    # looking for heads in the obshistory
    #
    # XXX we are ignoring all issues in regard to cycles for now.
    stack = [n for n in nodes if not _filterprunes(successormarkers.get(n, ()))]
    stack.sort()
    # nodes already stacked
    seennodes = set(stack)
    while stack:
        current = stack.pop()
        # fetch precursors markers
        markers = list(precursorsmarkers.get(current, ()))
        # extend the list with prune markers
        for mark in successormarkers.get(current, ()):
            if not mark[1]:
                markers.append(mark)
        # and markers from children (looking for prune)
        for mark in childrenmarkers.get(current, ()):
            if not mark[1]:
                markers.append(mark)
        # traverse the markers
        for mark in markers:
            if mark in exclmarkers:
                # marker already selected
                continue

            # If the marker is about the current node, select it
            #
            # (this delays the addition of markers from children)
            if mark[1] or mark[0] == current:
                exclmarkers.add(mark)

            # should we keep traversing through the precursors?
            prec = mark[0]

            # nodes in the stack or already processed
            if prec in seennodes:
                continue

            # is this a locally known node?
            known = prec in nm
            # if locally-known and not in the <nodes> set, the traversal
            # stops here.
            if known and prec not in nodes:
                continue

            # do not keep going if there are unselected markers pointing to
            # this node. If we end up traversing these unselected markers
            # later the node will be taken care of at that point.
            precmarkers = _filterprunes(successormarkers.get(prec))
            if precmarkers.issubset(exclmarkers):
                seennodes.add(prec)
                stack.append(prec)

    return exclmarkers

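# Illustrative sketch (not part of the original module): the strip use case
# described in the docstring above, for a hypothetical node `n`:
#
#     tostrip = exclusivemarkers(repo, [n])
#     # markers in `tostrip` can be removed together with `n` without
#     # leaving dangling references in the obsstore
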
def foreground(repo, nodes):
    """return all nodes in the "foreground" of other nodes

    The foreground of a revision is anything reachable using parent ->
    children or precursor -> successor relations. It is very similar to
    "descendant" but augmented with obsolescence information.

    Beware that obsolescence cycles may produce unexpected results in complex
    situations.
    """
    repo = repo.unfiltered()
    foreground = set(repo.set('%ln::', nodes))
    if repo.obsstore:
        # We only need this complicated logic if there is obsolescence
        # XXX will probably deserve an optimised revset.
        nm = repo.changelog.nodemap
        plen = -1
        # compute the whole set of successors or descendants
        while len(foreground) != plen:
            plen = len(foreground)
            succs = set(c.node() for c in foreground)
            mutable = [c.node() for c in foreground if c.mutable()]
            succs.update(allsuccessors(repo.obsstore, mutable))
            known = (n for n in succs if n in nm)
            foreground = set(repo.set('%ln::', known))
    return set(c.node() for c in foreground)

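# Illustrative sketch (not part of the original module): the foreground of
# the working-directory parent contains its descendants plus anything they
# were rewritten into:
#
#     fg = foreground(repo, [repo['.'].node()])
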
# effectflag field
#
# Effect-flag is a 1-byte bit field used to store what changed between a
# changeset and its successor(s).
#
# The effect flag is stored in obs-markers metadata while we iterate on the
# information design. That's why we have the EFFECTFLAGFIELD. If we come up
# with an incompatible design for effect flag, we can store a new design under
# another field name so we don't break readers. We plan to extend the existing
# obsmarkers bit-field when the effect flag design is stabilized.
#
# The effect-flag is placed behind an experimental flag
# `effect-flags` set to off by default.
#

EFFECTFLAGFIELD = "ef1"

DESCCHANGED = 1 << 0 # action changed the description
METACHANGED = 1 << 1 # action changed the meta
DIFFCHANGED = 1 << 3 # action changed the diff introduced by the changeset
PARENTCHANGED = 1 << 2 # action changed the parent
USERCHANGED = 1 << 4 # the user changed
DATECHANGED = 1 << 5 # the date changed
BRANCHCHANGED = 1 << 6 # the branch changed

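# Illustrative sketch (not part of the original module): an amend that edits
# both the description and the diff would OR the matching bits together:
#
#     effects = DESCCHANGED | DIFFCHANGED    # == 1 | 8 == 9
#     assert effects & DESCCHANGED and not effects & USERCHANGED
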
METABLACKLIST = [
    re.compile('^branch$'),
    re.compile('^.*-source$'),
    re.compile('^.*_source$'),
    re.compile('^source$'),
]

def metanotblacklisted(metaitem):
    """ Check that the key of a meta item (extrakey, extravalue) does not
    match any of the blacklist patterns
    """
    metakey = metaitem[0]

    return not any(pattern.match(metakey) for pattern in METABLACKLIST)

def _prepare_hunk(hunk):
    """Drop all information but the username and patch"""
    cleanhunk = []
    for line in hunk.splitlines():
        if line.startswith(b'# User') or not line.startswith(b'#'):
            if line.startswith(b'@@'):
                line = b'@@\n'
            cleanhunk.append(line)
    return cleanhunk

def _getdifflines(iterdiff):
    """return cleaned-up diff lines"""
    lines = next(iterdiff, None)

    if lines is None:
        return lines

    return _prepare_hunk(lines)

def _cmpdiff(leftctx, rightctx):
    """return True if both ctx introduce the "same diff"

    This is a first and basic implementation, with many shortcomings.
    """

    # leftctx or rightctx might be filtered, so we need to use the contexts
    # with an unfiltered repository to safely compute the diff
    leftunfi = leftctx._repo.unfiltered()[leftctx.rev()]
    leftdiff = leftunfi.diff(git=1)
    rightunfi = rightctx._repo.unfiltered()[rightctx.rev()]
    rightdiff = rightunfi.diff(git=1)

    left, right = (0, 0)
    while None not in (left, right):
        left = _getdifflines(leftdiff)
        right = _getdifflines(rightdiff)

        if left != right:
            return False
    return True

def geteffectflag(relation):
    """ From an obs-marker relation, compute what changed between the
    predecessor and the successor.
    """
    effects = 0

    source = relation[0]

    for changectx in relation[1]:
        # Check if description has changed
        if changectx.description() != source.description():
            effects |= DESCCHANGED

        # Check if user has changed
        if changectx.user() != source.user():
            effects |= USERCHANGED

        # Check if date has changed
        if changectx.date() != source.date():
            effects |= DATECHANGED

        # Check if branch has changed
        if changectx.branch() != source.branch():
            effects |= BRANCHCHANGED

        # Check if at least one of the parents has changed
        if changectx.parents() != source.parents():
            effects |= PARENTCHANGED

        # Check if other meta has changed
        changeextra = changectx.extra().items()
        ctxmeta = list(filter(metanotblacklisted, changeextra))

        sourceextra = source.extra().items()
        srcmeta = list(filter(metanotblacklisted, sourceextra))

        if ctxmeta != srcmeta:
            effects |= METACHANGED

        # Check if the diff has changed
        if not _cmpdiff(source, changectx):
            effects |= DIFFCHANGED

    return effects

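# Illustrative sketch (not part of the original module): a relation is the
# same (<old>, (<new>, ...)) tuple accepted by obsolete.createmarkers, so
# for hypothetical changectx objects `old` and `new`:
#
#     flags = geteffectflag((old, (new,)))
#     if flags & DIFFCHANGED:
#         pass  # the rewrite changed the patch content
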
def getobsoleted(repo, tr):
    """return the set of pre-existing revisions obsoleted by a transaction"""
    torev = repo.unfiltered().changelog.nodemap.get
    phase = repo._phasecache.phase
    succsmarkers = repo.obsstore.successors.get
    public = phases.public
    addedmarkers = tr.changes.get('obsmarkers')
    addedrevs = tr.changes.get('revs')
    seenrevs = set()
    obsoleted = set()
    for mark in addedmarkers:
        node = mark[0]
        rev = torev(node)
        if rev is None or rev in seenrevs or rev in addedrevs:
            continue
        seenrevs.add(rev)
        if phase(repo, rev) == public:
            continue
        if set(succsmarkers(node) or []).issubset(addedmarkers):
            obsoleted.add(rev)
    return obsoleted

class _succs(list):
    """small class to represent a successors set with some metadata about it"""

    def __init__(self, *args, **kwargs):
        super(_succs, self).__init__(*args, **kwargs)
        self.markers = set()

    def copy(self):
        new = _succs(self)
        new.markers = self.markers.copy()
        return new

    @util.propertycache
    def _set(self):
        # immutable
        return set(self)

    def canmerge(self, other):
        return self._set.issubset(other._set)

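# Illustrative sketch (not part of the original module): canmerge() reports
# whether one successors set is a subset of another, which the deduplication
# step in successorssets() below uses to fold redundant candidates:
#
#     a = _succs(['n1'])
#     b = _succs(['n1', 'n2'])
#     assert a.canmerge(b) and not b.canmerge(a)
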
def successorssets(repo, initialnode, closest=False, cache=None):
    """Return set of all latest successors of initial nodes

    The successors set of a changeset A are the group of revisions that
    succeed A. It succeeds A as a consistent whole, each revision being only a
    partial replacement. By default, the successors set contains non-obsolete
    changesets only, walking the obsolescence graph until reaching a leaf. If
    'closest' is set to True, the closest successors-sets are returned (the
    obsolescence walk stops on known changesets).

    This function returns the full list of successor sets which is why it
    returns a list of tuples and not just a single tuple. Each tuple is a
    valid successors set. Note that (A,) may be a valid successors set for
    changeset A (see below).

    In most cases, a changeset A will have a single element (e.g. the
    changeset A is replaced by A') in its successors set. Though, it is also
    common for a changeset A to have no elements in its successor set (e.g.
    the changeset has been pruned). Therefore, the returned list of
    successors sets will be [(A',)] or [], respectively.

    When a changeset A is split into A' and B', however, it will result in a
    successors set containing more than a single element, i.e. [(A',B')].
    Divergent changesets will result in multiple successors sets, i.e.
    [(A',), (A'',)].

    If a changeset A is not obsolete, then it will conceptually have no
    successors set. To distinguish this from a pruned changeset, the
    successor set will contain itself only, i.e. [(A,)].

    Finally, final successors unknown locally are considered to be pruned
    (pruned: obsoleted without any successors). (Final: successors not
    affected by markers).

    The 'closest' mode respects the repoview filtering. For example, without
    any filter it will stop at the first locally-known changeset; with the
    'visible' filter it will stop on visible changesets.

    The optional `cache` parameter is a dictionary that may contain
    precomputed successors sets. It is meant to reuse the computation of a
    previous call to `successorssets` when multiple calls are made at the
    same time. The cache dictionary is updated in place. The caller is
    responsible for its life span. Code that makes multiple calls to
    `successorssets` *should* use this cache mechanism or risk a performance
    hit.

    Since results differ depending on the 'closest' mode, the same cache
    cannot be reused for both modes.
    """

    succmarkers = repo.obsstore.successors

    # Stack of nodes we search successors sets for
    toproceed = [initialnode]
    # set version of above list for fast loop detection
    # elements added to "toproceed" must be added here
    stackedset = set(toproceed)
    if cache is None:
        cache = {}

    # This while loop is the flattened version of a recursive search for
    # successors sets
    #
    # def successorssets(x):
    #     successors = directsuccessors(x)
    #     ss = [[]]
    #     for succ in directsuccessors(x):
    #         # product as in itertools cartesian product
    #         ss = product(ss, successorssets(succ))
    #     return ss
    #
    # But we can not use plain recursive calls here:
    # - that would blow the python call stack
    # - obsolescence markers may have cycles, we need to handle them.
    #
    # The `toproceed` list acts as our call stack. Every node we search
    # successors sets for is stacked there.
    #
    # The `stackedset` is a set version of this stack used to check if a node
    # is already stacked. This check is used to detect cycles and prevent
    # infinite loops.
    #
    # successors sets of all nodes are stored in the `cache` dictionary.
    #
    # After this while loop ends we use the cache to return the successors
    # sets for the node requested by the caller.
    while toproceed:
        # Every iteration tries to compute the successors sets of the topmost
        # node of the stack: CURRENT.
        #
        # There are four possible outcomes:
        #
        # 1) We already know the successors sets of CURRENT:
        #    -> mission accomplished, pop it from the stack.
        # 2) Stop the walk:
        #    default case: Node is not obsolete
        #    closest case: Node is known at this repo filter level
        #    -> the node is its own successors sets. Add it to the cache.
        # 3) We do not know the successors sets of direct successors of
        #    CURRENT:
        #    -> We add those successors to the stack.
        # 4) We know the successors sets of all direct successors of CURRENT:
        #    -> We can compute CURRENT successors set and add it to the
        #       cache.
        #
        current = toproceed[-1]

        # case 2 condition is a bit hairy because of closest,
        # we compute it on its own
        case2condition = ((current not in succmarkers)
                          or (closest and current != initialnode
                              and current in repo))

        if current in cache:
            # case (1): We already know the successors sets
            stackedset.remove(toproceed.pop())
        elif case2condition:
            # case (2): end of walk.
            if current in repo:
                # We have a valid successor.
                cache[current] = [_succs((current,))]
            else:
                # Final obsolete version is unknown locally.
                # Do not count that as a valid successor.
                cache[current] = []
        else:
            # cases (3) and (4)
            #
            # We proceed in two phases. Phase 1 aims to distinguish case (3)
            # from case (4):
            #
            # For each direct successor of CURRENT, we check whether its
            # successors sets are known. If they are not, we stack the
            # unknown node and proceed to the next iteration of the while
            # loop. (case 3)
            #
            # During this step, we may detect obsolescence cycles: a node
            # with unknown successors sets but already in the call stack.
            # In such a situation, we arbitrarily set the successors sets of
            # the node to nothing (node pruned) to break the cycle.
            #
            # If no break was encountered we proceed to phase 2.
            #
            # Phase 2 computes successors sets of CURRENT (case 4); see
            # details in phase 2 itself.
            #
            # Note the two levels of iteration in each phase.
            # - The first one handles obsolescence markers using CURRENT as
            #   precursor (successors markers of CURRENT).
            #
            #   Having multiple entries here means divergence.
            #
            # - The second one handles successors defined in each marker.
            #
            #   Having none means a pruned node; multiple successors mean a
            #   split; a single successor is a standard replacement.
            #
621 for mark in sorted(succmarkers[current]):
654 for mark in sorted(succmarkers[current]):
622 for suc in mark[1]:
655 for suc in mark[1]:
623 if suc not in cache:
656 if suc not in cache:
624 if suc in stackedset:
657 if suc in stackedset:
625 # cycle breaking
658 # cycle breaking
626 cache[suc] = []
659 cache[suc] = []
627 else:
660 else:
628 # case (3) If we have not computed successors sets
661 # case (3) If we have not computed successors sets
629 # of one of those successors we add it to the
662 # of one of those successors we add it to the
630 # `toproceed` stack and stop all work for this
663 # `toproceed` stack and stop all work for this
631 # iteration.
664 # iteration.
632 toproceed.append(suc)
665 toproceed.append(suc)
633 stackedset.add(suc)
666 stackedset.add(suc)
634 break
667 break
635 else:
668 else:
636 continue
669 continue
637 break
670 break
638 else:
671 else:
639 # case (4): we know all successors sets of all direct
672 # case (4): we know all successors sets of all direct
640 # successors
673 # successors
641 #
674 #
642 # Successors set contributed by each marker depends on the
675 # Successors set contributed by each marker depends on the
643 # successors sets of all its "successors" node.
676 # successors sets of all its "successors" node.
644 #
677 #
645 # Each different marker is a divergence in the obsolescence
678 # Each different marker is a divergence in the obsolescence
646 # history. It contributes successors sets distinct from other
679 # history. It contributes successors sets distinct from other
647 # markers.
680 # markers.
648 #
681 #
649 # Within a marker, a successor may have divergent successors
682 # Within a marker, a successor may have divergent successors
650 # sets. In such a case, the marker will contribute multiple
683 # sets. In such a case, the marker will contribute multiple
651 # divergent successors sets. If multiple successors have
684 # divergent successors sets. If multiple successors have
652 # divergent successors sets, a Cartesian product is used.
685 # divergent successors sets, a Cartesian product is used.
653 #
686 #
654 # At the end we post-process successors sets to remove
687 # At the end we post-process successors sets to remove
655 # duplicated entry and successors set that are strict subset of
688 # duplicated entry and successors set that are strict subset of
656 # another one.
689 # another one.
657 succssets = []
690 succssets = []
658 for mark in sorted(succmarkers[current]):
691 for mark in sorted(succmarkers[current]):
659 # successors sets contributed by this marker
692 # successors sets contributed by this marker
660 base = _succs()
693 base = _succs()
661 base.markers.add(mark)
694 base.markers.add(mark)
662 markss = [base]
695 markss = [base]
663 for suc in mark[1]:
696 for suc in mark[1]:
664 # cardinal product with previous successors
697 # cardinal product with previous successors
665 productresult = []
698 productresult = []
666 for prefix in markss:
699 for prefix in markss:
667 for suffix in cache[suc]:
700 for suffix in cache[suc]:
668 newss = prefix.copy()
701 newss = prefix.copy()
669 newss.markers.update(suffix.markers)
702 newss.markers.update(suffix.markers)
670 for part in suffix:
703 for part in suffix:
671 # do not duplicated entry in successors set
704 # do not duplicated entry in successors set
672 # first entry wins.
705 # first entry wins.
673 if part not in newss:
706 if part not in newss:
674 newss.append(part)
707 newss.append(part)
675 productresult.append(newss)
708 productresult.append(newss)
676 markss = productresult
709 markss = productresult
677 succssets.extend(markss)
710 succssets.extend(markss)
                # remove duplicated entries and strict subsets
                seen = []
                final = []
                candidates = sorted((s for s in succssets if s),
                                    key=len, reverse=True)
                for cand in candidates:
                    for seensuccs in seen:
                        if cand.canmerge(seensuccs):
                            seensuccs.markers.update(cand.markers)
                            break
                    else:
                        final.append(cand)
                        seen.append(cand)
                final.reverse() # put smaller successors sets first
                cache[current] = final
    return cache[initialnode]

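# A minimal usage sketch (hypothetical, assuming `repo` and `ctx` objects
# are at hand; not part of the original module):
#
#   ssets = successorssets(repo, ctx.node())
#   if not ssets:
#       pass  # ctx was pruned, or its final successors are unknown locally
#   elif len(ssets) > 1:
#       pass  # divergent rewrites of ctx exist
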
def successorsandmarkers(repo, ctx):
    """compute the raw data needed for computing obsfate
    Returns a list of dicts, one dict per successors set
    """
    if not ctx.obsolete():
        return None

    ssets = successorssets(repo, ctx.node(), closest=True)

    # closestsuccessors returns an empty list for pruned revisions, remap it
    # into a list containing an empty list for future processing
    if ssets == []:
        ssets = [[]]

    # Try to recover pruned markers
    succsmap = repo.obsstore.successors
    fullsuccessorsets = [] # successor set + markers
    for sset in ssets:
        if sset:
            fullsuccessorsets.append(sset)
        else:
            # successorssets returns an empty list when ctx or one of its
            # successors is pruned.
            # In this case, walk the obs-markers tree again starting with ctx
            # and find the relevant pruning obs-markers, the ones without
            # successors.
            # Having these markers allows us to compute some information
            # about its fate, like who pruned this changeset and when.

            # XXX we do not catch all prune markers (eg rewritten then pruned)
            # (fix me later)
            foundany = False
            for mark in succsmap.get(ctx.node(), ()):
                if not mark[1]:
                    foundany = True
                    sset = _succs()
                    sset.markers.add(mark)
                    fullsuccessorsets.append(sset)
            if not foundany:
                fullsuccessorsets.append(_succs())

    values = []
    for sset in fullsuccessorsets:
        values.append({'successors': sset, 'markers': sset.markers})

    return values

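# Shape of the result (illustrative only, with hypothetical nodes B and C
# and marker m): for a changeset split into B and C by a single marker,
# this returns [{'successors': [B, C], 'markers': {m}}]; for a
# non-obsolete changeset it returns None.
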
def _getobsfate(successorssets):
    """ Compute a changeset obsolescence fate based on its successorssets.
    Successors can be the tipmost ones or the immediate ones. This function's
    return values are not meant to be shown directly to users; they are meant
    to be used by internal functions only.
    Returns one fate from the following values:
    - pruned
    - diverged
    - superseded
    - superseded_split
    """

    if len(successorssets) == 0:
        # The commit has been pruned
        return 'pruned'
    elif len(successorssets) > 1:
        return 'diverged'
    else:
        # No divergence, only one set of successors
        successors = successorssets[0]

        if len(successors) == 1:
            return 'superseded'
        else:
            return 'superseded_split'

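# Illustrative mapping (hypothetical nodes b and c):
#   _getobsfate([])         -> 'pruned'
#   _getobsfate([[b]])      -> 'superseded'
#   _getobsfate([[b, c]])   -> 'superseded_split'
#   _getobsfate([[b], [c]]) -> 'diverged'
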
def obsfateverb(successorset, markers):
    """ Return the verb summarizing the successorset and potentially using
    information from the markers
    """
    if not successorset:
        verb = 'pruned'
    elif len(successorset) == 1:
        verb = 'rewritten'
    else:
        verb = 'split'
    return verb

def markersdates(markers):
    """returns the list of dates for a list of markers
    """
    return [m[4] for m in markers]

def markersusers(markers):
    """ Returns a sorted list of marker users without duplicates
    """
    markersmeta = [dict(m[3]) for m in markers]
    users = set(meta.get('user') for meta in markersmeta if meta.get('user'))

    return sorted(users)

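# Illustrative metadata access (hypothetical marker m): m[3] is a tuple of
# (key, value) pairs, so dict(m[3]).get('user') yields e.g. 'alice', and
# m[4] is the marker's (unixtime, offset) date pair used by markersdates.
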
def markersoperations(markers):
    """ Returns a sorted list of marker operations without duplicates
    """
    markersmeta = [dict(m[3]) for m in markers]
    operations = set(meta.get('operation') for meta in markersmeta
                     if meta.get('operation'))

    return sorted(operations)

def obsfateprinter(successors, markers, ui):
    """ Build an obsfate string for a single successorset using all obsfate
    related functions defined in obsutil
    """
    quiet = ui.quiet
    verbose = ui.verbose
    normal = not verbose and not quiet

    line = []

    # Verb
    line.append(obsfateverb(successors, markers))

    # Operations
    operations = markersoperations(markers)
    if operations:
        line.append(" using %s" % ", ".join(operations))

    # Successors
    if successors:
        fmtsuccessors = [successors.joinfmt(succ) for succ in successors]
        line.append(" as %s" % ", ".join(fmtsuccessors))

    # Users
    users = markersusers(markers)
    # Filter out the current user in non-verbose mode to reduce the amount
    # of information
    if not verbose:
        currentuser = ui.username(acceptempty=True)
        if len(users) == 1 and currentuser in users:
            users = None

    if (verbose or normal) and users:
        line.append(" by %s" % ", ".join(users))

    # Date
    dates = markersdates(markers)

    if dates and verbose:
        min_date = min(dates)
        max_date = max(dates)

        if min_date == max_date:
            fmtmin_date = dateutil.datestr(min_date, '%Y-%m-%d %H:%M %1%2')
            line.append(" (at %s)" % fmtmin_date)
        else:
            fmtmin_date = dateutil.datestr(min_date, '%Y-%m-%d %H:%M %1%2')
            fmtmax_date = dateutil.datestr(max_date, '%Y-%m-%d %H:%M %1%2')
            line.append(" (between %s and %s)" % (fmtmin_date, fmtmax_date))

    return "".join(line)

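# Illustrative output (hypothetical hash and user): for a changeset amended
# by another user, the assembled string could read
# "rewritten using amend as 1e4f2a3b by alice".
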
filteredmsgtable = {
    "pruned": _("hidden revision '%s' is pruned"),
    "diverged": _("hidden revision '%s' has diverged"),
    "superseded": _("hidden revision '%s' was rewritten as: %s"),
    "superseded_split": _("hidden revision '%s' was split as: %s"),
    "superseded_split_several": _("hidden revision '%s' was split as: %s and "
                                  "%d more"),
}

def _getfilteredreason(repo, changeid, ctx):
    """return a human-friendly string on why an obsolete changeset is hidden
    """
    successors = successorssets(repo, ctx.node())
    fate = _getobsfate(successors)

    # Be more precise in case the revision is superseded
    if fate == 'pruned':
        return filteredmsgtable['pruned'] % changeid
    elif fate == 'diverged':
        return filteredmsgtable['diverged'] % changeid
    elif fate == 'superseded':
        single_successor = nodemod.short(successors[0][0])
        return filteredmsgtable['superseded'] % (changeid, single_successor)
    elif fate == 'superseded_split':

        succs = []
        for node_id in successors[0]:
            succs.append(nodemod.short(node_id))

        if len(succs) <= 2:
            fmtsuccs = ', '.join(succs)
            return filteredmsgtable['superseded_split'] % (changeid, fmtsuccs)
        else:
            firstsuccessors = ', '.join(succs[:2])
            remainingnumber = len(succs) - 2

            args = (changeid, firstsuccessors, remainingnumber)
            return filteredmsgtable['superseded_split_several'] % args
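
# Illustrative result (hypothetical short hashes): a changeset split into
# three successors would yield
# "hidden revision 'x' was split as: 1e4f2a3b, 9c8d7e6f and 1 more".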