obsolete: use format version 1 as the default for obsstore
Pierre-Yves David
r22854:f8a4ce6f default
@@ -1,1146 +1,1146 @@
# obsolete.py - obsolete markers handling
#
# Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
#                Logilab SA        <contact@logilab.fr>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""Obsolete marker handling

An obsolete marker maps an old changeset to a list of new
changesets. If the list of new changesets is empty, the old changeset
is said to be "killed". Otherwise, the old changeset is being
"replaced" by the new changesets.

Obsolete markers can be used to record and distribute changeset graph
transformations performed by history rewrite operations, and help
building new tools to reconcile conflicting rewrite actions. To
facilitate conflict resolution, markers include various annotations
besides old and new changeset identifiers, such as creation date or
author name.

The old obsoleted changeset is called a "precursor" and possible
replacements are called "successors". Markers that used changeset X as
a precursor are called "successor markers of X" because they hold
information about the successors of X. Markers that use changeset Y as
a successor are called "precursor markers of Y" because they hold
information about the precursors of Y.

Examples:

- When changeset A is replaced by changeset A', one marker is stored:

    (A, (A',))

- When changesets A and B are folded into a new changeset C, two markers are
  stored:

    (A, (C,)) and (B, (C,))

- When changeset A is simply "pruned" from the graph, a marker is created:

    (A, ())

- When changeset A is split into B and C, a single marker is used:

    (A, (B, C))

  We use a single marker to distinguish the "split" case from the "divergence"
  case. If two independent operations rewrite the same changeset A into A' and
  A'', we have an error case: divergent rewriting. We can detect it because
  two markers will be created independently:

    (A, (B,)) and (A, (C,))

Format
------

Markers are stored in an append-only file stored in
'.hg/store/obsstore'.

The file starts with a version header:

- 1 unsigned byte: version number, starting at zero.

The header is followed by the markers. The marker format depends on the
version. See the comment associated with each format for details.

"""
import struct
import util, base85, node
import phases
from i18n import _

_pack = struct.pack
_unpack = struct.unpack

_SEEK_END = 2 # os.SEEK_END was introduced in Python 2.5

# the obsolete feature is not mature enough to be enabled by default.
# you have to rely on third party extensions to enable this.
_enabled = False

### obsolescence marker flag

## bumpedfix flag
#
# When a changeset A' succeeds a changeset A which became public, we call A'
# "bumped" because it's a successor of a public changeset
#
#     o A' (bumped)
#     |`:
#     | o A
#     |/
#     o Z
#
# The way to solve this situation is to create a new changeset Ad as a child
# of A. This changeset has the same content as A'. So the diff from A to A'
# is the same as the diff from A to Ad. Ad is marked as a successor of A'
#
#     o Ad
#     |`:
#     | x A'
#     |'|
#     o | A
#     |/
#     o Z
#
# But by transitivity Ad is also a successor of A. To avoid having Ad marked
# as bumped too, we add the `bumpedfix` flag to the marker, <A', (Ad,)>.
# This flag means that the successors express the changes between the public
# and bumped version and fix the situation, breaking the transitivity of
# "bumped" here.
bumpedfix = 1
usingsha256 = 2

## Parsing and writing of version "0"
#
# The header is followed by the markers. Each marker is made of:
#
# - 1 uint8 : number of new changesets "N", can be zero.
#
# - 1 uint32: metadata size "M" in bytes.
#
# - 1 byte: a bit field. It is reserved for flags used in common
#   obsolete marker operations, to avoid repeated decoding of metadata
#   entries.
#
# - 20 bytes: obsoleted changeset identifier.
#
# - N*20 bytes: new changesets identifiers.
#
# - M bytes: metadata as a sequence of nul-terminated strings. Each
#   string contains a key and a value, separated by a colon ':', without
#   additional encoding. Keys cannot contain '\0' or ':' and values
#   cannot contain '\0'.
_fm0version = 0
_fm0fixed = '>BIB20s'
_fm0node = '20s'
_fm0fsize = struct.calcsize(_fm0fixed)
_fm0fnodesize = struct.calcsize(_fm0node)
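
# Illustrative sketch (editor's note, not part of the original module): how
# one version-0 marker maps onto the struct formats above. The node values
# are dummy 20-byte placeholders, assumed here for demonstration only.
#
#   >>> meta = 'user:alice'                      # one 'key:value' entry
#   >>> fixed = _pack(_fm0fixed, 1, len(meta), 0, 'p' * 20)
#   >>> raw = fixed + _pack(_fm0node, 's' * 20) + meta
#   >>> _unpack(_fm0fixed, raw[:_fm0fsize])      # numsuc, mdsize, flags, pre
#   (1, 10, 0, 'pppppppppppppppppppp')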

def _fm0readmarkers(data, off=0):
    # Loop on markers
    l = len(data)
    while off + _fm0fsize <= l:
        # read fixed part
        cur = data[off:off + _fm0fsize]
        off += _fm0fsize
        numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
        # read replacement
        sucs = ()
        if numsuc:
            s = (_fm0fnodesize * numsuc)
            cur = data[off:off + s]
            sucs = _unpack(_fm0node * numsuc, cur)
            off += s
        # read metadata
        # (metadata will be decoded on demand)
        metadata = data[off:off + mdsize]
        if len(metadata) != mdsize:
            raise util.Abort(_('parsing obsolete marker: metadata is too '
                               'short, %d bytes expected, got %d')
                             % (mdsize, len(metadata)))
        off += mdsize
        metadata = _fm0decodemeta(metadata)
        try:
            when, offset = metadata.pop('date', '0 0').split(' ')
            date = float(when), int(offset)
        except ValueError:
            date = (0., 0)
        parents = None
        if 'p2' in metadata:
            parents = (metadata.pop('p1', None), metadata.pop('p2', None))
        elif 'p1' in metadata:
            parents = (metadata.pop('p1', None),)
        elif 'p0' in metadata:
            parents = ()
        if parents is not None:
            try:
                parents = tuple(node.bin(p) for p in parents)
                # if parent content is not a nodeid, drop the data
                for p in parents:
                    if len(p) != 20:
                        parents = None
                        break
            except TypeError:
                # if content cannot be translated to nodeid drop the data.
                parents = None

        metadata = tuple(sorted(metadata.iteritems()))

        yield (pre, sucs, flags, metadata, date, parents)

def _fm0encodeonemarker(marker):
    pre, sucs, flags, metadata, date, parents = marker
    if flags & usingsha256:
        raise util.Abort(_('cannot handle sha256 with old obsstore format'))
    metadata = dict(metadata)
    metadata['date'] = '%d %i' % date
    if parents is not None:
        if not parents:
            # mark that we explicitly recorded no parents
            metadata['p0'] = ''
        for i, p in enumerate(parents):
            metadata['p%i' % (i + 1)] = node.hex(p)
    metadata = _fm0encodemeta(metadata)
    numsuc = len(sucs)
    format = _fm0fixed + (_fm0node * numsuc)
    data = [numsuc, len(metadata), flags, pre]
    data.extend(sucs)
    return _pack(format, *data) + metadata

def _fm0encodemeta(meta):
    """Return encoded metadata string to string mapping.

    Assume no ':' in key and no '\0' in both key and value."""
    for key, value in meta.iteritems():
        if ':' in key or '\0' in key:
            raise ValueError("':' and '\0' are forbidden in metadata key")
        if '\0' in value:
            raise ValueError("'\0' is forbidden in metadata value")
    return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])

def _fm0decodemeta(data):
    """Return string to string dictionary from encoded version."""
    d = {}
    for l in data.split('\0'):
        if l:
            key, value = l.split(':')
            d[key] = value
    return d
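
# Hedged example (editor's note): the two helpers above are inverses for
# well-formed input; keys are sorted on encode, so the output is stable.
#
#   >>> _fm0encodemeta({'user': 'alice', 'note': 'amend'})
#   'note:amend\x00user:alice'
#   >>> _fm0decodemeta('note:amend\x00user:alice') == {'note': 'amend',
#   ...                                                'user': 'alice'}
#   True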

## Parsing and writing of version "1"
#
# The header is followed by the markers. Each marker is made of:
#
# - uint32: total size of the marker (including this field)
#
# - float64: date in seconds since epoch
#
# - int16: timezone offset in minutes
#
# - uint16: a bit field. It is reserved for flags used in common
#   obsolete marker operations, to avoid repeated decoding of metadata
#   entries.
#
# - uint8: number of successors "N", can be zero.
#
# - uint8: number of parents "P", can be zero.
#
#     0: parents data stored but no parent,
#     1: one parent stored,
#     2: two parents stored,
#     3: no parent data stored
#
# - uint8: number of metadata entries M
#
# - 20 or 32 bytes: precursor changeset identifier.
#
# - N*(20 or 32) bytes: successors changesets identifiers.
#
# - P*(20 or 32) bytes: parents of the precursors changesets.
#
# - M*(uint8, uint8): size of all metadata entries (key and value)
#
# - remaining bytes: the metadata, each (key, value) pair after the other.
_fm1version = 1
_fm1fixed = '>IdhHBBB20s'
_fm1nodesha1 = '20s'
_fm1nodesha256 = '32s'
_fm1fsize = struct.calcsize(_fm1fixed)
_fm1parentnone = 3
_fm1parentshift = 14
_fm1parentmask = (_fm1parentnone << _fm1parentshift)
_fm1metapair = 'BB'
_fm1metapairsize = struct.calcsize('BB')
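
# Size sanity check (editor's note, illustrative only): the fixed part of a
# version-1 marker is 4 + 8 + 2 + 2 + 1 + 1 + 1 + 20 = 39 bytes for sha1
# nodes, so every marker is at least that long.
#
#   >>> struct.calcsize(_fm1fixed)
#   39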

def _fm1readmarkers(data, off=0):
    # Loop on markers
    l = len(data)
    while off + _fm1fsize <= l:
        # read fixed part
        cur = data[off:off + _fm1fsize]
        off += _fm1fsize
        fixeddata = _unpack(_fm1fixed, cur)
        ttsize, seconds, tz, flags, numsuc, numpar, nummeta, prec = fixeddata
        # extract the number of parents information
        if numpar == _fm1parentnone:
            numpar = None
        # build the date tuple (upgrade tz minutes to seconds)
        date = (seconds, tz * 60)
        _fm1node = _fm1nodesha1
        if flags & usingsha256:
            _fm1node = _fm1nodesha256
        fnodesize = struct.calcsize(_fm1node)
        # read replacement
        sucs = ()
        if numsuc:
            s = (fnodesize * numsuc)
            cur = data[off:off + s]
            sucs = _unpack(_fm1node * numsuc, cur)
            off += s
        # read parents
        if numpar is None:
            parents = None
        elif numpar == 0:
            parents = ()
        elif numpar: # neither None nor zero
            s = (fnodesize * numpar)
            cur = data[off:off + s]
            parents = _unpack(_fm1node * numpar, cur)
            off += s
        # read metadata
        metaformat = '>' + (_fm1metapair * nummeta)
        s = _fm1metapairsize * nummeta
        metapairsize = _unpack(metaformat, data[off:off + s])
        off += s
        metadata = []
        for idx in xrange(0, len(metapairsize), 2):
            sk = metapairsize[idx]
            sv = metapairsize[idx + 1]
            key = data[off:off + sk]
            value = data[off + sk:off + sk + sv]
            assert len(key) == sk
            assert len(value) == sv
            metadata.append((key, value))
            off += sk + sv
        metadata = tuple(metadata)

        yield (prec, sucs, flags, metadata, date, parents)

def _fm1encodeonemarker(marker):
    pre, sucs, flags, metadata, date, parents = marker
    # determine node size
    _fm1node = _fm1nodesha1
    if flags & usingsha256:
        _fm1node = _fm1nodesha256
    numsuc = len(sucs)
    numextranodes = numsuc
    if parents is None:
        numpar = _fm1parentnone
    else:
        numpar = len(parents)
        numextranodes += numpar
    formatnodes = _fm1node * numextranodes
    formatmeta = _fm1metapair * len(metadata)
    format = _fm1fixed + formatnodes + formatmeta
    # tz is stored in minutes so we divide by 60
    tz = date[1]//60
    data = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre]
    data.extend(sucs)
    if parents is not None:
        data.extend(parents)
    totalsize = struct.calcsize(format)
    for key, value in metadata:
        lk = len(key)
        lv = len(value)
        data.append(lk)
        data.append(lv)
        totalsize += lk + lv
    data[0] = totalsize
    data = [_pack(format, *data)]
    for key, value in metadata:
        data.append(key)
        data.append(value)
    return ''.join(data)
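
# Round-trip sketch (editor's note, not part of the original module): a
# marker tuple built with dummy node ids survives a version-1 encode/decode
# cycle unchanged (parents=None maps to the "no parent data" sentinel).
#
#   >>> m = ('p' * 20, ('s' * 20,), 0, (('user', 'alice'),), (0.0, 0), None)
#   >>> list(_fm1readmarkers(_fm1encodeonemarker(m))) == [m]
#   True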

# mapping to read/write various marker formats
# <version> -> (decoder, encoder)
formats = {_fm0version: (_fm0readmarkers, _fm0encodeonemarker),
           _fm1version: (_fm1readmarkers, _fm1encodeonemarker)}

def _readmarkers(data):
    """Read and enumerate markers from raw data"""
    off = 0
    diskversion = _unpack('>B', data[off:off + 1])[0]
    off += 1
    if diskversion not in formats:
        raise util.Abort(_('parsing obsolete marker: unknown version %r')
                         % diskversion)
    return diskversion, formats[diskversion][0](data, off)

def encodemarkers(markers, addheader=False, version=_fm0version):
    # Kept separate from flushmarkers(), it will be reused for
    # markers exchange.
    encodeone = formats[version][1]
    if addheader:
        yield _pack('>B', version)
    for marker in markers:
        yield encodeone(marker)
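
# Usage sketch (editor's note): with addheader=True the generated stream
# starts with the version byte, so it can be fed straight back into
# _readmarkers. `m` is the dummy marker tuple from the sketch above.
#
#   >>> stream = ''.join(encodemarkers([m], addheader=True,
#   ...                                version=_fm1version))
#   >>> version, markers = _readmarkers(stream)
#   >>> version
#   1
#   >>> list(markers) == [m]
#   True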


class marker(object):
    """Wrap obsolete marker raw data"""

    def __init__(self, repo, data):
        # the repo argument will be used to create changectx in a later version
        self._repo = repo
        self._data = data
        self._decodedmeta = None

    def __hash__(self):
        return hash(self._data)

    def __eq__(self, other):
        if type(other) != type(self):
            return False
        return self._data == other._data

    def precnode(self):
        """Precursor changeset node identifier"""
        return self._data[0]

    def succnodes(self):
        """List of successor changesets node identifiers"""
        return self._data[1]

    def parentnodes(self):
        """Parents of the precursors (None if not recorded)"""
        return self._data[5]

    def metadata(self):
        """Decoded metadata dictionary"""
        return dict(self._data[3])

    def date(self):
        """Creation date as (unixtime, offset)"""
        return self._data[4]

    def flags(self):
        """The flags field of the marker"""
        return self._data[2]

class obsstore(object):
    """Store obsolete markers

    Markers can be accessed with three mappings:
    - precursors[x] -> set(markers on precursors edges of x)
    - successors[x] -> set(markers on successors edges of x)
    - children[x]   -> set(markers on precursors edges of children(x))
    """

    fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
    # prec:    nodeid, precursor changesets
    # succs:   tuple of nodeid, successor changesets (0-N length)
    # flag:    integer, flag field carrying modifier for the markers (see doc)
    # meta:    binary blob, encoded metadata dictionary
    # date:    (float, int) tuple, date of marker creation
    # parents: (tuple of nodeid) or None, parents of precursors
    #          None is used when no data has been recorded

-    def __init__(self, sopener, defaultformat=_fm0version):
+    def __init__(self, sopener, defaultformat=_fm1version):
        # caches for various obsolescence related computations
        self.caches = {}
        self._all = []
        self.precursors = {}
        self.successors = {}
        self.children = {}
        self.sopener = sopener
        data = sopener.tryread('obsstore')
        self._version = defaultformat
        if data:
            self._version, markers = _readmarkers(data)
            self._load(markers)

    def __iter__(self):
        return iter(self._all)

    def __len__(self):
        return len(self._all)

    def __nonzero__(self):
        return bool(self._all)

    def create(self, transaction, prec, succs=(), flag=0, parents=None,
               date=None, metadata=None):
        """add a new obsolete marker

        * ensure it is hashable
        * check mandatory metadata
        * encode metadata

        If you are a human writing code that creates markers, you want to use
        the `createmarkers` function in this module instead.

        Return True if a new marker has been added, False if the marker
        already existed (no-op).
        """
        if metadata is None:
            metadata = {}
        if date is None:
            if 'date' in metadata:
                # as a courtesy for out-of-tree extensions
                date = util.parsedate(metadata.pop('date'))
            else:
                date = util.makedate()
        if len(prec) != 20:
            raise ValueError(prec)
        for succ in succs:
            if len(succ) != 20:
                raise ValueError(succ)
        if prec in succs:
            raise ValueError(_('in-marker cycle with %s') % node.hex(prec))

        metadata = tuple(sorted(metadata.iteritems()))

        marker = (str(prec), tuple(succs), int(flag), metadata, date, parents)
        return bool(self.add(transaction, [marker]))

    def add(self, transaction, markers):
        """Add new markers to the store

        Take care of filtering duplicates.
        Return the number of new markers."""
        if not _enabled:
            raise util.Abort('obsolete feature is not enabled on this repo')
        known = set(self._all)
        new = []
        for m in markers:
            if m not in known:
                known.add(m)
                new.append(m)
        if new:
            f = self.sopener('obsstore', 'ab')
            try:
                # Whether the file's current position is at the beginning or
                # at the end after opening a file for appending is
                # implementation defined. So we must seek to the end before
                # calling tell(), or we may get a zero offset for non-zero
                # sized files on some platforms (issue3543).
                f.seek(0, _SEEK_END)
                offset = f.tell()
                transaction.add('obsstore', offset)
                # offset == 0: new file - add the version header
                for bytes in encodemarkers(new, offset == 0, self._version):
                    f.write(bytes)
            finally:
                # XXX: f.close() == filecache invalidation == obsstore rebuilt.
                # call 'filecacheentry.refresh()' here
                f.close()
            self._load(new)
            # new markers *may* have changed several sets. invalidate the
            # caches.
            self.caches.clear()
        # records the number of new markers for the transaction hooks
        previous = int(transaction.hookargs.get('new_obsmarkers', '0'))
        transaction.hookargs['new_obsmarkers'] = str(previous + len(new))
        return len(new)

    def mergemarkers(self, transaction, data):
        """merge a binary stream of markers inside the obsstore

        Returns the number of new markers added."""
        version, markers = _readmarkers(data)
        return self.add(transaction, markers)

    def _load(self, markers):
        for mark in markers:
            self._all.append(mark)
            pre, sucs = mark[:2]
            self.successors.setdefault(pre, set()).add(mark)
            for suc in sucs:
                self.precursors.setdefault(suc, set()).add(mark)
            parents = mark[5]
            if parents is not None:
                for p in parents:
                    self.children.setdefault(p, set()).add(mark)
        if node.nullid in self.precursors:
            raise util.Abort(_('bad obsolescence marker detected: '
                               'invalid successors nullid'))

    def relevantmarkers(self, nodes):
        """return a set of all obsolescence markers relevant to a set of nodes.

        "relevant" to a set of nodes means:

        - markers that use any of these changesets as a successor
        - prune markers of direct children of these changesets
        - recursive application of the two rules on precursors of these
          markers

        It is a set so you cannot rely on order."""

        pendingnodes = set(nodes)
        seenmarkers = set()
        seennodes = set(pendingnodes)
        precursorsmarkers = self.precursors
        children = self.children
        while pendingnodes:
            direct = set()
            for current in pendingnodes:
                direct.update(precursorsmarkers.get(current, ()))
                pruned = [m for m in children.get(current, ()) if not m[1]]
                direct.update(pruned)
            direct -= seenmarkers
            pendingnodes = set([m[0] for m in direct])
            seenmarkers |= direct
            pendingnodes -= seennodes
            seennodes |= pendingnodes
        return seenmarkers

def commonversion(versions):
    """Return the newest version listed in both versions and our local formats.

    Returns None if no common version exists.
    """
    versions.sort(reverse=True)
    # search for the highest version known on both sides
    for v in versions:
        if v in formats:
            return v
    return None
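
# Example (editor's note): with this module knowing formats 0 and 1, the
# highest mutually supported version wins and remote-only versions are
# skipped. Note that the list is sorted in place.
#
#   >>> commonversion([0, 1, 5])
#   1
#   >>> commonversion([5, 7]) is None
#   True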

# arbitrarily picked to fit into 8K limit from HTTP server
# you have to take into account:
# - the version header
# - the base85 encoding
_maxpayload = 5300

def _pushkeyescape(markers):
    """encode markers into a dict suitable for pushkey exchange

    - binary data is base85 encoded
    - split in chunks smaller than 5300 bytes"""
    keys = {}
    parts = []
    currentlen = _maxpayload * 2  # ensure we create a new part
    for marker in markers:
        nextdata = _fm0encodeonemarker(marker)
        if (len(nextdata) + currentlen > _maxpayload):
            currentpart = []
            currentlen = 0
            parts.append(currentpart)
        currentpart.append(nextdata)
        currentlen += len(nextdata)
    for idx, part in enumerate(reversed(parts)):
        data = ''.join([_pack('>B', _fm0version)] + part)
        keys['dump%i' % idx] = base85.b85encode(data)
    return keys

def listmarkers(repo):
    """List markers over pushkey"""
    if not repo.obsstore:
        return {}
    return _pushkeyescape(repo.obsstore)

def pushmarker(repo, key, old, new):
    """Push markers over pushkey"""
    if not key.startswith('dump'):
        repo.ui.warn(_('unknown key: %r') % key)
        return 0
    if old:
        repo.ui.warn(_('unexpected old value for %r') % key)
        return 0
    data = base85.b85decode(new)
    lock = repo.lock()
    try:
        tr = repo.transaction('pushkey: obsolete markers')
        try:
            repo.obsstore.mergemarkers(tr, data)
            tr.close()
            return 1
        finally:
            tr.release()
    finally:
        lock.release()

def getmarkers(repo, nodes=None):
    """returns markers known in a repository

    If <nodes> is specified, only markers "relevant" to those nodes are
    returned"""
    if nodes is None:
        rawmarkers = repo.obsstore
    else:
        rawmarkers = repo.obsstore.relevantmarkers(nodes)

    for markerdata in rawmarkers:
        yield marker(repo, markerdata)

def relevantmarkers(repo, node):
    """all obsolete markers relevant to some revision"""
    for markerdata in repo.obsstore.relevantmarkers(node):
        yield marker(repo, markerdata)


def precursormarkers(ctx):
    """obsolete markers marking this changeset as a successor"""
    for data in ctx._repo.obsstore.precursors.get(ctx.node(), ()):
        yield marker(ctx._repo, data)

def successormarkers(ctx):
    """obsolete markers making this changeset obsolete"""
    for data in ctx._repo.obsstore.successors.get(ctx.node(), ()):
        yield marker(ctx._repo, data)

def allsuccessors(obsstore, nodes, ignoreflags=0):
    """Yield node for every successor of <nodes>.

    Some successors may be unknown locally.

    This is a linear yield unsuited to detecting split changesets. It includes
    initial nodes too."""
    remaining = set(nodes)
    seen = set(remaining)
    while remaining:
        current = remaining.pop()
        yield current
        for mark in obsstore.successors.get(current, ()):
            # ignore marker flagged with specified flag
            if mark[2] & ignoreflags:
                continue
            for suc in mark[1]:
                if suc not in seen:
                    seen.add(suc)
                    remaining.add(suc)
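
# Walk-through sketch (editor's note): a minimal fake store, assumed here
# only for illustration, with one marker A -> B shows that the walk yields
# the initial node as well as its successors.
#
#   >>> class fakestore(object):
#   ...     successors = {'A': set([('A', ('B',), 0, (), (0.0, 0), None)])}
#   >>> sorted(allsuccessors(fakestore(), ['A']))
#   ['A', 'B']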

def allprecursors(obsstore, nodes, ignoreflags=0):
    """Yield node for every precursor of <nodes>.

    Some precursors may be unknown locally.

    This is a linear yield unsuited to detecting folded changesets. It includes
    initial nodes too."""

    remaining = set(nodes)
    seen = set(remaining)
    while remaining:
        current = remaining.pop()
        yield current
        for mark in obsstore.precursors.get(current, ()):
            # ignore marker flagged with specified flag
            if mark[2] & ignoreflags:
                continue
            suc = mark[0]
            if suc not in seen:
                seen.add(suc)
                remaining.add(suc)

def foreground(repo, nodes):
    """return all nodes in the "foreground" of other nodes

    The foreground of a revision is anything reachable using parent -> children
    or precursor -> successor relations. It is very similar to "descendant" but
    augmented with obsolescence information.

    Beware that obsolescence cycles may arise in complex situations.
    """
    repo = repo.unfiltered()
    foreground = set(repo.set('%ln::', nodes))
    if repo.obsstore:
        # We only need this complicated logic if there is obsolescence
        # XXX will probably deserve an optimised revset.
        nm = repo.changelog.nodemap
        plen = -1
        # compute the whole set of successors or descendants
        while len(foreground) != plen:
            plen = len(foreground)
            succs = set(c.node() for c in foreground)
            mutable = [c.node() for c in foreground if c.mutable()]
            succs.update(allsuccessors(repo.obsstore, mutable))
            known = (n for n in succs if n in nm)
            foreground = set(repo.set('%ln::', known))
    return set(c.node() for c in foreground)


def successorssets(repo, initialnode, cache=None):
    """Return all sets of successors of the initial node

    The successors set of a changeset A is a group of revisions that succeed
    A. It succeeds A as a consistent whole, each revision being only a partial
    replacement. The successors set contains non-obsolete changesets only.

    This function returns the full list of successor sets which is why it
    returns a list of tuples and not just a single tuple. Each tuple is a valid
    successors set. Note that (A,) may be a valid successors set for changeset
    A (see below).

    In most cases, a changeset A will have a single element (e.g. the changeset
    A is replaced by A') in its successors set. Though, it is also common for a
    changeset A to have no elements in its successor set (e.g. the changeset
    has been pruned). Therefore, the returned list of successors sets will be
    [(A',)] or [], respectively.

    When a changeset A is split into A' and B', however, it will result in a
    successors set containing more than a single element, i.e. [(A',B')].
    Divergent changesets will result in multiple successors sets, i.e. [(A',),
    (A'')].

    If a changeset A is not obsolete, then it will conceptually have no
    successors set. To distinguish this from a pruned changeset, the successor
    set will only contain itself, i.e. [(A,)].

    Finally, successors unknown locally are considered to be pruned (obsoleted
    without any successors).

    The optional `cache` parameter is a dictionary that may contain precomputed
    successors sets. It is meant to reuse the computation of a previous call to
    `successorssets` when multiple calls are made at the same time. The cache
    dictionary is updated in place. The caller is responsible for its life
    span. Code that makes multiple calls to `successorssets` *must* use this
    cache mechanism or suffer terrible performance.
    """

    succmarkers = repo.obsstore.successors

    # Stack of nodes we search successors sets for
    toproceed = [initialnode]
    # set version of above list for fast loop detection
    # element added to "toproceed" must be added here
    stackedset = set(toproceed)
    if cache is None:
        cache = {}

    # This while loop is the flattened version of a recursive search for
    # successors sets
    #
    # def successorssets(x):
    #     successors = directsuccessors(x)
    #     ss = [[]]
    #     for succ in directsuccessors(x):
    #         # product as in itertools cartesian product
    #         ss = product(ss, successorssets(succ))
    #     return ss
    #
    # But we can not use plain recursive calls here:
    # - that would blow the python call stack
    # - obsolescence markers may have cycles, we need to handle them.
    #
    # The `toproceed` list acts as our call stack. Every node we search
    # successors sets for is stacked there.
    #
    # The `stackedset` is the set version of this stack used to check if a
    # node is already stacked. This check is used to detect cycles and prevent
    # infinite loops.
    #
    # successors sets of all nodes are stored in the `cache` dictionary.
    #
    # After this while loop ends we use the cache to return the successors
    # sets for the node requested by the caller.
840 while toproceed:
840 while toproceed:
841 # Every iteration tries to compute the successors sets of the topmost
841 # Every iteration tries to compute the successors sets of the topmost
842 # node of the stack: CURRENT.
842 # node of the stack: CURRENT.
843 #
843 #
844 # There are four possible outcomes:
844 # There are four possible outcomes:
845 #
845 #
846 # 1) We already know the successors sets of CURRENT:
846 # 1) We already know the successors sets of CURRENT:
847 # -> mission accomplished, pop it from the stack.
847 # -> mission accomplished, pop it from the stack.
848 # 2) Node is not obsolete:
848 # 2) Node is not obsolete:
849 # -> the node is its own successors sets. Add it to the cache.
849 # -> the node is its own successors sets. Add it to the cache.
850 # 3) We do not know successors set of direct successors of CURRENT:
850 # 3) We do not know successors set of direct successors of CURRENT:
851 # -> We add those successors to the stack.
851 # -> We add those successors to the stack.
852 # 4) We know successors sets of all direct successors of CURRENT:
852 # 4) We know successors sets of all direct successors of CURRENT:
853 # -> We can compute CURRENT successors set and add it to the
853 # -> We can compute CURRENT successors set and add it to the
854 # cache.
854 # cache.
855 #
855 #
        current = toproceed[-1]
        if current in cache:
            # case (1): We already know the successors sets
            stackedset.remove(toproceed.pop())
        elif current not in succmarkers:
            # case (2): The node is not obsolete.
            if current in repo:
                # We have a valid last successor.
                cache[current] = [(current,)]
            else:
                # Final obsolete version is unknown locally.
                # Do not count that as a valid successor.
                cache[current] = []
        else:
            # cases (3) and (4)
            #
            # We proceed in two phases. Phase 1 aims to distinguish case (3)
            # from case (4):
            #
            # For each direct successor of CURRENT, we check whether its
            # successors sets are known. If they are not, we stack the
            # unknown node and proceed to the next iteration of the while
            # loop. (case 3)
            #
            # During this step, we may detect obsolescence cycles: a node
            # with unknown successors sets but already in the call stack.
            # In such a situation, we arbitrarily set the successors sets of
            # the node to nothing (node pruned) to break the cycle.
            #
            # If no break was encountered we proceed to phase 2.
            #
            # Phase 2 computes the successors sets of CURRENT (case 4); see
            # details in phase 2 itself.
            #
            # Note the two levels of iteration in each phase.
            # - The first one handles obsolescence markers using CURRENT as
            #   precursor (successors markers of CURRENT).
            #
            #   Having multiple entries here means divergence.
            #
            # - The second one handles successors defined in each marker.
            #
            #   Having none means a pruned node, multiple successors mean a
            #   split, a single successor is a standard replacement.
            #
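            # Illustrative cycle (hypothetical nodes A and B): the markers
            # (A, (B,)) and (B, (A,)) chase each other. When B is reached
            # while A is still on the stack, phase 1 records cache[B] = []
            # so the walk terminates instead of looping forever.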
            for mark in sorted(succmarkers[current]):
                for suc in mark[1]:
                    if suc not in cache:
                        if suc in stackedset:
                            # cycle breaking
                            cache[suc] = []
                        else:
                            # case (3) If we have not computed the successors
                            # sets of one of those successors, we add it to
                            # the `toproceed` stack and stop all work for this
                            # iteration.
                            toproceed.append(suc)
                            stackedset.add(suc)
                            break
                else:
                    continue
                break
            else:
                # case (4): we know all successors sets of all direct
                # successors
                #
                # The successors set contributed by each marker depends on the
                # successors sets of all its "successors" nodes.
                #
                # Each different marker is a divergence in the obsolescence
                # history. It contributes successors sets distinct from other
                # markers.
                #
                # Within a marker, a successor may have divergent successors
                # sets. In such a case, the marker will contribute multiple
                # divergent successors sets. If multiple successors have
                # divergent successors sets, a Cartesian product is used.
                #
                # At the end we post-process successors sets to remove
                # duplicated entries and successors sets that are strict
                # subsets of another one.
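                # Worked example (hypothetical nodes): for a marker
                # (X, (B, C)) where cache[B] == [(B1,)] and
                # cache[C] == [(C1,), (C2,)], the product below yields
                # [[B1, C1], [B1, C2]]: one successors set per combination
                # of the successors sets of B and C.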
                succssets = []
                for mark in sorted(succmarkers[current]):
                    # successors sets contributed by this marker
                    markss = [[]]
                    for suc in mark[1]:
                        # Cartesian product with previous successors
                        productresult = []
                        for prefix in markss:
                            for suffix in cache[suc]:
                                newss = list(prefix)
                                for part in suffix:
                                    # do not duplicate entries in a successors
                                    # set; first entry wins.
                                    if part not in newss:
                                        newss.append(part)
                                productresult.append(newss)
                        markss = productresult
                    succssets.extend(markss)
                # remove duplicated and subset successors sets
                seen = []
                final = []
                candidate = sorted(((set(s), s) for s in succssets if s),
                                   key=lambda x: len(x[1]), reverse=True)
                for setversion, listversion in candidate:
                    for seenset in seen:
                        if setversion.issubset(seenset):
                            break
                    else:
                        final.append(listversion)
                        seen.append(setversion)
                final.reverse() # put small successors sets first
                cache[current] = final
    return cache[initialnode]
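
# Illustrative results of the function above (hypothetical nodes): if A was
# rewritten as B in one repository and as C in another, then after exchanging
# both markers:
#
#   successorssets(repo, A)  ->  [(B,), (C,)]   # divergence: two sets
#
# whereas a split of A into B and C would give the single set [(B, C)].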

def _knownrevs(repo, nodes):
    """yield the revision numbers of the known nodes passed as parameters

    Unknown revisions are silently ignored."""
    torev = repo.changelog.nodemap.get
    for n in nodes:
        rev = torev(n)
        if rev is not None:
            yield rev
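
# For instance (hypothetical nodes), with `known` present in the changelog
# and `missing` absent from it:
#
#   list(_knownrevs(repo, [known, missing]))  ->  [repo[known].rev()]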

# mapping of 'set-name' -> <function to compute this set>
cachefuncs = {}
def cachefor(name):
    """Decorator to register a function as computing the cache for a set"""
    def decorator(func):
        assert name not in cachefuncs
        cachefuncs[name] = func
        return func
    return decorator
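
# A sketch of how registration works; the 'myset' name is made up, the real
# registrations appear below:
#
#   @cachefor('myset')
#   def _computemyset(repo):
#       return set()  # would then be served by getrevs(repo, 'myset')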

def getrevs(repo, name):
    """Return the set of revisions that belong to the <name> set

    Such access may compute the set and cache it for future use"""
    repo = repo.unfiltered()
    if not repo.obsstore:
        return frozenset()
    if name not in repo.obsstore.caches:
        repo.obsstore.caches[name] = cachefuncs[name](repo)
    return repo.obsstore.caches[name]
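
# Typical use, as in the set computations below:
#
#   obs = getrevs(repo, 'obsolete')   # computed once, then served from cache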

# To keep it simple we need to invalidate the obsolescence cache when:
#
# - a new changeset is added
# - the public phase is changed
# - obsolescence markers are added
# - strip is used on a repo
def clearobscaches(repo):
    """Remove all obsolescence related caches from a repo

    This removes all caches in the obsstore if the obsstore already exists on
    the repo.

    (We could be smarter here given the exact event that triggers the cache
    clearing)"""
    # only clear the caches if there is obsstore data in this repo
    if 'obsstore' in repo._filecache:
        repo.obsstore.caches.clear()

@cachefor('obsolete')
def _computeobsoleteset(repo):
    """the set of obsolete revisions"""
    obs = set()
    getrev = repo.changelog.nodemap.get
    getphase = repo._phasecache.phase
    for n in repo.obsstore.successors:
        rev = getrev(n)
        # a non-zero (non-public) phase means the revision is mutable
        if rev is not None and getphase(repo, rev):
            obs.add(rev)
    return obs

@cachefor('unstable')
def _computeunstableset(repo):
    """the set of non obsolete revisions with obsolete ancestors"""
    # revset is not efficient enough here
    # we do (obsolete()::) - obsolete() by hand
    obs = getrevs(repo, 'obsolete')
    if not obs:
        return set()
    cl = repo.changelog
    return set(r for r in cl.descendants(obs) if r not in obs)

@cachefor('suspended')
def _computesuspendedset(repo):
    """the set of obsolete revisions with non obsolete descendants"""
    suspended = repo.changelog.ancestors(getrevs(repo, 'unstable'))
    return set(r for r in getrevs(repo, 'obsolete') if r in suspended)

@cachefor('extinct')
def _computeextinctset(repo):
    """the set of obsolete revisions without non obsolete descendants"""
    return getrevs(repo, 'obsolete') - getrevs(repo, 'suspended')
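
# A consequence of the two definitions above: 'suspended' and 'extinct'
# partition 'obsolete':
#
#   suspended | extinct == obsolete    and    suspended & extinct == set()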


@cachefor('bumped')
def _computebumpedset(repo):
    """the set of revs trying to obsolete public revisions"""
    bumped = set()
    # util function (avoid attribute lookup in the loop)
    phase = repo._phasecache.phase # would be faster to grab the full list
    public = phases.public
    cl = repo.changelog
    torev = cl.nodemap.get
    obs = getrevs(repo, 'obsolete')
    for rev in repo:
        # We only evaluate mutable, non-obsolete revisions
        if (public < phase(repo, rev)) and (rev not in obs):
            node = cl.node(rev)
            # (future) A cache of precursors may be worth it if split is
            # very common
            for pnode in allprecursors(repo.obsstore, [node],
                                       ignoreflags=bumpedfix):
                prev = torev(pnode) # unfiltered! but so is phasecache
                if (prev is not None) and (phase(repo, prev) <= public):
                    # we have a public precursor
                    bumped.add(rev)
                    break # Next draft!
    return bumped
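
# For instance (hypothetical changesets): a draft A' carrying a marker
# (A, (A',)) where A is public is "bumped": it tries to rewrite an immutable
# changeset and will need a fix-up (cf. the `bumpedfix` flag used above).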

@cachefor('divergent')
def _computedivergentset(repo):
    """the set of revs that compete to be the final successors of some
    revision"""
    divergent = set()
    obsstore = repo.obsstore
    newermap = {}
    for ctx in repo.set('(not public()) - obsolete()'):
        mark = obsstore.precursors.get(ctx.node(), ())
        toprocess = set(mark)
        while toprocess:
            prec = toprocess.pop()[0]
            if prec not in newermap:
                successorssets(repo, prec, newermap)
            newer = [n for n in newermap[prec] if n]
            if len(newer) > 1:
                divergent.add(ctx.rev())
                break
            toprocess.update(obsstore.precursors.get(prec, ()))
    return divergent
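
# For instance (hypothetical changesets): if A was rewritten independently as
# B and as C (markers (A, (B,)) and (A, (C,))), successorssets(repo, A) has
# two entries and both B and C are reported as divergent.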


def createmarkers(repo, relations, flag=0, date=None, metadata=None):
    """Add obsolete markers between changesets in a repo

    <relations> must be an iterable of (<old>, (<new>, ...)[,{metadata}])
    tuples. `old` and `news` are changectx objects. metadata is an optional
    dictionary containing metadata for this marker only. It is merged with
    the global metadata specified through the `metadata` argument of this
    function.

    Trying to obsolete a public changeset will raise an exception.

    Current user and date are used except if specified otherwise in the
    metadata attribute.

    This function operates within a transaction of its own, but does
    not take any lock on the repo.
    """
    # prepare metadata
    if metadata is None:
        metadata = {}
    if 'user' not in metadata:
        metadata['user'] = repo.ui.username()
    tr = repo.transaction('add-obsolescence-marker')
    try:
        for rel in relations:
            prec = rel[0]
            sucs = rel[1]
            localmetadata = metadata.copy()
            if 2 < len(rel):
                localmetadata.update(rel[2])

            if not prec.mutable():
                raise util.Abort("cannot obsolete immutable changeset: %s"
                                 % prec)
            nprec = prec.node()
            nsucs = tuple(s.node() for s in sucs)
            npare = None
            if not nsucs:
                npare = tuple(p.node() for p in prec.parents())
            if nprec in nsucs:
                raise util.Abort("changeset %s cannot obsolete itself" % prec)
            repo.obsstore.create(tr, nprec, nsucs, flag, parents=npare,
                                 date=date, metadata=localmetadata)
            repo.filteredrevcache.clear()
        tr.close()
    finally:
        tr.release()
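
# Illustrative usage (hypothetical changectxs `old` and `new`):
#
#   createmarkers(repo, [(old, (new,))],
#                 metadata={'note': 'amended'})
#
# and a prune, recording that `old` has no successor:
#
#   createmarkers(repo, [(old, ())])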