obsolete: use ProgrammingError over assert for volatile set registration...
Pierre-Yves David
r32884:9d472b21 default
@@ -1,1459 +1,1461 b''
# obsolete.py - obsolete markers handling
#
# Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
#                Logilab SA        <contact@logilab.fr>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""Obsolete marker handling

An obsolete marker maps an old changeset to a list of new
changesets. If the list of new changesets is empty, the old changeset
is said to be "killed". Otherwise, the old changeset is being
"replaced" by the new changesets.

Obsolete markers can be used to record and distribute changeset graph
transformations performed by history rewrite operations, and help
building new tools to reconcile conflicting rewrite actions. To
facilitate conflict resolution, markers include various annotations
besides old and new changeset identifiers, such as creation date or
author name.

The old obsoleted changeset is called a "precursor" and possible
replacements are called "successors". Markers that used changeset X as
a precursor are called "successor markers of X" because they hold
information about the successors of X. Markers that use changeset Y as
a successor are called "precursor markers of Y" because they hold
information about the precursors of Y.

Examples:

- When changeset A is replaced by changeset A', one marker is stored:

    (A, (A',))

- When changesets A and B are folded into a new changeset C, two markers are
  stored:

    (A, (C,)) and (B, (C,))

- When changeset A is simply "pruned" from the graph, a marker is created:

    (A, ())

- When changeset A is split into B and C, a single marker is used:

    (A, (B, C))

  We use a single marker to distinguish the "split" case from the "divergence"
  case. If two independent operations rewrite the same changeset A into A' and
  A'', we have an error case: divergent rewriting. We can detect it because
  two markers will be created independently:

  (A, (B,)) and (A, (C,))

Format
------

Markers are stored in an append-only file stored in
'.hg/store/obsstore'.

The file starts with a version header:

- 1 unsigned byte: version number, starting at zero.

The header is followed by the markers. The marker format depends on the
version. See the comment associated with each format for details.

"""
from __future__ import absolute_import

import errno
import struct

from .i18n import _
from . import (
    error,
    node,
    phases,
    policy,
    util,
)

parsers = policy.importmod(r'parsers')

_pack = struct.pack
_unpack = struct.unpack
_calcsize = struct.calcsize
propertycache = util.propertycache

# the obsolete feature is not mature enough to be enabled by default.
# you have to rely on a third party extension to enable this.
_enabled = False

# Options for obsolescence
createmarkersopt = 'createmarkers'
allowunstableopt = 'allowunstable'
exchangeopt = 'exchange'

def isenabled(repo, option):
    """Returns True if the given repository has the given obsolete option
    enabled.
    """
    result = set(repo.ui.configlist('experimental', 'evolution'))
    if 'all' in result:
        return True

    # For migration purposes, temporarily return true if the config hasn't
    # been set but _enabled is true.
    if len(result) == 0 and _enabled:
        return True

    # createmarkers must be enabled if other options are enabled
    if ((allowunstableopt in result or exchangeopt in result) and
        not createmarkersopt in result):
        raise error.Abort(_("'createmarkers' obsolete option must be enabled "
                            "if other obsolete options are enabled"))

    return option in result

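# Illustrative sketch, not part of the original file: the option constants
# above map to the 'experimental.evolution' config list, e.g. in an hgrc:
#
#   [experimental]
#   evolution = createmarkers, exchange
#
# With that configuration, isenabled(repo, createmarkersopt) and
# isenabled(repo, exchangeopt) return True while
# isenabled(repo, allowunstableopt) returns False.
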
### obsolescence marker flag

## bumpedfix flag
#
# When a changeset A' succeeds a changeset A which became public, we call A'
# "bumped" because it's a successor of a public changeset
#
#     o A' (bumped)
#     |`:
#     | o A
#     |/
#     o Z
#
# The way to solve this situation is to create a new changeset Ad as a child
# of A. This changeset has the same content as A'. So the diff from A to A'
# is the same as the diff from A to Ad. Ad is marked as a successor of A'
#
#     o Ad
#     |`:
#     | x A'
#     |'|
#     o | A
#     |/
#     o Z
#
# But by transitivity Ad is also a successor of A. To avoid having Ad marked
# as bumped too, we add the `bumpedfix` flag to the marker, <A', (Ad,)>.
# This flag means that the successors express the changes between the public
# and bumped version and fix the situation, breaking the transitivity of
# "bumped" here.
bumpedfix = 1
usingsha256 = 2

## Parsing and writing of version "0"
#
# The header is followed by the markers. Each marker is made of:
#
# - 1 uint8 : number of new changesets "N", can be zero.
#
# - 1 uint32: metadata size "M" in bytes.
#
# - 1 byte: a bit field. It is reserved for flags used in common
#   obsolete marker operations, to avoid repeated decoding of metadata
#   entries.
#
# - 20 bytes: obsoleted changeset identifier.
#
# - N*20 bytes: new changesets identifiers.
#
# - M bytes: metadata as a sequence of nul-terminated strings. Each
#   string contains a key and a value, separated by a colon ':', without
#   additional encoding. Keys cannot contain '\0' or ':' and values
#   cannot contain '\0'.
_fm0version = 0
_fm0fixed = '>BIB20s'
_fm0node = '20s'
_fm0fsize = _calcsize(_fm0fixed)
_fm0fnodesize = _calcsize(_fm0node)

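# Illustrative sketch, not part of the original file: the fixed part of a
# format-0 marker is 26 bytes (uint8 successor count, uint32 metadata size,
# one flag byte, 20-byte precursor node), as the struct format confirms.
def _examplefm0fixedpart():
    numsuc, mdsize, flags = 1, 0, 0
    pre = '\x11' * 20  # dummy 20-byte precursor node id
    fixed = _pack(_fm0fixed, numsuc, mdsize, flags, pre)
    assert len(fixed) == _fm0fsize == 26  # 1 + 4 + 1 + 20 bytes
    assert _unpack(_fm0fixed, fixed) == (numsuc, mdsize, flags, pre)
    return fixed
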
def _fm0readmarkers(data, off):
    # Loop on markers
    l = len(data)
    while off + _fm0fsize <= l:
        # read fixed part
        cur = data[off:off + _fm0fsize]
        off += _fm0fsize
        numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
        # read replacement
        sucs = ()
        if numsuc:
            s = (_fm0fnodesize * numsuc)
            cur = data[off:off + s]
            sucs = _unpack(_fm0node * numsuc, cur)
            off += s
        # read metadata
        # (metadata will be decoded on demand)
        metadata = data[off:off + mdsize]
        if len(metadata) != mdsize:
            raise error.Abort(_('parsing obsolete marker: metadata is too '
                                'short, %d bytes expected, got %d')
                              % (mdsize, len(metadata)))
        off += mdsize
        metadata = _fm0decodemeta(metadata)
        try:
            when, offset = metadata.pop('date', '0 0').split(' ')
            date = float(when), int(offset)
        except ValueError:
            date = (0., 0)
        parents = None
        if 'p2' in metadata:
            parents = (metadata.pop('p1', None), metadata.pop('p2', None))
        elif 'p1' in metadata:
            parents = (metadata.pop('p1', None),)
        elif 'p0' in metadata:
            parents = ()
        if parents is not None:
            try:
                parents = tuple(node.bin(p) for p in parents)
                # if parent content is not a nodeid, drop the data
                for p in parents:
                    if len(p) != 20:
                        parents = None
                        break
            except TypeError:
                # if content cannot be translated to a nodeid, drop the data.
                parents = None

        metadata = tuple(sorted(metadata.iteritems()))

        yield (pre, sucs, flags, metadata, date, parents)

def _fm0encodeonemarker(marker):
    pre, sucs, flags, metadata, date, parents = marker
    if flags & usingsha256:
        raise error.Abort(_('cannot handle sha256 with old obsstore format'))
    metadata = dict(metadata)
    time, tz = date
    metadata['date'] = '%r %i' % (time, tz)
    if parents is not None:
        if not parents:
            # mark that we explicitly recorded no parents
            metadata['p0'] = ''
        for i, p in enumerate(parents, 1):
            metadata['p%i' % i] = node.hex(p)
    metadata = _fm0encodemeta(metadata)
    numsuc = len(sucs)
    format = _fm0fixed + (_fm0node * numsuc)
    data = [numsuc, len(metadata), flags, pre]
    data.extend(sucs)
    return _pack(format, *data) + metadata

def _fm0encodemeta(meta):
    """Return encoded metadata string to string mapping.

    Assume no ':' in key and no '\0' in both key and value."""
    for key, value in meta.iteritems():
        if ':' in key or '\0' in key:
            raise ValueError("':' and '\0' are forbidden in metadata keys")
        if '\0' in value:
            raise ValueError("'\0' is forbidden in metadata values")
    return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])

def _fm0decodemeta(data):
    """Return string to string dictionary from encoded version."""
    d = {}
    for l in data.split('\0'):
        if l:
            key, value = l.split(':')
            d[key] = value
    return d

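# Illustrative sketch, not part of the original file: _fm0encodemeta() and
# _fm0decodemeta() round-trip any mapping whose keys avoid ':' and '\0' and
# whose values avoid both as well.
def _examplefm0metaroundtrip():
    meta = {'user': 'alice', 'operation': 'amend'}
    assert _fm0decodemeta(_fm0encodemeta(meta)) == meta
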
## Parsing and writing of version "1"
#
# The header is followed by the markers. Each marker is made of:
#
# - uint32: total size of the marker (including this field)
#
# - float64: date in seconds since epoch
#
# - int16: timezone offset in minutes
#
# - uint16: a bit field. It is reserved for flags used in common
#   obsolete marker operations, to avoid repeated decoding of metadata
#   entries.
#
# - uint8: number of successors "N", can be zero.
#
# - uint8: number of parents "P", can be zero.
#
#     0: parents data stored but no parent,
#     1: one parent stored,
#     2: two parents stored,
#     3: no parent data stored
#
# - uint8: number of metadata entries M
#
# - 20 or 32 bytes: precursor changeset identifier.
#
# - N*(20 or 32) bytes: successors changesets identifiers.
#
# - P*(20 or 32) bytes: parents of the precursors changesets.
#
# - M*(uint8, uint8): size of all metadata entries (key and value)
#
# - remaining bytes: the metadata, each (key, value) pair after the other.
_fm1version = 1
_fm1fixed = '>IdhHBBB20s'
_fm1nodesha1 = '20s'
_fm1nodesha256 = '32s'
_fm1nodesha1size = _calcsize(_fm1nodesha1)
_fm1nodesha256size = _calcsize(_fm1nodesha256)
_fm1fsize = _calcsize(_fm1fixed)
_fm1parentnone = 3
_fm1parentshift = 14
_fm1parentmask = (_fm1parentnone << _fm1parentshift)
_fm1metapair = 'BB'
_fm1metapairsize = _calcsize('BB')

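# Illustrative sketch, not part of the original file: the format-1 fixed part
# is 39 bytes: uint32 size + float64 date + int16 tz + uint16 flags + three
# uint8 counters + a 20-byte node.
def _examplefm1fixedsize():
    assert _fm1fsize == 4 + 8 + 2 + 2 + 1 + 1 + 1 + 20 == 39
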
def _fm1purereadmarkers(data, off):
    # make some global constants local for performance
    noneflag = _fm1parentnone
    sha2flag = usingsha256
    sha1size = _fm1nodesha1size
    sha2size = _fm1nodesha256size
    sha1fmt = _fm1nodesha1
    sha2fmt = _fm1nodesha256
    metasize = _fm1metapairsize
    metafmt = _fm1metapair
    fsize = _fm1fsize
    unpack = _unpack

    # Loop on markers
    stop = len(data) - _fm1fsize
    ufixed = struct.Struct(_fm1fixed).unpack

    while off <= stop:
        # read fixed part
        o1 = off + fsize
        t, secs, tz, flags, numsuc, numpar, nummeta, prec = ufixed(data[off:o1])

        if flags & sha2flag:
            # FIXME: prec was read as a SHA1, needs to be amended

            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha2size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha2size * numsuc
                sucs = unpack(sha2fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha2size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha2size * numpar
                parents = unpack(sha2fmt * numpar, data[o2:o3])
        else:
            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha1size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha1size * numsuc
                sucs = unpack(sha1fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha1size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha1size * numpar
                parents = unpack(sha1fmt * numpar, data[o2:o3])

        # read metadata
        off = o3 + metasize * nummeta
        metapairsize = unpack('>' + (metafmt * nummeta), data[o3:off])
        metadata = []
        for idx in xrange(0, len(metapairsize), 2):
            o1 = off + metapairsize[idx]
            o2 = o1 + metapairsize[idx + 1]
            metadata.append((data[off:o1], data[o1:o2]))
            off = o2

        yield (prec, sucs, flags, tuple(metadata), (secs, tz * 60), parents)

def _fm1encodeonemarker(marker):
    pre, sucs, flags, metadata, date, parents = marker
    # determine node size
    _fm1node = _fm1nodesha1
    if flags & usingsha256:
        _fm1node = _fm1nodesha256
    numsuc = len(sucs)
    numextranodes = numsuc
    if parents is None:
        numpar = _fm1parentnone
    else:
        numpar = len(parents)
        numextranodes += numpar
    formatnodes = _fm1node * numextranodes
    formatmeta = _fm1metapair * len(metadata)
    format = _fm1fixed + formatnodes + formatmeta
    # tz is stored in minutes so we divide by 60
    tz = date[1]//60
    data = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre]
    data.extend(sucs)
    if parents is not None:
        data.extend(parents)
    totalsize = _calcsize(format)
    for key, value in metadata:
        lk = len(key)
        lv = len(value)
        data.append(lk)
        data.append(lv)
        totalsize += lk + lv
    data[0] = totalsize
    data = [_pack(format, *data)]
    for key, value in metadata:
        data.append(key)
        data.append(value)
    return ''.join(data)

def _fm1readmarkers(data, off):
    native = getattr(parsers, 'fm1readmarkers', None)
    if not native:
        return _fm1purereadmarkers(data, off)
    stop = len(data) - _fm1fsize
    return native(data, off, stop)

# mapping to read/write various marker formats
# <version> -> (decoder, encoder)
formats = {_fm0version: (_fm0readmarkers, _fm0encodeonemarker),
           _fm1version: (_fm1readmarkers, _fm1encodeonemarker)}

def _readmarkerversion(data):
    return _unpack('>B', data[0:1])[0]

@util.nogc
def _readmarkers(data):
    """Read and enumerate markers from raw data"""
    diskversion = _readmarkerversion(data)
    off = 1
    if diskversion not in formats:
        msg = _('parsing obsolete marker: unknown version %r') % diskversion
        raise error.UnknownVersion(msg, version=diskversion)
    return diskversion, formats[diskversion][0](data, off)

def encodeheader(version=_fm0version):
    return _pack('>B', version)

def encodemarkers(markers, addheader=False, version=_fm0version):
    # Kept separate from flushmarkers(), it will be reused for
    # markers exchange.
    encodeone = formats[version][1]
    if addheader:
        yield encodeheader(version)
    for marker in markers:
        yield encodeone(marker)

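# Illustrative sketch, not part of the original file: encodemarkers() and
# _readmarkers() round-trip a marker tuple through the binary format (dummy
# node ids, format 1).
def _exampleencodedecode():
    dummy = ('\x11' * 20, ('\x22' * 20,), 0, (('user', 'alice'),), (0.0, 0),
             None)
    blob = ''.join(encodemarkers([dummy], addheader=True,
                                 version=_fm1version))
    version, markers = _readmarkers(blob)
    decoded = list(markers)[0]
    assert version == _fm1version
    assert decoded[0] == dummy[0] and decoded[1] == dummy[1]
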
class marker(object):
    """Wrap obsolete marker raw data"""

    def __init__(self, repo, data):
        # the repo argument will be used to create changectx in a later version
        self._repo = repo
        self._data = data
        self._decodedmeta = None

    def __hash__(self):
        return hash(self._data)

    def __eq__(self, other):
        if type(other) != type(self):
            return False
        return self._data == other._data

    def precnode(self):
        """Precursor changeset node identifier"""
        return self._data[0]

    def succnodes(self):
        """List of successor changesets node identifiers"""
        return self._data[1]

    def parentnodes(self):
        """Parents of the precursors (None if not recorded)"""
        return self._data[5]

    def metadata(self):
        """Decoded metadata dictionary"""
        return dict(self._data[3])

    def date(self):
        """Creation date as (unixtime, offset)"""
        return self._data[4]

    def flags(self):
        """The flags field of the marker"""
        return self._data[2]

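# Illustrative sketch, not part of the original file: the marker class above
# wraps the raw six-element tuples stored in the obsstore for read access.
def _examplewrapmarkers(repo):
    return [marker(repo, data) for data in repo.obsstore]
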
@util.nogc
def _addsuccessors(successors, markers):
    for mark in markers:
        successors.setdefault(mark[0], set()).add(mark)

@util.nogc
def _addprecursors(precursors, markers):
    for mark in markers:
        for suc in mark[1]:
            precursors.setdefault(suc, set()).add(mark)

@util.nogc
def _addchildren(children, markers):
    for mark in markers:
        parents = mark[5]
        if parents is not None:
            for p in parents:
                children.setdefault(p, set()).add(mark)

def _checkinvalidmarkers(markers):
    """search for markers with invalid data and raise an error if needed

    Exists as a separate function to allow the evolve extension a more
    subtle handling.
    """
    for mark in markers:
        if node.nullid in mark[1]:
            raise error.Abort(_('bad obsolescence marker detected: '
                                'invalid successors nullid'))

class obsstore(object):
    """Store obsolete markers

    Markers can be accessed with three mappings:
    - precursors[x] -> set(markers on precursors edges of x)
    - successors[x] -> set(markers on successors edges of x)
    - children[x]   -> set(markers on precursors edges of children(x))
    """

    fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
    # prec:    nodeid, precursor changesets
    # succs:   tuple of nodeid, successor changesets (0-N length)
    # flag:    integer, flag field carrying modifier for the markers (see doc)
    # meta:    binary blob, encoded metadata dictionary
    # date:    (float, int) tuple, date of marker creation
    # parents: (tuple of nodeid) or None, parents of precursors
    #          None is used when no data has been recorded

    def __init__(self, svfs, defaultformat=_fm1version, readonly=False):
        # caches for various obsolescence related data
        self.caches = {}
        self.svfs = svfs
        self._defaultformat = defaultformat
        self._readonly = readonly

    def __iter__(self):
        return iter(self._all)

    def __len__(self):
        return len(self._all)

    def __nonzero__(self):
        if not self._cached('_all'):
            try:
                return self.svfs.stat('obsstore').st_size > 1
            except OSError as inst:
                if inst.errno != errno.ENOENT:
                    raise
                # just build an empty _all list if no obsstore exists, which
                # avoids further stat() syscalls
                pass
        return bool(self._all)

    __bool__ = __nonzero__

    @property
    def readonly(self):
        """True if marker creation is disabled

        Remove me in the future when obsolete marker is always on."""
        return self._readonly

    def create(self, transaction, prec, succs=(), flag=0, parents=None,
               date=None, metadata=None, ui=None):
        """obsolete: add a new obsolete marker

        * ensuring it is hashable
        * check mandatory metadata
        * encode metadata

        If you are a human writing code creating markers, you want to use the
        `createmarkers` function in this module instead.

        Return True if a new marker has been added, False if the marker
        already existed (no op).
        """
        if metadata is None:
            metadata = {}
        if date is None:
            if 'date' in metadata:
                # as a courtesy for out-of-tree extensions
                date = util.parsedate(metadata.pop('date'))
            elif ui is not None:
                date = ui.configdate('devel', 'default-date')
                if date is None:
                    date = util.makedate()
            else:
                date = util.makedate()
        if len(prec) != 20:
            raise ValueError(prec)
        for succ in succs:
            if len(succ) != 20:
                raise ValueError(succ)
        if prec in succs:
            raise ValueError(_('in-marker cycle with %s') % node.hex(prec))

        metadata = tuple(sorted(metadata.iteritems()))

        marker = (str(prec), tuple(succs), int(flag), metadata, date, parents)
        return bool(self.add(transaction, [marker]))

    def add(self, transaction, markers):
        """Add new markers to the store

        Takes care of filtering out duplicates.
        Returns the number of new markers."""
        if self._readonly:
            raise error.Abort(_('creating obsolete markers is not enabled on '
                                'this repo'))
        known = set()
        getsuccessors = self.successors.get
        new = []
        for m in markers:
            if m not in getsuccessors(m[0], ()) and m not in known:
                known.add(m)
                new.append(m)
        if new:
            f = self.svfs('obsstore', 'ab')
            try:
                offset = f.tell()
                transaction.add('obsstore', offset)
                # offset == 0: new file - add the version header
                for bytes in encodemarkers(new, offset == 0, self._version):
                    f.write(bytes)
            finally:
                # XXX: f.close() == filecache invalidation == obsstore rebuilt.
                # call 'filecacheentry.refresh()' here
                f.close()
            self._addmarkers(new)
            # new markers *may* have changed several sets. invalidate the
            # caches.
            self.caches.clear()
        # records the number of new markers for the transaction hooks
        previous = int(transaction.hookargs.get('new_obsmarkers', '0'))
        transaction.hookargs['new_obsmarkers'] = str(previous + len(new))
        return len(new)

    def mergemarkers(self, transaction, data):
        """merge a binary stream of markers inside the obsstore

        Returns the number of new markers added."""
        version, markers = _readmarkers(data)
        return self.add(transaction, markers)

    @propertycache
    def _data(self):
        return self.svfs.tryread('obsstore')

    @propertycache
    def _version(self):
        if len(self._data) >= 1:
            return _readmarkerversion(self._data)
        else:
            return self._defaultformat

    @propertycache
    def _all(self):
        data = self._data
        if not data:
            return []
        self._version, markers = _readmarkers(data)
        markers = list(markers)
        _checkinvalidmarkers(markers)
        return markers

    @propertycache
    def successors(self):
        successors = {}
        _addsuccessors(successors, self._all)
        return successors

    @propertycache
    def precursors(self):
        precursors = {}
        _addprecursors(precursors, self._all)
        return precursors

    @propertycache
    def children(self):
        children = {}
        _addchildren(children, self._all)
        return children

    def _cached(self, attr):
        return attr in self.__dict__

    def _addmarkers(self, markers):
        markers = list(markers) # to allow repeated iteration
        self._all.extend(markers)
        if self._cached('successors'):
            _addsuccessors(self.successors, markers)
        if self._cached('precursors'):
            _addprecursors(self.precursors, markers)
        if self._cached('children'):
            _addchildren(self.children, markers)
        _checkinvalidmarkers(markers)

    def relevantmarkers(self, nodes):
        """return a set of all obsolescence markers relevant to a set of nodes.

        "relevant" to a set of nodes means:

        - markers that use this changeset as a successor
        - prune markers of direct children of this changeset
        - recursive application of the two rules on precursors of these markers

        It is a set so you cannot rely on order."""

        pendingnodes = set(nodes)
        seenmarkers = set()
        seennodes = set(pendingnodes)
        precursorsmarkers = self.precursors
        succsmarkers = self.successors
        children = self.children
        while pendingnodes:
            direct = set()
            for current in pendingnodes:
                direct.update(precursorsmarkers.get(current, ()))
                pruned = [m for m in children.get(current, ()) if not m[1]]
                direct.update(pruned)
                pruned = [m for m in succsmarkers.get(current, ()) if not m[1]]
                direct.update(pruned)
            direct -= seenmarkers
            pendingnodes = set([m[0] for m in direct])
            seenmarkers |= direct
            pendingnodes -= seennodes
            seennodes |= pendingnodes
        return seenmarkers

def makestore(ui, repo):
    """Create an obsstore instance from a repo."""
    # read default format for new obsstore.
    # developer config: format.obsstore-version
    defaultformat = ui.configint('format', 'obsstore-version', None)
    # rely on obsstore class default when possible.
    kwargs = {}
    if defaultformat is not None:
        kwargs['defaultformat'] = defaultformat
    readonly = not isenabled(repo, createmarkersopt)
    store = obsstore(repo.svfs, readonly=readonly, **kwargs)
    if store and readonly:
        ui.warn(_('obsolete feature not enabled but %i markers found!\n')
                % len(list(store)))
    return store

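# Illustrative sketch, not part of the original file: creating a marker goes
# through an obsstore bound to a repo, inside a lock and a transaction (the
# same pattern pushmarker() uses below); 'prec' and 'succ' stand for 20-byte
# node ids.
def _examplecreatemarker(repo, prec, succ):
    lock = repo.lock()
    try:
        tr = repo.transaction('example-obsmarker')
        try:
            repo.obsstore.create(tr, prec, succs=(succ,), ui=repo.ui)
            tr.close()
        finally:
            tr.release()
    finally:
        lock.release()
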
773 def _filterprunes(markers):
773 def _filterprunes(markers):
774 """return a set with no prune markers"""
774 """return a set with no prune markers"""
775 return set(m for m in markers if m[1])
775 return set(m for m in markers if m[1])
776
776
777 def exclusivemarkers(repo, nodes):
777 def exclusivemarkers(repo, nodes):
778 """set of markers relevant to "nodes" but no other locally-known nodes
778 """set of markers relevant to "nodes" but no other locally-known nodes
779
779
780 This function compute the set of markers "exclusive" to a locally-known
780 This function compute the set of markers "exclusive" to a locally-known
781 node. This means we walk the markers starting from <nodes> until we reach a
781 node. This means we walk the markers starting from <nodes> until we reach a
782 locally-known precursors outside of <nodes>. Element of <nodes> with
782 locally-known precursors outside of <nodes>. Element of <nodes> with
783 locally-known successors outside of <nodes> are ignored (since their
783 locally-known successors outside of <nodes> are ignored (since their
784 precursors markers are also relevant to these successors).
784 precursors markers are also relevant to these successors).
785
785
786 For example:
786 For example:
787
787
788 # (A0 rewritten as A1)
788 # (A0 rewritten as A1)
789 #
789 #
790 # A0 <-1- A1 # Marker "1" is exclusive to A1
790 # A0 <-1- A1 # Marker "1" is exclusive to A1
791
791
792 or
792 or
793
793
794 # (A0 rewritten as AX; AX rewritten as A1; AX is unkown locally)
794 # (A0 rewritten as AX; AX rewritten as A1; AX is unkown locally)
795 #
795 #
796 # <-1- A0 <-2- AX <-3- A1 # Marker "2,3" are exclusive to A1
796 # <-1- A0 <-2- AX <-3- A1 # Marker "2,3" are exclusive to A1
797
797
798 or
798 or
799
799
800 # (A0 has unknown precursors, A0 rewritten as A1 and A2 (divergence))
800 # (A0 has unknown precursors, A0 rewritten as A1 and A2 (divergence))
801 #
801 #
802 # <-2- A1 # Marker "2" is exclusive to A0,A1
802 # <-2- A1 # Marker "2" is exclusive to A0,A1
803 # /
803 # /
804 # <-1- A0
804 # <-1- A0
805 # \
805 # \
806 # <-3- A2 # Marker "3" is exclusive to A0,A2
806 # <-3- A2 # Marker "3" is exclusive to A0,A2
807 #
807 #
808 # in addition:
808 # in addition:
809 #
809 #
810 # Markers "2,3" are exclusive to A1,A2
810 # Markers "2,3" are exclusive to A1,A2
811 # Markers "1,2,3" are exclusive to A0,A1,A2
811 # Markers "1,2,3" are exclusive to A0,A1,A2
812
812
813 See test/test-obsolete-bundle-strip.t for more examples.
813 See test/test-obsolete-bundle-strip.t for more examples.
814
814
815 An example usage is strip. When stripping a changeset, we also want to
815 An example usage is strip. When stripping a changeset, we also want to
816 strip the markers exclusive to this changeset. Otherwise we would have
816 strip the markers exclusive to this changeset. Otherwise we would have
817 "dangling"" obsolescence markers from its precursors: Obsolescence markers
817 "dangling"" obsolescence markers from its precursors: Obsolescence markers
818 marking a node as obsolete without any successors available locally.
818 marking a node as obsolete without any successors available locally.
819
819
820 As for relevant markers, the prune markers for children will be followed.
820 As for relevant markers, the prune markers for children will be followed.
821 Of course, they will only be followed if the pruned children is
821 Of course, they will only be followed if the pruned children is
822 locally-known. Since the prune markers are relevant to the pruned node.
822 locally-known. Since the prune markers are relevant to the pruned node.
823 However, while prune markers are considered relevant to the parent of the
823 However, while prune markers are considered relevant to the parent of the
824 pruned changesets, prune markers for locally-known changeset (with no
824 pruned changesets, prune markers for locally-known changeset (with no
825 successors) are considered exclusive to the pruned nodes. This allows
825 successors) are considered exclusive to the pruned nodes. This allows
826 to strip the prune markers (with the rest of the exclusive chain) alongside
826 to strip the prune markers (with the rest of the exclusive chain) alongside
827 the pruned changesets.
827 the pruned changesets.
828 """
828 """
829 # running on a filtered repository would be dangerous as markers could be
829 # running on a filtered repository would be dangerous as markers could be
830 # reported as exclusive when they are relevant for other filtered nodes.
830 # reported as exclusive when they are relevant for other filtered nodes.
831 unfi = repo.unfiltered()
831 unfi = repo.unfiltered()
832
832
833 # shortcut to various useful item
833 # shortcut to various useful item
834 nm = unfi.changelog.nodemap
834 nm = unfi.changelog.nodemap
835 precursorsmarkers = unfi.obsstore.precursors
835 precursorsmarkers = unfi.obsstore.precursors
836 successormarkers = unfi.obsstore.successors
836 successormarkers = unfi.obsstore.successors
837 childrenmarkers = unfi.obsstore.children
837 childrenmarkers = unfi.obsstore.children
838
838
839 # exclusive markers (return of the function)
839 # exclusive markers (return of the function)
840 exclmarkers = set()
840 exclmarkers = set()
841 # we need fast membership testing
841 # we need fast membership testing
842 nodes = set(nodes)
842 nodes = set(nodes)
843 # looking for head in the obshistory
843 # looking for head in the obshistory
844 #
844 #
845 # XXX we are ignoring all issues in regard with cycle for now.
845 # XXX we are ignoring all issues in regard with cycle for now.
846 stack = [n for n in nodes if not _filterprunes(successormarkers.get(n, ()))]
846 stack = [n for n in nodes if not _filterprunes(successormarkers.get(n, ()))]
847 stack.sort()
847 stack.sort()
848 # nodes already stacked
848 # nodes already stacked
849 seennodes = set(stack)
849 seennodes = set(stack)
850 while stack:
850 while stack:
851 current = stack.pop()
851 current = stack.pop()
852 # fetch precursors markers
852 # fetch precursors markers
853 markers = list(precursorsmarkers.get(current, ()))
853 markers = list(precursorsmarkers.get(current, ()))
854 # extend the list with prune markers
854 # extend the list with prune markers
855 for mark in successormarkers.get(current, ()):
855 for mark in successormarkers.get(current, ()):
856 if not mark[1]:
856 if not mark[1]:
857 markers.append(mark)
857 markers.append(mark)
858 # and markers from children (looking for prune)
858 # and markers from children (looking for prune)
859 for mark in childrenmarkers.get(current, ()):
859 for mark in childrenmarkers.get(current, ()):
860 if not mark[1]:
860 if not mark[1]:
861 markers.append(mark)
861 markers.append(mark)
862 # traverse the markers
862 # traverse the markers
863 for mark in markers:
863 for mark in markers:
864 if mark in exclmarkers:
864 if mark in exclmarkers:
865 # markers already selected
865 # markers already selected
866 continue
866 continue
867
867
868 # If the markers is about the current node, select it
868 # If the markers is about the current node, select it
869 #
869 #
870 # (this delay the addition of markers from children)
870 # (this delay the addition of markers from children)
871 if mark[1] or mark[0] == current:
871 if mark[1] or mark[0] == current:
872 exclmarkers.add(mark)
872 exclmarkers.add(mark)
873
873
874 # should we keep traversing through the precursors?
874 # should we keep traversing through the precursors?
875 prec = mark[0]
875 prec = mark[0]
876
876
877 # nodes in the stack or already processed
877 # nodes in the stack or already processed
878 if prec in seennodes:
878 if prec in seennodes:
879 continue
879 continue
880
880
881 # is this a locally known node ?
881 # is this a locally known node ?
882 known = prec in nm
882 known = prec in nm
883 # if locally-known and not in the <nodes> set the traversal
883 # if locally-known and not in the <nodes> set the traversal
884 # stop here.
884 # stop here.
885 if known and prec not in nodes:
885 if known and prec not in nodes:
886 continue
886 continue
887
887
888 # do not keep going if there are unselected markers pointing to this
888 # do not keep going if there are unselected markers pointing to this
889 # nodes. If we end up traversing these unselected markers later the
889 # nodes. If we end up traversing these unselected markers later the
890 # node will be taken care of at that point.
890 # node will be taken care of at that point.
891 precmarkers = _filterprunes(successormarkers.get(prec))
891 precmarkers = _filterprunes(successormarkers.get(prec))
892 if precmarkers.issubset(exclmarkers):
892 if precmarkers.issubset(exclmarkers):
893 seennodes.add(prec)
893 seennodes.add(prec)
894 stack.append(prec)
894 stack.append(prec)
895
895
896 return exclmarkers
896 return exclmarkers

def commonversion(versions):
    """Return the newest version listed in both versions and our local formats.

    Returns None if no common version exists.
    """
    versions.sort(reverse=True)
    # search for the highest version known on both sides
    for v in versions:
        if v in formats:
            return v
    return None
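
# Illustrative sketch (not part of the original module): assuming the local
# `formats` table supports marker versions 0 and 1, a peer advertising
# versions [0, 1, 2] negotiates down to the shared maximum:
#
#     >>> commonversion([2, 1, 0])    # -> 1, highest version known locally
#     >>> commonversion([5])          # -> None, no common version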

# arbitrarily picked to fit into the 8K limit from the HTTP server
# you have to take into account:
# - the version header
# - the base85 encoding
_maxpayload = 5300

def _pushkeyescape(markers):
    """encode markers into a dict suitable for pushkey exchange

    - binary data is base85 encoded
    - split into chunks smaller than 5300 bytes"""
    keys = {}
    parts = []
    currentlen = _maxpayload * 2 # ensure we create a new part
    for marker in markers:
        nextdata = _fm0encodeonemarker(marker)
        if (len(nextdata) + currentlen > _maxpayload):
            currentpart = []
            currentlen = 0
            parts.append(currentpart)
        currentpart.append(nextdata)
        currentlen += len(nextdata)
    for idx, part in enumerate(reversed(parts)):
        data = ''.join([_pack('>B', _fm0version)] + part)
        keys['dump%i' % idx] = util.b85encode(data)
    return keys
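
# Illustrative sketch (not part of the original module): the loop above packs
# encoded markers greedily, opening a new part whenever adding the next marker
# would push the current part past _maxpayload. With hypothetical encoded
# sizes of [3000, 3000, 1000] bytes, the parts come out as [3000] and
# [3000, 1000]; the reversed enumeration then names the latter 'dump0' and the
# former 'dump1'.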

def listmarkers(repo):
    """List markers over pushkey"""
    if not repo.obsstore:
        return {}
    return _pushkeyescape(sorted(repo.obsstore))

def pushmarker(repo, key, old, new):
    """Push markers over pushkey"""
    if not key.startswith('dump'):
        repo.ui.warn(_('unknown key: %r') % key)
        return False
    if old:
        repo.ui.warn(_('unexpected old value for %r') % key)
        return False
    data = util.b85decode(new)
    lock = repo.lock()
    try:
        tr = repo.transaction('pushkey: obsolete markers')
        try:
            repo.obsstore.mergemarkers(tr, data)
            repo.invalidatevolatilesets()
            tr.close()
            return True
        finally:
            tr.release()
    finally:
        lock.release()
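
# Illustrative sketch (not part of the original module): listmarkers() and
# pushmarker() are the two halves of the legacy pushkey transport for
# obsolescence markers. A minimal replay between two hypothetical repos:
#
#     >>> for key, value in sorted(listmarkers(srcrepo).items()):
#     ...     pushmarker(dstrepo, key, '', value)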

def getmarkers(repo, nodes=None, exclusive=False):
    """returns markers known in a repository

    If <nodes> is specified, only markers "relevant" to those nodes are
    returned"""
    if nodes is None:
        rawmarkers = repo.obsstore
    elif exclusive:
        rawmarkers = exclusivemarkers(repo, nodes)
    else:
        rawmarkers = repo.obsstore.relevantmarkers(nodes)

    for markerdata in rawmarkers:
        yield marker(repo, markerdata)

def relevantmarkers(repo, node):
    """all obsolete markers relevant to some revision"""
    for markerdata in repo.obsstore.relevantmarkers(node):
        yield marker(repo, markerdata)


def precursormarkers(ctx):
    """obsolete markers marking this changeset as a successor"""
    for data in ctx.repo().obsstore.precursors.get(ctx.node(), ()):
        yield marker(ctx.repo(), data)

def successormarkers(ctx):
    """obsolete markers making this changeset obsolete"""
    for data in ctx.repo().obsstore.successors.get(ctx.node(), ()):
        yield marker(ctx.repo(), data)

def allsuccessors(obsstore, nodes, ignoreflags=0):
    """Yield a node for every successor of <nodes>.

    Some successors may be unknown locally.

    This is a linear yield unsuited to detecting split changesets. It includes
    the initial nodes too."""
    remaining = set(nodes)
    seen = set(remaining)
    while remaining:
        current = remaining.pop()
        yield current
        for mark in obsstore.successors.get(current, ()):
            # ignore markers flagged with the specified flag
            if mark[2] & ignoreflags:
                continue
            for suc in mark[1]:
                if suc not in seen:
                    seen.add(suc)
                    remaining.add(suc)
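
# Illustrative sketch (not part of the original module): allsuccessors() is a
# plain transitive closure over the successors index. With a hypothetical
# store where A was rewritten as B and B as C (markers simplified to
# (precursor, successors, flags) triples), the walk yields all three nodes:
#
#     >>> class fakestore(object):
#     ...     successors = {'A': [('A', ('B',), 0)],
#     ...                   'B': [('B', ('C',), 0)]}
#     >>> sorted(allsuccessors(fakestore(), ['A']))
#     ['A', 'B', 'C']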

def allprecursors(obsstore, nodes, ignoreflags=0):
    """Yield a node for every precursor of <nodes>.

    Some precursors may be unknown locally.

    This is a linear yield unsuited to detecting folded changesets. It includes
    the initial nodes too."""

    remaining = set(nodes)
    seen = set(remaining)
    while remaining:
        current = remaining.pop()
        yield current
        for mark in obsstore.precursors.get(current, ()):
            # ignore markers flagged with the specified flag
            if mark[2] & ignoreflags:
                continue
            suc = mark[0]
            if suc not in seen:
                seen.add(suc)
                remaining.add(suc)

def foreground(repo, nodes):
    """return all nodes in the "foreground" of other nodes

    The foreground of a revision is anything reachable using parent -> children
    or precursor -> successor relations. It is very similar to "descendant" but
    augmented with obsolescence information.

    Beware that obsolescence cycles may yield unexpected results in complex
    situations.
    """
    repo = repo.unfiltered()
    foreground = set(repo.set('%ln::', nodes))
    if repo.obsstore:
        # We only need this complicated logic if there is obsolescence
        # XXX will probably deserve an optimised revset.
        nm = repo.changelog.nodemap
        plen = -1
        # compute the whole set of successors or descendants
        while len(foreground) != plen:
            plen = len(foreground)
            succs = set(c.node() for c in foreground)
            mutable = [c.node() for c in foreground if c.mutable()]
            succs.update(allsuccessors(repo.obsstore, mutable))
            known = (n for n in succs if n in nm)
            foreground = set(repo.set('%ln::', known))
    return set(c.node() for c in foreground)
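
# Illustrative sketch (not part of the original module): the loop above
# iterates to a fixed point because following successors can expose new
# descendants whose own successors must be followed in turn. If X has a child
# Y and Y was rewritten as Y', the first pass collects {X, Y}, the next pass
# adds Y' (and its descendants), and the loop stops once a pass adds nothing.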


def successorssets(repo, initialnode, cache=None):
    """Return the set of all latest successors of the initial node

    The successors set of a changeset A is the group of revisions that succeed
    A. It succeeds A as a consistent whole, each revision being only a partial
    replacement. The successors set contains non-obsolete changesets only.

    This function returns the full list of successor sets which is why it
    returns a list of tuples and not just a single tuple. Each tuple is a valid
    successors set. Note that (A,) may be a valid successors set for changeset A
    (see below).

    In most cases, a changeset A will have a single element (e.g. the changeset
    A is replaced by A') in its successors set. Though, it is also common for a
    changeset A to have no elements in its successor set (e.g. the changeset
    has been pruned). Therefore, the returned list of successors sets will be
    [(A',)] or [], respectively.

    When a changeset A is split into A' and B', however, it will result in a
    successors set containing more than a single element, i.e. [(A',B')].
    Divergent changesets will result in multiple successors sets, i.e. [(A',),
    (A'',)].

    If a changeset A is not obsolete, then it will conceptually have no
    successors set. To distinguish this from a pruned changeset, the successor
    set will contain itself only, i.e. [(A,)].

    Finally, successors unknown locally are considered to be pruned (obsoleted
    without any successors).

    The optional `cache` parameter is a dictionary that may contain precomputed
    successors sets. It is meant to reuse the computation of a previous call to
    `successorssets` when multiple calls are made at the same time. The cache
    dictionary is updated in place. The caller is responsible for its life
    span. Code that makes multiple calls to `successorssets` *must* use this
    cache mechanism or suffer terrible performance.
    """

    succmarkers = repo.obsstore.successors

    # Stack of nodes we search successors sets for
    toproceed = [initialnode]
    # set version of above list for fast loop detection
    # elements added to "toproceed" must be added here
    stackedset = set(toproceed)
    if cache is None:
        cache = {}

    # This while loop is the flattened version of a recursive search for
    # successors sets
    #
    # def successorssets(x):
    #     successors = directsuccessors(x)
    #     ss = [[]]
    #     for succ in directsuccessors(x):
    #         # product as in itertools cartesian product
    #         ss = product(ss, successorssets(succ))
    #     return ss
    #
    # But we cannot use plain recursive calls here:
    # - that would blow the python call stack
    # - obsolescence markers may have cycles, we need to handle them.
    #
    # The `toproceed` list acts as our call stack. Every node we search
    # successors sets for is stacked there.
    #
    # The `stackedset` is a set version of this stack used to check if a node
    # is already stacked. This check is used to detect cycles and prevent
    # infinite loops.
    #
    # successors sets of all nodes are stored in the `cache` dictionary.
    #
    # After this while loop ends we use the cache to return the successors sets
    # for the node requested by the caller.
    while toproceed:
        # Every iteration tries to compute the successors sets of the topmost
        # node of the stack: CURRENT.
        #
        # There are four possible outcomes:
        #
        # 1) We already know the successors sets of CURRENT:
        #    -> mission accomplished, pop it from the stack.
        # 2) Node is not obsolete:
        #    -> the node is its own successors sets. Add it to the cache.
        # 3) We do not know successors set of direct successors of CURRENT:
        #    -> We add those successors to the stack.
        # 4) We know successors sets of all direct successors of CURRENT:
        #    -> We can compute CURRENT successors set and add it to the
        #       cache.
        #
        current = toproceed[-1]
        if current in cache:
            # case (1): We already know the successors sets
            stackedset.remove(toproceed.pop())
        elif current not in succmarkers:
            # case (2): The node is not obsolete.
            if current in repo:
                # We have a valid last successor.
                cache[current] = [(current,)]
            else:
                # Final obsolete version is unknown locally.
                # Do not count that as a valid successor.
                cache[current] = []
        else:
            # cases (3) and (4)
            #
            # We proceed in two phases. Phase 1 aims to distinguish case (3)
            # from case (4):
            #
            # For each direct successor of CURRENT, we check whether its
            # successors sets are known. If they are not, we stack the
            # unknown node and proceed to the next iteration of the while
            # loop. (case 3)
            #
            # During this step, we may detect obsolescence cycles: a node
            # with unknown successors sets but already in the call stack.
            # In such a situation, we arbitrarily set the successors sets of
            # the node to nothing (node pruned) to break the cycle.
            #
            # If no break was encountered we proceed to phase 2.
            #
            # Phase 2 computes successors sets of CURRENT (case 4); see details
            # in phase 2 itself.
            #
            # Note the two levels of iteration in each phase.
            # - The first one handles obsolescence markers using CURRENT as
            #   precursor (successors markers of CURRENT).
            #
            #   Having multiple entries here means divergence.
            #
            # - The second one handles successors defined in each marker.
            #
            #   Having none means a pruned node, multiple successors means a
            #   split, a single successor is a standard replacement.
            #
            for mark in sorted(succmarkers[current]):
                for suc in mark[1]:
                    if suc not in cache:
                        if suc in stackedset:
                            # cycle breaking
                            cache[suc] = []
                        else:
                            # case (3) If we have not computed successors sets
                            # of one of those successors we add it to the
                            # `toproceed` stack and stop all work for this
                            # iteration.
                            toproceed.append(suc)
                            stackedset.add(suc)
                            break
                else:
                    continue
                break
            else:
                # case (4): we know all successors sets of all direct
                # successors
                #
                # The successors set contributed by each marker depends on the
                # successors sets of all its "successors" nodes.
                #
                # Each different marker is a divergence in the obsolescence
                # history. It contributes successors sets distinct from other
                # markers.
                #
                # Within a marker, a successor may have divergent successors
                # sets. In such a case, the marker will contribute multiple
                # divergent successors sets. If multiple successors have
                # divergent successors sets, a Cartesian product is used.
                #
                # At the end we post-process successors sets to remove
                # duplicated entries and successors sets that are strict
                # subsets of another one.
                succssets = []
                for mark in sorted(succmarkers[current]):
                    # successors sets contributed by this marker
                    markss = [[]]
                    for suc in mark[1]:
                        # Cartesian product with previous successors
                        productresult = []
                        for prefix in markss:
                            for suffix in cache[suc]:
                                newss = list(prefix)
                                for part in suffix:
                                    # do not duplicate entries in a successors
                                    # set; the first entry wins.
                                    if part not in newss:
                                        newss.append(part)
                                productresult.append(newss)
                        markss = productresult
                    succssets.extend(markss)
                # remove duplicates and subsets
                seen = []
                final = []
                candidate = sorted(((set(s), s) for s in succssets if s),
                                   key=lambda x: len(x[1]), reverse=True)
                for setversion, listversion in candidate:
                    for seenset in seen:
                        if setversion.issubset(seenset):
                            break
                    else:
                        final.append(listversion)
                        seen.append(setversion)
                final.reverse() # put small successors sets first
                cache[current] = final
    return cache[initialnode]
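
# Illustrative sketch (not part of the original module): the docstring's cases
# map to concrete return values (node names are hypothetical). Reusing one
# cache dictionary across calls is what keeps repeated queries cheap:
#
#     >>> cache = {}
#     >>> successorssets(repo, a, cache)    # A rewritten as A'
#     [(A',)]
#     >>> successorssets(repo, p, cache)    # P pruned, no successors
#     []
#     >>> successorssets(repo, s, cache)    # S split into S1 and S2
#     [(S1, S2)]
#     >>> successorssets(repo, d, cache)    # D rewritten divergently
#     [(D',), (D'',)]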

# mapping of 'set-name' -> <function to compute this set>
cachefuncs = {}
def cachefor(name):
    """Decorator to register a function as computing the cache for a set"""
    def decorator(func):
        if name in cachefuncs:
            msg = "duplicated registration for volatileset '%s' (existing: %r)"
            raise error.ProgrammingError(msg % (name, cachefuncs[name]))
        cachefuncs[name] = func
        return func
    return decorator
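
# Illustrative sketch (not part of the original module): registering two
# computers under the same name is now reported as a ProgrammingError instead
# of tripping an assert (which `python -O` would silently skip):
#
#     >>> @cachefor('obsolete')        # hypothetical duplicate registration
#     ... def _anothercomputer(repo):
#     ...     return set()
#     Traceback (most recent call last):
#       ...
#     ProgrammingError: duplicated registration for volatileset 'obsolete' ...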

def getrevs(repo, name):
    """Return the set of revisions that belong to the <name> set

    Such access may compute the set and cache it for future use"""
    repo = repo.unfiltered()
    if not repo.obsstore:
        return frozenset()
    if name not in repo.obsstore.caches:
        repo.obsstore.caches[name] = cachefuncs[name](repo)
    return repo.obsstore.caches[name]
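
# Illustrative sketch (not part of the original module): callers fetch a
# volatile set by name; the first access runs the registered computer, later
# accesses hit the per-obsstore cache (revision numbers are hypothetical):
#
#     >>> getrevs(repo, 'obsolete')    # computed by _computeobsoleteset
#     set([4, 7])
#     >>> getrevs(repo, 'extinct')     # 'obsolete' minus 'suspended'
#     set([4])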

# To keep things simple we need to invalidate obsolescence caches when:
#
# - a new changeset is added
# - the public phase is changed
# - obsolescence markers are added
# - strip is used on a repo
def clearobscaches(repo):
    """Remove all obsolescence related caches from a repo

    This removes all caches in the obsstore if the obsstore already exists on
    the repo.

    (We could be smarter here given the exact event that triggered the cache
    clearing)"""
    # only clear caches if there is obsstore data in this repo
    if 'obsstore' in repo._filecache:
        repo.obsstore.caches.clear()

@cachefor('obsolete')
def _computeobsoleteset(repo):
    """the set of obsolete revisions"""
    getnode = repo.changelog.node
    notpublic = repo._phasecache.getrevset(repo, (phases.draft, phases.secret))
    isobs = repo.obsstore.successors.__contains__
    obs = set(r for r in notpublic if isobs(getnode(r)))
    return obs

@cachefor('unstable')
def _computeunstableset(repo):
    """the set of non obsolete revisions with obsolete parents"""
    revs = [(ctx.rev(), ctx) for ctx in
            repo.set('(not public()) and (not obsolete())')]
    revs.sort(key=lambda x: x[0])
    unstable = set()
    for rev, ctx in revs:
        # A rev is unstable if one of its parents is obsolete or unstable
        # this works since we traverse in increasing rev order
        if any((x.obsolete() or (x.rev() in unstable))
               for x in ctx.parents()):
            unstable.add(rev)
    return unstable
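
# Illustrative sketch (not part of the original module): the increasing rev
# order guarantees a parent's verdict is settled before its children are
# examined. If rev 5 is obsolete, its child rev 6 is flagged unstable through
# its obsolete parent, and 6's child rev 7 through the already-flagged rev 6,
# all in a single pass.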

@cachefor('suspended')
def _computesuspendedset(repo):
    """the set of obsolete parents with non obsolete descendants"""
    suspended = repo.changelog.ancestors(getrevs(repo, 'unstable'))
    return set(r for r in getrevs(repo, 'obsolete') if r in suspended)

@cachefor('extinct')
def _computeextinctset(repo):
    """the set of obsolete parents without non obsolete descendants"""
    return getrevs(repo, 'obsolete') - getrevs(repo, 'suspended')


@cachefor('bumped')
def _computebumpedset(repo):
    """the set of revs trying to obsolete public revisions"""
    bumped = set()
    # util function (avoid attribute lookup in the loop)
    phase = repo._phasecache.phase # would be faster to grab the full list
    public = phases.public
    cl = repo.changelog
    torev = cl.nodemap.get
    for ctx in repo.set('(not public()) and (not obsolete())'):
        rev = ctx.rev()
        # We only evaluate mutable, non-obsolete revisions
        node = ctx.node()
        # (future) A cache of precursors may be worth it if splits are very
        # common
        for pnode in allprecursors(repo.obsstore, [node],
                                   ignoreflags=bumpedfix):
            prev = torev(pnode) # unfiltered! but so is phasecache
            if (prev is not None) and (phase(repo, prev) <= public):
                # we have a public precursor
                bumped.add(rev)
                break # Next draft!
    return bumped

@cachefor('divergent')
def _computedivergentset(repo):
    """the set of revs that compete to be the final successors of some
    revision.
    """
    divergent = set()
    obsstore = repo.obsstore
    newermap = {}
    for ctx in repo.set('(not public()) - obsolete()'):
        mark = obsstore.precursors.get(ctx.node(), ())
        toprocess = set(mark)
        seen = set()
        while toprocess:
            prec = toprocess.pop()[0]
            if prec in seen:
                continue # emergency cycle hanging prevention
            seen.add(prec)
            if prec not in newermap:
                successorssets(repo, prec, newermap)
            newer = [n for n in newermap[prec] if n]
            if len(newer) > 1:
                divergent.add(ctx.rev())
                break
            toprocess.update(obsstore.precursors.get(prec, ()))
    return divergent
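
# Note (editorial, not part of the original module): the loop above threads
# `newermap` into every successorssets() call as the shared cache, which is
# exactly the reuse pattern the successorssets docstring demands to avoid
# quadratic recomputation when many precursors are examined.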


def createmarkers(repo, relations, flag=0, date=None, metadata=None,
                  operation=None):
    """Add obsolete markers between changesets in a repo

    <relations> must be an iterable of (<old>, (<new>, ...)[,{metadata}])
    tuples. `old` and `news` are changectx objects. metadata is an optional
    dictionary containing metadata for this marker only. It is merged with the
    global metadata specified through the `metadata` argument of this function.

    Trying to obsolete a public changeset will raise an exception.

    The current user and date are used unless specified otherwise in the
    metadata attribute.

    This function operates within a transaction of its own, but does
    not take any lock on the repo.
    """
    # prepare metadata
    if metadata is None:
        metadata = {}
    if 'user' not in metadata:
        metadata['user'] = repo.ui.username()
    useoperation = repo.ui.configbool('experimental',
                                      'evolution.track-operation',
                                      False)
    if useoperation and operation:
        metadata['operation'] = operation
    tr = repo.transaction('add-obsolescence-marker')
    try:
        markerargs = []
        for rel in relations:
            prec = rel[0]
            sucs = rel[1]
            localmetadata = metadata.copy()
            if 2 < len(rel):
                localmetadata.update(rel[2])

            if not prec.mutable():
                raise error.Abort(_("cannot obsolete public changeset: %s")
                                  % prec,
                                  hint="see 'hg help phases' for details")
            nprec = prec.node()
            nsucs = tuple(s.node() for s in sucs)
            npare = None
            if not nsucs:
                npare = tuple(p.node() for p in prec.parents())
            if nprec in nsucs:
                raise error.Abort(_("changeset %s cannot obsolete itself")
                                  % prec)

            # Creating the marker causes the hidden cache to become invalid,
            # which causes recomputation when we ask for prec.parents() above.
            # Resulting in n^2 behavior. So let's prepare all of the args
            # first, then create the markers.
            markerargs.append((nprec, nsucs, npare, localmetadata))

        for args in markerargs:
            nprec, nsucs, npare, localmetadata = args
            repo.obsstore.create(tr, nprec, nsucs, flag, parents=npare,
                                 date=date, metadata=localmetadata,
                                 ui=repo.ui)
        repo.filteredrevcache.clear()
        tr.close()
    finally:
        tr.release()
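
# Illustrative sketch (not part of the original module): recording that `old`
# was rewritten as `new` and that `dead` was pruned, with a per-marker note
# merged into the shared metadata (all names are hypothetical changectx
# objects):
#
#     >>> createmarkers(repo, [(old, (new,)),
#     ...                      (dead, (), {'note': 'no longer needed'})],
#     ...               metadata={'user': 'alice <alice@example.org>'},
#     ...               operation='amend')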