# obsolete.py - obsolete markers handling
#
# Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
#                Logilab SA        <contact@logilab.fr>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""Obsolete marker handling

An obsolete marker maps an old changeset to a list of new
changesets. If the list of new changesets is empty, the old changeset
is said to be "killed". Otherwise, the old changeset is being
"replaced" by the new changesets.

Obsolete markers can be used to record and distribute changeset graph
transformations performed by history rewrite operations, and help
building new tools to reconcile conflicting rewrite actions. To
facilitate conflict resolution, markers include various annotations
besides old and new changeset identifiers, such as creation date or
author name.

The old obsoleted changeset is called a "precursor" and possible
replacements are called "successors". Markers that use changeset X as
a precursor are called "successor markers of X" because they hold
information about the successors of X. Markers that use changeset Y as
a successor are called "precursor markers of Y" because they hold
information about the precursors of Y.

Examples:

- When changeset A is replaced by changeset A', one marker is stored:

    (A, (A',))

- When changesets A and B are folded into a new changeset C, two markers are
  stored:

    (A, (C,)) and (B, (C,))

- When changeset A is simply "pruned" from the graph, a marker is created:

    (A, ())

- When changeset A is split into B and C, a single marker is used:

    (A, (B, C))

  We use a single marker to distinguish the "split" case from the "divergence"
  case. If two independent operations rewrite the same changeset A into A' and
  A'', we have an error case: divergent rewriting. We can detect it because
  two markers will be created independently:

  (A, (B,)) and (A, (C,))

Format
------

Markers are stored in an append-only file stored in
'.hg/store/obsstore'.

The file starts with a version header:

- 1 unsigned byte: version number, starting at zero.


The header is followed by the markers. Each marker is made of:

- 1 unsigned byte: number of new changesets "N", can be zero.

- 1 unsigned 32-bit integer: metadata size "M" in bytes.

- 1 byte: a bit field. It is reserved for flags used in common
  obsolete marker operations, to avoid repeated decoding of metadata
  entries.

- 20 bytes: obsoleted changeset identifier.

- N*20 bytes: new changeset identifiers.

- M bytes: metadata as a sequence of nul-terminated strings. Each
  string contains a key and a value, separated by a colon ':', without
  additional encoding. Keys cannot contain '\0' or ':' and values
  cannot contain '\0'.

"""
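# A worked example of the format above (illustrative sketch, not part of the
# original module): a "prune" marker with no successors (N = 0) and no
# metadata (M = 0) occupies only the 26-byte fixed record described by
# ``_fmfixed`` below, right after the 1-byte version header:
#
#   >>> import struct
#   >>> rec = struct.pack('>BIB20s', 0, 0, 0, '\x11' * 20)
#   >>> len(rec)  # 1 (N) + 4 (M) + 1 (flags) + 20 (precursor id)
#   26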
import struct
import util, base85, node
import phases
from i18n import _

_pack = struct.pack
_unpack = struct.unpack

_SEEK_END = 2 # os.SEEK_END was introduced in Python 2.5

# the obsolete feature is not mature enough to be enabled by default.
# you have to rely on a third party extension to enable this.
_enabled = False

# data used for parsing and writing
_fmversion = 0
_fmfixed = '>BIB20s'
_fmnode = '20s'
_fmfsize = struct.calcsize(_fmfixed)
_fnodesize = struct.calcsize(_fmnode)

### obsolescence marker flag

## bumpedfix flag
#
# When a changeset A' succeeds a changeset A which became public, we call A'
# "bumped" because it's a successor of a public changeset
#
#     o     A' (bumped)
#     |`:
#     | o   A
#     |/
#     o     Z
#
# The way to solve this situation is to create a new changeset Ad as a child
# of A. This changeset has the same content as A'. So the diff from A to A'
# is the same as the diff from A to Ad. Ad is marked as a successor of A'
#
#     o   Ad
#     |`:
#     | x A'
#     |'|
#     o | A
#     |/
#     o   Z
#
# But by transitivity Ad is also a successor of A. To avoid having Ad marked
# as bumped too, we add the `bumpedfix` flag to the marker, <A', (Ad,)>.
# This flag means that the successors express the changes between the public
# and bumped version and fix the situation, breaking the transitivity of
# "bumped" here.
bumpedfix = 1
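
# A hypothetical sketch (not in the original source) of how a consumer would
# test the flag; ``mark`` is the 6-tuple documented on ``obsstore.fields``:
#
#   if mark[2] & bumpedfix:
#       pass  # this marker only repairs a "bumped" situation, see above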

def _readmarkers(data):
    """Read and enumerate markers from raw data"""
    off = 0
    diskversion = _unpack('>B', data[off:off + 1])[0]
    off += 1
    if diskversion != _fmversion:
        raise util.Abort(_('parsing obsolete marker: unknown version %r')
                         % diskversion)

    # Loop on markers
    l = len(data)
    while off + _fmfsize <= l:
        # read fixed part
        cur = data[off:off + _fmfsize]
        off += _fmfsize
        nbsuc, mdsize, flags, pre = _unpack(_fmfixed, cur)
        # read replacement
        sucs = ()
        if nbsuc:
            s = (_fnodesize * nbsuc)
            cur = data[off:off + s]
            sucs = _unpack(_fmnode * nbsuc, cur)
            off += s
        # read metadata
        # (metadata will be decoded on demand)
        metadata = data[off:off + mdsize]
        if len(metadata) != mdsize:
            raise util.Abort(_('parsing obsolete marker: metadata is too '
                               'short, %d bytes expected, got %d')
                             % (mdsize, len(metadata)))
        off += mdsize
        meta = decodemeta(metadata)
        try:
            date = util.parsedate(meta.pop('date', '0 0'))
        except util.Abort:
            date = (0., 0)
        parents = None
        if 'p2' in meta:
            parents = (meta.pop('p1', None), meta.pop('p2', None))
        elif 'p1' in meta:
            parents = (meta.pop('p1', None),)
        elif 'p0' in meta:
            parents = ()
        if parents is not None:
            try:
                parents = tuple(node.bin(p) for p in parents)
                # if parent content is not a nodeid, drop the data
                for p in parents:
                    if len(p) != 20:
                        parents = None
                        break
            except TypeError:
                # if content cannot be translated to nodeid drop the data.
                parents = None

        metadata = encodemeta(meta)

        yield (pre, sucs, flags, metadata, date, parents)

def encodemeta(meta):
    """Return encoded metadata string to string mapping.

    Assume no ':' in key and no '\0' in both key and value."""
    for key, value in meta.iteritems():
        if ':' in key or '\0' in key:
            raise ValueError("':' and '\0' are forbidden in metadata key")
        if '\0' in value:
            raise ValueError("'\0' is forbidden in metadata value")
    return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])

def decodemeta(data):
    """Return string to string dictionary from encoded version."""
    d = {}
    for l in data.split('\0'):
        if l:
            # values may contain ':', so only split on the first one
            key, value = l.split(':', 1)
            d[key] = value
    return d
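
# Illustrative round trip under the encoding above (assumed example values,
# not from the original file); note that encodemeta() sorts keys:
#
#   >>> encodemeta({'user': 'alice', 'note': 'fold'})
#   'note:fold\x00user:alice'
#   >>> decodemeta('note:fold\x00user:alice') == {'note': 'fold',
#   ...                                           'user': 'alice'}
#   True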

class marker(object):
    """Wrap obsolete marker raw data"""

    def __init__(self, repo, data):
        # the repo argument will be used to create changectx in later versions
        self._repo = repo
        self._data = data
        self._decodedmeta = None

    def __hash__(self):
        return hash(self._data)

    def __eq__(self, other):
        if type(other) != type(self):
            return False
        return self._data == other._data

    def precnode(self):
        """Precursor changeset node identifier"""
        return self._data[0]

    def succnodes(self):
        """List of successor changeset node identifiers"""
        return self._data[1]

    def parentnodes(self):
        """Parents of the precursors (None if not recorded)"""
        return self._data[5]

    def metadata(self):
        """Decoded metadata dictionary"""
        if self._decodedmeta is None:
            self._decodedmeta = decodemeta(self._data[3])
        return self._decodedmeta

    def date(self):
        """Creation date as (unixtime, offset)"""
        return self._data[4]

    def flags(self):
        """The flags field of the marker"""
        return self._data[2]

class obsstore(object):
    """Store obsolete markers

    Markers can be accessed with three mappings:
    - precursors[x] -> set(markers on precursors edges of x)
    - successors[x] -> set(markers on successors edges of x)
    - children[x]   -> set(markers on precursors edges of children(x))
    """

    fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
    # prec: nodeid, precursor changesets
    # succs: tuple of nodeid, successor changesets (0-N length)
    # flag: integer, flag field carrying modifier for the markers (see doc)
    # meta: binary blob, encoded metadata dictionary
    # date: (float, int) tuple, date of marker creation
    # parents: (tuple of nodeid) or None, parents of precursors
    #          None is used when no data has been recorded
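
    # For instance (assumed example values, not from the original file), a
    # marker recording that ``old`` was rewritten into ``new`` with empty
    # metadata could look like:
    #   (old, (new,), 0, '', (1387459200.0, 0), None)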

    def __init__(self, sopener):
        # caches for various obsolescence related computations
        self.caches = {}
        self._all = []
        self.precursors = {}
        self.successors = {}
        self.children = {}
        self.sopener = sopener
        data = sopener.tryread('obsstore')
        if data:
            self._load(_readmarkers(data))

    def __iter__(self):
        return iter(self._all)

    def __len__(self):
        return len(self._all)

    def __nonzero__(self):
        return bool(self._all)

    def create(self, transaction, prec, succs=(), flag=0, parents=None,
               date=None, metadata=None):
        """obsolete: add a new obsolete marker

        * ensuring it is hashable
        * check mandatory metadata
        * encode metadata

        If you are a human writing code creating markers you want to use the
        `createmarkers` function in this module instead.

        Return True if a new marker has been added, False if the marker
        already existed (no op).
        """
        if metadata is None:
            metadata = {}
        if date is None:
            if 'date' in metadata:
                # as a courtesy for out-of-tree extensions
                date = util.parsedate(metadata.pop('date'))
            else:
                date = util.makedate()
        if len(prec) != 20:
            raise ValueError(prec)
        for succ in succs:
            if len(succ) != 20:
                raise ValueError(succ)
        if prec in succs:
            raise ValueError(_('in-marker cycle with %s') % node.hex(prec))
        marker = (str(prec), tuple(succs), int(flag), encodemeta(metadata),
                  date, parents)
        return bool(self.add(transaction, [marker]))

    def add(self, transaction, markers):
        """Add new markers to the store

        Take care of filtering duplicates.
        Return the number of new markers."""
        if not _enabled:
            raise util.Abort('obsolete feature is not enabled on this repo')
        known = set(self._all)
        new = []
        for m in markers:
            if m not in known:
                known.add(m)
                new.append(m)
        if new:
            f = self.sopener('obsstore', 'ab')
            try:
                # Whether the file's current position is at the beginning or
                # at the end after opening a file for appending is
                # implementation defined. So we must seek to the end before
                # calling tell(), or we may get a zero offset for non-zero
                # sized files on some platforms (issue3543).
                f.seek(0, _SEEK_END)
                offset = f.tell()
                transaction.add('obsstore', offset)
                # offset == 0: new file - add the version header
                for bytes in _encodemarkers(new, offset == 0):
                    f.write(bytes)
            finally:
                # XXX: f.close() == filecache invalidation == obsstore rebuilt.
                # call 'filecacheentry.refresh()' here
                f.close()
            self._load(new)
            # new markers *may* have changed several sets. invalidate the
            # caches.
            self.caches.clear()
        return len(new)

    def mergemarkers(self, transaction, data):
        markers = _readmarkers(data)
        self.add(transaction, markers)

    def _load(self, markers):
        for mark in markers:
            self._all.append(mark)
            pre, sucs = mark[:2]
            self.successors.setdefault(pre, set()).add(mark)
            for suc in sucs:
                self.precursors.setdefault(suc, set()).add(mark)
            parents = mark[5]
            if parents is not None:
                for p in parents:
                    self.children.setdefault(p, set()).add(mark)
        if node.nullid in self.precursors:
            raise util.Abort(_('bad obsolescence marker detected: '
                               'invalid successors nullid'))

def _encodemarkers(markers, addheader=False):
    # Kept separate from flushmarkers(), it will be reused for
    # markers exchange.
    if addheader:
        yield _pack('>B', _fmversion)
    for marker in markers:
        yield _encodeonemarker(marker)


def _encodeonemarker(marker):
    pre, sucs, flags, metadata, date, parents = marker
    metadata = decodemeta(metadata)
    metadata['date'] = '%d %i' % date
    if parents is not None:
        if not parents:
            # mark that we explicitly recorded no parents
            metadata['p0'] = ''
        for i, p in enumerate(parents):
            metadata['p%i' % (i + 1)] = node.hex(p)
    metadata = encodemeta(metadata)
    nbsuc = len(sucs)
    format = _fmfixed + (_fmnode * nbsuc)
    data = [nbsuc, len(metadata), flags, pre]
    data.extend(sucs)
    return _pack(format, *data) + metadata
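
# Size sketch (derived from the format constants above, shown for
# illustration): a marker with two successors and an M-byte metadata blob
# encodes to _fmfsize + 2 * _fnodesize + M = 26 + 40 + M bytes.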

# arbitrarily picked to fit into 8K limit from HTTP server
# you have to take into account:
# - the version header
# - the base85 encoding
_maxpayload = 5300

def _pushkeyescape(markers):
    """encode markers into a dict suitable for pushkey exchange

    - binary data is base85 encoded
    - split in chunks smaller than 5300 bytes"""
    keys = {}
    parts = []
    currentlen = _maxpayload * 2  # ensure we create a new part
    for marker in markers:
        nextdata = _encodeonemarker(marker)
        if (len(nextdata) + currentlen > _maxpayload):
            currentpart = []
            currentlen = 0
            parts.append(currentpart)
        currentpart.append(nextdata)
        currentlen += len(nextdata)
    for idx, part in enumerate(reversed(parts)):
        data = ''.join([_pack('>B', _fmversion)] + part)
        keys['dump%i' % idx] = base85.b85encode(data)
    return keys
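
# The resulting mapping has the shape {'dump0': <base85 data>, 'dump1': ...}
# (shape shown for illustration): each value decodes via base85.b85decode()
# to a version header followed by whole markers, and chunks are kept below
# _maxpayload bytes before encoding (barring a single oversized marker).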

def listmarkers(repo):
    """List markers over pushkey"""
    if not repo.obsstore:
        return {}
    return _pushkeyescape(repo.obsstore)

def pushmarker(repo, key, old, new):
    """Push markers over pushkey"""
    if not key.startswith('dump'):
        repo.ui.warn(_('unknown key: %r') % key)
        return 0
    if old:
        repo.ui.warn(_('unexpected old value for %r') % key)
        return 0
    data = base85.b85decode(new)
    lock = repo.lock()
    try:
        tr = repo.transaction('pushkey: obsolete markers')
        try:
            repo.obsstore.mergemarkers(tr, data)
            tr.close()
            return 1
        finally:
            tr.release()
    finally:
        lock.release()

def allmarkers(repo):
    """all obsolete markers known in a repository"""
    for markerdata in repo.obsstore:
        yield marker(repo, markerdata)

def precursormarkers(ctx):
    """obsolete markers marking this changeset as a successor"""
    for data in ctx._repo.obsstore.precursors.get(ctx.node(), ()):
        yield marker(ctx._repo, data)

def successormarkers(ctx):
    """obsolete markers making this changeset obsolete"""
    for data in ctx._repo.obsstore.successors.get(ctx.node(), ()):
        yield marker(ctx._repo, data)

def allsuccessors(obsstore, nodes, ignoreflags=0):
    """Yield node for every successor of <nodes>.

    Some successors may be unknown locally.

    This is a linear yield unsuited to detecting split changesets. It includes
    initial nodes too."""
    remaining = set(nodes)
    seen = set(remaining)
    while remaining:
        current = remaining.pop()
        yield current
        for mark in obsstore.successors.get(current, ()):
            # ignore marker flagged with specified flag
            if mark[2] & ignoreflags:
                continue
            for suc in mark[1]:
                if suc not in seen:
                    seen.add(suc)
                    remaining.add(suc)
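
# Illustrative walk (assumed markers (A, (B,)) and (B, (C,)) in the store):
# allsuccessors(obsstore, [A]) yields A, B and C, because the traversal
# transitively follows successor edges and includes the initial nodes.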

def allprecursors(obsstore, nodes, ignoreflags=0):
    """Yield node for every precursor of <nodes>.

    Some precursors may be unknown locally.

    This is a linear yield unsuited to detecting folded changesets. It
    includes initial nodes too."""

    remaining = set(nodes)
    seen = set(remaining)
    while remaining:
        current = remaining.pop()
        yield current
        for mark in obsstore.precursors.get(current, ()):
            # ignore marker flagged with specified flag
            if mark[2] & ignoreflags:
                continue
            # mark[0] is the precursor recorded by this marker
            prec = mark[0]
            if prec not in seen:
                seen.add(prec)
                remaining.add(prec)

def foreground(repo, nodes):
    """return all nodes in the "foreground" of other nodes

    The foreground of a revision is anything reachable using parent -> children
    or precursor -> successor relation. It is very similar to "descendant" but
    augmented with obsolescence information.

    Beware that possible obsolescence cycles may result in complex situations.
    """
    repo = repo.unfiltered()
    foreground = set(repo.set('%ln::', nodes))
    if repo.obsstore:
        # We only need this complicated logic if there is obsolescence
        # XXX will probably deserve an optimised revset.
        nm = repo.changelog.nodemap
        plen = -1
        # compute the whole set of successors or descendants
        while len(foreground) != plen:
            plen = len(foreground)
            succs = set(c.node() for c in foreground)
            mutable = [c.node() for c in foreground if c.mutable()]
            succs.update(allsuccessors(repo.obsstore, mutable))
            known = (n for n in succs if n in nm)
            foreground = set(repo.set('%ln::', known))
    return set(c.node() for c in foreground)


548 | def successorssets(repo, initialnode, cache=None): |
|
554 | def successorssets(repo, initialnode, cache=None): | |
549 | """Return all set of successors of initial nodes |
|
555 | """Return all set of successors of initial nodes | |
550 |
|
556 | |||
551 | The successors set of a changeset A are a group of revisions that succeed |
|
557 | The successors set of a changeset A are a group of revisions that succeed | |
552 | A. It succeeds A as a consistent whole, each revision being only a partial |
|
558 | A. It succeeds A as a consistent whole, each revision being only a partial | |
553 | replacement. The successors set contains non-obsolete changesets only. |
|
559 | replacement. The successors set contains non-obsolete changesets only. | |
554 |
|
560 | |||
555 | This function returns the full list of successor sets which is why it |
|
561 | This function returns the full list of successor sets which is why it | |
556 | returns a list of tuples and not just a single tuple. Each tuple is a valid |
|
562 | returns a list of tuples and not just a single tuple. Each tuple is a valid | |
557 | successors set. Not that (A,) may be a valid successors set for changeset A |
|
563 | successors set. Not that (A,) may be a valid successors set for changeset A | |
558 | (see below). |
|
564 | (see below). | |
559 |
|
565 | |||
560 | In most cases, a changeset A will have a single element (e.g. the changeset |
|
566 | In most cases, a changeset A will have a single element (e.g. the changeset | |
561 | A is replaced by A') in its successors set. Though, it is also common for a |
|
567 | A is replaced by A') in its successors set. Though, it is also common for a | |
562 | changeset A to have no elements in its successor set (e.g. the changeset |
|
568 | changeset A to have no elements in its successor set (e.g. the changeset | |
563 | has been pruned). Therefore, the returned list of successors sets will be |
|
569 | has been pruned). Therefore, the returned list of successors sets will be | |
564 | [(A',)] or [], respectively. |
|
570 | [(A',)] or [], respectively. | |
565 |
|
571 | |||
566 | When a changeset A is split into A' and B', however, it will result in a |
|
572 | When a changeset A is split into A' and B', however, it will result in a | |
567 | successors set containing more than a single element, i.e. [(A',B')]. |
|
573 | successors set containing more than a single element, i.e. [(A',B')]. | |
568 | Divergent changesets will result in multiple successors sets, i.e. [(A',), |
|
574 | Divergent changesets will result in multiple successors sets, i.e. [(A',), | |
569 | (A'')]. |
|
575 | (A'')]. | |
570 |
|
576 | |||
571 | If a changeset A is not obsolete, then it will conceptually have no |
|
577 | If a changeset A is not obsolete, then it will conceptually have no | |
572 | successors set. To distinguish this from a pruned changeset, the successor |
|
578 | successors set. To distinguish this from a pruned changeset, the successor | |
573 | set will only contain itself, i.e. [(A,)]. |
|
579 | set will only contain itself, i.e. [(A,)]. | |
574 |
|
580 | |||
575 | Finally, successors unknown locally are considered to be pruned (obsoleted |
|
581 | Finally, successors unknown locally are considered to be pruned (obsoleted | |
576 | without any successors). |
|
582 | without any successors). | |
577 |
|
583 | |||
578 | The optional `cache` parameter is a dictionary that may contain precomputed |
|
584 | The optional `cache` parameter is a dictionary that may contain precomputed | |
579 | successors sets. It is meant to reuse the computation of a previous call to |
|
585 | successors sets. It is meant to reuse the computation of a previous call to | |
580 | `successorssets` when multiple calls are made at the same time. The cache |
|
586 | `successorssets` when multiple calls are made at the same time. The cache | |
581 | dictionary is updated in place. The caller is responsible for its live |
|
587 | dictionary is updated in place. The caller is responsible for its live | |
582 | spawn. Code that makes multiple calls to `successorssets` *must* use this |
|
588 | spawn. Code that makes multiple calls to `successorssets` *must* use this | |
583 | cache mechanism or suffer terrible performances. |
|
589 | cache mechanism or suffer terrible performances. | |
584 |
|
590 | |||
585 | """ |
|
591 | """ | |
586 |
|
592 | |||
587 | succmarkers = repo.obsstore.successors |
|
593 | succmarkers = repo.obsstore.successors | |
588 |
|
594 | |||
589 | # Stack of nodes we search successors sets for |
|
595 | # Stack of nodes we search successors sets for | |
590 | toproceed = [initialnode] |
|
596 | toproceed = [initialnode] | |
591 | # set version of above list for fast loop detection |
|
597 | # set version of above list for fast loop detection | |
592 | # element added to "toproceed" must be added here |
|
598 | # element added to "toproceed" must be added here | |
593 | stackedset = set(toproceed) |
|
599 | stackedset = set(toproceed) | |
594 | if cache is None: |
|
600 | if cache is None: | |
595 | cache = {} |
|
601 | cache = {} | |
596 |
|
602 | |||
597 | # This while loop is the flattened version of a recursive search for |
|
603 | # This while loop is the flattened version of a recursive search for | |
598 | # successors sets |
|
604 | # successors sets | |
599 | # |
|
605 | # | |
600 | # def successorssets(x): |
|
606 | # def successorssets(x): | |
601 | # successors = directsuccessors(x) |
|
607 | # successors = directsuccessors(x) | |
602 | # ss = [[]] |
|
608 | # ss = [[]] | |
603 | # for succ in directsuccessors(x): |
|
609 | # for succ in directsuccessors(x): | |
604 | # # product as in itertools cartesian product |
|
610 | # # product as in itertools cartesian product | |
605 | # ss = product(ss, successorssets(succ)) |
|
611 | # ss = product(ss, successorssets(succ)) | |
606 | # return ss |
|
612 | # return ss | |
607 | # |
|
613 | # | |
608 | # But we can not use plain recursive calls here: |
|
614 | # But we can not use plain recursive calls here: | |
609 | # - that would blow the python call stack |
|
615 | # - that would blow the python call stack | |
610 | # - obsolescence markers may have cycles, we need to handle them. |
|
616 | # - obsolescence markers may have cycles, we need to handle them. | |
611 | # |
|
617 | # | |
612 | # The `toproceed` list act as our call stack. Every node we search |
|
618 | # The `toproceed` list act as our call stack. Every node we search | |
613 | # successors set for are stacked there. |
|
619 | # successors set for are stacked there. | |
614 | # |
|
620 | # | |
615 | # The `stackedset` is set version of this stack used to check if a node is |
|
621 | # The `stackedset` is set version of this stack used to check if a node is | |
616 | # already stacked. This check is used to detect cycles and prevent infinite |
|
622 | # already stacked. This check is used to detect cycles and prevent infinite | |
617 | # loop. |
|
623 | # loop. | |
618 | # |
|
624 | # | |
619 | # successors set of all nodes are stored in the `cache` dictionary. |
|
625 | # successors set of all nodes are stored in the `cache` dictionary. | |
620 | # |
|
626 | # | |
621 | # After this while loop ends we use the cache to return the successors sets |
|
627 | # After this while loop ends we use the cache to return the successors sets | |
622 | # for the node requested by the caller. |
|
628 | # for the node requested by the caller. | |
623 | while toproceed: |
|
629 | while toproceed: | |
624 | # Every iteration tries to compute the successors sets of the topmost |
|
630 | # Every iteration tries to compute the successors sets of the topmost | |
625 | # node of the stack: CURRENT. |
|
631 | # node of the stack: CURRENT. | |
626 | # |
|
632 | # | |
627 | # There are four possible outcomes: |
|
633 | # There are four possible outcomes: | |
628 | # |
|
634 | # | |
629 | # 1) We already know the successors sets of CURRENT: |
|
635 | # 1) We already know the successors sets of CURRENT: | |
630 | # -> mission accomplished, pop it from the stack. |
|
636 | # -> mission accomplished, pop it from the stack. | |
631 | # 2) Node is not obsolete: |
|
637 | # 2) Node is not obsolete: | |
632 | # -> the node is its own successors sets. Add it to the cache. |
|
638 | # -> the node is its own successors sets. Add it to the cache. | |
633 | # 3) We do not know successors set of direct successors of CURRENT: |
|
639 | # 3) We do not know successors set of direct successors of CURRENT: | |
634 | # -> We add those successors to the stack. |
|
640 | # -> We add those successors to the stack. | |
635 | # 4) We know successors sets of all direct successors of CURRENT: |
|
641 | # 4) We know successors sets of all direct successors of CURRENT: | |
636 | # -> We can compute CURRENT successors set and add it to the |
|
642 | # -> We can compute CURRENT successors set and add it to the | |
637 | # cache. |
|
643 | # cache. | |
638 | # |
|
644 | # | |
639 | current = toproceed[-1] |
|
645 | current = toproceed[-1] | |
640 | if current in cache: |
|
646 | if current in cache: | |
641 | # case (1): We already know the successors sets |
|
647 | # case (1): We already know the successors sets | |
642 | stackedset.remove(toproceed.pop()) |
|
648 | stackedset.remove(toproceed.pop()) | |
643 | elif current not in succmarkers: |
|
649 | elif current not in succmarkers: | |
644 | # case (2): The node is not obsolete. |
|
650 | # case (2): The node is not obsolete. | |
645 | if current in repo: |
|
651 | if current in repo: | |
646 | # We have a valid last successors. |
|
652 | # We have a valid last successors. | |
647 | cache[current] = [(current,)] |
|
653 | cache[current] = [(current,)] | |
648 | else: |
|
654 | else: | |
649 | # Final obsolete version is unknown locally. |
|
655 | # Final obsolete version is unknown locally. | |
650 | # Do not count that as a valid successors |
|
656 | # Do not count that as a valid successors | |
651 | cache[current] = [] |
|
657 | cache[current] = [] | |
652 | else: |
|
658 | else: | |
653 | # cases (3) and (4) |
|
659 | # cases (3) and (4) | |
654 | # |
|
660 | # | |
655 | # We proceed in two phases. Phase 1 aims to distinguish case (3) |
|
661 | # We proceed in two phases. Phase 1 aims to distinguish case (3) | |
656 | # from case (4): |
|
662 | # from case (4): | |
657 | # |
|
663 | # | |
658 | # For each direct successors of CURRENT, we check whether its |
|
664 | # For each direct successors of CURRENT, we check whether its | |
659 | # successors sets are known. If they are not, we stack the |
|
665 | # successors sets are known. If they are not, we stack the | |
660 | # unknown node and proceed to the next iteration of the while |
|
666 | # unknown node and proceed to the next iteration of the while | |
661 | # loop. (case 3) |
|
667 | # loop. (case 3) | |
662 | # |
|
668 | # | |
663 | # During this step, we may detect obsolescence cycles: a node |
|
669 | # During this step, we may detect obsolescence cycles: a node | |
664 | # with unknown successors sets but already in the call stack. |
|
670 | # with unknown successors sets but already in the call stack. | |
665 | # In such a situation, we arbitrary set the successors sets of |
|
671 | # In such a situation, we arbitrary set the successors sets of | |
666 | # the node to nothing (node pruned) to break the cycle. |
|
672 | # the node to nothing (node pruned) to break the cycle. | |
667 | # |
|
673 | # | |
668 | # If no break was encountered we proceed to phase 2. |
|
674 | # If no break was encountered we proceed to phase 2. | |
669 | # |
|
675 | # | |
670 | # Phase 2 computes successors sets of CURRENT (case 4); see details |
|
676 | # Phase 2 computes successors sets of CURRENT (case 4); see details | |
671 | # in phase 2 itself. |
|
677 | # in phase 2 itself. | |
672 | # |
|
678 | # | |
673 | # Note the two levels of iteration in each phase. |
|
679 | # Note the two levels of iteration in each phase. | |
674 | # - The first one handles obsolescence markers using CURRENT as |
|
680 | # - The first one handles obsolescence markers using CURRENT as | |
675 | # precursor (successors markers of CURRENT). |
|
681 | # precursor (successors markers of CURRENT). | |
            #
            #   Having multiple entries here means divergence.
            #
            # - The second one handles the successors defined in each marker.
            #
            #   Having none means a pruned node, multiple successors means a
            #   split, and a single successor is a standard replacement.
            #
            for mark in sorted(succmarkers[current]):
                for suc in mark[1]:
                    if suc not in cache:
                        if suc in stackedset:
                            # cycle breaking
                            cache[suc] = []
                        else:
                            # case (3): if we have not computed the successors
                            # sets of one of those successors, we add it to
                            # the `toproceed` stack and stop all work for this
                            # iteration.
                            toproceed.append(suc)
                            stackedset.add(suc)
                            break
                else:
                    continue
                break
            else:
                # case (4): we know the successors sets of all direct
                # successors
                #
                # The successors set contributed by each marker depends on
                # the successors sets of all of its "successors" nodes.
                #
                # Each distinct marker is a divergence in the obsolescence
                # history. It contributes successors sets distinct from
                # those of other markers.
                #
                # Within a marker, a successor may have divergent successors
                # sets. In such a case, the marker will contribute multiple
                # divergent successors sets. If multiple successors have
                # divergent successors sets, a Cartesian product is used.
                #
                # At the end we post-process successors sets to remove
                # duplicated entries and successors sets that are strict
                # subsets of another one.
                succssets = []
                for mark in sorted(succmarkers[current]):
                    # successors sets contributed by this marker
                    markss = [[]]
                    for suc in mark[1]:
                        # Cartesian product with previous successors
                        productresult = []
                        for prefix in markss:
                            for suffix in cache[suc]:
                                newss = list(prefix)
                                for part in suffix:
                                    # do not duplicate entries in a
                                    # successors set; the first entry wins.
                                    if part not in newss:
                                        newss.append(part)
                                productresult.append(newss)
                        markss = productresult
                    succssets.extend(markss)
                # remove duplicates and subsets
                seen = []
                final = []
                candidate = sorted(((set(s), s) for s in succssets if s),
                                   key=lambda x: len(x[1]), reverse=True)
                for setversion, listversion in candidate:
                    for seenset in seen:
                        if setversion.issubset(seenset):
                            break
                    else:
                        final.append(listversion)
                        seen.append(setversion)
                final.reverse()  # put small successors sets first
                cache[current] = final
    return cache[initialnode]
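
The Cartesian-product step above is the subtle part, so here is a minimal, self-contained sketch (toy data only, not the Mercurial API) of how a single marker combines the successors sets of its successors:

# Suppose a marker rewrites X into successors B and C, where B's
# successors sets are divergent ([[B1], [B2]]) and C's is just [[C1]].
cache = {'B': [['B1'], ['B2']], 'C': [['C1']]}
marker_successors = ['B', 'C']

markss = [[]]
for suc in marker_successors:
    productresult = []
    for prefix in markss:
        for suffix in cache[suc]:
            newss = list(prefix)
            for part in suffix:
                if part not in newss:  # first entry wins
                    newss.append(part)
            productresult.append(newss)
    markss = productresult

print(markss)  # [['B1', 'C1'], ['B2', 'C1']]: one set per divergent branch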

def _knownrevs(repo, nodes):
    """Yield the revision numbers of the known nodes passed as parameters.

    Unknown nodes are silently ignored."""
    torev = repo.changelog.nodemap.get
    for n in nodes:
        rev = torev(n)
        if rev is not None:
            yield rev
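
As a rough standalone illustration of the same "silently skip unknowns" pattern, with a plain dict standing in for `repo.changelog.nodemap` (hypothetical toy data):

nodemap = {'n1': 0, 'n2': 1}          # node -> revision number

def knownrevs(nodes):
    torev = nodemap.get               # bind the lookup once, outside the loop
    for n in nodes:
        rev = torev(n)
        if rev is not None:           # unknown nodes map to None
            yield rev

print(list(knownrevs(['n1', 'missing', 'n2'])))  # [0, 1]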

# mapping of 'set-name' -> <function to compute this set>
cachefuncs = {}
def cachefor(name):
    """Decorator to register a function as computing the cache for a set"""
    def decorator(func):
        assert name not in cachefuncs
        cachefuncs[name] = func
        return func
    return decorator

def getrevs(repo, name):
    """Return the set of revisions that belong to the <name> set.

    Such an access may compute the set and cache it for future use."""
    repo = repo.unfiltered()
    if not repo.obsstore:
        return ()
    if name not in repo.obsstore.caches:
        repo.obsstore.caches[name] = cachefuncs[name](repo)
    return repo.obsstore.caches[name]
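
Together, `cachefor` and `getrevs` form a small registry-plus-lazy-cache pattern. A self-contained sketch of the same idea, with hypothetical names (`getset`, `_computesquares`) standing in for the repository machinery:

cachefuncs = {}                        # 'set-name' -> computing function
caches = {}                            # 'set-name' -> computed result

def cachefor(name):
    def decorator(func):
        assert name not in cachefuncs
        cachefuncs[name] = func
        return func
    return decorator

@cachefor('squares')
def _computesquares(data):
    return set(x * x for x in data)

def getset(name, data):
    # compute on first access, then serve from the cache
    if name not in caches:
        caches[name] = cachefuncs[name](data)
    return caches[name]

print(getset('squares', [1, 2, 3]))    # {1, 4, 9}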

# To keep things simple, we need to invalidate the obsolescence caches when:
#
# - a new changeset is added
# - the public phase is changed
# - obsolescence markers are added
# - strip is used on a repo
def clearobscaches(repo):
    """Remove all obsolescence-related caches from a repo

    This removes all caches in the obsstore if the obsstore already exists on
    the repo.

    (We could be smarter here given the exact event that triggered the cache
    clearing)"""
    # only clear the caches if there is obsstore data in this repo
    if 'obsstore' in repo._filecache:
        repo.obsstore.caches.clear()

@cachefor('obsolete')
def _computeobsoleteset(repo):
    """the set of obsolete revisions"""
    obs = set()
    getrev = repo.changelog.nodemap.get
    getphase = repo._phasecache.phase
    for n in repo.obsstore.successors:
        rev = getrev(n)
        # a non-zero phase means the revision is mutable (draft or secret);
        # public changesets are never considered obsolete
        if rev is not None and getphase(repo, rev):
            obs.add(rev)
    return obs

@cachefor('unstable')
def _computeunstableset(repo):
    """the set of non-obsolete revisions with obsolete parents"""
    # revset is not efficient enough here
    # we do (obsolete()::) - obsolete() by hand
    obs = getrevs(repo, 'obsolete')
    if not obs:
        return set()
    cl = repo.changelog
    return set(r for r in cl.descendants(obs) if r not in obs)
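
The "by hand" computation works because revision numbers are topologically sorted, so one forward pass finds all descendants. A toy sketch of that idea with a hypothetical parents table (not the changelog API):

# hypothetical tiny history: rev -> parent revs (numbers grow child-ward)
parents = {0: [], 1: [0], 2: [1], 3: [1], 4: [2]}
obs = {1}                              # pretend rev 1 is obsolete

def descendants(revs):
    seen = set(revs)
    out = set()
    for r in sorted(parents):          # topological order
        if r not in revs and any(p in seen for p in parents[r]):
            seen.add(r)
            out.add(r)
    return out

print(descendants(obs) - obs)          # {2, 3, 4}: the "unstable" revs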

@cachefor('suspended')
def _computesuspendedset(repo):
    """the set of obsolete revisions with non-obsolete descendants"""
    suspended = repo.changelog.ancestors(getrevs(repo, 'unstable'))
    return set(r for r in getrevs(repo, 'obsolete') if r in suspended)

@cachefor('extinct')
def _computeextinctset(repo):
    """the set of obsolete revisions without non-obsolete descendants"""
    return getrevs(repo, 'obsolete') - getrevs(repo, 'suspended')
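
The obsolete-derived sets fit together by simple set algebra; with made-up revision numbers:

obsolete  = {2, 3, 5}                  # rewritten or pruned revisions
unstable  = {4}                        # live revisions built on top of them
suspended = {2, 3}                     # obsolete revs with live descendants
extinct   = obsolete - suspended
print(extinct)                         # {5}: safe to hide entirely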


@cachefor('bumped')
def _computebumpedset(repo):
    """the set of revs trying to obsolete public revisions"""
    bumped = set()
    # util function (avoids attribute lookups in the loop)
    phase = repo._phasecache.phase # would be faster to grab the full list
    public = phases.public
    cl = repo.changelog
    torev = cl.nodemap.get
    obs = getrevs(repo, 'obsolete')
    for rev in repo:
        # We only evaluate mutable, non-obsolete revisions
        if (public < phase(repo, rev)) and (rev not in obs):
            node = cl.node(rev)
            # (future) A cache of precursors may be worth it if splits are
            # very common
            for pnode in allprecursors(repo.obsstore, [node],
                                       ignoreflags=bumpedfix):
                prev = torev(pnode) # unfiltered! but so is phasecache
                if (prev is not None) and (phase(repo, prev) <= public):
                    # we have a public precursor
                    bumped.add(rev)
                    break # Next draft!
    return bumped
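
The bumped test boils down to "a mutable revision with at least one public precursor". A toy sketch of that walk, ignoring the `bumpedfix` flag and assuming an acyclic precursor table (hypothetical data, not the hg API):

PUBLIC, DRAFT = 0, 1
phase = {'A': PUBLIC, 'A1': DRAFT, 'B': DRAFT}
precursors = {'A1': ['A'], 'B': []}     # A1 rewrites the public A

def isbumped(node):
    stack = list(precursors.get(node, ()))
    while stack:
        p = stack.pop()
        if phase.get(p, DRAFT) == PUBLIC:
            return True                 # found a public precursor
        stack.extend(precursors.get(p, ()))
    return False

print(isbumped('A1'), isbumped('B'))    # True False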

@cachefor('divergent')
def _computedivergentset(repo):
    """the set of revs that compete to be the final successors of some
    revision"""
    divergent = set()
    obsstore = repo.obsstore
    newermap = {}
    for ctx in repo.set('(not public()) - obsolete()'):
        mark = obsstore.precursors.get(ctx.node(), ())
        toprocess = set(mark)
        while toprocess:
            prec = toprocess.pop()[0]
            if prec not in newermap:
                successorssets(repo, prec, newermap)
            newer = [n for n in newermap[prec] if n]
            if len(newer) > 1:
                divergent.add(ctx.rev())
                break
            toprocess.update(obsstore.precursors.get(prec, ()))
    return divergent
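
Divergence, by contrast, asks whether any transitive precursor still has more than one live successors set. A simplified standalone sketch with precomputed toy data (the real code walks marker tuples and calls `successorssets` lazily):

newermap = {'X': [['X1'], ['X2']]}      # X was rewritten two different ways
precursors = {'X1': ['X'], 'X2': ['X']}

def isdivergent(node):
    toprocess = set(precursors.get(node, ()))
    while toprocess:
        prec = toprocess.pop()
        if len([s for s in newermap.get(prec, []) if s]) > 1:
            return True                 # prec has competing successors sets
        toprocess.update(precursors.get(prec, ()))
    return False

print(isdivergent('X1'))                # True: ['X2'] competes with it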


def createmarkers(repo, relations, flag=0, date=None, metadata=None):
    """Add obsolete markers between changesets in a repo

    <relations> must be an iterable of (<old>, (<new>, ...)[, {metadata}])
    tuples. `old` and `news` are changectx instances. The optional per-tuple
    dictionary holds metadata for that marker only; it is merged with the
    global metadata specified through the `metadata` argument of this
    function.

    Trying to obsolete a public changeset will raise an exception.

    The current user and date are used unless specified otherwise in the
    metadata attribute.

    This function operates within a transaction of its own, but does
    not take any lock on the repo.
    """
    # prepare metadata
    if metadata is None:
        metadata = {}
    if 'user' not in metadata:
        metadata['user'] = repo.ui.username()
    tr = repo.transaction('add-obsolescence-marker')
    try:
        for rel in relations:
            prec = rel[0]
            sucs = rel[1]
            localmetadata = metadata.copy()
            if 2 < len(rel):
                localmetadata.update(rel[2])

            if not prec.mutable():
                raise util.Abort("cannot obsolete immutable changeset: %s"
                                 % prec)
            nprec = prec.node()
            nsucs = tuple(s.node() for s in sucs)
            npare = None
            if not nsucs:
                # a prune marker records the parents of the pruned changeset
                npare = tuple(p.node() for p in prec.parents())
            if nprec in nsucs:
                raise util.Abort("changeset %s cannot obsolete itself" % prec)
            repo.obsstore.create(tr, nprec, nsucs, flag, parents=npare,
                                 date=date, metadata=localmetadata)
            repo.filteredrevcache.clear()
        tr.close()
    finally:
        tr.release()
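
A hypothetical call site, assuming `repo` is a locked repository and `old`, `new`, and `pruned` are changectx instances:

createmarkers(repo, [
    (old, (new,)),                      # old was rewritten into new
    (pruned, (), {'note': 'dead end'}), # pruned dropped, per-marker metadata
], metadata={'user': 'alice'})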