# obsolete.py - obsolete markers handling
#
# Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
#                Logilab SA        <contact@logilab.fr>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""Obsolete marker handling

An obsolete marker maps an old changeset to a list of new
changesets. If the list of new changesets is empty, the old changeset
is said to be "killed". Otherwise, the old changeset is being
"replaced" by the new changesets.

Obsolete markers can be used to record and distribute changeset graph
transformations performed by history rewrite operations, and help
building new tools to reconcile conflicting rewrite actions. To
facilitate conflict resolution, markers include various annotations
besides old and new changeset identifiers, such as creation date or
author name.

The old obsoleted changeset is called a "predecessor" and possible
replacements are called "successors". Markers that use changeset X as
a predecessor are called "successor markers of X" because they hold
information about the successors of X. Markers that use changeset Y as
a successor are called "predecessor markers of Y" because they hold
information about the predecessors of Y.

Examples:

- When changeset A is replaced by changeset A', one marker is stored:

    (A, (A',))

- When changesets A and B are folded into a new changeset C, two markers are
  stored:

    (A, (C,)) and (B, (C,))

- When changeset A is simply "pruned" from the graph, a marker is created:

    (A, ())

- When changeset A is split into B and C, a single marker is used:

    (A, (B, C))

  We use a single marker to distinguish the "split" case from the "divergence"
  case. If two independent operations rewrite the same changeset A into A' and
  A'', we have an error case: divergent rewriting. We can detect it because
  two markers will be created independently:

    (A, (B,)) and (A, (C,))

Format
------

Markers are stored in an append-only file stored in
'.hg/store/obsstore'.

The file starts with a version header:

- 1 unsigned byte: version number, starting at zero.

The header is followed by the markers. The marker format depends on the
version. See the comment associated with each format for details.

"""
from __future__ import absolute_import

import errno
import struct

from .i18n import _
from . import (
    encoding,
    error,
    node,
    obsutil,
    phases,
    policy,
    pycompat,
    util,
)
from .utils import dateutil

parsers = policy.importmod(r'parsers')

_pack = struct.pack
_unpack = struct.unpack
_calcsize = struct.calcsize
propertycache = util.propertycache

# the obsolete feature is not mature enough to be enabled by default.
# you have to rely on third party extensions to enable this.
_enabled = False

# Options for obsolescence
createmarkersopt = 'createmarkers'
allowunstableopt = 'allowunstable'
exchangeopt = 'exchange'

def _getoptionvalue(repo, option):
    """Returns True if the given repository has the given obsolete option
    enabled.
    """
    configkey = 'evolution.%s' % option
    newconfig = repo.ui.configbool('experimental', configkey)

    # Return the value only if defined
    if newconfig is not None:
        return newconfig

    # Fallback on generic option
    try:
        return repo.ui.configbool('experimental', 'evolution')
    except (error.ConfigError, AttributeError):
        # Fallback on old-fashioned config
        # inconsistent config: experimental.evolution
        result = set(repo.ui.configlist('experimental', 'evolution'))

        if 'all' in result:
            return True

        # For migration purposes, temporarily return true if the config hasn't
        # been set but _enabled is true.
        if len(result) == 0 and _enabled:
            return True

        # Temporary hack for next check
        newconfig = repo.ui.config('experimental', 'evolution.createmarkers')
        if newconfig:
            result.add('createmarkers')

        return option in result

def getoptions(repo):
    """Returns dicts showing state of obsolescence features."""

    createmarkersvalue = _getoptionvalue(repo, createmarkersopt)
    unstablevalue = _getoptionvalue(repo, allowunstableopt)
    exchangevalue = _getoptionvalue(repo, exchangeopt)

    # createmarkers must be enabled if other options are enabled
    if ((unstablevalue or exchangevalue) and not createmarkersvalue):
        raise error.Abort(_("'createmarkers' obsolete option must be enabled "
                            "if other obsolete options are enabled"))

    return {
        createmarkersopt: createmarkersvalue,
        allowunstableopt: unstablevalue,
        exchangeopt: exchangevalue,
    }

def isenabled(repo, option):
    """Returns True if the given repository has the given obsolete option
    enabled.
    """
    return getoptions(repo)[option]
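
# Illustrative use (an example added for exposition, not part of the
# upstream module): a history-rewriting caller can refuse to run when
# markers cannot be recorded:
#
#   if not isenabled(repo, createmarkersopt):
#       raise error.Abort(_('obsolescence markers are disabled'))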

# Creating aliases for marker flags because evolve extension looks for
# bumpedfix in obsolete.py
bumpedfix = obsutil.bumpedfix
usingsha256 = obsutil.usingsha256

## Parsing and writing of version "0"
#
# The header is followed by the markers. Each marker is made of:
#
# - 1 uint8 : number of new changesets "N", can be zero.
#
# - 1 uint32: metadata size "M" in bytes.
#
# - 1 byte: a bit field. It is reserved for flags used in common
#   obsolete marker operations, to avoid repeated decoding of metadata
#   entries.
#
# - 20 bytes: obsoleted changeset identifier.
#
# - N*20 bytes: new changesets identifiers.
#
# - M bytes: metadata as a sequence of nul-terminated strings. Each
#   string contains a key and a value, separated by a colon ':', without
#   additional encoding. Keys cannot contain '\0' or ':' and values
#   cannot contain '\0'.
_fm0version = 0
_fm0fixed = '>BIB20s'
_fm0node = '20s'
_fm0fsize = _calcsize(_fm0fixed)
_fm0fnodesize = _calcsize(_fm0node)
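
# Illustrative sketch (added for exposition, not used by the code below):
# the fixed part of a version-0 marker is a single struct record,
# big-endian uint8 "N", uint32 "M", uint8 flags, 20-byte predecessor.
#
#   fixed = _pack(_fm0fixed, 1, 0, 0, b'\x11' * 20)
#   numsuc, mdsize, flags, pre = _unpack(_fm0fixed, fixed)
#   assert (numsuc, mdsize, flags, pre) == (1, 0, 0, b'\x11' * 20)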

def _fm0readmarkers(data, off, stop):
    # Loop on markers
    while off < stop:
        # read fixed part
        cur = data[off:off + _fm0fsize]
        off += _fm0fsize
        numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
        # read replacement
        sucs = ()
        if numsuc:
            s = (_fm0fnodesize * numsuc)
            cur = data[off:off + s]
            sucs = _unpack(_fm0node * numsuc, cur)
            off += s
        # read metadata
        # (metadata will be decoded on demand)
        metadata = data[off:off + mdsize]
        if len(metadata) != mdsize:
            raise error.Abort(_('parsing obsolete marker: metadata is too '
                                'short, %d bytes expected, got %d')
                              % (mdsize, len(metadata)))
        off += mdsize
        metadata = _fm0decodemeta(metadata)
        try:
            when, offset = metadata.pop('date', '0 0').split(' ')
            date = float(when), int(offset)
        except ValueError:
            date = (0., 0)
        parents = None
        if 'p2' in metadata:
            parents = (metadata.pop('p1', None), metadata.pop('p2', None))
        elif 'p1' in metadata:
            parents = (metadata.pop('p1', None),)
        elif 'p0' in metadata:
            parents = ()
        if parents is not None:
            try:
                parents = tuple(node.bin(p) for p in parents)
                # if parent content is not a nodeid, drop the data
                for p in parents:
                    if len(p) != 20:
                        parents = None
                        break
            except TypeError:
                # if content cannot be translated to nodeid drop the data.
                parents = None

        metadata = tuple(sorted(metadata.iteritems()))

        yield (pre, sucs, flags, metadata, date, parents)

def _fm0encodeonemarker(marker):
    pre, sucs, flags, metadata, date, parents = marker
    if flags & usingsha256:
        raise error.Abort(_('cannot handle sha256 with old obsstore format'))
    metadata = dict(metadata)
    time, tz = date
    metadata['date'] = '%r %i' % (time, tz)
    if parents is not None:
        if not parents:
            # mark that we explicitly recorded no parents
            metadata['p0'] = ''
        for i, p in enumerate(parents, 1):
            metadata['p%i' % i] = node.hex(p)
    metadata = _fm0encodemeta(metadata)
    numsuc = len(sucs)
    format = _fm0fixed + (_fm0node * numsuc)
    data = [numsuc, len(metadata), flags, pre]
    data.extend(sucs)
    return _pack(format, *data) + metadata

def _fm0encodemeta(meta):
    """Return encoded metadata string to string mapping.

    Assume no ':' in key and no '\0' in both key and value."""
    for key, value in meta.iteritems():
        if ':' in key or '\0' in key:
            raise ValueError("':' and '\0' are forbidden in metadata key")
        if '\0' in value:
            raise ValueError("'\0' is forbidden in metadata value")
    return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])

def _fm0decodemeta(data):
    """Return string to string dictionary from encoded version."""
    d = {}
    for l in data.split('\0'):
        if l:
            key, value = l.split(':')
            d[key] = value
    return d
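
# Illustrative round-trip of the version-0 metadata encoding (an example
# added for exposition; keys and values are made up):
#
#   blob = _fm0encodemeta({'user': 'alice', 'note': 'amended'})
#   assert _fm0decodemeta(blob) == {'user': 'alice', 'note': 'amended'}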

## Parsing and writing of version "1"
#
# The header is followed by the markers. Each marker is made of:
#
# - uint32: total size of the marker (including this field)
#
# - float64: date in seconds since epoch
#
# - int16: timezone offset in minutes
#
# - uint16: a bit field. It is reserved for flags used in common
#   obsolete marker operations, to avoid repeated decoding of metadata
#   entries.
#
# - uint8: number of successors "N", can be zero.
#
# - uint8: number of parents "P", can be zero.
#
#     0: parents data stored but no parent,
#     1: one parent stored,
#     2: two parents stored,
#     3: no parent data stored
#
# - uint8: number of metadata entries M
#
# - 20 or 32 bytes: predecessor changeset identifier.
#
# - N*(20 or 32) bytes: successor changeset identifiers.
#
# - P*(20 or 32) bytes: parents of the predecessor changeset.
#
# - M*(uint8, uint8): size of all metadata entries (key and value)
#
# - remaining bytes: the metadata, each (key, value) pair after the other.
_fm1version = 1
_fm1fixed = '>IdhHBBB20s'
_fm1nodesha1 = '20s'
_fm1nodesha256 = '32s'
_fm1nodesha1size = _calcsize(_fm1nodesha1)
_fm1nodesha256size = _calcsize(_fm1nodesha256)
_fm1fsize = _calcsize(_fm1fixed)
_fm1parentnone = 3
_fm1parentshift = 14
_fm1parentmask = (_fm1parentnone << _fm1parentshift)
_fm1metapair = 'BB'
_fm1metapairsize = _calcsize(_fm1metapair)

def _fm1purereadmarkers(data, off, stop):
    # make some global constants local for performance
    noneflag = _fm1parentnone
    sha2flag = usingsha256
    sha1size = _fm1nodesha1size
    sha2size = _fm1nodesha256size
    sha1fmt = _fm1nodesha1
    sha2fmt = _fm1nodesha256
    metasize = _fm1metapairsize
    metafmt = _fm1metapair
    fsize = _fm1fsize
    unpack = _unpack

    # Loop on markers
    ufixed = struct.Struct(_fm1fixed).unpack

    while off < stop:
        # read fixed part
        o1 = off + fsize
        t, secs, tz, flags, numsuc, numpar, nummeta, prec = ufixed(data[off:o1])

        if flags & sha2flag:
            # FIXME: prec was read as a SHA1, needs to be amended

            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha2size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha2size * numsuc
                sucs = unpack(sha2fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha2size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha2size * numpar
                parents = unpack(sha2fmt * numpar, data[o2:o3])
        else:
            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha1size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha1size * numsuc
                sucs = unpack(sha1fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha1size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha1size * numpar
                parents = unpack(sha1fmt * numpar, data[o2:o3])

        # read metadata
        off = o3 + metasize * nummeta
        metapairsize = unpack('>' + (metafmt * nummeta), data[o3:off])
        metadata = []
        for idx in pycompat.xrange(0, len(metapairsize), 2):
            o1 = off + metapairsize[idx]
            o2 = o1 + metapairsize[idx + 1]
            metadata.append((data[off:o1], data[o1:o2]))
            off = o2

        yield (prec, sucs, flags, tuple(metadata), (secs, tz * 60), parents)

def _fm1encodeonemarker(marker):
    pre, sucs, flags, metadata, date, parents = marker
    # determine node size
    _fm1node = _fm1nodesha1
    if flags & usingsha256:
        _fm1node = _fm1nodesha256
    numsuc = len(sucs)
    numextranodes = numsuc
    if parents is None:
        numpar = _fm1parentnone
    else:
        numpar = len(parents)
        numextranodes += numpar
    formatnodes = _fm1node * numextranodes
    formatmeta = _fm1metapair * len(metadata)
    format = _fm1fixed + formatnodes + formatmeta
    # tz is stored in minutes so we divide by 60
    tz = date[1]//60
    data = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre]
    data.extend(sucs)
    if parents is not None:
        data.extend(parents)
    totalsize = _calcsize(format)
    for key, value in metadata:
        lk = len(key)
        lv = len(value)
        if lk > 255:
            msg = ('obsstore metadata key cannot be longer than 255 bytes'
                   ' (key "%s" is %u bytes)') % (key, lk)
            raise error.ProgrammingError(msg)
        if lv > 255:
            msg = ('obsstore metadata value cannot be longer than 255 bytes'
                   ' (value "%s" for key "%s" is %u bytes)') % (value, key, lv)
            raise error.ProgrammingError(msg)
        data.append(lk)
        data.append(lv)
        totalsize += lk + lv
    data[0] = totalsize
    data = [_pack(format, *data)]
    for key, value in metadata:
        data.append(key)
        data.append(value)
    return ''.join(data)
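
# Illustrative round-trip for format 1 (an example added for exposition,
# exercising the pure-Python reader; node values are made up):
#
#   m = (b'\x11' * 20, (b'\x22' * 20,), 0,
#        ((b'user', b'alice'),), (0.0, 0), None)
#   raw = _fm1encodeonemarker(m)
#   assert list(_fm1purereadmarkers(raw, 0, len(raw)))[0] == m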

def _fm1readmarkers(data, off, stop):
    native = getattr(parsers, 'fm1readmarkers', None)
    if not native:
        return _fm1purereadmarkers(data, off, stop)
    return native(data, off, stop)

# mapping to read/write various marker formats
# <version> -> (decoder, encoder)
formats = {_fm0version: (_fm0readmarkers, _fm0encodeonemarker),
           _fm1version: (_fm1readmarkers, _fm1encodeonemarker)}

def _readmarkerversion(data):
    return _unpack('>B', data[0:1])[0]

@util.nogc
def _readmarkers(data, off=None, stop=None):
    """Read and enumerate markers from raw data"""
    diskversion = _readmarkerversion(data)
    if not off:
        off = 1  # skip 1 byte version number
    if stop is None:
        stop = len(data)
    if diskversion not in formats:
        msg = _('parsing obsolete marker: unknown version %r') % diskversion
        raise error.UnknownVersion(msg, version=diskversion)
    return diskversion, formats[diskversion][0](data, off, stop)
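
# Illustrative use (an example added for exposition): parse a raw obsstore
# file, guarding against the empty/missing case:
#
#   data = repo.svfs.tryread('obsstore')
#   if data:
#       version, markers = _readmarkers(data)
#       for prec, sucs, flags, meta, date, parents in markers:
#           ...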

def encodeheader(version=_fm0version):
    return _pack('>B', version)

def encodemarkers(markers, addheader=False, version=_fm0version):
    # Kept separate from flushmarkers(), it will be reused for
    # markers exchange.
    encodeone = formats[version][1]
    if addheader:
        yield encodeheader(version)
    for marker in markers:
        yield encodeone(marker)
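
# Illustrative use (example added for exposition): serialize markers with a
# leading version header, as when writing a brand new obsstore file:
#
#   blob = ''.join(encodemarkers(markers, addheader=True,
#                                version=_fm1version))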

@util.nogc
def _addsuccessors(successors, markers):
    for mark in markers:
        successors.setdefault(mark[0], set()).add(mark)

@util.nogc
def _addpredecessors(predecessors, markers):
    for mark in markers:
        for suc in mark[1]:
            predecessors.setdefault(suc, set()).add(mark)

@util.nogc
def _addchildren(children, markers):
    for mark in markers:
        parents = mark[5]
        if parents is not None:
            for p in parents:
                children.setdefault(p, set()).add(mark)

def _checkinvalidmarkers(markers):
    """search for markers with invalid data and raise error if needed

    Exists as a separate function to allow the evolve extension a more
    subtle handling.
    """
    for mark in markers:
        if node.nullid in mark[1]:
            raise error.Abort(_('bad obsolescence marker detected: '
                                'invalid successors nullid'))

class obsstore(object):
    """Store obsolete markers

    Markers can be accessed with three mappings:
    - predecessors[x] -> set(markers on predecessors edges of x)
    - successors[x] -> set(markers on successors edges of x)
    - children[x] -> set(markers on predecessors edges of children(x))
    """

    fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
    # prec: nodeid, predecessor changeset
    # succs: tuple of nodeid, successor changesets (0-N length)
    # flag: integer, flag field carrying modifier for the markers (see doc)
    # meta: binary blob in UTF-8, encoded metadata dictionary
    # date: (float, int) tuple, date of marker creation
    # parents: (tuple of nodeid) or None, parents of the predecessor
    #          None is used when no data has been recorded
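
    # For instance (an illustrative example added for exposition, values
    # made up), a marker recording that changeset `old` was amended into
    # `new` could look like:
    #
    #   (old, (new,), 0, (('operation', 'amend'), ('user', 'alice')),
    #    (1520000000.0, 0), (oldp1,))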

    def __init__(self, svfs, defaultformat=_fm1version, readonly=False):
        # caches for various obsolescence related cache
        self.caches = {}
        self.svfs = svfs
        self._defaultformat = defaultformat
        self._readonly = readonly

    def __iter__(self):
        return iter(self._all)

    def __len__(self):
        return len(self._all)

    def __nonzero__(self):
        if not self._cached(r'_all'):
            try:
                return self.svfs.stat('obsstore').st_size > 1
            except OSError as inst:
                if inst.errno != errno.ENOENT:
                    raise
                # just build an empty _all list if no obsstore exists, which
                # avoids further stat() syscalls
        return bool(self._all)

    __bool__ = __nonzero__

    @property
    def readonly(self):
        """True if marker creation is disabled

        Remove me in the future when obsolete marker is always on."""
        return self._readonly

    def create(self, transaction, prec, succs=(), flag=0, parents=None,
               date=None, metadata=None, ui=None):
        """obsolete: add a new obsolete marker

        * ensure it is hashable
        * check mandatory metadata
        * encode metadata

        If you are a human writing code creating markers you want to use the
        `createmarkers` function in this module instead.

        Return True if a new marker has been added, False if the marker
        already existed (no op).
        """
        if metadata is None:
            metadata = {}
        if date is None:
            if 'date' in metadata:
                # as a courtesy for out-of-tree extensions
                date = dateutil.parsedate(metadata.pop('date'))
            elif ui is not None:
                date = ui.configdate('devel', 'default-date')
                if date is None:
                    date = dateutil.makedate()
            else:
                date = dateutil.makedate()
        if len(prec) != 20:
            raise ValueError(prec)
        for succ in succs:
            if len(succ) != 20:
                raise ValueError(succ)
        if prec in succs:
            raise ValueError(_('in-marker cycle with %s') % node.hex(prec))

        metadata = tuple(sorted(metadata.iteritems()))
        for k, v in metadata:
            try:
                # might be better to reject non-ASCII keys
                k.decode('utf-8')
                v.decode('utf-8')
            except UnicodeDecodeError:
                raise error.ProgrammingError(
                    'obsstore metadata must be valid UTF-8 sequence '
                    '(key = %r, value = %r)'
                    % (pycompat.bytestr(k), pycompat.bytestr(v)))

        marker = (bytes(prec), tuple(succs), int(flag), metadata, date, parents)
        return bool(self.add(transaction, [marker]))

    def add(self, transaction, markers):
        """Add new markers to the store

        Takes care of filtering out duplicates.
        Returns the number of new markers."""
        if self._readonly:
            raise error.Abort(_('creating obsolete markers is not enabled on '
                                'this repo'))
        known = set()
        getsuccessors = self.successors.get
        new = []
        for m in markers:
            if m not in getsuccessors(m[0], ()) and m not in known:
                known.add(m)
                new.append(m)
        if new:
            f = self.svfs('obsstore', 'ab')
            try:
                offset = f.tell()
                transaction.add('obsstore', offset)
                # offset == 0: new file - add the version header
                data = b''.join(encodemarkers(new, offset == 0, self._version))
                f.write(data)
            finally:
                # XXX: f.close() == filecache invalidation == obsstore rebuilt.
                # call 'filecacheentry.refresh()' here
                f.close()
            addedmarkers = transaction.changes.get('obsmarkers')
            if addedmarkers is not None:
                addedmarkers.update(new)
            self._addmarkers(new, data)
            # new markers *may* have changed several sets. invalidate the cache.
            self.caches.clear()
        # records the number of new markers for the transaction hooks
        previous = int(transaction.hookargs.get('new_obsmarkers', '0'))
        transaction.hookargs['new_obsmarkers'] = '%d' % (previous + len(new))
        return len(new)

    def mergemarkers(self, transaction, data):
        """merge a binary stream of markers inside the obsstore

        Returns the number of new markers added."""
        version, markers = _readmarkers(data)
        return self.add(transaction, markers)

    @propertycache
    def _data(self):
        return self.svfs.tryread('obsstore')

    @propertycache
    def _version(self):
        if len(self._data) >= 1:
            return _readmarkerversion(self._data)
        else:
            return self._defaultformat

    @propertycache
    def _all(self):
        data = self._data
        if not data:
            return []
        self._version, markers = _readmarkers(data)
        markers = list(markers)
        _checkinvalidmarkers(markers)
        return markers

    @propertycache
    def successors(self):
        successors = {}
        _addsuccessors(successors, self._all)
        return successors

    @propertycache
    def predecessors(self):
        predecessors = {}
        _addpredecessors(predecessors, self._all)
        return predecessors

    @propertycache
    def children(self):
        children = {}
        _addchildren(children, self._all)
        return children

    def _cached(self, attr):
        return attr in self.__dict__

    def _addmarkers(self, markers, rawdata):
        markers = list(markers)  # to allow repeated iteration
        self._data = self._data + rawdata
        self._all.extend(markers)
        if self._cached(r'successors'):
            _addsuccessors(self.successors, markers)
        if self._cached(r'predecessors'):
            _addpredecessors(self.predecessors, markers)
        if self._cached(r'children'):
            _addchildren(self.children, markers)
        _checkinvalidmarkers(markers)

    def relevantmarkers(self, nodes):
        """return a set of all obsolescence markers relevant to a set of nodes.

        "relevant" to a set of nodes means:

        - markers that use one of these changesets as a successor
        - prune markers of direct children of these changesets
        - recursive application of the two rules on predecessors of these
          markers

        It is a set so you cannot rely on order."""

        pendingnodes = set(nodes)
        seenmarkers = set()
        seennodes = set(pendingnodes)
        precursorsmarkers = self.predecessors
        succsmarkers = self.successors
        children = self.children
        while pendingnodes:
            direct = set()
            for current in pendingnodes:
                direct.update(precursorsmarkers.get(current, ()))
                pruned = [m for m in children.get(current, ()) if not m[1]]
                direct.update(pruned)
                pruned = [m for m in succsmarkers.get(current, ()) if not m[1]]
                direct.update(pruned)
            direct -= seenmarkers
            pendingnodes = set([m[0] for m in direct])
            seenmarkers |= direct
            pendingnodes -= seennodes
            seennodes |= pendingnodes
        return seenmarkers
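
    # Illustrative use (an example added for exposition): exchange code can
    # ask for the transitive closure of markers touching the nodes it is
    # about to push:
    #
    #   markers = repo.obsstore.relevantmarkers(outgoingnodes)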

def makestore(ui, repo):
    """Create an obsstore instance from a repo."""
    # read default format for new obsstore.
    # developer config: format.obsstore-version
    defaultformat = ui.configint('format', 'obsstore-version')
    # rely on obsstore class default when possible.
    kwargs = {}
    if defaultformat is not None:
        kwargs[r'defaultformat'] = defaultformat
    readonly = not isenabled(repo, createmarkersopt)
    store = obsstore(repo.svfs, readonly=readonly, **kwargs)
    if store and readonly:
        ui.warn(_('obsolete feature not enabled but %i markers found!\n')
                % len(list(store)))
    return store
765 |
|
765 | |||
766 | def commonversion(versions): |
|
766 | def commonversion(versions): | |
767 | """Return the newest version listed in both versions and our local formats. |
|
767 | """Return the newest version listed in both versions and our local formats. | |
768 |
|
768 | |||
769 | Returns None if no common version exists. |
|
769 | Returns None if no common version exists. | |
770 | """ |
|
770 | """ | |
771 | versions.sort(reverse=True) |
|
771 | versions.sort(reverse=True) | |
772 | # search for the highest version known on both sides |

772 | # search for the highest version known on both sides | |
773 | for v in versions: |
|
773 | for v in versions: | |
774 | if v in formats: |
|
774 | if v in formats: | |
775 | return v |
|
775 | return v | |
776 | return None |
|
776 | return None | |
777 |
|
777 | |||
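
As a quick illustration of the version negotiation (toy values; the real
`formats` tuple is defined elsewhere in this module):

    # pretend the local side understands obsstore formats 0 and 1
    local_formats = (0, 1)

    def toy_commonversion(remote_versions):
        # highest version known on both sides, None if disjoint
        for v in sorted(remote_versions, reverse=True):
            if v in local_formats:
                return v
        return None

    assert toy_commonversion([0, 1, 2]) == 1   # remote is newer; settle on 1
    assert toy_commonversion([5, 7]) is None   # nothing in common
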
778 | # arbitrarily picked to fit into the 8K limit of HTTP servers |

778 | # arbitrarily picked to fit into the 8K limit of HTTP servers | |
779 | # you have to take into account: |

779 | # you have to take into account: | |
780 | # - the version header |
|
780 | # - the version header | |
781 | # - the base85 encoding |
|
781 | # - the base85 encoding | |
782 | _maxpayload = 5300 |
|
782 | _maxpayload = 5300 | |
783 |
|
783 | |||
784 | def _pushkeyescape(markers): |
|
784 | def _pushkeyescape(markers): | |
785 | """encode markers into a dict suitable for pushkey exchange |
|
785 | """encode markers into a dict suitable for pushkey exchange | |
786 |
|
786 | |||
787 | - binary data is base85 encoded |
|
787 | - binary data is base85 encoded | |
788 | - split in chunks smaller than 5300 bytes""" |
|
788 | - split in chunks smaller than 5300 bytes""" | |
789 | keys = {} |
|
789 | keys = {} | |
790 | parts = [] |
|
790 | parts = [] | |
791 | currentlen = _maxpayload * 2 # ensure we create a new part |
|
791 | currentlen = _maxpayload * 2 # ensure we create a new part | |
792 | for marker in markers: |
|
792 | for marker in markers: | |
793 | nextdata = _fm0encodeonemarker(marker) |
|
793 | nextdata = _fm0encodeonemarker(marker) | |
794 | if (len(nextdata) + currentlen > _maxpayload): |
|
794 | if (len(nextdata) + currentlen > _maxpayload): | |
795 | currentpart = [] |
|
795 | currentpart = [] | |
796 | currentlen = 0 |
|
796 | currentlen = 0 | |
797 | parts.append(currentpart) |
|
797 | parts.append(currentpart) | |
798 | currentpart.append(nextdata) |
|
798 | currentpart.append(nextdata) | |
799 | currentlen += len(nextdata) |
|
799 | currentlen += len(nextdata) | |
800 | for idx, part in enumerate(reversed(parts)): |
|
800 | for idx, part in enumerate(reversed(parts)): | |
801 | data = ''.join([_pack('>B', _fm0version)] + part) |
|
801 | data = ''.join([_pack('>B', _fm0version)] + part) | |
802 | keys['dump%i' % idx] = util.b85encode(data) |
|
802 | keys['dump%i' % idx] = util.b85encode(data) | |
803 | return keys |
|
803 | return keys | |
804 |
|
804 | |||
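
The chunking logic can be exercised on its own; a sketch with fixed-size fake
marker encodings (the 5300-byte budget and the greedy packing mirror the code
above):

    _MAXPAYLOAD = 5300

    def toy_chunks(encoded_markers):
        parts = []
        currentlen = _MAXPAYLOAD * 2          # force a first part
        for data in encoded_markers:
            if len(data) + currentlen > _MAXPAYLOAD:
                currentpart = []
                currentlen = 0
                parts.append(currentpart)
            currentpart.append(data)
            currentlen += len(data)
        return parts

    # four 2000-byte blobs pack two per part under the 5300-byte budget
    assert [len(p) for p in toy_chunks([b'x' * 2000] * 4)] == [2, 2]

Note that _pushkeyescape then names the parts from enumerate(reversed(parts)),
so 'dump0' ends up being the most recently started chunk.
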
805 | def listmarkers(repo): |
|
805 | def listmarkers(repo): | |
806 | """List markers over pushkey""" |
|
806 | """List markers over pushkey""" | |
807 | if not repo.obsstore: |
|
807 | if not repo.obsstore: | |
808 | return {} |
|
808 | return {} | |
809 | return _pushkeyescape(sorted(repo.obsstore)) |
|
809 | return _pushkeyescape(sorted(repo.obsstore)) | |
810 |
|
810 | |||
811 | def pushmarker(repo, key, old, new): |
|
811 | def pushmarker(repo, key, old, new): | |
812 | """Push markers over pushkey""" |
|
812 | """Push markers over pushkey""" | |
813 | if not key.startswith('dump'): |
|
813 | if not key.startswith('dump'): | |
814 | repo.ui.warn(_('unknown key: %r') % key) |
|
814 | repo.ui.warn(_('unknown key: %r') % key) | |
815 | return False |
|
815 | return False | |
816 | if old: |
|
816 | if old: | |
817 | repo.ui.warn(_('unexpected old value for %r') % key) |
|
817 | repo.ui.warn(_('unexpected old value for %r') % key) | |
818 | return False |
|
818 | return False | |
819 | data = util.b85decode(new) |
|
819 | data = util.b85decode(new) | |
820 | with repo.lock(), repo.transaction('pushkey: obsolete markers') as tr: |
|
820 | with repo.lock(), repo.transaction('pushkey: obsolete markers') as tr: | |
821 | repo.obsstore.mergemarkers(tr, data) |
|
821 | repo.obsstore.mergemarkers(tr, data) | |
822 | repo.invalidatevolatilesets() |
|
822 | repo.invalidatevolatilesets() | |
823 | return True |
|
823 | return True | |
824 |
|
824 | |||
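
Seen from the wire protocol, the two functions above form a pair: the keys
produced by listmarkers on one repo are replayed through pushmarker on
another. A schematic exchange (srcrepo and dstrepo are placeholders for two
repository objects):

    # source side: encode all markers into pushkey-sized chunks
    keys = listmarkers(srcrepo)          # e.g. {'dump0': <base85 blob>, ...}

    # destination side: push each chunk with an empty "old" value
    for key, value in sorted(keys.items()):
        assert pushmarker(dstrepo, key, '', value)
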
825 | # mapping of 'set-name' -> <function to compute this set> |
|
825 | # mapping of 'set-name' -> <function to compute this set> | |
826 | cachefuncs = {} |
|
826 | cachefuncs = {} | |
827 | def cachefor(name): |
|
827 | def cachefor(name): | |
828 | """Decorator to register a function as computing the cache for a set""" |
|
828 | """Decorator to register a function as computing the cache for a set""" | |
829 | def decorator(func): |
|
829 | def decorator(func): | |
830 | if name in cachefuncs: |
|
830 | if name in cachefuncs: | |
831 | msg = "duplicated registration for volatileset '%s' (existing: %r)" |
|
831 | msg = "duplicated registration for volatileset '%s' (existing: %r)" | |
832 | raise error.ProgrammingError(msg % (name, cachefuncs[name])) |
|
832 | raise error.ProgrammingError(msg % (name, cachefuncs[name])) | |
833 | cachefuncs[name] = func |
|
833 | cachefuncs[name] = func | |
834 | return func |
|
834 | return func | |
835 | return decorator |
|
835 | return decorator | |
836 |
|
836 | |||
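
Registration is a plain decorator-as-registry pattern; a hypothetical
volatile set would be added like this (the 'example' name is illustrative,
not a real set):

    @cachefor('example')                 # hypothetical set name
    def _computeexampleset(repo):
        """an always-empty set, for illustration only"""
        return frozenset()

    # cachefuncs now maps 'example' to the function, and getrevs (defined
    # just below) will compute and memoize it on first use.
    assert cachefuncs['example'] is _computeexampleset
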
837 | def getrevs(repo, name): |
|
837 | def getrevs(repo, name): | |
838 | """Return the set of revision that belong to the <name> set |
|
838 | """Return the set of revision that belong to the <name> set | |
839 |
|
839 | |||
840 | Such access may compute the set and cache it for future use""" |
|
840 | Such access may compute the set and cache it for future use""" | |
841 | repo = repo.unfiltered() |
|
841 | repo = repo.unfiltered() | |
842 | if not repo.obsstore: |
|
842 | if not repo.obsstore: | |
843 | return frozenset() |
|
843 | return frozenset() | |
844 | if name not in repo.obsstore.caches: |
|
844 | if name not in repo.obsstore.caches: | |
845 | repo.obsstore.caches[name] = cachefuncs[name](repo) |
|
845 | repo.obsstore.caches[name] = cachefuncs[name](repo) | |
846 | return repo.obsstore.caches[name] |
|
846 | return repo.obsstore.caches[name] | |
847 |
|
847 | |||
848 | # To be simple we need to invalidate the obsolescence cache when: |

848 | # To be simple we need to invalidate the obsolescence cache when: | |
849 | # |

849 | # | |
850 | # - a new changeset is added |

850 | # - a new changeset is added | |
851 | # - the public phase is changed |

851 | # - the public phase is changed | |
852 | # - obsolescence markers are added |

852 | # - obsolescence markers are added | |
853 | # - strip is used on a repo |

853 | # - strip is used on a repo | |
854 | def clearobscaches(repo): |
|
854 | def clearobscaches(repo): | |
855 | """Remove all obsolescence related cache from a repo |
|
855 | """Remove all obsolescence related cache from a repo | |
856 |
|
856 | |||
857 | This remove all cache in obsstore is the obsstore already exist on the |
|
857 | This remove all cache in obsstore is the obsstore already exist on the | |
858 | repo. |
|
858 | repo. | |
859 |
|
859 | |||
860 | (We could be smarter here given the exact event that trigger the cache |
|
860 | (We could be smarter here given the exact event that trigger the cache | |
861 | clearing)""" |
|
861 | clearing)""" | |
862 | # only clear cache is there is obsstore data in this repo |
|
862 | # only clear cache is there is obsstore data in this repo | |
863 | if 'obsstore' in repo._filecache: |
|
863 | if 'obsstore' in repo._filecache: | |
864 | repo.obsstore.caches.clear() |
|
864 | repo.obsstore.caches.clear() | |
865 |
|
865 | |||
866 | def _mutablerevs(repo): |
|
866 | def _mutablerevs(repo): | |
867 | """the set of mutable revision in the repository""" |
|
867 | """the set of mutable revision in the repository""" | |
868 | return repo._phasecache.getrevset(repo, phases.mutablephases) |
|
868 | return repo._phasecache.getrevset(repo, phases.mutablephases) | |
869 |
|
869 | |||
870 | @cachefor('obsolete') |
|
870 | @cachefor('obsolete') | |
871 | def _computeobsoleteset(repo): |
|
871 | def _computeobsoleteset(repo): | |
872 | """the set of obsolete revisions""" |
|
872 | """the set of obsolete revisions""" | |
873 | getnode = repo.changelog.node |
|
873 | getnode = repo.changelog.node | |
874 | notpublic = _mutablerevs(repo) |
|
874 | notpublic = _mutablerevs(repo) | |
875 | isobs = repo.obsstore.successors.__contains__ |
|
875 | isobs = repo.obsstore.successors.__contains__ | |
876 | obs = set(r for r in notpublic if isobs(getnode(r))) |
|
876 | obs = set(r for r in notpublic if isobs(getnode(r))) | |
877 | return obs |
|
877 | return obs | |
878 |
|
878 | |||
879 | @cachefor('orphan') |
|
879 | @cachefor('orphan') | |
880 | def _computeorphanset(repo): |
|
880 | def _computeorphanset(repo): | |
881 | """the set of non obsolete revisions with obsolete parents""" |
|
881 | """the set of non obsolete revisions with obsolete parents""" | |
882 | pfunc = repo.changelog.parentrevs |
|
882 | pfunc = repo.changelog.parentrevs | |
883 | mutable = _mutablerevs(repo) |
|
883 | mutable = _mutablerevs(repo) | |
884 | obsolete = getrevs(repo, 'obsolete') |
|
884 | obsolete = getrevs(repo, 'obsolete') | |
885 | others = mutable - obsolete |
|
885 | others = mutable - obsolete | |
886 | unstable = set() |
|
886 | unstable = set() | |
887 | for r in sorted(others): |
|
887 | for r in sorted(others): | |
888 | # A rev is unstable if one of its parents is obsolete or unstable |

888 | # A rev is unstable if one of its parents is obsolete or unstable | |
889 | # this works since we traverse in growing rev order |

889 | # this works since we traverse in growing rev order | |
890 | for p in pfunc(r): |
|
890 | for p in pfunc(r): | |
891 | if p in obsolete or p in unstable: |
|
891 | if p in obsolete or p in unstable: | |
892 | unstable.add(r) |
|
892 | unstable.add(r) | |
893 | break |
|
893 | break | |
894 | return unstable |
|
894 | return unstable | |
895 |
|
895 | |||
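
The single ascending pass works because parents always have smaller revision
numbers than their children, so a parent's status is settled before any child
is examined. The invariant on a toy graph (parent map and obsolete set made
up for illustration):

    # rev -> parent revs, numbered so that parents < children
    parents = {1: (0,), 2: (1,), 3: (2,)}
    obsolete = {1}

    unstable = set()
    for r in sorted(parents):            # growing rev order, as above
        if any(p in obsolete or p in unstable for p in parents[r]):
            unstable.add(r)

    # 2 is orphaned by its obsolete parent; 3 transitively through 2.
    assert unstable == {2, 3}
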
896 | @cachefor('suspended') |
|
896 | @cachefor('suspended') | |
897 | def _computesuspendedset(repo): |
|
897 | def _computesuspendedset(repo): | |
898 | """the set of obsolete parents with non obsolete descendants""" |
|
898 | """the set of obsolete parents with non obsolete descendants""" | |
899 | suspended = repo.changelog.ancestors(getrevs(repo, 'orphan')) |
|
899 | suspended = repo.changelog.ancestors(getrevs(repo, 'orphan')) | |
900 | return set(r for r in getrevs(repo, 'obsolete') if r in suspended) |
|
900 | return set(r for r in getrevs(repo, 'obsolete') if r in suspended) | |
901 |
|
901 | |||
902 | @cachefor('extinct') |
|
902 | @cachefor('extinct') | |
903 | def _computeextinctset(repo): |
|
903 | def _computeextinctset(repo): | |
904 | """the set of obsolete parents without non obsolete descendants""" |
|
904 | """the set of obsolete parents without non obsolete descendants""" | |
905 | return getrevs(repo, 'obsolete') - getrevs(repo, 'suspended') |
|
905 | return getrevs(repo, 'obsolete') - getrevs(repo, 'suspended') | |
906 |
|
906 | |||
907 | @cachefor('phasedivergent') |
|
907 | @cachefor('phasedivergent') | |
908 | def _computephasedivergentset(repo): |
|
908 | def _computephasedivergentset(repo): | |
909 | """the set of revs trying to obsolete public revisions""" |
|
909 | """the set of revs trying to obsolete public revisions""" | |
910 | bumped = set() |
|
910 | bumped = set() | |
911 | # util function (avoid attribute lookup in the loop) |
|
911 | # util function (avoid attribute lookup in the loop) | |
912 | phase = repo._phasecache.phase # would be faster to grab the full list |
|
912 | phase = repo._phasecache.phase # would be faster to grab the full list | |
913 | public = phases.public |
|
913 | public = phases.public | |
914 | cl = repo.changelog |
|
914 | cl = repo.changelog | |
915 | torev = cl.nodemap.get |
|
915 | torev = cl.nodemap.get | |
916 | tonode = cl.node |
|
916 | tonode = cl.node | |
917 | for rev in repo.revs('(not public()) and (not obsolete())'): |
|
917 | for rev in repo.revs('(not public()) and (not obsolete())'): | |
918 | # We only evaluate mutable, non-obsolete revisions |

918 | # We only evaluate mutable, non-obsolete revisions | |
919 | node = tonode(rev) |
|
919 | node = tonode(rev) | |
920 | # (future) A cache of predecessors may be worthwhile if splits are very common |

920 | # (future) A cache of predecessors may be worthwhile if splits are very common | |
921 | for pnode in obsutil.allpredecessors(repo.obsstore, [node], |
|
921 | for pnode in obsutil.allpredecessors(repo.obsstore, [node], | |
922 | ignoreflags=bumpedfix): |
|
922 | ignoreflags=bumpedfix): | |
923 | prev = torev(pnode) # unfiltered! but so is phasecache |
|
923 | prev = torev(pnode) # unfiltered! but so is phasecache | |
924 | if (prev is not None) and (phase(repo, prev) <= public): |
|
924 | if (prev is not None) and (phase(repo, prev) <= public): | |
925 | # we have a public predecessor |
|
925 | # we have a public predecessor | |
926 | bumped.add(rev) |
|
926 | bumped.add(rev) | |
927 | break # Next draft! |
|
927 | break # Next draft! | |
928 | return bumped |
|
928 | return bumped | |
929 |
|
929 | |||
930 | @cachefor('contentdivergent') |
|
930 | @cachefor('contentdivergent') | |
931 | def _computecontentdivergentset(repo): |
|
931 | def _computecontentdivergentset(repo): | |
932 | """the set of rev that compete to be the final successors of some revision. |
|
932 | """the set of rev that compete to be the final successors of some revision. | |
933 | """ |
|
933 | """ | |
934 | divergent = set() |
|
934 | divergent = set() | |
935 | obsstore = repo.obsstore |
|
935 | obsstore = repo.obsstore | |
936 | newermap = {} |
|
936 | newermap = {} | |
937 | tonode = repo.changelog.node |
|
937 | tonode = repo.changelog.node | |
938 | for rev in repo.revs('(not public()) - obsolete()'): |
|
938 | for rev in repo.revs('(not public()) - obsolete()'): | |
939 | node = tonode(rev) |
|
939 | node = tonode(rev) | |
940 | mark = obsstore.predecessors.get(node, ()) |
|
940 | mark = obsstore.predecessors.get(node, ()) | |
941 | toprocess = set(mark) |
|
941 | toprocess = set(mark) | |
942 | seen = set() |
|
942 | seen = set() | |
943 | while toprocess: |
|
943 | while toprocess: | |
944 | prec = toprocess.pop()[0] |
|
944 | prec = toprocess.pop()[0] | |
945 | if prec in seen: |
|
945 | if prec in seen: | |
946 | continue # emergency cycle hanging prevention |
|
946 | continue # emergency cycle hanging prevention | |
947 | seen.add(prec) |
|
947 | seen.add(prec) | |
948 | if prec not in newermap: |
|
948 | if prec not in newermap: | |
949 | obsutil.successorssets(repo, prec, cache=newermap) |
|
949 | obsutil.successorssets(repo, prec, cache=newermap) | |
950 | newer = [n for n in newermap[prec] if n] |
|
950 | newer = [n for n in newermap[prec] if n] | |
951 | if len(newer) > 1: |
|
951 | if len(newer) > 1: | |
952 | divergent.add(rev) |
|
952 | divergent.add(rev) | |
953 | break |
|
953 | break | |
954 | toprocess.update(obsstore.predecessors.get(prec, ())) |
|
954 | toprocess.update(obsstore.predecessors.get(prec, ())) | |
955 | return divergent |
|
955 | return divergent | |
956 |
|
956 | |||
957 |
|
957 | |||
958 | def createmarkers(repo, relations, flag=0, date=None, metadata=None, |
|
958 | def createmarkers(repo, relations, flag=0, date=None, metadata=None, | |
959 | operation=None): |
|
959 | operation=None): | |
960 | """Add obsolete markers between changesets in a repo |
|
960 | """Add obsolete markers between changesets in a repo | |
961 |
|
961 | |||
962 | <relations> must be an iterable of (<old>, (<new>, ...)[,{metadata}]) |
|
962 | <relations> must be an iterable of (<old>, (<new>, ...)[,{metadata}]) | |
963 | tuples. `old` and `news` are changectx objects. metadata is an optional dictionary |

963 | tuples. `old` and `news` are changectx objects. metadata is an optional dictionary | |
964 | containing metadata for this marker only. It is merged with the global |
|
964 | containing metadata for this marker only. It is merged with the global | |
965 | metadata specified through the `metadata` argument of this function. |
|
965 | metadata specified through the `metadata` argument of this function. | |
966 | Any string values in metadata must be UTF-8 bytes. |
|
966 | Any string values in metadata must be UTF-8 bytes. | |
967 |
|
967 | |||
968 | Trying to obsolete a public changeset will raise an exception. |
|
968 | Trying to obsolete a public changeset will raise an exception. | |
969 |
|
969 | |||
970 | Current user and date are used unless specified otherwise in the |

970 | Current user and date are used unless specified otherwise in the | |
971 | metadata attribute. |
|
971 | metadata attribute. | |
972 |
|
972 | |||
973 | This function operates within a transaction of its own, but does |
|
973 | This function operates within a transaction of its own, but does | |
974 | not take any lock on the repo. |
|
974 | not take any lock on the repo. | |
975 | """ |
|
975 | """ | |
976 | # prepare metadata |
|
976 | # prepare metadata | |
977 | if metadata is None: |
|
977 | if metadata is None: | |
978 | metadata = {} |
|
978 | metadata = {} | |
979 | if 'user' not in metadata: |
|
979 | if 'user' not in metadata: | |
980 | luser = repo.ui.config('devel', 'user.obsmarker') or repo.ui.username() |
|
980 | luser = repo.ui.config('devel', 'user.obsmarker') or repo.ui.username() | |
981 | metadata['user'] = encoding.fromlocal(luser) |
|
981 | metadata['user'] = encoding.fromlocal(luser) | |
982 |
|
982 | |||
983 | # Operation metadata handling |
|
983 | # Operation metadata handling | |
984 | useoperation = repo.ui.configbool('experimental', |
|
984 | useoperation = repo.ui.configbool('experimental', | |
985 | 'evolution.track-operation') |
|
985 | 'evolution.track-operation') | |
986 | if useoperation and operation: |
|
986 | if useoperation and operation: | |
987 | metadata['operation'] = operation |
|
987 | metadata['operation'] = operation | |
988 |
|
988 | |||
989 | # Effect flag metadata handling |
|
989 | # Effect flag metadata handling | |
990 | saveeffectflag = repo.ui.configbool('experimental', |
|
990 | saveeffectflag = repo.ui.configbool('experimental', | |
991 | 'evolution.effect-flags') |
|
991 | 'evolution.effect-flags') | |
992 |
|
992 | |||
993 | with repo.transaction('add-obsolescence-marker') as tr: |
|
993 | with repo.transaction('add-obsolescence-marker') as tr: | |
994 | markerargs = [] |
|
994 | markerargs = [] | |
995 | for rel in relations: |
|
995 | for rel in relations: | |
996 | prec = rel[0] |
|
996 | prec = rel[0] | |
997 | sucs = rel[1] |
|
997 | sucs = rel[1] | |
998 | localmetadata = metadata.copy() |
|
998 | localmetadata = metadata.copy() | |
999 | if 2 < len(rel): |
|
999 | if 2 < len(rel): | |
1000 | localmetadata.update(rel[2]) |
|
1000 | localmetadata.update(rel[2]) | |
1001 |
|
1001 | |||
1002 | if not prec.mutable(): |
|
1002 | if not prec.mutable(): | |
1003 | raise error.Abort(_("cannot obsolete public changeset: %s") |
|
1003 | raise error.Abort(_("cannot obsolete public changeset: %s") | |
1004 | % prec, |
|
1004 | % prec, | |
1005 | hint="see 'hg help phases' for details") |
|
1005 | hint="see 'hg help phases' for details") | |
1006 | nprec = prec.node() |
|
1006 | nprec = prec.node() | |
1007 | nsucs = tuple(s.node() for s in sucs) |
|
1007 | nsucs = tuple(s.node() for s in sucs) | |
1008 | npare = None |
|
1008 | npare = None | |
1009 | if not nsucs: |
|
1009 | if not nsucs: | |
1010 | npare = tuple(p.node() for p in prec.parents()) |
|
1010 | npare = tuple(p.node() for p in prec.parents()) | |
1011 | if nprec in nsucs: |
|
1011 | if nprec in nsucs: | |
1012 | raise error.Abort(_("changeset %s cannot obsolete itself") |
|
1012 | raise error.Abort(_("changeset %s cannot obsolete itself") | |
1013 | % prec) |
|
1013 | % prec) | |
1014 |
|
1014 | |||
1015 | # Effect flag can be different by relation |
|
1015 | # Effect flag can be different by relation | |
1016 | if saveeffectflag: |
|
1016 | if saveeffectflag: | |
1017 | # The effect flag is saved in a versioned field name for future |
|
1017 | # The effect flag is saved in a versioned field name for future | |
1018 | # evolution |
|
1018 | # evolution | |
1019 | effectflag = obsutil.geteffectflag(rel) |
|
1019 | effectflag = obsutil.geteffectflag(prec, sucs) | |
1020 | localmetadata[obsutil.EFFECTFLAGFIELD] = "%d" % effectflag |
|
1020 | localmetadata[obsutil.EFFECTFLAGFIELD] = "%d" % effectflag | |
1021 |
|
1021 | |||
1022 | # Creating the marker causes the hidden cache to become invalid, |
|
1022 | # Creating the marker causes the hidden cache to become invalid, | |
1023 | # which causes recomputation when we ask for prec.parents() above. |
|
1023 | # which causes recomputation when we ask for prec.parents() above. | |
1024 | # Resulting in n^2 behavior. So let's prepare all of the args |
|
1024 | # Resulting in n^2 behavior. So let's prepare all of the args | |
1025 | # first, then create the markers. |
|
1025 | # first, then create the markers. | |
1026 | markerargs.append((nprec, nsucs, npare, localmetadata)) |
|
1026 | markerargs.append((nprec, nsucs, npare, localmetadata)) | |
1027 |
|
1027 | |||
1028 | for args in markerargs: |
|
1028 | for args in markerargs: | |
1029 | nprec, nsucs, npare, localmetadata = args |
|
1029 | nprec, nsucs, npare, localmetadata = args | |
1030 | repo.obsstore.create(tr, nprec, nsucs, flag, parents=npare, |
|
1030 | repo.obsstore.create(tr, nprec, nsucs, flag, parents=npare, | |
1031 | date=date, metadata=localmetadata, |
|
1031 | date=date, metadata=localmetadata, | |
1032 | ui=repo.ui) |
|
1032 | ui=repo.ui) | |
1033 | repo.filteredrevcache.clear() |
|
1033 | repo.filteredrevcache.clear() |
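
Callers of createmarkers typically hold the repo lock themselves and hand in
changectx pairs. A hedged sketch of the call shape (`old` and `new` stand for
changectx objects obtained elsewhere; the 'note' key is an arbitrary example
of per-marker metadata, not a required field):

    with repo.lock():
        # record that `old` was rewritten into `new`
        createmarkers(repo, [(old, (new,), {'note': b'example'})],
                      operation='amend')
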
@@ -1,982 +1,980 | |||||
1 | # obsutil.py - utility functions for obsolescence |
|
1 | # obsutil.py - utility functions for obsolescence | |
2 | # |
|
2 | # | |
3 | # Copyright 2017 Boris Feld <boris.feld@octobus.net> |
|
3 | # Copyright 2017 Boris Feld <boris.feld@octobus.net> | |
4 | # |
|
4 | # | |
5 | # This software may be used and distributed according to the terms of the |
|
5 | # This software may be used and distributed according to the terms of the | |
6 | # GNU General Public License version 2 or any later version. |
|
6 | # GNU General Public License version 2 or any later version. | |
7 |
|
7 | |||
8 | from __future__ import absolute_import |
|
8 | from __future__ import absolute_import | |
9 |
|
9 | |||
10 | import re |
|
10 | import re | |
11 |
|
11 | |||
12 | from .i18n import _ |
|
12 | from .i18n import _ | |
13 | from . import ( |
|
13 | from . import ( | |
14 | diffutil, |
|
14 | diffutil, | |
15 | encoding, |
|
15 | encoding, | |
16 | node as nodemod, |
|
16 | node as nodemod, | |
17 | phases, |
|
17 | phases, | |
18 | util, |
|
18 | util, | |
19 | ) |
|
19 | ) | |
20 | from .utils import ( |
|
20 | from .utils import ( | |
21 | dateutil, |
|
21 | dateutil, | |
22 | ) |
|
22 | ) | |
23 |
|
23 | |||
24 | ### obsolescence marker flag |
|
24 | ### obsolescence marker flag | |
25 |
|
25 | |||
26 | ## bumpedfix flag |
|
26 | ## bumpedfix flag | |
27 | # |
|
27 | # | |
28 | # When a changeset A' succeeds a changeset A which became public, we call A' |

28 | # When a changeset A' succeeds a changeset A which became public, we call A' | |
29 | # "bumped" because it's a successor of a public changeset |

29 | # "bumped" because it's a successor of a public changeset | |
30 | # |
|
30 | # | |
31 | # o A' (bumped) |
|
31 | # o A' (bumped) | |
32 | # |`: |
|
32 | # |`: | |
33 | # | o A |
|
33 | # | o A | |
34 | # |/ |
|
34 | # |/ | |
35 | # o Z |
|
35 | # o Z | |
36 | # |
|
36 | # | |
37 | # The way to solve this situation is to create a new changeset Ad as a child |

37 | # The way to solve this situation is to create a new changeset Ad as a child | |
38 | # of A. This changeset has the same content as A'. So the diff from A to A' |

38 | # of A. This changeset has the same content as A'. So the diff from A to A' | |
39 | # is the same as the diff from A to Ad. Ad is marked as a successor of A' |

39 | # is the same as the diff from A to Ad. Ad is marked as a successor of A' | |
40 | # |
|
40 | # | |
41 | # o Ad |
|
41 | # o Ad | |
42 | # |`: |
|
42 | # |`: | |
43 | # | x A' |
|
43 | # | x A' | |
44 | # |'| |
|
44 | # |'| | |
45 | # o | A |
|
45 | # o | A | |
46 | # |/ |
|
46 | # |/ | |
47 | # o Z |
|
47 | # o Z | |
48 | # |
|
48 | # | |
49 | # But by transitivity Ad is also a successor of A. To avoid having Ad marked |

49 | # But by transitivity Ad is also a successor of A. To avoid having Ad marked | |
50 | # as bumped too, we add the `bumpedfix` flag to the marker. <A', (Ad,)>. |

50 | # as bumped too, we add the `bumpedfix` flag to the marker. <A', (Ad,)>. | |
51 | # This flag means that the successors express the changes between the public and |

51 | # This flag means that the successors express the changes between the public and | |
52 | # bumped version and fix the situation, breaking the transitivity of |
|
52 | # bumped version and fix the situation, breaking the transitivity of | |
53 | # "bumped" here. |
|
53 | # "bumped" here. | |
54 | bumpedfix = 1 |
|
54 | bumpedfix = 1 | |
55 | usingsha256 = 2 |
|
55 | usingsha256 = 2 | |
56 |
|
56 | |||
57 | class marker(object): |
|
57 | class marker(object): | |
58 | """Wrap obsolete marker raw data""" |
|
58 | """Wrap obsolete marker raw data""" | |
59 |
|
59 | |||
60 | def __init__(self, repo, data): |
|
60 | def __init__(self, repo, data): | |
61 | # the repo argument will be used to create changectx in a later version |

61 | # the repo argument will be used to create changectx in a later version | |
62 | self._repo = repo |
|
62 | self._repo = repo | |
63 | self._data = data |
|
63 | self._data = data | |
64 | self._decodedmeta = None |
|
64 | self._decodedmeta = None | |
65 |
|
65 | |||
66 | def __hash__(self): |
|
66 | def __hash__(self): | |
67 | return hash(self._data) |
|
67 | return hash(self._data) | |
68 |
|
68 | |||
69 | def __eq__(self, other): |
|
69 | def __eq__(self, other): | |
70 | if type(other) != type(self): |
|
70 | if type(other) != type(self): | |
71 | return False |
|
71 | return False | |
72 | return self._data == other._data |
|
72 | return self._data == other._data | |
73 |
|
73 | |||
74 | def prednode(self): |
|
74 | def prednode(self): | |
75 | """Predecessor changeset node identifier""" |
|
75 | """Predecessor changeset node identifier""" | |
76 | return self._data[0] |
|
76 | return self._data[0] | |
77 |
|
77 | |||
78 | def succnodes(self): |
|
78 | def succnodes(self): | |
79 | """List of successor changesets node identifiers""" |
|
79 | """List of successor changesets node identifiers""" | |
80 | return self._data[1] |
|
80 | return self._data[1] | |
81 |
|
81 | |||
82 | def parentnodes(self): |
|
82 | def parentnodes(self): | |
83 | """Parents of the predecessors (None if not recorded)""" |
|
83 | """Parents of the predecessors (None if not recorded)""" | |
84 | return self._data[5] |
|
84 | return self._data[5] | |
85 |
|
85 | |||
86 | def metadata(self): |
|
86 | def metadata(self): | |
87 | """Decoded metadata dictionary""" |
|
87 | """Decoded metadata dictionary""" | |
88 | return dict(self._data[3]) |
|
88 | return dict(self._data[3]) | |
89 |
|
89 | |||
90 | def date(self): |
|
90 | def date(self): | |
91 | """Creation date as (unixtime, offset)""" |
|
91 | """Creation date as (unixtime, offset)""" | |
92 | return self._data[4] |
|
92 | return self._data[4] | |
93 |
|
93 | |||
94 | def flags(self): |
|
94 | def flags(self): | |
95 | """The flags field of the marker""" |
|
95 | """The flags field of the marker""" | |
96 | return self._data[2] |
|
96 | return self._data[2] | |
97 |
|
97 | |||
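
The accessor indices above pin down the raw tuple layout. A sketch with a
made-up raw marker (fake 20-byte ids; the repo argument is unused by these
accessors, so None suffices here):

    prednode = b'\x01' * 20                  # fake node ids
    succnode = b'\x02' * 20
    raw = (prednode, (succnode,), 0,         # predecessor, successors, flags
           ((b'user', b'alice'),),           # metadata as key/value pairs
           (0.0, 0),                         # date as (unixtime, offset)
           None)                             # parents of predecessor, unrecorded

    m = marker(None, raw)
    assert m.prednode() == prednode
    assert m.succnodes() == (succnode,)
    assert m.metadata() == {b'user': b'alice'}
    assert m.parentnodes() is None
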
98 | def getmarkers(repo, nodes=None, exclusive=False): |
|
98 | def getmarkers(repo, nodes=None, exclusive=False): | |
99 | """returns markers known in a repository |
|
99 | """returns markers known in a repository | |
100 |
|
100 | |||
101 | If <nodes> is specified, only markers "relevant" to those nodes are |

101 | If <nodes> is specified, only markers "relevant" to those nodes are | |
102 | returned""" |
|
102 | returned""" | |
103 | if nodes is None: |
|
103 | if nodes is None: | |
104 | rawmarkers = repo.obsstore |
|
104 | rawmarkers = repo.obsstore | |
105 | elif exclusive: |
|
105 | elif exclusive: | |
106 | rawmarkers = exclusivemarkers(repo, nodes) |
|
106 | rawmarkers = exclusivemarkers(repo, nodes) | |
107 | else: |
|
107 | else: | |
108 | rawmarkers = repo.obsstore.relevantmarkers(nodes) |
|
108 | rawmarkers = repo.obsstore.relevantmarkers(nodes) | |
109 |
|
109 | |||
110 | for markerdata in rawmarkers: |
|
110 | for markerdata in rawmarkers: | |
111 | yield marker(repo, markerdata) |
|
111 | yield marker(repo, markerdata) | |
112 |
|
112 | |||
113 | def closestpredecessors(repo, nodeid): |
|
113 | def closestpredecessors(repo, nodeid): | |
114 | """yield the list of next predecessors pointing on visible changectx nodes |
|
114 | """yield the list of next predecessors pointing on visible changectx nodes | |
115 |
|
115 | |||
116 | This function respects the repoview filtering; filtered revisions will be |

116 | This function respects the repoview filtering; filtered revisions will be | |
117 | considered missing. |
|
117 | considered missing. | |
118 | """ |
|
118 | """ | |
119 |
|
119 | |||
120 | precursors = repo.obsstore.predecessors |
|
120 | precursors = repo.obsstore.predecessors | |
121 | stack = [nodeid] |
|
121 | stack = [nodeid] | |
122 | seen = set(stack) |
|
122 | seen = set(stack) | |
123 |
|
123 | |||
124 | while stack: |
|
124 | while stack: | |
125 | current = stack.pop() |
|
125 | current = stack.pop() | |
126 | currentpreccs = precursors.get(current, ()) |
|
126 | currentpreccs = precursors.get(current, ()) | |
127 |
|
127 | |||
128 | for prec in currentpreccs: |
|
128 | for prec in currentpreccs: | |
129 | precnodeid = prec[0] |
|
129 | precnodeid = prec[0] | |
130 |
|
130 | |||
131 | # Basic cycle protection |
|
131 | # Basic cycle protection | |
132 | if precnodeid in seen: |
|
132 | if precnodeid in seen: | |
133 | continue |
|
133 | continue | |
134 | seen.add(precnodeid) |
|
134 | seen.add(precnodeid) | |
135 |
|
135 | |||
136 | if precnodeid in repo: |
|
136 | if precnodeid in repo: | |
137 | yield precnodeid |
|
137 | yield precnodeid | |
138 | else: |
|
138 | else: | |
139 | stack.append(precnodeid) |
|
139 | stack.append(precnodeid) | |
140 |
|
140 | |||
141 | def allpredecessors(obsstore, nodes, ignoreflags=0): |
|
141 | def allpredecessors(obsstore, nodes, ignoreflags=0): | |
142 | """Yield node for every precursors of <nodes>. |
|
142 | """Yield node for every precursors of <nodes>. | |
143 |
|
143 | |||
144 | Some precursors may be unknown locally. |
|
144 | Some precursors may be unknown locally. | |
145 |
|
145 | |||
146 | This is a linear yield unsuited to detecting folded changesets. It includes |
|
146 | This is a linear yield unsuited to detecting folded changesets. It includes | |
147 | initial nodes too.""" |
|
147 | initial nodes too.""" | |
148 |
|
148 | |||
149 | remaining = set(nodes) |
|
149 | remaining = set(nodes) | |
150 | seen = set(remaining) |
|
150 | seen = set(remaining) | |
151 | while remaining: |
|
151 | while remaining: | |
152 | current = remaining.pop() |
|
152 | current = remaining.pop() | |
153 | yield current |
|
153 | yield current | |
154 | for mark in obsstore.predecessors.get(current, ()): |
|
154 | for mark in obsstore.predecessors.get(current, ()): | |
155 | # ignore marker flagged with specified flag |
|
155 | # ignore marker flagged with specified flag | |
156 | if mark[2] & ignoreflags: |
|
156 | if mark[2] & ignoreflags: | |
157 | continue |
|
157 | continue | |
158 | prec = mark[0] |

158 | prec = mark[0] | |
159 | if prec not in seen: |

159 | if prec not in seen: | |
160 | seen.add(prec) |

160 | seen.add(prec) | |
161 | remaining.add(prec) |

161 | remaining.add(prec) | |
162 |
|
162 | |||
163 | def allsuccessors(obsstore, nodes, ignoreflags=0): |
|
163 | def allsuccessors(obsstore, nodes, ignoreflags=0): | |
164 | """Yield node for every successor of <nodes>. |
|
164 | """Yield node for every successor of <nodes>. | |
165 |
|
165 | |||
166 | Some successors may be unknown locally. |
|
166 | Some successors may be unknown locally. | |
167 |
|
167 | |||
168 | This is a linear yield unsuited to detecting split changesets. It includes |
|
168 | This is a linear yield unsuited to detecting split changesets. It includes | |
169 | initial nodes too.""" |
|
169 | initial nodes too.""" | |
170 | remaining = set(nodes) |
|
170 | remaining = set(nodes) | |
171 | seen = set(remaining) |
|
171 | seen = set(remaining) | |
172 | while remaining: |
|
172 | while remaining: | |
173 | current = remaining.pop() |
|
173 | current = remaining.pop() | |
174 | yield current |
|
174 | yield current | |
175 | for mark in obsstore.successors.get(current, ()): |
|
175 | for mark in obsstore.successors.get(current, ()): | |
176 | # ignore marker flagged with specified flag |
|
176 | # ignore marker flagged with specified flag | |
177 | if mark[2] & ignoreflags: |
|
177 | if mark[2] & ignoreflags: | |
178 | continue |
|
178 | continue | |
179 | for suc in mark[1]: |
|
179 | for suc in mark[1]: | |
180 | if suc not in seen: |
|
180 | if suc not in seen: | |
181 | seen.add(suc) |
|
181 | seen.add(suc) | |
182 | remaining.add(suc) |
|
182 | remaining.add(suc) | |
183 |
|
183 | |||
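
A self-contained exercise of the successor walk, with a dict-backed stand-in
for the obsstore index and markers reduced to (pred, succs, flags) triples;
the flag value 1 plays the role of bumpedfix from earlier in this file:

    class ToyStore(object):
        # successors: predecessor node -> markers rewriting it
        successors = {
            'A': [('A', ('B',), 0)],
            'B': [('B', ('C',), 1)],     # flagged marker
        }

    assert set(allsuccessors(ToyStore(), ['A'])) == {'A', 'B', 'C'}
    # markers carrying an ignored flag are not followed
    assert set(allsuccessors(ToyStore(), ['A'], ignoreflags=1)) == {'A', 'B'}
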
184 | def _filterprunes(markers): |
|
184 | def _filterprunes(markers): | |
185 | """return a set with no prune markers""" |
|
185 | """return a set with no prune markers""" | |
186 | return set(m for m in markers if m[1]) |
|
186 | return set(m for m in markers if m[1]) | |
187 |
|
187 | |||
188 | def exclusivemarkers(repo, nodes): |
|
188 | def exclusivemarkers(repo, nodes): | |
189 | """set of markers relevant to "nodes" but no other locally-known nodes |
|
189 | """set of markers relevant to "nodes" but no other locally-known nodes | |
190 |
|
190 | |||
191 | This function computes the set of markers "exclusive" to a locally-known |

191 | This function computes the set of markers "exclusive" to a locally-known | |
192 | node. This means we walk the markers starting from <nodes> until we reach a |

192 | node. This means we walk the markers starting from <nodes> until we reach a | |
193 | locally-known precursor outside of <nodes>. Elements of <nodes> with |

193 | locally-known precursor outside of <nodes>. Elements of <nodes> with | |
194 | locally-known successors outside of <nodes> are ignored (since their |

194 | locally-known successors outside of <nodes> are ignored (since their | |
195 | precursor markers are also relevant to these successors). |

195 | precursor markers are also relevant to these successors). | |
196 |
|
196 | |||
197 | For example: |
|
197 | For example: | |
198 |
|
198 | |||
199 | # (A0 rewritten as A1) |
|
199 | # (A0 rewritten as A1) | |
200 | # |
|
200 | # | |
201 | # A0 <-1- A1 # Marker "1" is exclusive to A1 |
|
201 | # A0 <-1- A1 # Marker "1" is exclusive to A1 | |
202 |
|
202 | |||
203 | or |
|
203 | or | |
204 |
|
204 | |||
205 | # (A0 rewritten as AX; AX rewritten as A1; AX is unknown locally) |

205 | # (A0 rewritten as AX; AX rewritten as A1; AX is unknown locally) | |
206 | # |
|
206 | # | |
207 | # <-1- A0 <-2- AX <-3- A1 # Marker "2,3" are exclusive to A1 |
|
207 | # <-1- A0 <-2- AX <-3- A1 # Marker "2,3" are exclusive to A1 | |
208 |
|
208 | |||
209 | or |
|
209 | or | |
210 |
|
210 | |||
211 | # (A0 has unknown precursors, A0 rewritten as A1 and A2 (divergence)) |
|
211 | # (A0 has unknown precursors, A0 rewritten as A1 and A2 (divergence)) | |
212 | # |
|
212 | # | |
213 | # <-2- A1 # Marker "2" is exclusive to A0,A1 |
|
213 | # <-2- A1 # Marker "2" is exclusive to A0,A1 | |
214 | # / |
|
214 | # / | |
215 | # <-1- A0 |
|
215 | # <-1- A0 | |
216 | # \ |
|
216 | # \ | |
217 | # <-3- A2 # Marker "3" is exclusive to A0,A2 |
|
217 | # <-3- A2 # Marker "3" is exclusive to A0,A2 | |
218 | # |
|
218 | # | |
219 | # in addition: |
|
219 | # in addition: | |
220 | # |
|
220 | # | |
221 | # Markers "2,3" are exclusive to A1,A2 |
|
221 | # Markers "2,3" are exclusive to A1,A2 | |
222 | # Markers "1,2,3" are exclusive to A0,A1,A2 |
|
222 | # Markers "1,2,3" are exclusive to A0,A1,A2 | |
223 |
|
223 | |||
224 | See test/test-obsolete-bundle-strip.t for more examples. |
|
224 | See test/test-obsolete-bundle-strip.t for more examples. | |
225 |
|
225 | |||
226 | An example usage is strip. When stripping a changeset, we also want to |
|
226 | An example usage is strip. When stripping a changeset, we also want to | |
227 | strip the markers exclusive to this changeset. Otherwise we would have |
|
227 | strip the markers exclusive to this changeset. Otherwise we would have | |
228 | "dangling"" obsolescence markers from its precursors: Obsolescence markers |
|
228 | "dangling"" obsolescence markers from its precursors: Obsolescence markers | |
229 | marking a node as obsolete without any successors available locally. |
|
229 | marking a node as obsolete without any successors available locally. | |
230 |
|
230 | |||
231 | As for relevant markers, the prune markers for children will be followed. |
|
231 | As for relevant markers, the prune markers for children will be followed. | |
232 | Of course, they will only be followed if the pruned child is |

232 | Of course, they will only be followed if the pruned child is | |
233 | locally-known, since the prune markers are relevant to the pruned node. |

233 | locally-known, since the prune markers are relevant to the pruned node. | |
234 | However, while prune markers are considered relevant to the parent of the |

234 | However, while prune markers are considered relevant to the parent of the | |
235 | pruned changesets, prune markers for a locally-known changeset (with no |

235 | pruned changesets, prune markers for a locally-known changeset (with no | |
236 | successors) are considered exclusive to the pruned nodes. This allows us |

236 | successors) are considered exclusive to the pruned nodes. This allows us | |
237 | to strip the prune markers (with the rest of the exclusive chain) alongside |

237 | to strip the prune markers (with the rest of the exclusive chain) alongside | |
238 | the pruned changesets. |
|
238 | the pruned changesets. | |
239 | """ |
|
239 | """ | |
240 | # running on a filtered repository would be dangerous as markers could be |
|
240 | # running on a filtered repository would be dangerous as markers could be | |
241 | # reported as exclusive when they are relevant for other filtered nodes. |
|
241 | # reported as exclusive when they are relevant for other filtered nodes. | |
242 | unfi = repo.unfiltered() |
|
242 | unfi = repo.unfiltered() | |
243 |
|
243 | |||
244 | # shortcut to various useful items |

244 | # shortcut to various useful items | |
245 | nm = unfi.changelog.nodemap |
|
245 | nm = unfi.changelog.nodemap | |
246 | precursorsmarkers = unfi.obsstore.predecessors |
|
246 | precursorsmarkers = unfi.obsstore.predecessors | |
247 | successormarkers = unfi.obsstore.successors |
|
247 | successormarkers = unfi.obsstore.successors | |
248 | childrenmarkers = unfi.obsstore.children |
|
248 | childrenmarkers = unfi.obsstore.children | |
249 |
|
249 | |||
250 | # exclusive markers (return of the function) |
|
250 | # exclusive markers (return of the function) | |
251 | exclmarkers = set() |
|
251 | exclmarkers = set() | |
252 | # we need fast membership testing |
|
252 | # we need fast membership testing | |
253 | nodes = set(nodes) |
|
253 | nodes = set(nodes) | |
254 | # looking for head in the obshistory |
|
254 | # looking for head in the obshistory | |
255 | # |
|
255 | # | |
256 | # XXX we are ignoring all issues with cycles for now. |

256 | # XXX we are ignoring all issues with cycles for now. | |
257 | stack = [n for n in nodes if not _filterprunes(successormarkers.get(n, ()))] |
|
257 | stack = [n for n in nodes if not _filterprunes(successormarkers.get(n, ()))] | |
258 | stack.sort() |
|
258 | stack.sort() | |
259 | # nodes already stacked |
|
259 | # nodes already stacked | |
260 | seennodes = set(stack) |
|
260 | seennodes = set(stack) | |
261 | while stack: |
|
261 | while stack: | |
262 | current = stack.pop() |
|
262 | current = stack.pop() | |
263 | # fetch precursors markers |
|
263 | # fetch precursors markers | |
264 | markers = list(precursorsmarkers.get(current, ())) |
|
264 | markers = list(precursorsmarkers.get(current, ())) | |
265 | # extend the list with prune markers |
|
265 | # extend the list with prune markers | |
266 | for mark in successormarkers.get(current, ()): |
|
266 | for mark in successormarkers.get(current, ()): | |
267 | if not mark[1]: |
|
267 | if not mark[1]: | |
268 | markers.append(mark) |
|
268 | markers.append(mark) | |
269 | # and markers from children (looking for prune) |
|
269 | # and markers from children (looking for prune) | |
270 | for mark in childrenmarkers.get(current, ()): |
|
270 | for mark in childrenmarkers.get(current, ()): | |
271 | if not mark[1]: |
|
271 | if not mark[1]: | |
272 | markers.append(mark) |
|
272 | markers.append(mark) | |
273 | # traverse the markers |
|
273 | # traverse the markers | |
274 | for mark in markers: |
|
274 | for mark in markers: | |
275 | if mark in exclmarkers: |
|
275 | if mark in exclmarkers: | |
276 | # markers already selected |
|
276 | # markers already selected | |
277 | continue |
|
277 | continue | |
278 |
|
278 | |||
279 | # If the marker is about the current node, select it |

279 | # If the marker is about the current node, select it | |
280 | # |

280 | # | |
281 | # (this delays the addition of markers from children) |

281 | # (this delays the addition of markers from children) | |
282 | if mark[1] or mark[0] == current: |
|
282 | if mark[1] or mark[0] == current: | |
283 | exclmarkers.add(mark) |
|
283 | exclmarkers.add(mark) | |
284 |
|
284 | |||
285 | # should we keep traversing through the precursors? |
|
285 | # should we keep traversing through the precursors? | |
286 | prec = mark[0] |
|
286 | prec = mark[0] | |
287 |
|
287 | |||
288 | # nodes in the stack or already processed |
|
288 | # nodes in the stack or already processed | |
289 | if prec in seennodes: |
|
289 | if prec in seennodes: | |
290 | continue |
|
290 | continue | |
291 |
|
291 | |||
292 | # is this a locally-known node? |

292 | # is this a locally-known node? | |
293 | known = prec in nm |
|
293 | known = prec in nm | |
294 | # if locally-known and not in the <nodes> set, the traversal |

294 | # if locally-known and not in the <nodes> set, the traversal | |
295 | # stops here. |

295 | # stops here. | |
296 | if known and prec not in nodes: |
|
296 | if known and prec not in nodes: | |
297 | continue |
|
297 | continue | |
298 |
|
298 | |||
299 | # do not keep going if there are unselected markers pointing to this |
|
299 | # do not keep going if there are unselected markers pointing to this | |
300 | # node. If we end up traversing these unselected markers later the |

300 | # node. If we end up traversing these unselected markers later the | |
301 | # node will be taken care of at that point. |
|
301 | # node will be taken care of at that point. | |
302 | precmarkers = _filterprunes(successormarkers.get(prec)) |
|
302 | precmarkers = _filterprunes(successormarkers.get(prec)) | |
303 | if precmarkers.issubset(exclmarkers): |
|
303 | if precmarkers.issubset(exclmarkers): | |
304 | seennodes.add(prec) |
|
304 | seennodes.add(prec) | |
305 | stack.append(prec) |
|
305 | stack.append(prec) | |
306 |
|
306 | |||
307 | return exclmarkers |
|
307 | return exclmarkers | |
308 |
|
308 | |||
309 | def foreground(repo, nodes): |
|
309 | def foreground(repo, nodes): | |
310 | """return all nodes in the "foreground" of other node |
|
310 | """return all nodes in the "foreground" of other node | |
311 |
|
311 | |||
312 | The foreground of a revision is anything reachable using parent -> children |
|
312 | The foreground of a revision is anything reachable using parent -> children | |
313 | or precursor -> successor relation. It is very similar to "descendant" but |
|
313 | or precursor -> successor relation. It is very similar to "descendant" but | |
314 | augmented with obsolescence information. |
|
314 | augmented with obsolescence information. | |
315 |
|
315 | |||
316 | Beware that obsolescence cycles may arise in complex situations. |

316 | Beware that obsolescence cycles may arise in complex situations. | |
317 | """ |
|
317 | """ | |
318 | repo = repo.unfiltered() |
|
318 | repo = repo.unfiltered() | |
319 | foreground = set(repo.set('%ln::', nodes)) |
|
319 | foreground = set(repo.set('%ln::', nodes)) | |
320 | if repo.obsstore: |
|
320 | if repo.obsstore: | |
321 | # We only need this complicated logic if there is obsolescence |
|
321 | # We only need this complicated logic if there is obsolescence | |
322 | # XXX will probably deserve an optimised revset. |
|
322 | # XXX will probably deserve an optimised revset. | |
323 | nm = repo.changelog.nodemap |
|
323 | nm = repo.changelog.nodemap | |
324 | plen = -1 |
|
324 | plen = -1 | |
325 | # compute the whole set of successors or descendants |
|
325 | # compute the whole set of successors or descendants | |
326 | while len(foreground) != plen: |
|
326 | while len(foreground) != plen: | |
327 | plen = len(foreground) |
|
327 | plen = len(foreground) | |
328 | succs = set(c.node() for c in foreground) |
|
328 | succs = set(c.node() for c in foreground) | |
329 | mutable = [c.node() for c in foreground if c.mutable()] |
|
329 | mutable = [c.node() for c in foreground if c.mutable()] | |
330 | succs.update(allsuccessors(repo.obsstore, mutable)) |
|
330 | succs.update(allsuccessors(repo.obsstore, mutable)) | |
331 | known = (n for n in succs if n in nm) |
|
331 | known = (n for n in succs if n in nm) | |
332 | foreground = set(repo.set('%ln::', known)) |
|
332 | foreground = set(repo.set('%ln::', known)) | |
333 | return set(c.node() for c in foreground) |
|
333 | return set(c.node() for c in foreground) | |
334 |
|
334 | |||
335 | # effectflag field |
|
335 | # effectflag field | |
336 | # |
|
336 | # | |
337 | # Effect-flag is a 1-byte bit field used to store what changed between a |
|
337 | # Effect-flag is a 1-byte bit field used to store what changed between a | |
338 | # changeset and its successor(s). |
|
338 | # changeset and its successor(s). | |
339 | # |
|
339 | # | |
340 | # The effect flag is stored in obs-markers metadata while we iterate on the |
|
340 | # The effect flag is stored in obs-markers metadata while we iterate on the | |
341 | # information design. That's why we have the EFFECTFLAGFIELD. If we come up |
|
341 | # information design. That's why we have the EFFECTFLAGFIELD. If we come up | |
342 | # with an incompatible design for effect flag, we can store a new design under |
|
342 | # with an incompatible design for effect flag, we can store a new design under | |
343 | # another field name so we don't break readers. We plan to extend the existing |
|
343 | # another field name so we don't break readers. We plan to extend the existing | |
344 | # obsmarkers bit-field when the effect flag design is stabilized. |

344 | # obsmarkers bit-field when the effect flag design is stabilized. | |
345 | # |
|
345 | # | |
346 | # The effect-flag is placed behind an experimental flag |
|
346 | # The effect-flag is placed behind an experimental flag | |
347 | # `effect-flags` set to off by default. |
|
347 | # `effect-flags` set to off by default. | |
348 | # |
|
348 | # | |
349 |
|
349 | |||
350 | EFFECTFLAGFIELD = "ef1" |
|
350 | EFFECTFLAGFIELD = "ef1" | |
351 |
|
351 | |||
352 | DESCCHANGED = 1 << 0 # action changed the description |
|
352 | DESCCHANGED = 1 << 0 # action changed the description | |
353 | METACHANGED = 1 << 1 # action changed the meta |

353 | METACHANGED = 1 << 1 # action changed the meta | |
354 | DIFFCHANGED = 1 << 3 # action changed the diff introduced by the changeset |

354 | DIFFCHANGED = 1 << 3 # action changed the diff introduced by the changeset | |
355 | PARENTCHANGED = 1 << 2 # action changed the parent |

355 | PARENTCHANGED = 1 << 2 # action changed the parent | |
356 | USERCHANGED = 1 << 4 # the user changed |
|
356 | USERCHANGED = 1 << 4 # the user changed | |
357 | DATECHANGED = 1 << 5 # the date changed |
|
357 | DATECHANGED = 1 << 5 # the date changed | |
358 | BRANCHCHANGED = 1 << 6 # the branch changed |
|
358 | BRANCHCHANGED = 1 << 6 # the branch changed | |
359 |
|
359 | |||
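
Decoding a stored ef1 value back into its components is plain bit testing;
for instance (the value 49 is chosen arbitrarily):

    ef1 = 49                     # as parsed back from marker metadata
    assert ef1 == DESCCHANGED | USERCHANGED | DATECHANGED

    names = [('description', DESCCHANGED), ('meta', METACHANGED),
             ('diff', DIFFCHANGED), ('parents', PARENTCHANGED),
             ('user', USERCHANGED), ('date', DATECHANGED),
             ('branch', BRANCHCHANGED)]
    assert [n for n, bit in names if ef1 & bit] == ['description', 'user', 'date']
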
360 | METABLACKLIST = [ |
|
360 | METABLACKLIST = [ | |
361 | re.compile('^branch$'), |
|
361 | re.compile('^branch$'), | |
362 | re.compile('^.*-source$'), |
|
362 | re.compile('^.*-source$'), | |
363 | re.compile('^.*_source$'), |
|
363 | re.compile('^.*_source$'), | |
364 | re.compile('^source$'), |
|
364 | re.compile('^source$'), | |
365 | ] |
|
365 | ] | |
366 |
|
366 | |||
367 | def metanotblacklisted(metaitem): |
|
367 | def metanotblacklisted(metaitem): | |
368 | """ Check that the key of a meta item (extrakey, extravalue) does not |
|
368 | """ Check that the key of a meta item (extrakey, extravalue) does not | |
369 | match at least one of the blacklist pattern |
|
369 | match at least one of the blacklist pattern | |
370 | """ |
|
370 | """ | |
371 | metakey = metaitem[0] |
|
371 | metakey = metaitem[0] | |
372 |
|
372 | |||
373 | return not any(pattern.match(metakey) for pattern in METABLACKLIST) |
|
373 | return not any(pattern.match(metakey) for pattern in METABLACKLIST) | |
374 |
|
374 | |||
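
The blacklist drops rewrite bookkeeping keys while keeping ordinary extras; a
quick check against the patterns above (sample keys made up):

    extras = [('rebase_source', 'abc123'),   # matches ^.*_source$ -> dropped
              ('branch', 'default'),         # matches ^branch$    -> dropped
              ('topic', 'feature-x')]        # matches nothing     -> kept

    assert list(filter(metanotblacklisted, extras)) == [('topic', 'feature-x')]
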
375 | def _prepare_hunk(hunk): |
|
375 | def _prepare_hunk(hunk): | |
376 | """Drop all information but the username and patch""" |
|
376 | """Drop all information but the username and patch""" | |
377 | cleanhunk = [] |
|
377 | cleanhunk = [] | |
378 | for line in hunk.splitlines(): |
|
378 | for line in hunk.splitlines(): | |
379 | if line.startswith(b'# User') or not line.startswith(b'#'): |
|
379 | if line.startswith(b'# User') or not line.startswith(b'#'): | |
380 | if line.startswith(b'@@'): |
|
380 | if line.startswith(b'@@'): | |
381 | line = b'@@\n' |
|
381 | line = b'@@\n' | |
382 | cleanhunk.append(line) |
|
382 | cleanhunk.append(line) | |
383 | return cleanhunk |
|
383 | return cleanhunk | |
384 |
|
384 | |||
385 | def _getdifflines(iterdiff): |
|
385 | def _getdifflines(iterdiff): | |
386 | """return a cleaned up lines""" |
|
386 | """return a cleaned up lines""" | |
387 | lines = next(iterdiff, None) |
|
387 | lines = next(iterdiff, None) | |
388 |
|
388 | |||
389 | if lines is None: |
|
389 | if lines is None: | |
390 | return lines |
|
390 | return lines | |
391 |
|
391 | |||
392 | return _prepare_hunk(lines) |
|
392 | return _prepare_hunk(lines) | |
393 |
|
393 | |||
394 | def _cmpdiff(leftctx, rightctx): |
|
394 | def _cmpdiff(leftctx, rightctx): | |
395 | """return True if both ctx introduce the "same diff" |
|
395 | """return True if both ctx introduce the "same diff" | |
396 |
|
396 | |||
397 | This is a first and basic implementation, with many shortcomings. |

397 | This is a first and basic implementation, with many shortcomings. | |
398 | """ |
|
398 | """ | |
399 | diffopts = diffutil.diffallopts(leftctx.repo().ui, {'git': True}) |
|
399 | diffopts = diffutil.diffallopts(leftctx.repo().ui, {'git': True}) | |
400 | # Leftctx or right ctx might be filtered, so we need to use the contexts |
|
400 | # Leftctx or right ctx might be filtered, so we need to use the contexts | |
401 | # with an unfiltered repository to safely compute the diff |
|
401 | # with an unfiltered repository to safely compute the diff | |
402 | leftunfi = leftctx._repo.unfiltered()[leftctx.rev()] |
|
402 | leftunfi = leftctx._repo.unfiltered()[leftctx.rev()] | |
403 | leftdiff = leftunfi.diff(opts=diffopts) |
|
403 | leftdiff = leftunfi.diff(opts=diffopts) | |
404 | rightunfi = rightctx._repo.unfiltered()[rightctx.rev()] |
|
404 | rightunfi = rightctx._repo.unfiltered()[rightctx.rev()] | |
405 | rightdiff = rightunfi.diff(opts=diffopts) |
|
405 | rightdiff = rightunfi.diff(opts=diffopts) | |
406 |
|
406 | |||
407 | left, right = (0, 0) |
|
407 | left, right = (0, 0) | |
408 | while None not in (left, right): |
|
408 | while None not in (left, right): | |
409 | left = _getdifflines(leftdiff) |
|
409 | left = _getdifflines(leftdiff) | |
410 | right = _getdifflines(rightdiff) |
|
410 | right = _getdifflines(rightdiff) | |
411 |
|
411 | |||
412 | if left != right: |
|
412 | if left != right: | |
413 | return False |
|
413 | return False | |
414 | return True |
|
414 | return True | |
415 |
|
415 | |||
416 | def geteffectflag(relation): |
|
416 | def geteffectflag(source, successors): | |
417 | """ From an obs-marker relation, compute what changed between the |
|
417 | """ From an obs-marker relation, compute what changed between the | |
418 | predecessor and the successor. |
|
418 | predecessor and the successor. | |
419 | """ |
|
419 | """ | |
420 | effects = 0 |
|
420 | effects = 0 | |
421 |
|
421 | |||
422 | source = relation[0] |
|
422 | for changectx in successors: | |
423 |
|
||||
424 | for changectx in relation[1]: |
|
|||
425 | # Check if description has changed |
|
423 | # Check if description has changed | |
426 | if changectx.description() != source.description(): |
|
424 | if changectx.description() != source.description(): | |
427 | effects |= DESCCHANGED |
|
425 | effects |= DESCCHANGED | |
428 |
|
426 | |||
429 | # Check if user has changed |
|
427 | # Check if user has changed | |
430 | if changectx.user() != source.user(): |
|
428 | if changectx.user() != source.user(): | |
431 | effects |= USERCHANGED |
|
429 | effects |= USERCHANGED | |
432 |
|
430 | |||
433 | # Check if date has changed |
|
431 | # Check if date has changed | |
434 | if changectx.date() != source.date(): |
|
432 | if changectx.date() != source.date(): | |
435 | effects |= DATECHANGED |
|
433 | effects |= DATECHANGED | |
436 |
|
434 | |||
437 | # Check if branch has changed |
|
435 | # Check if branch has changed | |
438 | if changectx.branch() != source.branch(): |
|
436 | if changectx.branch() != source.branch(): | |
439 | effects |= BRANCHCHANGED |
|
437 | effects |= BRANCHCHANGED | |
440 |
|
438 | |||
441 | # Check if at least one of the parent has changed |
|
439 | # Check if at least one of the parent has changed | |
442 | if changectx.parents() != source.parents(): |
|
440 | if changectx.parents() != source.parents(): | |
443 | effects |= PARENTCHANGED |
|
441 | effects |= PARENTCHANGED | |
444 |
|
442 | |||
445 | # Check if other meta has changed |
|
443 | # Check if other meta has changed | |
446 | changeextra = changectx.extra().items() |
|
444 | changeextra = changectx.extra().items() | |
447 | ctxmeta = list(filter(metanotblacklisted, changeextra)) |
|
445 | ctxmeta = list(filter(metanotblacklisted, changeextra)) | |
448 |
|
446 | |||
449 | sourceextra = source.extra().items() |
|
447 | sourceextra = source.extra().items() | |
450 | srcmeta = list(filter(metanotblacklisted, sourceextra)) |
|
448 | srcmeta = list(filter(metanotblacklisted, sourceextra)) | |
451 |
|
449 | |||
452 | if ctxmeta != srcmeta: |
|
450 | if ctxmeta != srcmeta: | |
453 | effects |= METACHANGED |
|
451 | effects |= METACHANGED | |
454 |
|
452 | |||
455 | # Check if the diff has changed |
|
453 | # Check if the diff has changed | |
456 | if not _cmpdiff(source, changectx): |
|
454 | if not _cmpdiff(source, changectx): | |
457 | effects |= DIFFCHANGED |
|
455 | effects |= DIFFCHANGED | |
458 |
|
456 | |||
459 | return effects |
|
457 | return effects | |
460 |
|
458 | |||
def getobsoleted(repo, tr):
    """return the set of pre-existing revisions obsoleted by a transaction"""
    torev = repo.unfiltered().changelog.nodemap.get
    phase = repo._phasecache.phase
    succsmarkers = repo.obsstore.successors.get
    public = phases.public
    addedmarkers = tr.changes['obsmarkers']
    origrepolen = tr.changes['origrepolen']
    seenrevs = set()
    obsoleted = set()
    for mark in addedmarkers:
        node = mark[0]
        rev = torev(node)
        if rev is None or rev in seenrevs or rev >= origrepolen:
            continue
        seenrevs.add(rev)
        if phase(repo, rev) == public:
            continue
        if set(succsmarkers(node) or []).issubset(addedmarkers):
            obsoleted.add(rev)
    return obsoleted

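A toy model of the subset test above, with markers reduced to plain (predecessor, successors) pairs rather than Mercurial's real marker tuples:

    def newlyobsoleted(allmarkers, addedmarkers, node):
        # A revision counts as newly obsoleted only if *every* marker using
        # it as a predecessor was added by the current transaction.
        nodemarkers = set(m for m in allmarkers if m[0] == node)
        return bool(nodemarkers) and nodemarkers.issubset(addedmarkers)

    old = ('A', ('B',))   # marker that pre-dates the transaction
    new = ('A', ('C',))   # marker added by the transaction
    assert not newlyobsoleted({old, new}, {new}, 'A')  # 'A' was already obsolete
    assert newlyobsoleted({new}, {new}, 'A')           # 'A' just became obsolete
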
class _succs(list):
    """small class to represent successors with some metadata about them"""

    def __init__(self, *args, **kwargs):
        super(_succs, self).__init__(*args, **kwargs)
        self.markers = set()

    def copy(self):
        new = _succs(self)
        new.markers = self.markers.copy()
        return new

    @util.propertycache
    def _set(self):
        # immutable
        return set(self)

    def canmerge(self, other):
        return self._set.issubset(other._set)

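Assuming the class above is in scope, `canmerge()` reads as follows: a smaller successors set folds into a larger one that contains all of its members, but not the other way around:

    a = _succs(['A'])
    ab = _succs(['A', 'B'])
    assert a.canmerge(ab)        # ['A'] is subsumed by ['A', 'B']
    assert not ab.canmerge(a)    # but not the other way around
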
def successorssets(repo, initialnode, closest=False, cache=None):
    """Return set of all latest successors of initial nodes

    The successors set of a changeset A is the group of revisions that succeed
    A. It succeeds A as a consistent whole, each revision being only a partial
    replacement. By default, the successors set contains non-obsolete
    changesets only, walking the obsolescence graph until reaching a leaf. If
    'closest' is set to True, closest successors-sets are returned (the
    obsolescence walk stops on known changesets).

    This function returns the full list of successor sets, which is why it
    returns a list of tuples and not just a single tuple. Each tuple is a valid
    successors set. Note that (A,) may be a valid successors set for changeset A
    (see below).

    In most cases, a changeset A will have a single element (e.g. the changeset
    A is replaced by A') in its successors set. Though, it is also common for a
    changeset A to have no elements in its successor set (e.g. the changeset
    has been pruned). Therefore, the returned list of successors sets will be
    [(A',)] or [], respectively.

    When a changeset A is split into A' and B', however, it will result in a
    successors set containing more than a single element, i.e. [(A',B')].
    Divergent changesets will result in multiple successors sets, i.e. [(A',),
    (A'')].

    If a changeset A is not obsolete, then it will conceptually have no
    successors set. To distinguish this from a pruned changeset, the successor
    set will contain itself only, i.e. [(A,)].

    Finally, final successors unknown locally are considered to be pruned
    (pruned: obsoleted without any successors). (Final: successors not affected
    by markers).

    The 'closest' mode respects the repoview filtering. For example, without
    any filter it will stop at the first locally known changeset; with the
    'visible' filter it will stop on visible changesets.

    The optional `cache` parameter is a dictionary that may contain
    precomputed successors sets. It is meant to reuse the computation of a
    previous call to `successorssets` when multiple calls are made at the same
    time. The cache dictionary is updated in place. The caller is responsible
    for its life span. Code that makes multiple calls to `successorssets`
    *should* use this cache mechanism or risk a performance hit.

    Since results differ depending on the 'closest' mode, the same cache
    cannot be reused for both modes.
    """

    succmarkers = repo.obsstore.successors

    # Stack of nodes we search successors sets for
    toproceed = [initialnode]
    # set version of above list for fast loop detection
    # elements added to "toproceed" must be added here
    stackedset = set(toproceed)
    if cache is None:
        cache = {}

    # This while loop is the flattened version of a recursive search for
    # successors sets
    #
    # def successorssets(x):
    #     successors = directsuccessors(x)
    #     ss = [[]]
    #     for succ in directsuccessors(x):
    #         # product as in itertools cartesian product
    #         ss = product(ss, successorssets(succ))
    #     return ss
    #
    # But we can not use plain recursive calls here:
    # - that would blow the python call stack
    # - obsolescence markers may have cycles, we need to handle them.
    #
    # The `toproceed` list acts as our call stack. Every node we search
    # successors sets for is stacked there.
    #
    # The `stackedset` is a set version of this stack used to check if a node
    # is already stacked. This check is used to detect cycles and prevent
    # infinite loops.
    #
    # successors sets of all nodes are stored in the `cache` dictionary.
    #
    # After this while loop ends we use the cache to return the successors
    # sets for the node requested by the caller.
    while toproceed:
        # Every iteration tries to compute the successors sets of the topmost
        # node of the stack: CURRENT.
        #
        # There are four possible outcomes:
        #
        # 1) We already know the successors sets of CURRENT:
        #    -> mission accomplished, pop it from the stack.
        # 2) Stop the walk:
        #    default case: Node is not obsolete
        #    closest case: Node is known at this repo filter level
        #      -> the node is its own successors sets. Add it to the cache.
        # 3) We do not know successors set of direct successors of CURRENT:
        #    -> We add those successors to the stack.
        # 4) We know successors sets of all direct successors of CURRENT:
        #    -> We can compute CURRENT successors sets and add it to the
        #       cache.
        #
        current = toproceed[-1]

        # case 2 condition is a bit hairy because of closest,
        # we compute it on its own
        case2condition = ((current not in succmarkers)
                          or (closest and current != initialnode
                              and current in repo))

        if current in cache:
            # case (1): We already know the successors sets
            stackedset.remove(toproceed.pop())
        elif case2condition:
            # case (2): end of walk.
            if current in repo:
                # We have a valid successor.
                cache[current] = [_succs((current,))]
            else:
                # Final obsolete version is unknown locally.
                # Do not count that as a valid successor.
                cache[current] = []
        else:
            # cases (3) and (4)
            #
            # We proceed in two phases. Phase 1 aims to distinguish case (3)
            # from case (4):
            #
            #     For each direct successor of CURRENT, we check whether its
            #     successors sets are known. If they are not, we stack the
            #     unknown node and proceed to the next iteration of the while
            #     loop. (case 3)
            #
            #     During this step, we may detect obsolescence cycles: a node
            #     with unknown successors sets but already in the call stack.
            #     In such a situation, we arbitrarily set the successors sets
            #     of the node to nothing (node pruned) to break the cycle.
            #
            #     If no break was encountered we proceed to phase 2.
            #
            # Phase 2 computes successors sets of CURRENT (case 4); see details
            # in phase 2 itself.
            #
            # Note the two levels of iteration in each phase.
            # - The first one handles obsolescence markers using CURRENT as
            #   precursor (successors markers of CURRENT).
            #
            #   Having multiple entries here means divergence.
            #
            # - The second one handles successors defined in each marker.
            #
            #   Having none means pruned node, multiple successors means split,
            #   a single successor is a standard replacement.
            #
            for mark in sorted(succmarkers[current]):
                for suc in mark[1]:
                    if suc not in cache:
                        if suc in stackedset:
                            # cycle breaking
                            cache[suc] = []
                        else:
                            # case (3) If we have not computed successors sets
                            # of one of those successors we add it to the
                            # `toproceed` stack and stop all work for this
                            # iteration.
                            toproceed.append(suc)
                            stackedset.add(suc)
                            break
                else:
                    continue
                break
            else:
                # case (4): we know all successors sets of all direct
                # successors
                #
                # Successors set contributed by each marker depends on the
                # successors sets of all its "successors" nodes.
                #
                # Each different marker is a divergence in the obsolescence
                # history. It contributes successors sets distinct from other
                # markers.
                #
                # Within a marker, a successor may have divergent successors
                # sets. In such a case, the marker will contribute multiple
                # divergent successors sets. If multiple successors have
                # divergent successors sets, a Cartesian product is used.
                #
                # At the end we post-process successors sets to remove
                # duplicated entries and successors sets that are strict
                # subsets of another one.
                succssets = []
                for mark in sorted(succmarkers[current]):
                    # successors sets contributed by this marker
                    base = _succs()
                    base.markers.add(mark)
                    markss = [base]
                    for suc in mark[1]:
                        # cartesian product with previous successors
                        productresult = []
                        for prefix in markss:
                            for suffix in cache[suc]:
                                newss = prefix.copy()
                                newss.markers.update(suffix.markers)
                                for part in suffix:
                                    # do not duplicate entries in a successors
                                    # set; first entry wins.
                                    if part not in newss:
                                        newss.append(part)
                                productresult.append(newss)
                        markss = productresult
                    succssets.extend(markss)
                # remove duplicated and subset
                seen = []
                final = []
                candidates = sorted((s for s in succssets if s),
                                    key=len, reverse=True)
                for cand in candidates:
                    for seensuccs in seen:
                        if cand.canmerge(seensuccs):
                            seensuccs.markers.update(cand.markers)
                            break
                    else:
                        final.append(cand)
                        seen.append(cand)
                final.reverse()  # put small successors set first
                cache[current] = final
    return cache[initialnode]

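To make the per-marker divergence and the per-successor Cartesian product concrete, here is a deliberately simplified, standalone model: markers live in a plain dict, the explicit stack is replaced by recursion, and neither cycle breaking nor subset pruning is modelled.

    def toysuccessorssets(markers, node):
        # `markers` maps a predecessor to a list of successor tuples, one
        # tuple per marker. Nodes without markers are treated as known and
        # non-obsolete, i.e. their own (sole) successor.
        if node not in markers:
            return [(node,)]
        result = []
        for succs in markers[node]:        # one entry per marker: divergence
            sets = [()]                    # cartesian product accumulator
            for suc in succs:
                sets = [prefix + suffix
                        for prefix in sets
                        for suffix in toysuccessorssets(markers, suc)]
            result.extend(sets)
        return result

    # A is split into B and C, then C is rewritten into D:
    markers = {'A': [('B', 'C')], 'C': [('D',)]}
    assert toysuccessorssets(markers, 'A') == [('B', 'D')]

    # Divergence: A rewritten both as X and as Y:
    markers = {'A': [('X',), ('Y',)]}
    assert toysuccessorssets(markers, 'A') == [('X',), ('Y',)]
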
def successorsandmarkers(repo, ctx):
    """compute the raw data needed for computing obsfate
    Returns a list of dicts, one dict per successors set
    """
    if not ctx.obsolete():
        return None

    ssets = successorssets(repo, ctx.node(), closest=True)

    # closestsuccessors returns an empty list for pruned revisions, remap it
    # into a list containing an empty list for future processing
    if ssets == []:
        ssets = [[]]

    # Try to recover pruned markers
    succsmap = repo.obsstore.successors
    fullsuccessorsets = []  # successor set + markers
    for sset in ssets:
        if sset:
            fullsuccessorsets.append(sset)
        else:
            # successorssets returns an empty list when ctx or one of its
            # successors is pruned.
            # In this case, walk the obs-markers tree again starting with ctx
            # and find the relevant pruning obs-markers, the ones without
            # successors.
            # Having these markers allows us to compute some information about
            # its fate, like who pruned this changeset and when.

            # XXX we do not catch all prune markers (eg rewritten then pruned)
            # (fix me later)
            foundany = False
            for mark in succsmap.get(ctx.node(), ()):
                if not mark[1]:
                    foundany = True
                    sset = _succs()
                    sset.markers.add(mark)
                    fullsuccessorsets.append(sset)
            if not foundany:
                fullsuccessorsets.append(_succs())

    values = []
    for sset in fullsuccessorsets:
        values.append({'successors': sset, 'markers': sset.markers})

    return values

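A possible consumer of this raw data, sketched under the assumption that a repo and ctx are at hand; `fateofctx` is a hypothetical helper, not part of this module, and it leans on `obsfateverb` and `markersusers` defined further down:

    def fateofctx(repo, ctx):
        # Map each successors set to its summarizing verb and the users
        # that created the relevant markers.
        data = successorsandmarkers(repo, ctx)
        if data is None:
            return []    # ctx is not obsolete
        return [(obsfateverb(d['successors'], d['markers']),
                 markersusers(d['markers']))
                for d in data]
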
def _getobsfate(successorssets):
    """ Compute a changeset obsolescence fate based on its successorssets.
    Successors can be the tipmost ones or the immediate ones. This function's
    return values are not meant to be shown directly to users; they are meant
    to be used by internal functions only.
    Returns one fate from the following values:
    - pruned
    - diverged
    - superseded
    - superseded_split
    """

    if len(successorssets) == 0:
        # The commit has been pruned
        return 'pruned'
    elif len(successorssets) > 1:
        return 'diverged'
    else:
        # No divergence, only one set of successors
        successors = successorssets[0]

        if len(successors) == 1:
            return 'superseded'
        else:
            return 'superseded_split'

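Because `_getobsfate()` only inspects the shape of its argument, its contract can be pinned down with plain lists (assuming the function above is in scope):

    assert _getobsfate([]) == 'pruned'                        # no successors left
    assert _getobsfate([['A2']]) == 'superseded'              # plain rewrite
    assert _getobsfate([['A2', 'A3']]) == 'superseded_split'  # one set, split
    assert _getobsfate([['A2'], ['A3']]) == 'diverged'        # competing sets
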
def obsfateverb(successorset, markers):
    """ Return the verb summarizing the successorset and potentially using
    information from the markers
    """
    if not successorset:
        verb = 'pruned'
    elif len(successorset) == 1:
        verb = 'rewritten'
    else:
        verb = 'split'
    return verb

def markersdates(markers):
    """returns the list of dates for a list of markers
    """
    return [m[4] for m in markers]

def markersusers(markers):
    """ Returns a sorted list of markers users without duplicates
    """
    markersmeta = [dict(m[3]) for m in markers]
    users = set(encoding.tolocal(meta['user']) for meta in markersmeta
                if meta.get('user'))

    return sorted(users)

def markersoperations(markers):
    """ Returns a sorted list of markers operations without duplicates
    """
    markersmeta = [dict(m[3]) for m in markers]
    operations = set(meta.get('operation') for meta in markersmeta
                     if meta.get('operation'))

    return sorted(operations)

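The helpers above only read fixed positions of the marker tuple: m[3] holds the metadata pairs and m[4] the (timestamp, offset) date. A quick check with placeholder markers trimmed to those fields (`markersusers` is analogous but routes the user name through encoding.tolocal, so it is left out here):

    m1 = ('p1', ('s1',), 0, (('user', 'alice'), ('operation', 'amend')),
          (1500000000.0, 0), None)
    m2 = ('p2', (), 0, (('user', 'bob'), ('operation', 'prune')),
          (1500000100.0, 0), None)

    assert markersdates([m1, m2]) == [(1500000000.0, 0), (1500000100.0, 0)]
    assert markersoperations([m1, m2]) == ['amend', 'prune']
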
def obsfateprinter(ui, repo, successors, markers, formatctx):
    """ Build an obsfate string for a single successorset using all obsfate
    related functions defined in obsutil
    """
    quiet = ui.quiet
    verbose = ui.verbose
    normal = not verbose and not quiet

    line = []

    # Verb
    line.append(obsfateverb(successors, markers))

    # Operations
    operations = markersoperations(markers)
    if operations:
        line.append(" using %s" % ", ".join(operations))

    # Successors
    if successors:
        fmtsuccessors = [formatctx(repo[succ]) for succ in successors]
        line.append(" as %s" % ", ".join(fmtsuccessors))

    # Users
    users = markersusers(markers)
    # Filter out the current user in non-verbose mode to reduce the amount
    # of information
    if not verbose:
        currentuser = ui.username(acceptempty=True)
        if len(users) == 1 and currentuser in users:
            users = None

    if (verbose or normal) and users:
        line.append(" by %s" % ", ".join(users))

    # Date
    dates = markersdates(markers)

    if dates and verbose:
        min_date = min(dates)
        max_date = max(dates)

        if min_date == max_date:
            fmtmin_date = dateutil.datestr(min_date, '%Y-%m-%d %H:%M %1%2')
            line.append(" (at %s)" % fmtmin_date)
        else:
            fmtmin_date = dateutil.datestr(min_date, '%Y-%m-%d %H:%M %1%2')
            fmtmax_date = dateutil.datestr(max_date, '%Y-%m-%d %H:%M %1%2')
            line.append(" (between %s and %s)" % (fmtmin_date, fmtmax_date))

    return "".join(line)

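The function builds the sentence as an ordered list of fragments and joins them only at the end, so each optional piece can be skipped independently. With hypothetical fragments the assembly looks like this:

    parts = ['rewritten', ' using amend', ' as 1a2b3c4d5e6f', ' by alice']
    assert ''.join(parts) == 'rewritten using amend as 1a2b3c4d5e6f by alice'
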
filteredmsgtable = {
    "pruned": _("hidden revision '%s' is pruned"),
    "diverged": _("hidden revision '%s' has diverged"),
    "superseded": _("hidden revision '%s' was rewritten as: %s"),
    "superseded_split": _("hidden revision '%s' was split as: %s"),
    "superseded_split_several": _("hidden revision '%s' was split as: %s and "
                                  "%d more"),
}

def _getfilteredreason(repo, changeid, ctx):
    """return a human-friendly string on why an obsolete changeset is hidden
    """
    successors = successorssets(repo, ctx.node())
    fate = _getobsfate(successors)

    # Be more precise in case the revision is superseded
    if fate == 'pruned':
        return filteredmsgtable['pruned'] % changeid
    elif fate == 'diverged':
        return filteredmsgtable['diverged'] % changeid
    elif fate == 'superseded':
        single_successor = nodemod.short(successors[0][0])
        return filteredmsgtable['superseded'] % (changeid, single_successor)
    elif fate == 'superseded_split':

        succs = []
        for node_id in successors[0]:
            succs.append(nodemod.short(node_id))

        if len(succs) <= 2:
            fmtsuccs = ', '.join(succs)
            return filteredmsgtable['superseded_split'] % (changeid, fmtsuccs)
        else:
            firstsuccessors = ', '.join(succs[:2])
            remainingnumber = len(succs) - 2

            args = (changeid, firstsuccessors, remainingnumber)
            return filteredmsgtable['superseded_split_several'] % args

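How the 'superseded_split_several' template degrades when a split has more than two successors, shown with hypothetical short hashes:

    succs = ['1a2b3c4d5e6f', '2b3c4d5e6f7a', '3c4d5e6f7a8b']
    msg = ("hidden revision '%s' was split as: %s and %d more"
           % ('abcdef012345', ', '.join(succs[:2]), len(succs) - 2))
    assert msg == ("hidden revision 'abcdef012345' was split as: "
                   "1a2b3c4d5e6f, 2b3c4d5e6f7a and 1 more")
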
def divergentsets(repo, ctx):
    """Compute sets of commits divergent with a given one"""
    cache = {}
    base = {}
    for n in allpredecessors(repo.obsstore, [ctx.node()]):
        if n == ctx.node():
            # a node can't be a base for divergence with itself
            continue
        # pass the cache by keyword: the third positional argument of
        # successorssets() is 'closest', not 'cache'
        nsuccsets = successorssets(repo, n, cache=cache)
        for nsuccset in nsuccsets:
            if ctx.node() in nsuccset:
                # we are only interested in *other* successor sets
                continue
            if tuple(nsuccset) in base:
                # we already know the latest base for this divergence
                continue
            base[tuple(nsuccset)] = n
    return [{'divergentnodes': divset, 'commonpredecessor': b}
            for divset, b in base.iteritems()]

def whyunstable(repo, ctx):
    result = []
    if ctx.orphan():
        for parent in ctx.parents():
            kind = None
            if parent.orphan():
                kind = 'orphan'
            elif parent.obsolete():
                kind = 'obsolete'
            if kind is not None:
                result.append({'instability': 'orphan',
                               'reason': '%s parent' % kind,
                               'node': parent.hex()})
    if ctx.phasedivergent():
        predecessors = allpredecessors(repo.obsstore, [ctx.node()],
                                       ignoreflags=bumpedfix)
        immutable = [repo[p] for p in predecessors
                     if p in repo and not repo[p].mutable()]
        for predecessor in immutable:
            result.append({'instability': 'phase-divergent',
                           'reason': 'immutable predecessor',
                           'node': predecessor.hex()})
    if ctx.contentdivergent():
        dsets = divergentsets(repo, ctx)
        for dset in dsets:
            divnodes = [repo[n] for n in dset['divergentnodes']]
            result.append({'instability': 'content-divergent',
                           'divergentnodes': divnodes,
                           'reason': 'predecessor',
                           'node': nodemod.hex(dset['commonpredecessor'])})
    return result

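A small consumer sketch, assuming a repo and ctx are at hand; `instabilitysummary` is a hypothetical helper, not part of this module:

    def instabilitysummary(repo, ctx):
        # Tally the instabilities whyunstable() reports, by kind,
        # e.g. {'orphan': 2, 'content-divergent': 1}.
        counts = {}
        for entry in whyunstable(repo, ctx):
            kind = entry['instability']
            counts[kind] = counts.get(kind, 0) + 1
        return counts
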