@@ -1,1439 +1,1442 @@
# obsolete.py - obsolete markers handling
#
# Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
#                Logilab SA        <contact@logilab.fr>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""Obsolete marker handling

An obsolete marker maps an old changeset to a list of new
changesets. If the list of new changesets is empty, the old changeset
is said to be "killed". Otherwise, the old changeset is being
"replaced" by the new changesets.

Obsolete markers can be used to record and distribute changeset graph
transformations performed by history rewrite operations, and help
building new tools to reconcile conflicting rewrite actions. To
facilitate conflict resolution, markers include various annotations
besides old and new changeset identifiers, such as creation date or
author name.

The old obsoleted changeset is called a "precursor" and possible
replacements are called "successors". Markers that used changeset X as
a precursor are called "successor markers of X" because they hold
information about the successors of X. Markers that use changeset Y as
a successor are called "precursor markers of Y" because they hold
information about the precursors of Y.

Examples:

- When changeset A is replaced by changeset A', one marker is stored:

    (A, (A',))

- When changesets A and B are folded into a new changeset C, two markers are
  stored:

    (A, (C,)) and (B, (C,))

- When changeset A is simply "pruned" from the graph, a marker is created:

    (A, ())

- When changeset A is split into B and C, a single marker is used:

    (A, (B, C))

  We use a single marker to distinguish the "split" case from the "divergence"
  case. If two independent operations rewrite the same changeset A into A' and
  A'', we have an error case: divergent rewriting. We can detect it because
  two markers will be created independently:

  (A, (B,)) and (A, (C,))

Format
------

Markers are stored in an append-only file stored in
'.hg/store/obsstore'.

The file starts with a version header:

- 1 unsigned byte: version number, starting at zero.

The header is followed by the markers. Marker format depends on the version.
See the comment associated with each format for details.

"""
from __future__ import absolute_import

import errno
import struct

from .i18n import _
from . import (
    error,
    node,
    phases,
    policy,
    util,
)

parsers = policy.importmod(r'parsers')

_pack = struct.pack
_unpack = struct.unpack
_calcsize = struct.calcsize
propertycache = util.propertycache

# the obsolete feature is not mature enough to be enabled by default.
# you have to rely on a third party extension to enable this.
_enabled = False

# Options for obsolescence
createmarkersopt = 'createmarkers'
allowunstableopt = 'allowunstable'
exchangeopt = 'exchange'

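# Illustrative sketch, not part of the original module: the precursor ->
# successors pairs from the module docstring, written out for the "split"
# and "divergence" cases.  The single-letter names stand in for real
# 20-byte node identifiers.
def _markershapeexample():
    split = [('A', ('B', 'C'))]                  # one marker, two successors
    divergence = [('A', ('B',)), ('A', ('C',))]  # two competing markers
    return split, divergence
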
def isenabled(repo, option):
    """Returns True if the given repository has the given obsolete option
    enabled.
    """
    result = set(repo.ui.configlist('experimental', 'evolution'))
    if 'all' in result:
        return True

    # For migration purposes, temporarily return true if the config hasn't been
    # set but _enabled is true.
    if len(result) == 0 and _enabled:
        return True

    # createmarkers must be enabled if other options are enabled
    if ((allowunstableopt in result or exchangeopt in result) and
        not createmarkersopt in result):
        raise error.Abort(_("'createmarkers' obsolete option must be enabled "
                            "if other obsolete options are enabled"))

    return option in result
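
# Illustrative sketch, not part of the original module: how the option names
# above relate to the 'experimental.evolution' config list that isenabled()
# reads.  Assuming an hgrc with "[experimental]" and
# "evolution = createmarkers, exchange", marker creation and exchange are on
# while unstable-producing commands stay disabled.
def _evolutionconfigexample():
    configured = set(['createmarkers', 'exchange'])
    return (createmarkersopt in configured,   # True
            allowunstableopt in configured,   # False
            exchangeopt in configured)        # True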

### obsolescence marker flag

## bumpedfix flag
#
# When a changeset A' succeeds a changeset A which became public, we call A'
# "bumped" because it is a successor of a public changeset.
#
#     o A' (bumped)
#     |`:
#     | o A
#     |/
#     o Z
#
# The way to solve this situation is to create a new changeset Ad as a child
# of A. This changeset has the same content as A'. So the diff from A to A'
# is the same as the diff from A to Ad. Ad is marked as a successor of A'.
#
#     o Ad
#     |`:
#     | x A'
#     |'|
#     o | A
#     |/
#     o Z
#
# But by transitivity Ad is also a successor of A. To avoid having Ad marked
# as bumped too, we add the `bumpedfix` flag to the marker <A', (Ad,)>.
# This flag means that the successor expresses the changes between the public
# and bumped versions and fixes the situation, breaking the transitivity of
# "bumped" here.
bumpedfix = 1
usingsha256 = 2

## Parsing and writing of version "0"
#
# The header is followed by the markers. Each marker is made of:
#
# - 1 uint8 : number of new changesets "N", can be zero.
#
# - 1 uint32: metadata size "M" in bytes.
#
# - 1 byte: a bit field. It is reserved for flags used in common
#   obsolete marker operations, to avoid repeated decoding of metadata
#   entries.
#
# - 20 bytes: obsoleted changeset identifier.
#
# - N*20 bytes: new changesets identifiers.
#
# - M bytes: metadata as a sequence of nul-terminated strings. Each
#   string contains a key and a value, separated by a colon ':', without
#   additional encoding. Keys cannot contain '\0' or ':' and values
#   cannot contain '\0'.
_fm0version = 0
_fm0fixed = '>BIB20s'
_fm0node = '20s'
_fm0fsize = _calcsize(_fm0fixed)
_fm0fnodesize = _calcsize(_fm0node)

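# Illustrative sketch, not part of the original module: packing a minimal
# version-0 marker by hand, following the fixed-part layout documented above
# (numsuc, metadata size, flags, precursor) with the raw metadata appended.
# The node value is a made-up 20-byte identifier.
def _fm0layoutexample():
    samplenode = '\x01' * 20
    samplemeta = 'user:alice'
    return _pack(_fm0fixed, 0, len(samplemeta), 0, samplenode) + samplemeta
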
def _fm0readmarkers(data, off):
    # Loop on markers
    l = len(data)
    while off + _fm0fsize <= l:
        # read fixed part
        cur = data[off:off + _fm0fsize]
        off += _fm0fsize
        numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
        # read replacement
        sucs = ()
        if numsuc:
            s = (_fm0fnodesize * numsuc)
            cur = data[off:off + s]
            sucs = _unpack(_fm0node * numsuc, cur)
            off += s
        # read metadata
        # (metadata will be decoded on demand)
        metadata = data[off:off + mdsize]
        if len(metadata) != mdsize:
            raise error.Abort(_('parsing obsolete marker: metadata is too '
                                'short, %d bytes expected, got %d')
                              % (mdsize, len(metadata)))
        off += mdsize
        metadata = _fm0decodemeta(metadata)
        try:
            when, offset = metadata.pop('date', '0 0').split(' ')
            date = float(when), int(offset)
        except ValueError:
            date = (0., 0)
        parents = None
        if 'p2' in metadata:
            parents = (metadata.pop('p1', None), metadata.pop('p2', None))
        elif 'p1' in metadata:
            parents = (metadata.pop('p1', None),)
        elif 'p0' in metadata:
            parents = ()
        if parents is not None:
            try:
                parents = tuple(node.bin(p) for p in parents)
                # if parent content is not a nodeid, drop the data
                for p in parents:
                    if len(p) != 20:
                        parents = None
                        break
            except TypeError:
                # if content cannot be translated to nodeid drop the data.
                parents = None

        metadata = tuple(sorted(metadata.iteritems()))

        yield (pre, sucs, flags, metadata, date, parents)

def _fm0encodeonemarker(marker):
    pre, sucs, flags, metadata, date, parents = marker
    if flags & usingsha256:
        raise error.Abort(_('cannot handle sha256 with old obsstore format'))
    metadata = dict(metadata)
    time, tz = date
    metadata['date'] = '%r %i' % (time, tz)
    if parents is not None:
        if not parents:
            # mark that we explicitly recorded no parents
            metadata['p0'] = ''
        for i, p in enumerate(parents, 1):
            metadata['p%i' % i] = node.hex(p)
    metadata = _fm0encodemeta(metadata)
    numsuc = len(sucs)
    format = _fm0fixed + (_fm0node * numsuc)
    data = [numsuc, len(metadata), flags, pre]
    data.extend(sucs)
    return _pack(format, *data) + metadata

def _fm0encodemeta(meta):
    """Return encoded metadata string to string mapping.

    Assume no ':' in key and no '\0' in both key and value."""
    for key, value in meta.iteritems():
        if ':' in key or '\0' in key:
            raise ValueError("':' and '\0' are forbidden in metadata key")
        if '\0' in value:
            raise ValueError("'\0' is forbidden in metadata value")
    return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])

def _fm0decodemeta(data):
    """Return string to string dictionary from encoded version."""
    d = {}
    for l in data.split('\0'):
        if l:
            key, value = l.split(':')
            d[key] = value
    return d

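# Illustrative sketch, not part of the original module: the metadata encoding
# above is a flat NUL-joined list of "key:value" strings, so encoding and
# decoding a mapping round-trips it unchanged.
def _fm0metaroundtripexample():
    meta = {'user': 'alice', 'note': 'amended'}
    return _fm0decodemeta(_fm0encodemeta(meta)) == meta
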
## Parsing and writing of version "1"
#
# The header is followed by the markers. Each marker is made of:
#
# - uint32: total size of the marker (including this field)
#
# - float64: date in seconds since epoch
#
# - int16: timezone offset in minutes
#
# - uint16: a bit field. It is reserved for flags used in common
#   obsolete marker operations, to avoid repeated decoding of metadata
#   entries.
#
# - uint8: number of successors "N", can be zero.
#
# - uint8: number of parents "P", can be zero.
#
#     0: parents data stored but no parent,
#     1: one parent stored,
#     2: two parents stored,
#     3: no parent data stored
#
# - uint8: number of metadata entries M
#
# - 20 or 32 bytes: precursor changeset identifier.
#
# - N*(20 or 32) bytes: successor changeset identifiers.
#
# - P*(20 or 32) bytes: parents of the precursor changeset.
#
# - M*(uint8, uint8): size of all metadata entries (key and value)
#
# - remaining bytes: the metadata, each (key, value) pair after the other.
_fm1version = 1
_fm1fixed = '>IdhHBBB20s'
_fm1nodesha1 = '20s'
_fm1nodesha256 = '32s'
_fm1nodesha1size = _calcsize(_fm1nodesha1)
_fm1nodesha256size = _calcsize(_fm1nodesha256)
_fm1fsize = _calcsize(_fm1fixed)
_fm1parentnone = 3
_fm1parentshift = 14
_fm1parentmask = (_fm1parentnone << _fm1parentshift)
_fm1metapair = 'BB'
_fm1metapairsize = _calcsize('BB')

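# Illustrative sketch, not part of the original module: the metadata size
# table described above is M pairs of uint8 lengths; for two entries whose
# keys and values are 4 and 5 bytes long it packs like this.
def _fm1metasizesexample():
    sizes = (4, 5, 4, 5)   # (len(key), len(value)) for each of the 2 entries
    return _pack('>' + _fm1metapair * 2, *sizes)
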
def _fm1purereadmarkers(data, off):
    # make some global constants local for performance
    noneflag = _fm1parentnone
    sha2flag = usingsha256
    sha1size = _fm1nodesha1size
    sha2size = _fm1nodesha256size
    sha1fmt = _fm1nodesha1
    sha2fmt = _fm1nodesha256
    metasize = _fm1metapairsize
    metafmt = _fm1metapair
    fsize = _fm1fsize
    unpack = _unpack

    # Loop on markers
    stop = len(data) - _fm1fsize
    ufixed = struct.Struct(_fm1fixed).unpack

    while off <= stop:
        # read fixed part
        o1 = off + fsize
        t, secs, tz, flags, numsuc, numpar, nummeta, prec = ufixed(data[off:o1])

        if flags & sha2flag:
            # FIXME: prec was read as a SHA1, needs to be amended

            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha2size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha2size * numsuc
                sucs = unpack(sha2fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha2size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha2size * numpar
                parents = unpack(sha2fmt * numpar, data[o2:o3])
        else:
            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha1size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha1size * numsuc
                sucs = unpack(sha1fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha1size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha1size * numpar
                parents = unpack(sha1fmt * numpar, data[o2:o3])

        # read metadata
        off = o3 + metasize * nummeta
        metapairsize = unpack('>' + (metafmt * nummeta), data[o3:off])
        metadata = []
        for idx in xrange(0, len(metapairsize), 2):
            o1 = off + metapairsize[idx]
            o2 = o1 + metapairsize[idx + 1]
            metadata.append((data[off:o1], data[o1:o2]))
            off = o2

        yield (prec, sucs, flags, tuple(metadata), (secs, tz * 60), parents)

def _fm1encodeonemarker(marker):
    pre, sucs, flags, metadata, date, parents = marker
    # determine node size
    _fm1node = _fm1nodesha1
    if flags & usingsha256:
        _fm1node = _fm1nodesha256
    numsuc = len(sucs)
    numextranodes = numsuc
    if parents is None:
        numpar = _fm1parentnone
    else:
        numpar = len(parents)
        numextranodes += numpar
    formatnodes = _fm1node * numextranodes
    formatmeta = _fm1metapair * len(metadata)
    format = _fm1fixed + formatnodes + formatmeta
    # tz is stored in minutes so we divide by 60
    tz = date[1]//60
    data = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre]
    data.extend(sucs)
    if parents is not None:
        data.extend(parents)
    totalsize = _calcsize(format)
    for key, value in metadata:
        lk = len(key)
        lv = len(value)
        data.append(lk)
        data.append(lv)
        totalsize += lk + lv
    data[0] = totalsize
    data = [_pack(format, *data)]
    for key, value in metadata:
        data.append(key)
        data.append(value)
    return ''.join(data)

def _fm1readmarkers(data, off):
    native = getattr(parsers, 'fm1readmarkers', None)
    if not native:
        return _fm1purereadmarkers(data, off)
    stop = len(data) - _fm1fsize
    return native(data, off, stop)

# mapping to read/write various marker formats
# <version> -> (decoder, encoder)
formats = {_fm0version: (_fm0readmarkers, _fm0encodeonemarker),
           _fm1version: (_fm1readmarkers, _fm1encodeonemarker)}

def _readmarkerversion(data):
    return _unpack('>B', data[0:1])[0]

@util.nogc
def _readmarkers(data):
    """Read and enumerate markers from raw data"""
    diskversion = _readmarkerversion(data)
    off = 1
    if diskversion not in formats:
        msg = _('parsing obsolete marker: unknown version %r') % diskversion
        raise error.UnknownVersion(msg, version=diskversion)
    return diskversion, formats[diskversion][0](data, off)

def encodeheader(version=_fm0version):
    return _pack('>B', version)

def encodemarkers(markers, addheader=False, version=_fm0version):
    # Kept separate from flushmarkers(), it will be reused for
    # markers exchange.
    encodeone = formats[version][1]
    if addheader:
        yield encodeheader(version)
    for marker in markers:
        yield encodeone(marker)


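# Illustrative sketch, not part of the original change: with the new
# encodeheader() helper, an empty marker list still yields a valid,
# header-only stream when addheader is True.
def _encodemarkersexample():
    return ''.join(encodemarkers([], addheader=True, version=_fm0version))
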
465 | class marker(object): |
|
468 | class marker(object): | |
466 | """Wrap obsolete marker raw data""" |
|
469 | """Wrap obsolete marker raw data""" | |
467 |
|
470 | |||
468 | def __init__(self, repo, data): |
|
471 | def __init__(self, repo, data): | |
469 | # the repo argument will be used to create changectx in later version |
|
472 | # the repo argument will be used to create changectx in later version | |
470 | self._repo = repo |
|
473 | self._repo = repo | |
471 | self._data = data |
|
474 | self._data = data | |
472 | self._decodedmeta = None |
|
475 | self._decodedmeta = None | |
473 |
|
476 | |||
474 | def __hash__(self): |
|
477 | def __hash__(self): | |
475 | return hash(self._data) |
|
478 | return hash(self._data) | |
476 |
|
479 | |||
477 | def __eq__(self, other): |
|
480 | def __eq__(self, other): | |
478 | if type(other) != type(self): |
|
481 | if type(other) != type(self): | |
479 | return False |
|
482 | return False | |
480 | return self._data == other._data |
|
483 | return self._data == other._data | |
481 |
|
484 | |||
482 | def precnode(self): |
|
485 | def precnode(self): | |
483 | """Precursor changeset node identifier""" |
|
486 | """Precursor changeset node identifier""" | |
484 | return self._data[0] |
|
487 | return self._data[0] | |
485 |
|
488 | |||
486 | def succnodes(self): |
|
489 | def succnodes(self): | |
487 | """List of successor changesets node identifiers""" |
|
490 | """List of successor changesets node identifiers""" | |
488 | return self._data[1] |
|
491 | return self._data[1] | |
489 |
|
492 | |||
490 | def parentnodes(self): |
|
493 | def parentnodes(self): | |
491 | """Parents of the precursors (None if not recorded)""" |
|
494 | """Parents of the precursors (None if not recorded)""" | |
492 | return self._data[5] |
|
495 | return self._data[5] | |
493 |
|
496 | |||
494 | def metadata(self): |
|
497 | def metadata(self): | |
495 | """Decoded metadata dictionary""" |
|
498 | """Decoded metadata dictionary""" | |
496 | return dict(self._data[3]) |
|
499 | return dict(self._data[3]) | |
497 |
|
500 | |||
498 | def date(self): |
|
501 | def date(self): | |
499 | """Creation date as (unixtime, offset)""" |
|
502 | """Creation date as (unixtime, offset)""" | |
500 | return self._data[4] |
|
503 | return self._data[4] | |
501 |
|
504 | |||
502 | def flags(self): |
|
505 | def flags(self): | |
503 | """The flags field of the marker""" |
|
506 | """The flags field of the marker""" | |
504 | return self._data[2] |
|
507 | return self._data[2] | |
505 |
|
508 | |||
506 | @util.nogc |
|
509 | @util.nogc | |
507 | def _addsuccessors(successors, markers): |
|
510 | def _addsuccessors(successors, markers): | |
508 | for mark in markers: |
|
511 | for mark in markers: | |
509 | successors.setdefault(mark[0], set()).add(mark) |
|
512 | successors.setdefault(mark[0], set()).add(mark) | |
510 |
|
513 | |||
511 | @util.nogc |
|
514 | @util.nogc | |
512 | def _addprecursors(precursors, markers): |
|
515 | def _addprecursors(precursors, markers): | |
513 | for mark in markers: |
|
516 | for mark in markers: | |
514 | for suc in mark[1]: |
|
517 | for suc in mark[1]: | |
515 | precursors.setdefault(suc, set()).add(mark) |
|
518 | precursors.setdefault(suc, set()).add(mark) | |
516 |
|
519 | |||
517 | @util.nogc |
|
520 | @util.nogc | |
518 | def _addchildren(children, markers): |
|
521 | def _addchildren(children, markers): | |
519 | for mark in markers: |
|
522 | for mark in markers: | |
520 | parents = mark[5] |
|
523 | parents = mark[5] | |
521 | if parents is not None: |
|
524 | if parents is not None: | |
522 | for p in parents: |
|
525 | for p in parents: | |
523 | children.setdefault(p, set()).add(mark) |
|
526 | children.setdefault(p, set()).add(mark) | |
524 |
|
527 | |||
525 | def _checkinvalidmarkers(markers): |
|
528 | def _checkinvalidmarkers(markers): | |
526 | """search for marker with invalid data and raise error if needed |
|
529 | """search for marker with invalid data and raise error if needed | |
527 |
|
530 | |||
528 | Exist as a separated function to allow the evolve extension for a more |
|
531 | Exist as a separated function to allow the evolve extension for a more | |
529 | subtle handling. |
|
532 | subtle handling. | |
530 | """ |
|
533 | """ | |
531 | for mark in markers: |
|
534 | for mark in markers: | |
532 | if node.nullid in mark[1]: |
|
535 | if node.nullid in mark[1]: | |
533 | raise error.Abort(_('bad obsolescence marker detected: ' |
|
536 | raise error.Abort(_('bad obsolescence marker detected: ' | |
534 | 'invalid successors nullid')) |
|
537 | 'invalid successors nullid')) | |
535 |
|
538 | |||
536 | class obsstore(object): |
|
539 | class obsstore(object): | |
537 | """Store obsolete markers |
|
540 | """Store obsolete markers | |
538 |
|
541 | |||
539 | Markers can be accessed with two mappings: |
|
542 | Markers can be accessed with two mappings: | |
540 | - precursors[x] -> set(markers on precursors edges of x) |
|
543 | - precursors[x] -> set(markers on precursors edges of x) | |
541 | - successors[x] -> set(markers on successors edges of x) |
|
544 | - successors[x] -> set(markers on successors edges of x) | |
542 | - children[x] -> set(markers on precursors edges of children(x) |
|
545 | - children[x] -> set(markers on precursors edges of children(x) | |
543 | """ |
|
546 | """ | |
544 |
|
547 | |||
545 | fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents') |
|
548 | fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents') | |
546 | # prec: nodeid, precursor changesets |
|
549 | # prec: nodeid, precursor changesets | |
547 | # succs: tuple of nodeid, successor changesets (0-N length) |
|
550 | # succs: tuple of nodeid, successor changesets (0-N length) | |
548 | # flag: integer, flag field carrying modifier for the markers (see doc) |
|
551 | # flag: integer, flag field carrying modifier for the markers (see doc) | |
549 | # meta: binary blob, encoded metadata dictionary |
|
552 | # meta: binary blob, encoded metadata dictionary | |
550 | # date: (float, int) tuple, date of marker creation |
|
553 | # date: (float, int) tuple, date of marker creation | |
551 | # parents: (tuple of nodeid) or None, parents of precursors |
|
554 | # parents: (tuple of nodeid) or None, parents of precursors | |
552 | # None is used when no data has been recorded |
|
555 | # None is used when no data has been recorded | |
553 |
|
556 | |||
554 | def __init__(self, svfs, defaultformat=_fm1version, readonly=False): |
|
557 | def __init__(self, svfs, defaultformat=_fm1version, readonly=False): | |
555 | # caches for various obsolescence related cache |
|
558 | # caches for various obsolescence related cache | |
556 | self.caches = {} |
|
559 | self.caches = {} | |
557 | self.svfs = svfs |
|
560 | self.svfs = svfs | |
558 | self._defaultformat = defaultformat |
|
561 | self._defaultformat = defaultformat | |
559 | self._readonly = readonly |
|
562 | self._readonly = readonly | |
560 |
|
563 | |||
561 | def __iter__(self): |
|
564 | def __iter__(self): | |
562 | return iter(self._all) |
|
565 | return iter(self._all) | |
563 |
|
566 | |||
564 | def __len__(self): |
|
567 | def __len__(self): | |
565 | return len(self._all) |
|
568 | return len(self._all) | |
566 |
|
569 | |||
567 | def __nonzero__(self): |
|
570 | def __nonzero__(self): | |
568 | if not self._cached('_all'): |
|
571 | if not self._cached('_all'): | |
569 | try: |
|
572 | try: | |
570 | return self.svfs.stat('obsstore').st_size > 1 |
|
573 | return self.svfs.stat('obsstore').st_size > 1 | |
571 | except OSError as inst: |
|
574 | except OSError as inst: | |
572 | if inst.errno != errno.ENOENT: |
|
575 | if inst.errno != errno.ENOENT: | |
573 | raise |
|
576 | raise | |
574 | # just build an empty _all list if no obsstore exists, which |
|
577 | # just build an empty _all list if no obsstore exists, which | |
575 | # avoids further stat() syscalls |
|
578 | # avoids further stat() syscalls | |
576 | pass |
|
579 | pass | |
577 | return bool(self._all) |
|
580 | return bool(self._all) | |
578 |
|
581 | |||
579 | __bool__ = __nonzero__ |
|
582 | __bool__ = __nonzero__ | |
580 |
|
583 | |||
581 | @property |
|
584 | @property | |
582 | def readonly(self): |
|
585 | def readonly(self): | |
583 | """True if marker creation is disabled |
|
586 | """True if marker creation is disabled | |
584 |
|
587 | |||
585 | Remove me in the future when obsolete marker is always on.""" |
|
588 | Remove me in the future when obsolete marker is always on.""" | |
586 | return self._readonly |
|
589 | return self._readonly | |
587 |
|
590 | |||
588 | def create(self, transaction, prec, succs=(), flag=0, parents=None, |
|
591 | def create(self, transaction, prec, succs=(), flag=0, parents=None, | |
589 | date=None, metadata=None, ui=None): |
|
592 | date=None, metadata=None, ui=None): | |
590 | """obsolete: add a new obsolete marker |
|
593 | """obsolete: add a new obsolete marker | |
591 |
|
594 | |||
592 | * ensuring it is hashable |
|
595 | * ensuring it is hashable | |
593 | * check mandatory metadata |
|
596 | * check mandatory metadata | |
594 | * encode metadata |
|
597 | * encode metadata | |
595 |
|
598 | |||
596 | If you are a human writing code creating marker you want to use the |
|
599 | If you are a human writing code creating marker you want to use the | |
597 | `createmarkers` function in this module instead. |
|
600 | `createmarkers` function in this module instead. | |
598 |
|
601 | |||
599 | return True if a new marker have been added, False if the markers |
|
602 | return True if a new marker have been added, False if the markers | |
600 | already existed (no op). |
|
603 | already existed (no op). | |
601 | """ |
|
604 | """ | |
602 | if metadata is None: |
|
605 | if metadata is None: | |
603 | metadata = {} |
|
606 | metadata = {} | |
604 | if date is None: |
|
607 | if date is None: | |
605 | if 'date' in metadata: |
|
608 | if 'date' in metadata: | |
606 | # as a courtesy for out-of-tree extensions |
|
609 | # as a courtesy for out-of-tree extensions | |
607 | date = util.parsedate(metadata.pop('date')) |
|
610 | date = util.parsedate(metadata.pop('date')) | |
608 | elif ui is not None: |
|
611 | elif ui is not None: | |
609 | date = ui.configdate('devel', 'default-date') |
|
612 | date = ui.configdate('devel', 'default-date') | |
610 | if date is None: |
|
613 | if date is None: | |
611 | date = util.makedate() |
|
614 | date = util.makedate() | |
612 | else: |
|
615 | else: | |
613 | date = util.makedate() |
|
616 | date = util.makedate() | |
614 | if len(prec) != 20: |
|
617 | if len(prec) != 20: | |
615 | raise ValueError(prec) |
|
618 | raise ValueError(prec) | |
616 | for succ in succs: |
|
619 | for succ in succs: | |
617 | if len(succ) != 20: |
|
620 | if len(succ) != 20: | |
618 | raise ValueError(succ) |
|
621 | raise ValueError(succ) | |
619 | if prec in succs: |
|
622 | if prec in succs: | |
620 | raise ValueError(_('in-marker cycle with %s') % node.hex(prec)) |
|
623 | raise ValueError(_('in-marker cycle with %s') % node.hex(prec)) | |
621 |
|
624 | |||
622 | metadata = tuple(sorted(metadata.iteritems())) |
|
625 | metadata = tuple(sorted(metadata.iteritems())) | |
623 |
|
626 | |||
624 | marker = (str(prec), tuple(succs), int(flag), metadata, date, parents) |
|
627 | marker = (str(prec), tuple(succs), int(flag), metadata, date, parents) | |
625 | return bool(self.add(transaction, [marker])) |
|
628 | return bool(self.add(transaction, [marker])) | |
626 |
|
629 | |||
627 | def add(self, transaction, markers): |
|
630 | def add(self, transaction, markers): | |
628 | """Add new markers to the store |
|
631 | """Add new markers to the store | |
629 |
|
632 | |||
630 | Take care of filtering duplicate. |
|
633 | Take care of filtering duplicate. | |
631 | Return the number of new marker.""" |
|
634 | Return the number of new marker.""" | |
632 | if self._readonly: |
|
635 | if self._readonly: | |
633 | raise error.Abort(_('creating obsolete markers is not enabled on ' |
|
636 | raise error.Abort(_('creating obsolete markers is not enabled on ' | |
634 | 'this repo')) |
|
637 | 'this repo')) | |
635 | known = set(self._all) |
|
638 | known = set(self._all) | |
636 | new = [] |
|
639 | new = [] | |
637 | for m in markers: |
|
640 | for m in markers: | |
638 | if m not in known: |
|
641 | if m not in known: | |
639 | known.add(m) |
|
642 | known.add(m) | |
640 | new.append(m) |
|
643 | new.append(m) | |
641 | if new: |
|
644 | if new: | |
642 | f = self.svfs('obsstore', 'ab') |
|
645 | f = self.svfs('obsstore', 'ab') | |
643 | try: |
|
646 | try: | |
644 | offset = f.tell() |
|
647 | offset = f.tell() | |
645 | transaction.add('obsstore', offset) |
|
648 | transaction.add('obsstore', offset) | |
646 | # offset == 0: new file - add the version header |
|
649 | # offset == 0: new file - add the version header | |
647 | for bytes in encodemarkers(new, offset == 0, self._version): |
|
650 | for bytes in encodemarkers(new, offset == 0, self._version): | |
648 | f.write(bytes) |
|
651 | f.write(bytes) | |
649 | finally: |
|
652 | finally: | |
650 | # XXX: f.close() == filecache invalidation == obsstore rebuilt. |
|
653 | # XXX: f.close() == filecache invalidation == obsstore rebuilt. | |
651 | # call 'filecacheentry.refresh()' here |
|
654 | # call 'filecacheentry.refresh()' here | |
652 | f.close() |
|
655 | f.close() | |
653 | self._addmarkers(new) |
|
656 | self._addmarkers(new) | |
654 | # new marker *may* have changed several set. invalidate the cache. |
|
657 | # new marker *may* have changed several set. invalidate the cache. | |
655 | self.caches.clear() |
|
658 | self.caches.clear() | |
656 | # records the number of new markers for the transaction hooks |
|
659 | # records the number of new markers for the transaction hooks | |
657 | previous = int(transaction.hookargs.get('new_obsmarkers', '0')) |
|
660 | previous = int(transaction.hookargs.get('new_obsmarkers', '0')) | |
658 | transaction.hookargs['new_obsmarkers'] = str(previous + len(new)) |
|
661 | transaction.hookargs['new_obsmarkers'] = str(previous + len(new)) | |
659 | return len(new) |
|
662 | return len(new) | |
660 |
|
663 | |||
661 | def mergemarkers(self, transaction, data): |
|
664 | def mergemarkers(self, transaction, data): | |
662 | """merge a binary stream of markers inside the obsstore |
|
665 | """merge a binary stream of markers inside the obsstore | |
663 |
|
666 | |||
664 | Returns the number of new markers added.""" |
|
667 | Returns the number of new markers added.""" | |
665 | version, markers = _readmarkers(data) |
|
668 | version, markers = _readmarkers(data) | |
666 | return self.add(transaction, markers) |
|
669 | return self.add(transaction, markers) | |
667 |
|
670 | |||
668 | @propertycache |
|
671 | @propertycache | |
669 | def _data(self): |
|
672 | def _data(self): | |
670 | return self.svfs.tryread('obsstore') |
|
673 | return self.svfs.tryread('obsstore') | |
671 |
|
674 | |||
672 | @propertycache |
|
675 | @propertycache | |
673 | def _version(self): |
|
676 | def _version(self): | |
674 | if len(self._data) >= 1: |
|
677 | if len(self._data) >= 1: | |
675 | return _readmarkerversion(self._data) |
|
678 | return _readmarkerversion(self._data) | |
676 | else: |
|
679 | else: | |
677 | return self._defaultformat |
|
680 | return self._defaultformat | |
678 |
|
681 | |||
679 | @propertycache |
|
682 | @propertycache | |
680 | def _all(self): |
|
683 | def _all(self): | |
681 | data = self._data |
|
684 | data = self._data | |
682 | if not data: |
|
685 | if not data: | |
683 | return [] |
|
686 | return [] | |
684 | self._version, markers = _readmarkers(data) |
|
687 | self._version, markers = _readmarkers(data) | |
685 | markers = list(markers) |
|
688 | markers = list(markers) | |
686 | _checkinvalidmarkers(markers) |
|
689 | _checkinvalidmarkers(markers) | |
687 | return markers |
|
690 | return markers | |
688 |
|
691 | |||
689 | @propertycache |
|
692 | @propertycache | |
690 | def successors(self): |
|
693 | def successors(self): | |
691 | successors = {} |
|
694 | successors = {} | |
692 | _addsuccessors(successors, self._all) |
|
695 | _addsuccessors(successors, self._all) | |
693 | return successors |
|
696 | return successors | |
694 |
|
697 | |||
695 | @propertycache |
|
698 | @propertycache | |
696 | def precursors(self): |
|
699 | def precursors(self): | |
697 | precursors = {} |
|
700 | precursors = {} | |
698 | _addprecursors(precursors, self._all) |
|
701 | _addprecursors(precursors, self._all) | |
699 | return precursors |
|
702 | return precursors | |
700 |
|
703 | |||
701 | @propertycache |
|
704 | @propertycache | |
702 | def children(self): |
|
705 | def children(self): | |
703 | children = {} |
|
706 | children = {} | |
704 | _addchildren(children, self._all) |
|
707 | _addchildren(children, self._all) | |
705 | return children |
|
708 | return children | |
706 |
|
709 | |||
707 | def _cached(self, attr): |
|
710 | def _cached(self, attr): | |
708 | return attr in self.__dict__ |
|
711 | return attr in self.__dict__ | |
709 |
|
712 | |||
710 | def _addmarkers(self, markers): |
|
713 | def _addmarkers(self, markers): | |
711 | markers = list(markers) # to allow repeated iteration |
|
714 | markers = list(markers) # to allow repeated iteration | |
712 | self._all.extend(markers) |
|
715 | self._all.extend(markers) | |
713 | if self._cached('successors'): |
|
716 | if self._cached('successors'): | |
714 | _addsuccessors(self.successors, markers) |
|
717 | _addsuccessors(self.successors, markers) | |
715 | if self._cached('precursors'): |
|
718 | if self._cached('precursors'): | |
716 | _addprecursors(self.precursors, markers) |
|
719 | _addprecursors(self.precursors, markers) | |
717 | if self._cached('children'): |
|
720 | if self._cached('children'): | |
718 | _addchildren(self.children, markers) |
|
721 | _addchildren(self.children, markers) | |
719 | _checkinvalidmarkers(markers) |
|
722 | _checkinvalidmarkers(markers) | |
720 |
|
723 | |||
721 | def relevantmarkers(self, nodes): |
|
724 | def relevantmarkers(self, nodes): | |
722 | """return a set of all obsolescence markers relevant to a set of nodes. |
|
725 | """return a set of all obsolescence markers relevant to a set of nodes. | |
723 |
|
726 | |||
724 | "relevant" to a set of nodes mean: |
|
727 | "relevant" to a set of nodes mean: | |
725 |
|
728 | |||
726 | - marker that use this changeset as successor |
|
729 | - marker that use this changeset as successor | |
727 | - prune marker of direct children on this changeset |
|
730 | - prune marker of direct children on this changeset | |
728 | - recursive application of the two rules on precursors of these markers |
|
731 | - recursive application of the two rules on precursors of these markers | |
729 |
|
732 | |||
730 | It is a set so you cannot rely on order.""" |
|
733 | It is a set so you cannot rely on order.""" | |
731 |
|
734 | |||
732 | pendingnodes = set(nodes) |
|
735 | pendingnodes = set(nodes) | |
733 | seenmarkers = set() |
|
736 | seenmarkers = set() | |
734 | seennodes = set(pendingnodes) |
|
737 | seennodes = set(pendingnodes) | |
735 | precursorsmarkers = self.precursors |
|
738 | precursorsmarkers = self.precursors | |
736 | succsmarkers = self.successors |
|
739 | succsmarkers = self.successors | |
737 | children = self.children |
|
740 | children = self.children | |
738 | while pendingnodes: |
|
741 | while pendingnodes: | |
739 | direct = set() |
|
742 | direct = set() | |
740 | for current in pendingnodes: |
|
743 | for current in pendingnodes: | |
741 | direct.update(precursorsmarkers.get(current, ())) |
|
744 | direct.update(precursorsmarkers.get(current, ())) | |
742 | pruned = [m for m in children.get(current, ()) if not m[1]] |
|
745 | pruned = [m for m in children.get(current, ()) if not m[1]] | |
743 | direct.update(pruned) |
|
746 | direct.update(pruned) | |
744 | pruned = [m for m in succsmarkers.get(current, ()) if not m[1]] |
|
747 | pruned = [m for m in succsmarkers.get(current, ()) if not m[1]] | |
745 | direct.update(pruned) |
|
748 | direct.update(pruned) | |
746 | direct -= seenmarkers |
|
749 | direct -= seenmarkers | |
747 | pendingnodes = set([m[0] for m in direct]) |
|
750 | pendingnodes = set([m[0] for m in direct]) | |
748 | seenmarkers |= direct |
|
751 | seenmarkers |= direct | |
749 | pendingnodes -= seennodes |
|
752 | pendingnodes -= seennodes | |
750 | seennodes |= pendingnodes |
|
753 | seennodes |= pendingnodes | |
751 | return seenmarkers |
|
754 | return seenmarkers | |
752 |
|
755 | |||
753 | def _filterprunes(markers): |
|
756 | def _filterprunes(markers): | |
754 | """return a set with no prune markers""" |
|
757 | """return a set with no prune markers""" | |
755 | return set(m for m in markers if m[1]) |
|
758 | return set(m for m in markers if m[1]) | |
756 |
|
759 | |||
757 | def exclusivemarkers(repo, nodes): |
|
760 | def exclusivemarkers(repo, nodes): | |
758 | """set of markers relevant to "nodes" but no other locally-known nodes |
|
761 | """set of markers relevant to "nodes" but no other locally-known nodes | |
759 |
|
762 | |||
760 | This function compute the set of markers "exclusive" to a locally-known |
|
763 | This function compute the set of markers "exclusive" to a locally-known | |
761 | node. This means we walk the markers starting from <nodes> until we reach a |
|
764 | node. This means we walk the markers starting from <nodes> until we reach a | |
762 | locally-known precursors outside of <nodes>. Element of <nodes> with |
|
765 | locally-known precursors outside of <nodes>. Element of <nodes> with | |
763 | locally-known successors outside of <nodes> are ignored (since their |
|
766 | locally-known successors outside of <nodes> are ignored (since their | |
764 | precursors markers are also relevant to these successors). |
|
767 | precursors markers are also relevant to these successors). | |
765 |
|
768 | |||
766 | For example: |
|
769 | For example: | |
767 |
|
770 | |||
768 | # (A0 rewritten as A1) |
|
771 | # (A0 rewritten as A1) | |
769 | # |
|
772 | # | |
770 | # A0 <-1- A1 # Marker "1" is exclusive to A1 |
|
773 | # A0 <-1- A1 # Marker "1" is exclusive to A1 | |
771 |
|
774 | |||
772 | or |
|
775 | or | |
773 |
|
776 | |||
774 | # (A0 rewritten as AX; AX rewritten as A1; AX is unkown locally) |
|
777 | # (A0 rewritten as AX; AX rewritten as A1; AX is unkown locally) | |
775 | # |
|
778 | # | |
776 | # <-1- A0 <-2- AX <-3- A1 # Marker "2,3" are exclusive to A1 |
|
779 | # <-1- A0 <-2- AX <-3- A1 # Marker "2,3" are exclusive to A1 | |
777 |
|
780 | |||
778 | or |
|
781 | or | |
779 |
|
782 | |||
780 | # (A0 has unknown precursors, A0 rewritten as A1 and A2 (divergence)) |
|
783 | # (A0 has unknown precursors, A0 rewritten as A1 and A2 (divergence)) | |
781 | # |
|
784 | # | |
782 | # <-2- A1 # Marker "2" is exclusive to A0,A1 |
|
785 | # <-2- A1 # Marker "2" is exclusive to A0,A1 | |
783 | # / |
|
786 | # / | |
784 | # <-1- A0 |
|
787 | # <-1- A0 | |
785 | # \ |
|
788 | # \ | |
786 | # <-3- A2 # Marker "3" is exclusive to A0,A2 |
|
789 | # <-3- A2 # Marker "3" is exclusive to A0,A2 | |
787 | # |
|
790 | # | |
788 | # in addition: |
|
791 | # in addition: | |
789 | # |
|
792 | # | |
790 | # Markers "2,3" are exclusive to A1,A2 |
|
793 | # Markers "2,3" are exclusive to A1,A2 | |
791 | # Markers "1,2,3" are exclusive to A0,A1,A2 |
|
794 | # Markers "1,2,3" are exclusive to A0,A1,A2 | |
792 |
|
795 | |||
793 | See test/test-obsolete-bundle-strip.t for more examples. |
|
796 | See test/test-obsolete-bundle-strip.t for more examples. | |
794 |
|
797 | |||
795 | An example usage is strip. When stripping a changeset, we also want to |
|
798 | An example usage is strip. When stripping a changeset, we also want to | |
796 | strip the markers exclusive to this changeset. Otherwise we would have |
|
799 | strip the markers exclusive to this changeset. Otherwise we would have | |
797 | "dangling"" obsolescence markers from its precursors: Obsolescence markers |
|
800 | "dangling"" obsolescence markers from its precursors: Obsolescence markers | |
798 | marking a node as obsolete without any successors available locally. |
|
801 | marking a node as obsolete without any successors available locally. | |
799 |
|
802 | |||
800 | As for relevant markers, the prune markers for children will be followed. |
|
803 | As for relevant markers, the prune markers for children will be followed. | |
801 | Of course, they will only be followed if the pruned child is |
|
804 | Of course, they will only be followed if the pruned child is | |
802 | locally-known, since the prune markers are relevant to the pruned node. |
|
805 | locally-known, since the prune markers are relevant to the pruned node. | |
803 | However, while prune markers are considered relevant to the parent of the |
|
806 | However, while prune markers are considered relevant to the parent of the | |
804 | pruned changesets, prune markers for locally-known changesets (with no |
|
807 | pruned changesets, prune markers for locally-known changesets (with no | |
805 | successors) are considered exclusive to the pruned nodes. This allows |
|
808 | successors) are considered exclusive to the pruned nodes. This allows | |
806 | stripping the prune markers (with the rest of the exclusive chain) alongside |
|
809 | stripping the prune markers (with the rest of the exclusive chain) alongside | |
807 | the pruned changesets. |
|
810 | the pruned changesets. | |
808 | """ |
|
811 | """ | |
809 | # running on a filtered repository would be dangerous as markers could be |
|
812 | # running on a filtered repository would be dangerous as markers could be | |
810 | # reported as exclusive when they are relevant for other filtered nodes. |
|
813 | # reported as exclusive when they are relevant for other filtered nodes. | |
811 | unfi = repo.unfiltered() |
|
814 | unfi = repo.unfiltered() | |
812 |
|
815 | |||
813 | # shortcut to various useful items |
|
816 | # shortcut to various useful items | |
814 | nm = unfi.changelog.nodemap |
|
817 | nm = unfi.changelog.nodemap | |
815 | precursorsmarkers = unfi.obsstore.precursors |
|
818 | precursorsmarkers = unfi.obsstore.precursors | |
816 | successormarkers = unfi.obsstore.successors |
|
819 | successormarkers = unfi.obsstore.successors | |
817 | childrenmarkers = unfi.obsstore.children |
|
820 | childrenmarkers = unfi.obsstore.children | |
818 |
|
821 | |||
819 | # exclusive markers (return of the function) |
|
822 | # exclusive markers (return of the function) | |
820 | exclmarkers = set() |
|
823 | exclmarkers = set() | |
821 | # we need fast membership testing |
|
824 | # we need fast membership testing | |
822 | nodes = set(nodes) |
|
825 | nodes = set(nodes) | |
823 | # looking for head in the obshistory |
|
826 | # looking for head in the obshistory | |
824 | # |
|
827 | # | |
825 | # XXX we are ignoring all issues in regard to cycles for now. |
|
828 | # XXX we are ignoring all issues in regard to cycles for now. | |
826 | stack = [n for n in nodes if not _filterprunes(successormarkers.get(n, ()))] |
|
829 | stack = [n for n in nodes if not _filterprunes(successormarkers.get(n, ()))] | |
827 | stack.sort() |
|
830 | stack.sort() | |
828 | # nodes already stacked |
|
831 | # nodes already stacked | |
829 | seennodes = set(stack) |
|
832 | seennodes = set(stack) | |
830 | while stack: |
|
833 | while stack: | |
831 | current = stack.pop() |
|
834 | current = stack.pop() | |
832 | # fetch precursors markers |
|
835 | # fetch precursors markers | |
833 | markers = list(precursorsmarkers.get(current, ())) |
|
836 | markers = list(precursorsmarkers.get(current, ())) | |
834 | # extend the list with prune markers |
|
837 | # extend the list with prune markers | |
835 | for mark in successormarkers.get(current, ()): |
|
838 | for mark in successormarkers.get(current, ()): | |
836 | if not mark[1]: |
|
839 | if not mark[1]: | |
837 | markers.append(mark) |
|
840 | markers.append(mark) | |
838 | # and markers from children (looking for prune) |
|
841 | # and markers from children (looking for prune) | |
839 | for mark in childrenmarkers.get(current, ()): |
|
842 | for mark in childrenmarkers.get(current, ()): | |
840 | if not mark[1]: |
|
843 | if not mark[1]: | |
841 | markers.append(mark) |
|
844 | markers.append(mark) | |
842 | # traverse the markers |
|
845 | # traverse the markers | |
843 | for mark in markers: |
|
846 | for mark in markers: | |
844 | if mark in exclmarkers: |
|
847 | if mark in exclmarkers: | |
845 | # markers already selected |
|
848 | # markers already selected | |
846 | continue |
|
849 | continue | |
847 |
|
850 | |||
848 | # If the marker is about the current node, select it |
|
851 | # If the marker is about the current node, select it | |
849 | # |
|
852 | # | |
850 | # (this delays the addition of markers from children) |
|
853 | # (this delays the addition of markers from children) | |
851 | if mark[1] or mark[0] == current: |
|
854 | if mark[1] or mark[0] == current: | |
852 | exclmarkers.add(mark) |
|
855 | exclmarkers.add(mark) | |
853 |
|
856 | |||
854 | # should we keep traversing through the precursors? |
|
857 | # should we keep traversing through the precursors? | |
855 | prec = mark[0] |
|
858 | prec = mark[0] | |
856 |
|
859 | |||
857 | # nodes in the stack or already processed |
|
860 | # nodes in the stack or already processed | |
858 | if prec in seennodes: |
|
861 | if prec in seennodes: | |
859 | continue |
|
862 | continue | |
860 |
|
863 | |||
861 | # is this a locally known node ? |
|
864 | # is this a locally known node ? | |
862 | known = prec in nm |
|
865 | known = prec in nm | |
863 | # if locally-known and not in the <nodes> set, the traversal |
|
866 | # if locally-known and not in the <nodes> set, the traversal | |
864 | # stops here. |
|
867 | # stops here. | |
865 | if known and prec not in nodes: |
|
868 | if known and prec not in nodes: | |
866 | continue |
|
869 | continue | |
867 |
|
870 | |||
868 | # do not keep going if there are unselected markers pointing to this |
|
871 | # do not keep going if there are unselected markers pointing to this | |
869 | # node. If we end up traversing these unselected markers later the |
|
872 | # node. If we end up traversing these unselected markers later the | |
870 | # node will be taken care of at that point. |
|
873 | # node will be taken care of at that point. | |
871 | precmarkers = _filterprunes(successormarkers.get(prec)) |
|
874 | precmarkers = _filterprunes(successormarkers.get(prec)) | |
872 | if precmarkers.issubset(exclmarkers): |
|
875 | if precmarkers.issubset(exclmarkers): | |
873 | seennodes.add(prec) |
|
876 | seennodes.add(prec) | |
874 | stack.append(prec) |
|
877 | stack.append(prec) | |
875 |
|
878 | |||
876 | return exclmarkers |
|
879 | return exclmarkers | |
877 |
|
880 | |||
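A hedged usage sketch (not from the original file): an extension that strips changesets could use the function above to find the markers that can be dropped with them. The `repo` and `nodes` names are assumed to exist; only the `exclusivemarkers` call comes from this module:

    from mercurial import obsolete

    def markerstostrip(repo, nodes):
        # markers that matter only to the stripped nodes can be removed with them
        return obsolete.exclusivemarkers(repo, nodes)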
878 | def commonversion(versions): |
|
881 | def commonversion(versions): | |
879 | """Return the newest version listed in both versions and our local formats. |
|
882 | """Return the newest version listed in both versions and our local formats. | |
880 |
|
883 | |||
881 | Returns None if no common version exists. |
|
884 | Returns None if no common version exists. | |
882 | """ |
|
885 | """ | |
883 | versions.sort(reverse=True) |
|
886 | versions.sort(reverse=True) | |
884 | # search for the highest version known on both sides |
|
887 | # search for the highest version known on both sides | |
885 | for v in versions: |
|
888 | for v in versions: | |
886 | if v in formats: |
|
889 | if v in formats: | |
887 | return v |
|
890 | return v | |
888 | return None |
|
891 | return None | |
889 |
|
892 | |||
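A minimal standalone sketch of the same negotiation, with the locally supported formats passed in explicitly instead of read from the module-level `formats` mapping (the version numbers are made up):

    def pickcommonversion(versions, supported=(0, 1)):
        # pick the highest version advertised by the remote that we also support
        for v in sorted(versions, reverse=True):
            if v in supported:
                return v
        return None

    assert pickcommonversion([1, 0]) == 1
    assert pickcommonversion([2]) is None  # remote-only format: nothing in common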
890 | # arbitrarily picked to fit into the 8K limit from the HTTP server |
|
893 | # arbitrarily picked to fit into the 8K limit from the HTTP server | |
891 | # you have to take into account: |
|
894 | # you have to take into account: | |
892 | # - the version header |
|
895 | # - the version header | |
893 | # - the base85 encoding |
|
896 | # - the base85 encoding | |
894 | _maxpayload = 5300 |
|
897 | _maxpayload = 5300 | |
895 |
|
898 | |||
896 | def _pushkeyescape(markers): |
|
899 | def _pushkeyescape(markers): | |
897 | """encode markers into a dict suitable for pushkey exchange |
|
900 | """encode markers into a dict suitable for pushkey exchange | |
898 |
|
901 | |||
899 | - binary data is base85 encoded |
|
902 | - binary data is base85 encoded | |
900 | - split into chunks smaller than 5300 bytes""" |
|
903 | - split into chunks smaller than 5300 bytes""" | |
901 | keys = {} |
|
904 | keys = {} | |
902 | parts = [] |
|
905 | parts = [] | |
903 | currentlen = _maxpayload * 2 # ensure we create a new part |
|
906 | currentlen = _maxpayload * 2 # ensure we create a new part | |
904 | for marker in markers: |
|
907 | for marker in markers: | |
905 | nextdata = _fm0encodeonemarker(marker) |
|
908 | nextdata = _fm0encodeonemarker(marker) | |
906 | if (len(nextdata) + currentlen > _maxpayload): |
|
909 | if (len(nextdata) + currentlen > _maxpayload): | |
907 | currentpart = [] |
|
910 | currentpart = [] | |
908 | currentlen = 0 |
|
911 | currentlen = 0 | |
909 | parts.append(currentpart) |
|
912 | parts.append(currentpart) | |
910 | currentpart.append(nextdata) |
|
913 | currentpart.append(nextdata) | |
911 | currentlen += len(nextdata) |
|
914 | currentlen += len(nextdata) | |
912 | for idx, part in enumerate(reversed(parts)): |
|
915 | for idx, part in enumerate(reversed(parts)): | |
913 | data = ''.join([_pack('>B', _fm0version)] + part) |
|
916 | data = ''.join([_pack('>B', _fm0version)] + part) | |
914 | keys['dump%i' % idx] = util.b85encode(data) |
|
917 | keys['dump%i' % idx] = util.b85encode(data) | |
915 | return keys |
|
918 | return keys | |
916 |
|
919 | |||
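A self-contained sketch of the chunking strategy used above, applied to toy byte strings; the greedy part-splitting loop mirrors `_pushkeyescape`, while the version header and base85 encoding are left out:

    _MAXPAYLOAD = 5300  # same arbitrary limit as above

    def chunk(blobs, limit=_MAXPAYLOAD):
        # greedily pack encoded markers into parts no larger than `limit`
        parts, currentpart, currentlen = [], [], limit * 2  # force a first part
        for data in blobs:
            if len(data) + currentlen > limit:
                currentpart, currentlen = [], 0
                parts.append(currentpart)
            currentpart.append(data)
            currentlen += len(data)
        return parts

    parts = chunk([b'x' * 3000, b'y' * 3000, b'z' * 100])
    assert [sum(len(d) for d in p) for p in parts] == [3000, 3100]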
917 | def listmarkers(repo): |
|
920 | def listmarkers(repo): | |
918 | """List markers over pushkey""" |
|
921 | """List markers over pushkey""" | |
919 | if not repo.obsstore: |
|
922 | if not repo.obsstore: | |
920 | return {} |
|
923 | return {} | |
921 | return _pushkeyescape(sorted(repo.obsstore)) |
|
924 | return _pushkeyescape(sorted(repo.obsstore)) | |
922 |
|
925 | |||
923 | def pushmarker(repo, key, old, new): |
|
926 | def pushmarker(repo, key, old, new): | |
924 | """Push markers over pushkey""" |
|
927 | """Push markers over pushkey""" | |
925 | if not key.startswith('dump'): |
|
928 | if not key.startswith('dump'): | |
926 | repo.ui.warn(_('unknown key: %r') % key) |
|
929 | repo.ui.warn(_('unknown key: %r') % key) | |
927 | return 0 |
|
930 | return 0 | |
928 | if old: |
|
931 | if old: | |
929 | repo.ui.warn(_('unexpected old value for %r') % key) |
|
932 | repo.ui.warn(_('unexpected old value for %r') % key) | |
930 | return 0 |
|
933 | return 0 | |
931 | data = util.b85decode(new) |
|
934 | data = util.b85decode(new) | |
932 | lock = repo.lock() |
|
935 | lock = repo.lock() | |
933 | try: |
|
936 | try: | |
934 | tr = repo.transaction('pushkey: obsolete markers') |
|
937 | tr = repo.transaction('pushkey: obsolete markers') | |
935 | try: |
|
938 | try: | |
936 | repo.obsstore.mergemarkers(tr, data) |
|
939 | repo.obsstore.mergemarkers(tr, data) | |
937 | repo.invalidatevolatilesets() |
|
940 | repo.invalidatevolatilesets() | |
938 | tr.close() |
|
941 | tr.close() | |
939 | return 1 |
|
942 | return 1 | |
940 | finally: |
|
943 | finally: | |
941 | tr.release() |
|
944 | tr.release() | |
942 | finally: |
|
945 | finally: | |
943 | lock.release() |
|
946 | lock.release() | |
944 |
|
947 | |||
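As a rough, hypothetical illustration of how the two pushkey functions above fit together; `localrepo` and `peer` are assumed to exist, and real exchange code goes through the exchange machinery rather than a loop like this:

    # every 'dump0', 'dump1', ... key produced by listmarkers() is pushed with an
    # empty old value, which is what pushmarker() expects on the receiving side
    for key, value in sorted(listmarkers(localrepo).items()):
        peer.pushkey('obsolete', key, '', value)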
945 | def getmarkers(repo, nodes=None, exclusive=False): |
|
948 | def getmarkers(repo, nodes=None, exclusive=False): | |
946 | """returns markers known in a repository |
|
949 | """returns markers known in a repository | |
947 |
|
950 | |||
948 | If <nodes> is specified, only markers "relevant" to those nodes are |
|
951 | If <nodes> is specified, only markers "relevant" to those nodes are | |
949 | returned""" |
|
952 | returned""" | |
950 | if nodes is None: |
|
953 | if nodes is None: | |
951 | rawmarkers = repo.obsstore |
|
954 | rawmarkers = repo.obsstore | |
952 | elif exclusive: |
|
955 | elif exclusive: | |
953 | rawmarkers = exclusivemarkers(repo, nodes) |
|
956 | rawmarkers = exclusivemarkers(repo, nodes) | |
954 | else: |
|
957 | else: | |
955 | rawmarkers = repo.obsstore.relevantmarkers(nodes) |
|
958 | rawmarkers = repo.obsstore.relevantmarkers(nodes) | |
956 |
|
959 | |||
957 | for markerdata in rawmarkers: |
|
960 | for markerdata in rawmarkers: | |
958 | yield marker(repo, markerdata) |
|
961 | yield marker(repo, markerdata) | |
959 |
|
962 | |||
960 | def relevantmarkers(repo, node): |
|
963 | def relevantmarkers(repo, node): | |
961 | """all obsolete markers relevant to some revision""" |
|
964 | """all obsolete markers relevant to some revision""" | |
962 | for markerdata in repo.obsstore.relevantmarkers(node): |
|
965 | for markerdata in repo.obsstore.relevantmarkers(node): | |
963 | yield marker(repo, markerdata) |
|
966 | yield marker(repo, markerdata) | |
964 |
|
967 | |||
965 |
|
968 | |||
966 | def precursormarkers(ctx): |
|
969 | def precursormarkers(ctx): | |
967 | """obsolete marker marking this changeset as a successors""" |
|
970 | """obsolete marker marking this changeset as a successors""" | |
968 | for data in ctx.repo().obsstore.precursors.get(ctx.node(), ()): |
|
971 | for data in ctx.repo().obsstore.precursors.get(ctx.node(), ()): | |
969 | yield marker(ctx.repo(), data) |
|
972 | yield marker(ctx.repo(), data) | |
970 |
|
973 | |||
971 | def successormarkers(ctx): |
|
974 | def successormarkers(ctx): | |
972 | """obsolete marker making this changeset obsolete""" |
|
975 | """obsolete marker making this changeset obsolete""" | |
973 | for data in ctx.repo().obsstore.successors.get(ctx.node(), ()): |
|
976 | for data in ctx.repo().obsstore.successors.get(ctx.node(), ()): | |
974 | yield marker(ctx.repo(), data) |
|
977 | yield marker(ctx.repo(), data) | |
975 |
|
978 | |||
976 | def allsuccessors(obsstore, nodes, ignoreflags=0): |
|
979 | def allsuccessors(obsstore, nodes, ignoreflags=0): | |
977 | """Yield node for every successor of <nodes>. |
|
980 | """Yield node for every successor of <nodes>. | |
978 |
|
981 | |||
979 | Some successors may be unknown locally. |
|
982 | Some successors may be unknown locally. | |
980 |
|
983 | |||
981 | This is a linear yield unsuited to detecting split changesets. It includes |
|
984 | This is a linear yield unsuited to detecting split changesets. It includes | |
982 | initial nodes too.""" |
|
985 | initial nodes too.""" | |
983 | remaining = set(nodes) |
|
986 | remaining = set(nodes) | |
984 | seen = set(remaining) |
|
987 | seen = set(remaining) | |
985 | while remaining: |
|
988 | while remaining: | |
986 | current = remaining.pop() |
|
989 | current = remaining.pop() | |
987 | yield current |
|
990 | yield current | |
988 | for mark in obsstore.successors.get(current, ()): |
|
991 | for mark in obsstore.successors.get(current, ()): | |
989 | # ignore marker flagged with specified flag |
|
992 | # ignore marker flagged with specified flag | |
990 | if mark[2] & ignoreflags: |
|
993 | if mark[2] & ignoreflags: | |
991 | continue |
|
994 | continue | |
992 | for suc in mark[1]: |
|
995 | for suc in mark[1]: | |
993 | if suc not in seen: |
|
996 | if suc not in seen: | |
994 | seen.add(suc) |
|
997 | seen.add(suc) | |
995 | remaining.add(suc) |
|
998 | remaining.add(suc) | |
996 |
|
999 | |||
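A standalone sketch of the same transitive walk over a toy successors mapping, using a plain dict in place of the obsstore and ignoring marker flags (all names are made up):

    def walksuccessors(successors, nodes):
        # yield every node transitively reachable through successor markers,
        # including the initial nodes themselves
        remaining = set(nodes)
        seen = set(remaining)
        while remaining:
            current = remaining.pop()
            yield current
            for mark in successors.get(current, ()):
                for suc in mark[1]:
                    if suc not in seen:
                        seen.add(suc)
                        remaining.add(suc)

    toy = {'A0': [('A0', ('A1',))], 'A1': [('A1', ('A2',))]}
    assert set(walksuccessors(toy, ['A0'])) == {'A0', 'A1', 'A2'}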
997 | def allprecursors(obsstore, nodes, ignoreflags=0): |
|
1000 | def allprecursors(obsstore, nodes, ignoreflags=0): | |
998 | """Yield node for every precursors of <nodes>. |
|
1001 | """Yield node for every precursors of <nodes>. | |
999 |
|
1002 | |||
1000 | Some precursors may be unknown locally. |
|
1003 | Some precursors may be unknown locally. | |
1001 |
|
1004 | |||
1002 | This is a linear yield unsuited to detecting folded changesets. It includes |
|
1005 | This is a linear yield unsuited to detecting folded changesets. It includes | |
1003 | initial nodes too.""" |
|
1006 | initial nodes too.""" | |
1004 |
|
1007 | |||
1005 | remaining = set(nodes) |
|
1008 | remaining = set(nodes) | |
1006 | seen = set(remaining) |
|
1009 | seen = set(remaining) | |
1007 | while remaining: |
|
1010 | while remaining: | |
1008 | current = remaining.pop() |
|
1011 | current = remaining.pop() | |
1009 | yield current |
|
1012 | yield current | |
1010 | for mark in obsstore.precursors.get(current, ()): |
|
1013 | for mark in obsstore.precursors.get(current, ()): | |
1011 | # ignore marker flagged with specified flag |
|
1014 | # ignore marker flagged with specified flag | |
1012 | if mark[2] & ignoreflags: |
|
1015 | if mark[2] & ignoreflags: | |
1013 | continue |
|
1016 | continue | |
1014 | suc = mark[0] |
|
1017 | suc = mark[0] | |
1015 | if suc not in seen: |
|
1018 | if suc not in seen: | |
1016 | seen.add(suc) |
|
1019 | seen.add(suc) | |
1017 | remaining.add(suc) |
|
1020 | remaining.add(suc) | |
1018 |
|
1021 | |||
1019 | def foreground(repo, nodes): |
|
1022 | def foreground(repo, nodes): | |
1020 | """return all nodes in the "foreground" of other node |
|
1023 | """return all nodes in the "foreground" of other node | |
1021 |
|
1024 | |||
1022 | The foreground of a revision is anything reachable using parent -> children |
|
1025 | The foreground of a revision is anything reachable using parent -> children | |
1023 | or precursor -> successor relation. It is very similar to "descendant" but |
|
1026 | or precursor -> successor relation. It is very similar to "descendant" but | |
1024 | augmented with obsolescence information. |
|
1027 | augmented with obsolescence information. | |
1025 |
|
1028 | |||
1026 | Beware that obsolescence cycles may appear in complex situations. |
|
1029 | Beware that obsolescence cycles may appear in complex situations. | |
1027 | """ |
|
1030 | """ | |
1028 | repo = repo.unfiltered() |
|
1031 | repo = repo.unfiltered() | |
1029 | foreground = set(repo.set('%ln::', nodes)) |
|
1032 | foreground = set(repo.set('%ln::', nodes)) | |
1030 | if repo.obsstore: |
|
1033 | if repo.obsstore: | |
1031 | # We only need this complicated logic if there is obsolescence |
|
1034 | # We only need this complicated logic if there is obsolescence | |
1032 | # XXX will probably deserve an optimised revset. |
|
1035 | # XXX will probably deserve an optimised revset. | |
1033 | nm = repo.changelog.nodemap |
|
1036 | nm = repo.changelog.nodemap | |
1034 | plen = -1 |
|
1037 | plen = -1 | |
1035 | # compute the whole set of successors or descendants |
|
1038 | # compute the whole set of successors or descendants | |
1036 | while len(foreground) != plen: |
|
1039 | while len(foreground) != plen: | |
1037 | plen = len(foreground) |
|
1040 | plen = len(foreground) | |
1038 | succs = set(c.node() for c in foreground) |
|
1041 | succs = set(c.node() for c in foreground) | |
1039 | mutable = [c.node() for c in foreground if c.mutable()] |
|
1042 | mutable = [c.node() for c in foreground if c.mutable()] | |
1040 | succs.update(allsuccessors(repo.obsstore, mutable)) |
|
1043 | succs.update(allsuccessors(repo.obsstore, mutable)) | |
1041 | known = (n for n in succs if n in nm) |
|
1044 | known = (n for n in succs if n in nm) | |
1042 | foreground = set(repo.set('%ln::', known)) |
|
1045 | foreground = set(repo.set('%ln::', known)) | |
1043 | return set(c.node() for c in foreground) |
|
1046 | return set(c.node() for c in foreground) | |
1044 |
|
1047 | |||
1045 |
|
1048 | |||
1046 | def successorssets(repo, initialnode, cache=None): |
|
1049 | def successorssets(repo, initialnode, cache=None): | |
1047 | """Return set of all latest successors of initial nodes |
|
1050 | """Return set of all latest successors of initial nodes | |
1048 |
|
1051 | |||
1049 | The successors set of a changeset A is the group of revisions that succeed |
|
1052 | The successors set of a changeset A is the group of revisions that succeed | |
1050 | A. It succeeds A as a consistent whole, each revision being only a partial |
|
1053 | A. It succeeds A as a consistent whole, each revision being only a partial | |
1051 | replacement. The successors set contains non-obsolete changesets only. |
|
1054 | replacement. The successors set contains non-obsolete changesets only. | |
1052 |
|
1055 | |||
1053 | This function returns the full list of successor sets which is why it |
|
1056 | This function returns the full list of successor sets which is why it | |
1054 | returns a list of tuples and not just a single tuple. Each tuple is a valid |
|
1057 | returns a list of tuples and not just a single tuple. Each tuple is a valid | |
1055 | successors set. Note that (A,) may be a valid successors set for changeset A |
|
1058 | successors set. Note that (A,) may be a valid successors set for changeset A | |
1056 | (see below). |
|
1059 | (see below). | |
1057 |
|
1060 | |||
1058 | In most cases, a changeset A will have a single element (e.g. the changeset |
|
1061 | In most cases, a changeset A will have a single element (e.g. the changeset | |
1059 | A is replaced by A') in its successors set. Though, it is also common for a |
|
1062 | A is replaced by A') in its successors set. Though, it is also common for a | |
1060 | changeset A to have no elements in its successor set (e.g. the changeset |
|
1063 | changeset A to have no elements in its successor set (e.g. the changeset | |
1061 | has been pruned). Therefore, the returned list of successors sets will be |
|
1064 | has been pruned). Therefore, the returned list of successors sets will be | |
1062 | [(A',)] or [], respectively. |
|
1065 | [(A',)] or [], respectively. | |
1063 |
|
1066 | |||
1064 | When a changeset A is split into A' and B', however, it will result in a |
|
1067 | When a changeset A is split into A' and B', however, it will result in a | |
1065 | successors set containing more than a single element, i.e. [(A',B')]. |
|
1068 | successors set containing more than a single element, i.e. [(A',B')]. | |
1066 | Divergent changesets will result in multiple successors sets, i.e. [(A',), |
|
1069 | Divergent changesets will result in multiple successors sets, i.e. [(A',), | |
1067 | (A'')]. |
|
1070 | (A'')]. | |
1068 |
|
1071 | |||
1069 | If a changeset A is not obsolete, then it will conceptually have no |
|
1072 | If a changeset A is not obsolete, then it will conceptually have no | |
1070 | successors set. To distinguish this from a pruned changeset, the successor |
|
1073 | successors set. To distinguish this from a pruned changeset, the successor | |
1071 | set will contain itself only, i.e. [(A,)]. |
|
1074 | set will contain itself only, i.e. [(A,)]. | |
1072 |
|
1075 | |||
1073 | Finally, successors unknown locally are considered to be pruned (obsoleted |
|
1076 | Finally, successors unknown locally are considered to be pruned (obsoleted | |
1074 | without any successors). |
|
1077 | without any successors). | |
1075 |
|
1078 | |||
1076 | The optional `cache` parameter is a dictionary that may contain precomputed |
|
1079 | The optional `cache` parameter is a dictionary that may contain precomputed | |
1077 | successors sets. It is meant to reuse the computation of a previous call to |
|
1080 | successors sets. It is meant to reuse the computation of a previous call to | |
1078 | `successorssets` when multiple calls are made at the same time. The cache |
|
1081 | `successorssets` when multiple calls are made at the same time. The cache | |
1079 | dictionary is updated in place. The caller is responsible for its life |
|
1082 | dictionary is updated in place. The caller is responsible for its life | |
1080 | span. Code that makes multiple calls to `successorssets` *must* use this |
|
1083 | span. Code that makes multiple calls to `successorssets` *must* use this | |
1081 | cache mechanism or suffer terrible performance. |
|
1084 | cache mechanism or suffer terrible performance. | |
1082 | """ |
|
1085 | """ | |
1083 |
|
1086 | |||
1084 | succmarkers = repo.obsstore.successors |
|
1087 | succmarkers = repo.obsstore.successors | |
1085 |
|
1088 | |||
1086 | # Stack of nodes we search successors sets for |
|
1089 | # Stack of nodes we search successors sets for | |
1087 | toproceed = [initialnode] |
|
1090 | toproceed = [initialnode] | |
1088 | # set version of above list for fast loop detection |
|
1091 | # set version of above list for fast loop detection | |
1089 | # element added to "toproceed" must be added here |
|
1092 | # element added to "toproceed" must be added here | |
1090 | stackedset = set(toproceed) |
|
1093 | stackedset = set(toproceed) | |
1091 | if cache is None: |
|
1094 | if cache is None: | |
1092 | cache = {} |
|
1095 | cache = {} | |
1093 |
|
1096 | |||
1094 | # This while loop is the flattened version of a recursive search for |
|
1097 | # This while loop is the flattened version of a recursive search for | |
1095 | # successors sets |
|
1098 | # successors sets | |
1096 | # |
|
1099 | # | |
1097 | # def successorssets(x): |
|
1100 | # def successorssets(x): | |
1098 | # successors = directsuccessors(x) |
|
1101 | # successors = directsuccessors(x) | |
1099 | # ss = [[]] |
|
1102 | # ss = [[]] | |
1100 | # for succ in directsuccessors(x): |
|
1103 | # for succ in directsuccessors(x): | |
1101 | # # product as in itertools cartesian product |
|
1104 | # # product as in itertools cartesian product | |
1102 | # ss = product(ss, successorssets(succ)) |
|
1105 | # ss = product(ss, successorssets(succ)) | |
1103 | # return ss |
|
1106 | # return ss | |
1104 | # |
|
1107 | # | |
1105 | # But we can not use plain recursive calls here: |
|
1108 | # But we can not use plain recursive calls here: | |
1106 | # - that would blow the python call stack |
|
1109 | # - that would blow the python call stack | |
1107 | # - obsolescence markers may have cycles, we need to handle them. |
|
1110 | # - obsolescence markers may have cycles, we need to handle them. | |
1108 | # |
|
1111 | # | |
1109 | # The `toproceed` list acts as our call stack. Every node we search |
|
1112 | # The `toproceed` list acts as our call stack. Every node we search | |
1110 | # successors sets for is stacked there. |
|
1113 | # successors sets for is stacked there. | |
1111 | # |
|
1114 | # | |
1112 | # The `stackedset` is a set version of this stack used to check if a node is |
|
1115 | # The `stackedset` is a set version of this stack used to check if a node is | |
1113 | # already stacked. This check is used to detect cycles and prevent infinite |
|
1116 | # already stacked. This check is used to detect cycles and prevent infinite | |
1114 | # loops. |
|
1117 | # loops. | |
1115 | # |
|
1118 | # | |
1116 | # successors sets of all nodes are stored in the `cache` dictionary. |
|
1119 | # successors sets of all nodes are stored in the `cache` dictionary. | |
1117 | # |
|
1120 | # | |
1118 | # After this while loop ends we use the cache to return the successors sets |
|
1121 | # After this while loop ends we use the cache to return the successors sets | |
1119 | # for the node requested by the caller. |
|
1122 | # for the node requested by the caller. | |
1120 | while toproceed: |
|
1123 | while toproceed: | |
1121 | # Every iteration tries to compute the successors sets of the topmost |
|
1124 | # Every iteration tries to compute the successors sets of the topmost | |
1122 | # node of the stack: CURRENT. |
|
1125 | # node of the stack: CURRENT. | |
1123 | # |
|
1126 | # | |
1124 | # There are four possible outcomes: |
|
1127 | # There are four possible outcomes: | |
1125 | # |
|
1128 | # | |
1126 | # 1) We already know the successors sets of CURRENT: |
|
1129 | # 1) We already know the successors sets of CURRENT: | |
1127 | # -> mission accomplished, pop it from the stack. |
|
1130 | # -> mission accomplished, pop it from the stack. | |
1128 | # 2) Node is not obsolete: |
|
1131 | # 2) Node is not obsolete: | |
1129 | # -> the node is its own successors sets. Add it to the cache. |
|
1132 | # -> the node is its own successors sets. Add it to the cache. | |
1130 | # 3) We do not know successors set of direct successors of CURRENT: |
|
1133 | # 3) We do not know successors set of direct successors of CURRENT: | |
1131 | # -> We add those successors to the stack. |
|
1134 | # -> We add those successors to the stack. | |
1132 | # 4) We know successors sets of all direct successors of CURRENT: |
|
1135 | # 4) We know successors sets of all direct successors of CURRENT: | |
1133 | # -> We can compute CURRENT successors set and add it to the |
|
1136 | # -> We can compute CURRENT successors set and add it to the | |
1134 | # cache. |
|
1137 | # cache. | |
1135 | # |
|
1138 | # | |
1136 | current = toproceed[-1] |
|
1139 | current = toproceed[-1] | |
1137 | if current in cache: |
|
1140 | if current in cache: | |
1138 | # case (1): We already know the successors sets |
|
1141 | # case (1): We already know the successors sets | |
1139 | stackedset.remove(toproceed.pop()) |
|
1142 | stackedset.remove(toproceed.pop()) | |
1140 | elif current not in succmarkers: |
|
1143 | elif current not in succmarkers: | |
1141 | # case (2): The node is not obsolete. |
|
1144 | # case (2): The node is not obsolete. | |
1142 | if current in repo: |
|
1145 | if current in repo: | |
1143 | # We have a valid last successors. |
|
1146 | # We have a valid last successors. | |
1144 | cache[current] = [(current,)] |
|
1147 | cache[current] = [(current,)] | |
1145 | else: |
|
1148 | else: | |
1146 | # Final obsolete version is unknown locally. |
|
1149 | # Final obsolete version is unknown locally. | |
1147 | # Do not count that as a valid successors |
|
1150 | # Do not count that as a valid successors | |
1148 | cache[current] = [] |
|
1151 | cache[current] = [] | |
1149 | else: |
|
1152 | else: | |
1150 | # cases (3) and (4) |
|
1153 | # cases (3) and (4) | |
1151 | # |
|
1154 | # | |
1152 | # We proceed in two phases. Phase 1 aims to distinguish case (3) |
|
1155 | # We proceed in two phases. Phase 1 aims to distinguish case (3) | |
1153 | # from case (4): |
|
1156 | # from case (4): | |
1154 | # |
|
1157 | # | |
1155 | # For each direct successor of CURRENT, we check whether its |
|
1158 | # For each direct successor of CURRENT, we check whether its | |
1156 | # successors sets are known. If they are not, we stack the |
|
1159 | # successors sets are known. If they are not, we stack the | |
1157 | # unknown node and proceed to the next iteration of the while |
|
1160 | # unknown node and proceed to the next iteration of the while | |
1158 | # loop. (case 3) |
|
1161 | # loop. (case 3) | |
1159 | # |
|
1162 | # | |
1160 | # During this step, we may detect obsolescence cycles: a node |
|
1163 | # During this step, we may detect obsolescence cycles: a node | |
1161 | # with unknown successors sets but already in the call stack. |
|
1164 | # with unknown successors sets but already in the call stack. | |
1162 | # In such a situation, we arbitrarily set the successors sets of |
|
1165 | # In such a situation, we arbitrarily set the successors sets of | |
1163 | # the node to nothing (node pruned) to break the cycle. |
|
1166 | # the node to nothing (node pruned) to break the cycle. | |
1164 | # |
|
1167 | # | |
1165 | # If no break was encountered we proceed to phase 2. |
|
1168 | # If no break was encountered we proceed to phase 2. | |
1166 | # |
|
1169 | # | |
1167 | # Phase 2 computes successors sets of CURRENT (case 4); see details |
|
1170 | # Phase 2 computes successors sets of CURRENT (case 4); see details | |
1168 | # in phase 2 itself. |
|
1171 | # in phase 2 itself. | |
1169 | # |
|
1172 | # | |
1170 | # Note the two levels of iteration in each phase. |
|
1173 | # Note the two levels of iteration in each phase. | |
1171 | # - The first one handles obsolescence markers using CURRENT as |
|
1174 | # - The first one handles obsolescence markers using CURRENT as | |
1172 | # precursor (successors markers of CURRENT). |
|
1175 | # precursor (successors markers of CURRENT). | |
1173 | # |
|
1176 | # | |
1174 | # Having multiple entries here means divergence. |
|
1177 | # Having multiple entries here means divergence. | |
1175 | # |
|
1178 | # | |
1176 | # - The second one handles successors defined in each marker. |
|
1179 | # - The second one handles successors defined in each marker. | |
1177 | # |
|
1180 | # | |
1178 | # Having none means pruned node, multiple successors means split, |
|
1181 | # Having none means pruned node, multiple successors means split, | |
1179 | # single successors are standard replacements. |
|
1182 | # single successors are standard replacements. | |
1180 | # |
|
1183 | # | |
1181 | for mark in sorted(succmarkers[current]): |
|
1184 | for mark in sorted(succmarkers[current]): | |
1182 | for suc in mark[1]: |
|
1185 | for suc in mark[1]: | |
1183 | if suc not in cache: |
|
1186 | if suc not in cache: | |
1184 | if suc in stackedset: |
|
1187 | if suc in stackedset: | |
1185 | # cycle breaking |
|
1188 | # cycle breaking | |
1186 | cache[suc] = [] |
|
1189 | cache[suc] = [] | |
1187 | else: |
|
1190 | else: | |
1188 | # case (3) If we have not computed successors sets |
|
1191 | # case (3) If we have not computed successors sets | |
1189 | # of one of those successors we add it to the |
|
1192 | # of one of those successors we add it to the | |
1190 | # `toproceed` stack and stop all work for this |
|
1193 | # `toproceed` stack and stop all work for this | |
1191 | # iteration. |
|
1194 | # iteration. | |
1192 | toproceed.append(suc) |
|
1195 | toproceed.append(suc) | |
1193 | stackedset.add(suc) |
|
1196 | stackedset.add(suc) | |
1194 | break |
|
1197 | break | |
1195 | else: |
|
1198 | else: | |
1196 | continue |
|
1199 | continue | |
1197 | break |
|
1200 | break | |
1198 | else: |
|
1201 | else: | |
1199 | # case (4): we know all successors sets of all direct |
|
1202 | # case (4): we know all successors sets of all direct | |
1200 | # successors |
|
1203 | # successors | |
1201 | # |
|
1204 | # | |
1202 | # Successors set contributed by each marker depends on the |
|
1205 | # Successors set contributed by each marker depends on the | |
1203 | # successors sets of all its "successors" nodes. |
|
1206 | # successors sets of all its "successors" nodes. | |
1204 | # |
|
1207 | # | |
1205 | # Each different marker is a divergence in the obsolescence |
|
1208 | # Each different marker is a divergence in the obsolescence | |
1206 | # history. It contributes successors sets distinct from other |
|
1209 | # history. It contributes successors sets distinct from other | |
1207 | # markers. |
|
1210 | # markers. | |
1208 | # |
|
1211 | # | |
1209 | # Within a marker, a successor may have divergent successors |
|
1212 | # Within a marker, a successor may have divergent successors | |
1210 | # sets. In such a case, the marker will contribute multiple |
|
1213 | # sets. In such a case, the marker will contribute multiple | |
1211 | # divergent successors sets. If multiple successors have |
|
1214 | # divergent successors sets. If multiple successors have | |
1212 | # divergent successors sets, a Cartesian product is used. |
|
1215 | # divergent successors sets, a Cartesian product is used. | |
1213 | # |
|
1216 | # | |
1214 | # At the end we post-process successors sets to remove |
|
1217 | # At the end we post-process successors sets to remove | |
1215 | # duplicated entries and successors sets that are strict subsets of |
|
1218 | # duplicated entries and successors sets that are strict subsets of | |
1216 | # another one. |
|
1219 | # another one. | |
1217 | succssets = [] |
|
1220 | succssets = [] | |
1218 | for mark in sorted(succmarkers[current]): |
|
1221 | for mark in sorted(succmarkers[current]): | |
1219 | # successors sets contributed by this marker |
|
1222 | # successors sets contributed by this marker | |
1220 | markss = [[]] |
|
1223 | markss = [[]] | |
1221 | for suc in mark[1]: |
|
1224 | for suc in mark[1]: | |
1222 | # cartesian product with previous successors |
|
1225 | # cartesian product with previous successors | |
1223 | productresult = [] |
|
1226 | productresult = [] | |
1224 | for prefix in markss: |
|
1227 | for prefix in markss: | |
1225 | for suffix in cache[suc]: |
|
1228 | for suffix in cache[suc]: | |
1226 | newss = list(prefix) |
|
1229 | newss = list(prefix) | |
1227 | for part in suffix: |
|
1230 | for part in suffix: | |
1228 | # do not duplicate entries in the successors set |
|
1231 | # do not duplicate entries in the successors set | |
1229 | # first entry wins. |
|
1232 | # first entry wins. | |
1230 | if part not in newss: |
|
1233 | if part not in newss: | |
1231 | newss.append(part) |
|
1234 | newss.append(part) | |
1232 | productresult.append(newss) |
|
1235 | productresult.append(newss) | |
1233 | markss = productresult |
|
1236 | markss = productresult | |
1234 | succssets.extend(markss) |
|
1237 | succssets.extend(markss) | |
1235 | # remove duplicated and subset |
|
1238 | # remove duplicated and subset | |
1236 | seen = [] |
|
1239 | seen = [] | |
1237 | final = [] |
|
1240 | final = [] | |
1238 | candidate = sorted(((set(s), s) for s in succssets if s), |
|
1241 | candidate = sorted(((set(s), s) for s in succssets if s), | |
1239 | key=lambda x: len(x[1]), reverse=True) |
|
1242 | key=lambda x: len(x[1]), reverse=True) | |
1240 | for setversion, listversion in candidate: |
|
1243 | for setversion, listversion in candidate: | |
1241 | for seenset in seen: |
|
1244 | for seenset in seen: | |
1242 | if setversion.issubset(seenset): |
|
1245 | if setversion.issubset(seenset): | |
1243 | break |
|
1246 | break | |
1244 | else: |
|
1247 | else: | |
1245 | final.append(listversion) |
|
1248 | final.append(listversion) | |
1246 | seen.append(setversion) |
|
1249 | seen.append(setversion) | |
1247 | final.reverse() # put small successors set first |
|
1250 | final.reverse() # put small successors set first | |
1248 | cache[current] = final |
|
1251 | cache[current] = final | |
1249 | return cache[initialnode] |
|
1252 | return cache[initialnode] | |
1250 |
|
1253 | |||
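A hedged usage sketch of the cache behaviour described in the docstring above: when successors sets are needed for many nodes, one shared dictionary should be passed to every call so intermediate results are reused (`repo`, `nodes` and `process` are assumed to exist for the illustration):

    cache = {}  # shared between calls, as the docstring requires for performance
    for node in nodes:
        for succset in successorssets(repo, node, cache):
            # each successors set is a tuple of nodes replacing `node` as a whole
            process(succset)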
1251 | # mapping of 'set-name' -> <function to compute this set> |
|
1254 | # mapping of 'set-name' -> <function to compute this set> | |
1252 | cachefuncs = {} |
|
1255 | cachefuncs = {} | |
1253 | def cachefor(name): |
|
1256 | def cachefor(name): | |
1254 | """Decorator to register a function as computing the cache for a set""" |
|
1257 | """Decorator to register a function as computing the cache for a set""" | |
1255 | def decorator(func): |
|
1258 | def decorator(func): | |
1256 | assert name not in cachefuncs |
|
1259 | assert name not in cachefuncs | |
1257 | cachefuncs[name] = func |
|
1260 | cachefuncs[name] = func | |
1258 | return func |
|
1261 | return func | |
1259 | return decorator |
|
1262 | return decorator | |
1260 |
|
1263 | |||
1261 | def getrevs(repo, name): |
|
1264 | def getrevs(repo, name): | |
1262 | """Return the set of revision that belong to the <name> set |
|
1265 | """Return the set of revision that belong to the <name> set | |
1263 |
|
1266 | |||
1264 | Such access may compute the set and cache it for future use""" |
|
1267 | Such access may compute the set and cache it for future use""" | |
1265 | repo = repo.unfiltered() |
|
1268 | repo = repo.unfiltered() | |
1266 | if not repo.obsstore: |
|
1269 | if not repo.obsstore: | |
1267 | return frozenset() |
|
1270 | return frozenset() | |
1268 | if name not in repo.obsstore.caches: |
|
1271 | if name not in repo.obsstore.caches: | |
1269 | repo.obsstore.caches[name] = cachefuncs[name](repo) |
|
1272 | repo.obsstore.caches[name] = cachefuncs[name](repo) | |
1270 | return repo.obsstore.caches[name] |
|
1273 | return repo.obsstore.caches[name] | |
1271 |
|
1274 | |||
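An illustrative registration using the decorator above; 'example' is not a real set name, it only shows the expected shape of a cache function (take a repo, return a set of revision numbers):

    @cachefor('example')  # hypothetical set name, for illustration only
    def _computeexampleset(repo):
        """the set of obsolete revisions that are also heads (illustration)"""
        heads = set(repo.changelog.headrevs())
        return getrevs(repo, 'obsolete') & heads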
1272 | # To be simple we need to invalidate the obsolescence cache when: |
|
1275 | # To be simple we need to invalidate the obsolescence cache when: | |
1273 | # |
|
1276 | # | |
1274 | # - a new changeset is added |
|
1277 | # - a new changeset is added | |
1275 | # - public phase is changed |
|
1278 | # - public phase is changed | |
1276 | # - obsolescence markers are added |
|
1279 | # - obsolescence markers are added | |
1277 | # - strip is used on a repo |
|
1280 | # - strip is used on a repo | |
1278 | def clearobscaches(repo): |
|
1281 | def clearobscaches(repo): | |
1279 | """Remove all obsolescence related cache from a repo |
|
1282 | """Remove all obsolescence related cache from a repo | |
1280 |
|
1283 | |||
1281 | This removes all caches in the obsstore if the obsstore already exists on the |
|
1284 | This removes all caches in the obsstore if the obsstore already exists on the | |
1282 | repo. |
|
1285 | repo. | |
1283 |
|
1286 | |||
1284 | (We could be smarter here given the exact event that triggers the cache |
|
1287 | (We could be smarter here given the exact event that triggers the cache | |
1285 | clearing)""" |
|
1288 | clearing)""" | |
1286 | # only clear the cache if there is obsstore data in this repo |
|
1289 | # only clear the cache if there is obsstore data in this repo | |
1287 | if 'obsstore' in repo._filecache: |
|
1290 | if 'obsstore' in repo._filecache: | |
1288 | repo.obsstore.caches.clear() |
|
1291 | repo.obsstore.caches.clear() | |
1289 |
|
1292 | |||
1290 | @cachefor('obsolete') |
|
1293 | @cachefor('obsolete') | |
1291 | def _computeobsoleteset(repo): |
|
1294 | def _computeobsoleteset(repo): | |
1292 | """the set of obsolete revisions""" |
|
1295 | """the set of obsolete revisions""" | |
1293 | getnode = repo.changelog.node |
|
1296 | getnode = repo.changelog.node | |
1294 | notpublic = repo._phasecache.getrevset(repo, (phases.draft, phases.secret)) |
|
1297 | notpublic = repo._phasecache.getrevset(repo, (phases.draft, phases.secret)) | |
1295 | isobs = repo.obsstore.successors.__contains__ |
|
1298 | isobs = repo.obsstore.successors.__contains__ | |
1296 | obs = set(r for r in notpublic if isobs(getnode(r))) |
|
1299 | obs = set(r for r in notpublic if isobs(getnode(r))) | |
1297 | return obs |
|
1300 | return obs | |
1298 |
|
1301 | |||
1299 | @cachefor('unstable') |
|
1302 | @cachefor('unstable') | |
1300 | def _computeunstableset(repo): |
|
1303 | def _computeunstableset(repo): | |
1301 | """the set of non obsolete revisions with obsolete parents""" |
|
1304 | """the set of non obsolete revisions with obsolete parents""" | |
1302 | revs = [(ctx.rev(), ctx) for ctx in |
|
1305 | revs = [(ctx.rev(), ctx) for ctx in | |
1303 | repo.set('(not public()) and (not obsolete())')] |
|
1306 | repo.set('(not public()) and (not obsolete())')] | |
1304 | revs.sort(key=lambda x:x[0]) |
|
1307 | revs.sort(key=lambda x:x[0]) | |
1305 | unstable = set() |
|
1308 | unstable = set() | |
1306 | for rev, ctx in revs: |
|
1309 | for rev, ctx in revs: | |
1307 | # A rev is unstable if one of its parents is obsolete or unstable |
|
1310 | # A rev is unstable if one of its parents is obsolete or unstable | |
1308 | # this works since we traverse following growing rev order |
|
1311 | # this works since we traverse following growing rev order | |
1309 | if any((x.obsolete() or (x.rev() in unstable)) |
|
1312 | if any((x.obsolete() or (x.rev() in unstable)) | |
1310 | for x in ctx.parents()): |
|
1313 | for x in ctx.parents()): | |
1311 | unstable.add(rev) |
|
1314 | unstable.add(rev) | |
1312 | return unstable |
|
1315 | return unstable | |
1313 |
|
1316 | |||
1314 | @cachefor('suspended') |
|
1317 | @cachefor('suspended') | |
1315 | def _computesuspendedset(repo): |
|
1318 | def _computesuspendedset(repo): | |
1316 | """the set of obsolete parents with non obsolete descendants""" |
|
1319 | """the set of obsolete parents with non obsolete descendants""" | |
1317 | suspended = repo.changelog.ancestors(getrevs(repo, 'unstable')) |
|
1320 | suspended = repo.changelog.ancestors(getrevs(repo, 'unstable')) | |
1318 | return set(r for r in getrevs(repo, 'obsolete') if r in suspended) |
|
1321 | return set(r for r in getrevs(repo, 'obsolete') if r in suspended) | |
1319 |
|
1322 | |||
1320 | @cachefor('extinct') |
|
1323 | @cachefor('extinct') | |
1321 | def _computeextinctset(repo): |
|
1324 | def _computeextinctset(repo): | |
1322 | """the set of obsolete parents without non obsolete descendants""" |
|
1325 | """the set of obsolete parents without non obsolete descendants""" | |
1323 | return getrevs(repo, 'obsolete') - getrevs(repo, 'suspended') |
|
1326 | return getrevs(repo, 'obsolete') - getrevs(repo, 'suspended') | |
1324 |
|
1327 | |||
1325 |
|
1328 | |||
1326 | @cachefor('bumped') |
|
1329 | @cachefor('bumped') | |
1327 | def _computebumpedset(repo): |
|
1330 | def _computebumpedset(repo): | |
1328 | """the set of revs trying to obsolete public revisions""" |
|
1331 | """the set of revs trying to obsolete public revisions""" | |
1329 | bumped = set() |
|
1332 | bumped = set() | |
1330 | # util function (avoid attribute lookup in the loop) |
|
1333 | # util function (avoid attribute lookup in the loop) | |
1331 | phase = repo._phasecache.phase # would be faster to grab the full list |
|
1334 | phase = repo._phasecache.phase # would be faster to grab the full list | |
1332 | public = phases.public |
|
1335 | public = phases.public | |
1333 | cl = repo.changelog |
|
1336 | cl = repo.changelog | |
1334 | torev = cl.nodemap.get |
|
1337 | torev = cl.nodemap.get | |
1335 | for ctx in repo.set('(not public()) and (not obsolete())'): |
|
1338 | for ctx in repo.set('(not public()) and (not obsolete())'): | |
1336 | rev = ctx.rev() |
|
1339 | rev = ctx.rev() | |
1337 | # We only evaluate mutable, non-obsolete revisions |
|
1340 | # We only evaluate mutable, non-obsolete revisions | |
1338 | node = ctx.node() |
|
1341 | node = ctx.node() | |
1339 | # (future) A cache of precursors may be worth it if split is very common |
|
1342 | # (future) A cache of precursors may be worth it if split is very common | |
1340 | for pnode in allprecursors(repo.obsstore, [node], |
|
1343 | for pnode in allprecursors(repo.obsstore, [node], | |
1341 | ignoreflags=bumpedfix): |
|
1344 | ignoreflags=bumpedfix): | |
1342 | prev = torev(pnode) # unfiltered! but so is phasecache |
|
1345 | prev = torev(pnode) # unfiltered! but so is phasecache | |
1343 | if (prev is not None) and (phase(repo, prev) <= public): |
|
1346 | if (prev is not None) and (phase(repo, prev) <= public): | |
1344 | # we have a public precursor |
|
1347 | # we have a public precursor | |
1345 | bumped.add(rev) |
|
1348 | bumped.add(rev) | |
1346 | break # Next draft! |
|
1349 | break # Next draft! | |
1347 | return bumped |
|
1350 | return bumped | |
1348 |
|
1351 | |||
1349 | @cachefor('divergent') |
|
1352 | @cachefor('divergent') | |
1350 | def _computedivergentset(repo): |
|
1353 | def _computedivergentset(repo): | |
1351 | """the set of rev that compete to be the final successors of some revision. |
|
1354 | """the set of rev that compete to be the final successors of some revision. | |
1352 | """ |
|
1355 | """ | |
1353 | divergent = set() |
|
1356 | divergent = set() | |
1354 | obsstore = repo.obsstore |
|
1357 | obsstore = repo.obsstore | |
1355 | newermap = {} |
|
1358 | newermap = {} | |
1356 | for ctx in repo.set('(not public()) - obsolete()'): |
|
1359 | for ctx in repo.set('(not public()) - obsolete()'): | |
1357 | mark = obsstore.precursors.get(ctx.node(), ()) |
|
1360 | mark = obsstore.precursors.get(ctx.node(), ()) | |
1358 | toprocess = set(mark) |
|
1361 | toprocess = set(mark) | |
1359 | seen = set() |
|
1362 | seen = set() | |
1360 | while toprocess: |
|
1363 | while toprocess: | |
1361 | prec = toprocess.pop()[0] |
|
1364 | prec = toprocess.pop()[0] | |
1362 | if prec in seen: |
|
1365 | if prec in seen: | |
1363 | continue # emergency cycle hanging prevention |
|
1366 | continue # emergency cycle hanging prevention | |
1364 | seen.add(prec) |
|
1367 | seen.add(prec) | |
1365 | if prec not in newermap: |
|
1368 | if prec not in newermap: | |
1366 | successorssets(repo, prec, newermap) |
|
1369 | successorssets(repo, prec, newermap) | |
1367 | newer = [n for n in newermap[prec] if n] |
|
1370 | newer = [n for n in newermap[prec] if n] | |
1368 | if len(newer) > 1: |
|
1371 | if len(newer) > 1: | |
1369 | divergent.add(ctx.rev()) |
|
1372 | divergent.add(ctx.rev()) | |
1370 | break |
|
1373 | break | |
1371 | toprocess.update(obsstore.precursors.get(prec, ())) |
|
1374 | toprocess.update(obsstore.precursors.get(prec, ())) | |
1372 | return divergent |
|
1375 | return divergent | |
1373 |
|
1376 | |||
1374 |
|
1377 | |||
1375 | def createmarkers(repo, relations, flag=0, date=None, metadata=None, |
|
1378 | def createmarkers(repo, relations, flag=0, date=None, metadata=None, | |
1376 | operation=None): |
|
1379 | operation=None): | |
1377 | """Add obsolete markers between changesets in a repo |
|
1380 | """Add obsolete markers between changesets in a repo | |
1378 |
|
1381 | |||
1379 | <relations> must be an iterable of (<old>, (<new>, ...)[,{metadata}]) |
|
1382 | <relations> must be an iterable of (<old>, (<new>, ...)[,{metadata}]) | |
1380 | tuples. `old` and `news` are changectx. metadata is an optional dictionary |
|
1383 | tuples. `old` and `news` are changectx. metadata is an optional dictionary | |
1381 | containing metadata for this marker only. It is merged with the global |
|
1384 | containing metadata for this marker only. It is merged with the global | |
1382 | metadata specified through the `metadata` argument of this function. |
|
1385 | metadata specified through the `metadata` argument of this function. | |
1383 |
|
1386 | |||
1384 | Trying to obsolete a public changeset will raise an exception. |
|
1387 | Trying to obsolete a public changeset will raise an exception. | |
1385 |
|
1388 | |||
1386 | Current user and date are used except if specified otherwise in the |
|
1389 | Current user and date are used except if specified otherwise in the | |
1387 | metadata attribute. |
|
1390 | metadata attribute. | |
1388 |
|
1391 | |||
1389 | This function operates within a transaction of its own, but does |
|
1392 | This function operates within a transaction of its own, but does | |
1390 | not take any lock on the repo. |
|
1393 | not take any lock on the repo. | |
1391 | """ |
|
1394 | """ | |
1392 | # prepare metadata |
|
1395 | # prepare metadata | |
1393 | if metadata is None: |
|
1396 | if metadata is None: | |
1394 | metadata = {} |
|
1397 | metadata = {} | |
1395 | if 'user' not in metadata: |
|
1398 | if 'user' not in metadata: | |
1396 | metadata['user'] = repo.ui.username() |
|
1399 | metadata['user'] = repo.ui.username() | |
1397 | useoperation = repo.ui.configbool('experimental', |
|
1400 | useoperation = repo.ui.configbool('experimental', | |
1398 | 'evolution.track-operation', |
|
1401 | 'evolution.track-operation', | |
1399 | False) |
|
1402 | False) | |
1400 | if useoperation and operation: |
|
1403 | if useoperation and operation: | |
1401 | metadata['operation'] = operation |
|
1404 | metadata['operation'] = operation | |
1402 | tr = repo.transaction('add-obsolescence-marker') |
|
1405 | tr = repo.transaction('add-obsolescence-marker') | |
1403 | try: |
|
1406 | try: | |
1404 | markerargs = [] |
|
1407 | markerargs = [] | |
1405 | for rel in relations: |
|
1408 | for rel in relations: | |
1406 | prec = rel[0] |
|
1409 | prec = rel[0] | |
1407 | sucs = rel[1] |
|
1410 | sucs = rel[1] | |
1408 | localmetadata = metadata.copy() |
|
1411 | localmetadata = metadata.copy() | |
1409 | if 2 < len(rel): |
|
1412 | if 2 < len(rel): | |
1410 | localmetadata.update(rel[2]) |
|
1413 | localmetadata.update(rel[2]) | |
1411 |
|
1414 | |||
1412 | if not prec.mutable(): |
|
1415 | if not prec.mutable(): | |
1413 | raise error.Abort(_("cannot obsolete public changeset: %s") |
|
1416 | raise error.Abort(_("cannot obsolete public changeset: %s") | |
1414 | % prec, |
|
1417 | % prec, | |
1415 | hint="see 'hg help phases' for details") |
|
1418 | hint="see 'hg help phases' for details") | |
1416 | nprec = prec.node() |
|
1419 | nprec = prec.node() | |
1417 | nsucs = tuple(s.node() for s in sucs) |
|
1420 | nsucs = tuple(s.node() for s in sucs) | |
1418 | npare = None |
|
1421 | npare = None | |
1419 | if not nsucs: |
|
1422 | if not nsucs: | |
1420 | npare = tuple(p.node() for p in prec.parents()) |
|
1423 | npare = tuple(p.node() for p in prec.parents()) | |
1421 | if nprec in nsucs: |
|
1424 | if nprec in nsucs: | |
1422 | raise error.Abort(_("changeset %s cannot obsolete itself") |
|
1425 | raise error.Abort(_("changeset %s cannot obsolete itself") | |
1423 | % prec) |
|
1426 | % prec) | |
1424 |
|
1427 | |||
1425 | # Creating the marker causes the hidden cache to become invalid, |
|
1428 | # Creating the marker causes the hidden cache to become invalid, | |
1426 | # which causes recomputation when we ask for prec.parents() above. |
|
1429 | # which causes recomputation when we ask for prec.parents() above. | |
1427 | # Resulting in n^2 behavior. So let's prepare all of the args |
|
1430 | # Resulting in n^2 behavior. So let's prepare all of the args | |
1428 | # first, then create the markers. |
|
1431 | # first, then create the markers. | |
1429 | markerargs.append((nprec, nsucs, npare, localmetadata)) |
|
1432 | markerargs.append((nprec, nsucs, npare, localmetadata)) | |
1430 |
|
1433 | |||
1431 | for args in markerargs: |
|
1434 | for args in markerargs: | |
1432 | nprec, nsucs, npare, localmetadata = args |
|
1435 | nprec, nsucs, npare, localmetadata = args | |
1433 | repo.obsstore.create(tr, nprec, nsucs, flag, parents=npare, |
|
1436 | repo.obsstore.create(tr, nprec, nsucs, flag, parents=npare, | |
1434 | date=date, metadata=localmetadata, |
|
1437 | date=date, metadata=localmetadata, | |
1435 | ui=repo.ui) |
|
1438 | ui=repo.ui) | |
1436 | repo.filteredrevcache.clear() |
|
1439 | repo.filteredrevcache.clear() | |
1437 | tr.close() |
|
1440 | tr.close() | |
1438 | finally: |
|
1441 | finally: | |
1439 | tr.release() |
|
1442 | tr.release() |
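Finally, a hedged sketch of the relations format accepted by createmarkers; `repo`, `old`, `new1` and `pruned` are assumed to be existing repo/changectx objects, and the extra metadata key is arbitrary:

    relations = [
        (old, (new1,)),                     # old rewritten as new1
        (pruned, (), {'note': 'dropped'}),  # hypothetical prune with extra metadata
    ]
    createmarkers(repo, relations, operation='amend')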