# obsolete.py - obsolete markers handling
#
# Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
#                Logilab SA        <contact@logilab.fr>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""Obsolete markers handling

An obsolete marker maps an old changeset to a list of new
changesets. If the list of new changesets is empty, the old changeset
is said to be "killed". Otherwise, the old changeset is being
"replaced" by the new changesets.

Obsolete markers can be used to record and distribute changeset graph
transformations performed by history rewriting operations, and help
building new tools to reconcile conflicting rewriting actions. To
facilitate conflict resolution, markers include various annotations
besides old and new changeset identifiers, such as creation date or
author name.


Format
------

Markers are stored in an append-only file stored in
'.hg/store/obsstore'.

The file starts with a version header:

- 1 unsigned byte: version number, starting at zero.


The header is followed by the markers. Each marker is made of:

- 1 unsigned byte: number of new changesets "N", could be zero.

- 1 unsigned 32-bit integer: metadata size "M" in bytes.

- 1 byte: a bit field. It is reserved for flags used in common
  obsolete marker operations, to avoid repeated decoding of metadata
  entries.

- 20 bytes: obsoleted changeset identifier.

- N*20 bytes: new changesets identifiers.

- M bytes: metadata as a sequence of nul-terminated strings. Each
  string contains a key and a value, separated by a colon ':', without
  additional encoding. Keys cannot contain '\0' or ':' and values
  cannot contain '\0'.
"""
import struct
from mercurial import util, base85
from i18n import _

# The obsolete feature is not mature enough to be enabled by default.
# You have to rely on a third party extension to enable it.
_enabled = False

_pack = struct.pack
_unpack = struct.unpack

# data used for parsing and writing
_fmversion = 0
_fmfixed = '>BIB20s'
_fmnode = '20s'
_fmfsize = struct.calcsize(_fmfixed)
_fnodesize = struct.calcsize(_fmnode)

def _readmarkers(data):
    """Read and enumerate markers from raw data"""
    off = 0
    diskversion = _unpack('>B', data[off:off + 1])[0]
    off += 1
    if diskversion != _fmversion:
        raise util.Abort(_('parsing obsolete marker: unknown version %r')
                         % diskversion)

    # Loop on markers
    l = len(data)
    while off + _fmfsize <= l:
        # read fixed part
        cur = data[off:off + _fmfsize]
        off += _fmfsize
        nbsuc, mdsize, flags, pre = _unpack(_fmfixed, cur)
        # read replacement
        sucs = ()
        if nbsuc:
            s = _fnodesize * nbsuc
            cur = data[off:off + s]
            sucs = _unpack(_fmnode * nbsuc, cur)
            off += s
        # read metadata
        # (metadata will be decoded on demand)
        metadata = data[off:off + mdsize]
        if len(metadata) != mdsize:
            raise util.Abort(_('parsing obsolete marker: metadata is too '
                               'short, %d bytes expected, got %d')
                             % (mdsize, len(metadata)))
        off += mdsize
        yield (pre, sucs, flags, metadata)

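# Illustrative sketch (not part of the original module): round-tripping a
# minimal one-marker blob through _readmarkers, built with _encodeonemarker
# (defined below) and a made-up all-zero precursor node.
# >>> blob = _pack('>B', _fmversion) + _encodeonemarker(('\x00' * 20, (), 0, ''))
# >>> [m[1:] for m in _readmarkers(blob)]
# [((), 0, '')]
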
def encodemeta(meta):
    """Return encoded metadata string to string mapping.

    Assume no ':' in key and no '\0' in both key and value."""
    for key, value in meta.iteritems():
        if ':' in key or '\0' in key:
            raise ValueError("':' and '\0' are forbidden in metadata keys")
        if '\0' in value:
            raise ValueError("'\0' is forbidden in metadata values")
    return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])

def decodemeta(data):
    """Return string to string dictionary from encoded version."""
    d = {}
    for l in data.split('\0'):
        if l:
            # values may contain ':', so split on the first one only
            key, value = l.split(':', 1)
            d[key] = value
    return d

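# Illustrative sketch (not part of the original module): metadata survives an
# encode/decode round trip; sorted() avoids relying on dict ordering.
# >>> sorted(decodemeta(encodemeta({'user': 'alice', 'date': '0 0'})).items())
# [('date', '0 0'), ('user', 'alice')]
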
class marker(object):
    """Wrap obsolete marker raw data"""

    def __init__(self, repo, data):
        # the repo argument will be used to create changectx in later versions
        self._repo = repo
        self._data = data
        self._decodedmeta = None

    def precnode(self):
        """Precursor changeset node identifier"""
        return self._data[0]

    def succnodes(self):
        """List of successor changeset node identifiers"""
        return self._data[1]

    def metadata(self):
        """Decoded metadata dictionary"""
        if self._decodedmeta is None:
            self._decodedmeta = decodemeta(self._data[3])
        return self._decodedmeta

    def date(self):
        """Creation date as (unixtime, offset)"""
        parts = self.metadata()['date'].split(' ')
        return (float(parts[0]), int(parts[1]))

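# Illustrative sketch (not part of the original module): wrapping a raw marker
# tuple. The repo argument is only stored for now, so None suffices here; the
# node values are made up.
# >>> data = ('\x11' * 20, ('\x22' * 20,), 0, encodemeta({'date': '42 0'}))
# >>> m = marker(None, data)
# >>> m.date()
# (42.0, 0)
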
class obsstore(object):
    """Store obsolete markers

    Markers can be accessed with two mappings:
    - precursors: old node -> set of markers obsoleting it
    - successors: new node -> set of markers listing it as a successor
    """

    def __init__(self, sopener):
        self._all = []
        # new markers to serialize
        self.precursors = {}
        self.successors = {}
        self.sopener = sopener
        data = sopener.tryread('obsstore')
        if data:
            self._load(_readmarkers(data))

    def __iter__(self):
        return iter(self._all)

    def __nonzero__(self):
        return bool(self._all)

    def create(self, transaction, prec, succs=(), flag=0, metadata=None):
        """Add a new obsolete marker

        * ensures the marker data are hashable
        * checks mandatory metadata
        * encodes metadata
        """
        if metadata is None:
            metadata = {}
        if len(prec) != 20:
            raise ValueError(prec)
        for succ in succs:
            if len(succ) != 20:
                raise ValueError(succ)
        marker = (str(prec), tuple(succs), int(flag), encodemeta(metadata))
        self.add(transaction, [marker])

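    # Illustrative sketch (not part of the original module; 'repo', 'oldnode'
    # and 'newnode' are hypothetical): recording a rewrite in a transaction.
    # >>> tr = repo.transaction('add obsolete marker')
    # >>> try:
    # ...     repo.obsstore.create(tr, oldnode, [newnode],
    # ...                          metadata={'user': 'alice', 'date': '0 0'})
    # ...     tr.close()
    # ... finally:
    # ...     tr.release()
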
    def add(self, transaction, markers):
        """Add new markers to the store

        Takes care of filtering out duplicates.
        Returns the number of new markers."""
        if not _enabled:
            raise util.Abort('obsolete feature is not enabled on this repo')
        new = [m for m in markers if m not in self._all]
        if new:
            f = self.sopener('obsstore', 'ab')
            try:
                # Whether the file's current position is at the beginning
                # or at the end after opening a file for appending is
                # implementation defined. So we must seek to the end before
                # calling tell(), or we may get a zero offset for non-zero
                # sized files on some platforms (issue3543).
                f.seek(0, 2) # os.SEEK_END
                offset = f.tell()
                transaction.add('obsstore', offset)
                # offset == 0: new file - add the version header
                for bytes in _encodemarkers(new, offset == 0):
                    f.write(bytes)
            finally:
                # XXX: f.close() == filecache invalidation == obsstore rebuilt.
                # call 'filecacheentry.refresh()' here
                f.close()
            self._load(new)
        return len(new)

    def mergemarkers(self, transaction, data):
        markers = _readmarkers(data)
        self.add(transaction, markers)

    def _load(self, markers):
        for mark in markers:
            self._all.append(mark)
            pre, sucs = mark[:2]
            self.precursors.setdefault(pre, set()).add(mark)
            for suc in sucs:
                self.successors.setdefault(suc, set()).add(mark)

def _encodemarkers(markers, addheader=False):
    # Kept separate from flushmarkers(), it will be reused for
    # markers exchange.
    if addheader:
        yield _pack('>B', _fmversion)
    for marker in markers:
        yield _encodeonemarker(marker)


def _encodeonemarker(marker):
    pre, sucs, flags, metadata = marker
    nbsuc = len(sucs)
    format = _fmfixed + (_fmnode * nbsuc)
    data = [nbsuc, len(metadata), flags, pre]
    data.extend(sucs)
    return _pack(format, *data) + metadata

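# Illustrative sketch (not part of the original module): the encoded size is
# the 26-byte fixed part plus 20 bytes per successor plus the metadata, here
# with made-up nodes and an 8-byte 'user:bob' metadata string.
# >>> m = ('\x00' * 20, ('\x11' * 20,), 0, 'user:bob')
# >>> len(_encodeonemarker(m))   # 26 + 20 + 8
# 54
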
# arbitrarily picked to fit into 8K limit from HTTP server
# you have to take into account:
# - the version header
# - the base85 encoding
_maxpayload = 5300

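# Illustrative arithmetic (an assumption about the bound, not from the
# original module): base85 encodes 4 bytes as 5 characters, so a 5300-byte
# part grows to roughly 5300 * 5 / 4 = 6625 characters, leaving headroom for
# the version header and pushkey overhead under the 8192-byte limit.
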
def listmarkers(repo):
    """List markers over pushkey"""
    if not repo.obsstore:
        return {}
    keys = {}
    parts = []
    currentlen = _maxpayload * 2  # ensure we create a new part
    for marker in repo.obsstore:
        nextdata = _encodeonemarker(marker)
        if len(nextdata) + currentlen > _maxpayload:
            currentpart = []
            currentlen = 0
            parts.append(currentpart)
        currentpart.append(nextdata)
        currentlen += len(nextdata)
    for idx, part in enumerate(reversed(parts)):
        data = ''.join([_pack('>B', _fmversion)] + part)
        keys['dump%i' % idx] = base85.b85encode(data)
    return keys

def pushmarker(repo, key, old, new):
    """Push markers over pushkey"""
    if not key.startswith('dump'):
        repo.ui.warn(_('unknown key: %r') % key)
        return 0
    if old:
        repo.ui.warn(_('unexpected old value for %r') % key)
        return 0
    data = base85.b85decode(new)
    lock = repo.lock()
    try:
        tr = repo.transaction('pushkey: obsolete markers')
        try:
            repo.obsstore.mergemarkers(tr, data)
            tr.close()
            return 1
        finally:
            tr.release()
    finally:
        lock.release()

def allmarkers(repo):
    """all obsolete markers known in a repository"""
    for markerdata in repo.obsstore:
        yield marker(repo, markerdata)

def precursormarkers(ctx):
    """obsolete markers making this changeset obsolete"""
    for data in ctx._repo.obsstore.precursors.get(ctx.node(), ()):
        yield marker(ctx._repo, data)

def successormarkers(ctx):
    """obsolete markers marking this changeset as a successor"""
    for data in ctx._repo.obsstore.successors.get(ctx.node(), ()):
        yield marker(ctx._repo, data)

def anysuccessors(obsstore, node):
    """Yield every successor of <node>

    This is a linear yield, unsuitable for detecting split changesets."""
    remaining = set([node])
    seen = set(remaining)
    while remaining:
        current = remaining.pop()
        yield current
        for mark in obsstore.precursors.get(current, ()):
            for suc in mark[1]:
                if suc not in seen:
                    seen.add(suc)
                    remaining.add(suc)
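
# Illustrative sketch (not part of the original module): following successors
# through a chain a -> b -> c with a minimal stand-in for the obsstore; only
# the 'precursors' mapping and the marker successor tuple (mark[1]) are used.
# >>> class fakestore(object):
# ...     precursors = {'a': [('a', ('b',), 0, '')],
# ...                   'b': [('b', ('c',), 0, '')]}
# >>> sorted(anysuccessors(fakestore(), 'a'))
# ['a', 'b', 'c']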