@@ -1,799 +1,792 @@
# parsers.py - Python implementation of parsers.c
#
# Copyright 2009 Olivia Mackall <olivia@selenic.com> and others
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import struct
import zlib

from ..node import (
    nullrev,
    sha1nodeconstants,
)
from ..thirdparty import attr
from .. import (
    error,
    pycompat,
    revlogutils,
    util,
)

from ..revlogutils import nodemap as nodemaputil
from ..revlogutils import constants as revlog_constants

stringio = pycompat.bytesio


_pack = struct.pack
_unpack = struct.unpack
_compress = zlib.compress
_decompress = zlib.decompress


# a special value used internally for `size` if the file comes from the other parent
FROM_P2 = -2

# a special value used internally for `size` if the file is modified/merged/added
NONNORMAL = -1

# a special value used internally for `time` if the time is ambiguous
AMBIGUOUS_TIME = -1


@attr.s(slots=True, init=False)
class DirstateItem(object):
    """represent a dirstate entry

    It contains:

    - state (one of 'n', 'a', 'r', 'm')
    - mode,
    - size,
    - mtime,
    """

    _wc_tracked = attr.ib()
    _p1_tracked = attr.ib()
    _p2_tracked = attr.ib()
    # the three items above should probably be combined
    #
    # However it is unclear if they properly cover some of the most advanced
    # merge cases. So we should probably wait on this to be settled.
    _merged = attr.ib()
    _clean_p1 = attr.ib()
    _clean_p2 = attr.ib()
    _possibly_dirty = attr.ib()
    _mode = attr.ib()
    _size = attr.ib()
    _mtime = attr.ib()

    def __init__(
        self,
        wc_tracked=False,
        p1_tracked=False,
        p2_tracked=False,
        merged=False,
        clean_p1=False,
        clean_p2=False,
        possibly_dirty=False,
        parentfiledata=None,
    ):
        if merged and (clean_p1 or clean_p2):
            msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
            raise error.ProgrammingError(msg)

        self._wc_tracked = wc_tracked
        self._p1_tracked = p1_tracked
        self._p2_tracked = p2_tracked
        self._merged = merged
        self._clean_p1 = clean_p1
        self._clean_p2 = clean_p2
        self._possibly_dirty = possibly_dirty
        if parentfiledata is None:
            self._mode = None
            self._size = None
            self._mtime = None
        else:
            self._mode = parentfiledata[0]
            self._size = parentfiledata[1]
            self._mtime = parentfiledata[2]

    @classmethod
    def new_added(cls):
        """constructor to help legacy API to build a new "added" item

        Should eventually be removed
        """
        instance = cls()
        instance._wc_tracked = True
        instance._p1_tracked = False
        instance._p2_tracked = False
        return instance

    @classmethod
    def new_merged(cls):
        """constructor to help legacy API to build a new "merged" item

        Should eventually be removed
        """
        instance = cls()
        instance._wc_tracked = True
        instance._p1_tracked = True  # might not be True because of rename?
        instance._p2_tracked = True  # might not be True because of rename?
        instance._merged = True
        return instance

    @classmethod
    def new_from_p2(cls):
        """constructor to help legacy API to build a new "from_p2" item

        Should eventually be removed
        """
        instance = cls()
        instance._wc_tracked = True
        instance._p1_tracked = False  # might actually be True
        instance._p2_tracked = True
        instance._clean_p2 = True
        return instance

    @classmethod
    def new_possibly_dirty(cls):
        """constructor to help legacy API to build a new "possibly_dirty" item

        Should eventually be removed
        """
        instance = cls()
        instance._wc_tracked = True
        instance._p1_tracked = True
        instance._possibly_dirty = True
        return instance

    @classmethod
    def new_normal(cls, mode, size, mtime):
        """constructor to help legacy API to build a new "normal" item

        Should eventually be removed
        """
        assert size != FROM_P2
        assert size != NONNORMAL
        instance = cls()
        instance._wc_tracked = True
        instance._p1_tracked = True
        instance._mode = mode
        instance._size = size
        instance._mtime = mtime
        return instance

    @classmethod
    def from_v1_data(cls, state, mode, size, mtime):
        """Build a new DirstateItem object from V1 data

        Since the dirstate-v1 format is frozen, the signature of this function
        is not expected to change, unlike the __init__ one.
        """
        if state == b'm':
            return cls.new_merged()
        elif state == b'a':
            return cls.new_added()
        elif state == b'r':
            instance = cls()
            instance._wc_tracked = False
            if size == NONNORMAL:
                instance._merged = True
                instance._p1_tracked = (
                    True  # might not be True because of rename?
                )
                instance._p2_tracked = (
                    True  # might not be True because of rename?
                )
            elif size == FROM_P2:
                instance._clean_p2 = True
                instance._p1_tracked = (
                    False  # We actually don't know (file history)
                )
                instance._p2_tracked = True
            else:
                instance._p1_tracked = True
            return instance
        elif state == b'n':
            if size == FROM_P2:
                return cls.new_from_p2()
            elif size == NONNORMAL:
                return cls.new_possibly_dirty()
            elif mtime == AMBIGUOUS_TIME:
                instance = cls.new_normal(mode, size, 42)
                instance._mtime = None
                instance._possibly_dirty = True
                return instance
            else:
                return cls.new_normal(mode, size, mtime)
        else:
            raise RuntimeError(b'unknown state: %s' % state)
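
    # A rough sketch of how the frozen v1 tuples map onto items; the mode,
    # size and mtime values below are arbitrary examples:
    #
    #   DirstateItem.from_v1_data(b'n', 0o644, 512, 1500000000)
    #       -> a plain "normal" entry: tracked, with mode/size/mtime recorded
    #   DirstateItem.from_v1_data(b'n', 0, FROM_P2, 0)
    #       -> equivalent to new_from_p2()
    #   DirstateItem.from_v1_data(b'r', 0, 0, 0)
    #       -> marked for removal, no longer tracked in the working copy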

    def set_possibly_dirty(self):
        """Mark a file as "possibly dirty"

        This means the next status call will have to actually check its content
        to make sure it is correct.
        """
        self._possibly_dirty = True

    def set_untracked(self):
        """mark a file as untracked in the working copy

        This will ultimately be called by commands like `hg remove`.
        """
        # backup the previous state (useful for merge)
        self._wc_tracked = False
        self._mode = None
        self._size = None
        self._mtime = None

    @property
    def mode(self):
        return self.v1_mode()

    @property
    def size(self):
        return self.v1_size()

    @property
    def mtime(self):
        return self.v1_mtime()

    @property
    def state(self):
        """
        States are:
          n  normal
          m  needs merging
          r  marked for removal
          a  marked for addition

        XXX This "state" is a bit obscure and mostly a direct expression of the
        dirstatev1 format. It would make sense to ultimately deprecate it in
        favor of the more "semantic" attributes.
        """
        return self.v1_state()

    @property
    def tracked(self):
        """True if the file is tracked in the working copy"""
        return self._wc_tracked

    @property
    def added(self):
        """True if the file has been added"""
        return self._wc_tracked and not (self._p1_tracked or self._p2_tracked)

    @property
    def merged(self):
        """True if the file has been merged

        Should only be set if a merge is in progress in the dirstate
        """
        return self._wc_tracked and self._merged

    @property
    def from_p2(self):
        """True if the file has been fetched from p2 during the current merge

        This is only True if the file is currently tracked.

        Should only be set if a merge is in progress in the dirstate
        """
        if not self._wc_tracked:
            return False
        return self._clean_p2 or (not self._p1_tracked and self._p2_tracked)

    @property
    def from_p2_removed(self):
        """True if the file has been removed, but was "from_p2" initially

        This property seems like an abstraction leakage and should probably be
        dealt with in this class (or maybe the dirstatemap) directly.
        """
        return self.removed and self._clean_p2

    @property
    def removed(self):
        """True if the file has been removed"""
        return not self._wc_tracked and (self._p1_tracked or self._p2_tracked)

    @property
    def merged_removed(self):
        """True if the file has been removed, but was "merged" initially

        This property seems like an abstraction leakage and should probably be
        dealt with in this class (or maybe the dirstatemap) directly.
        """
        return self.removed and self._merged

    @property
    def dm_nonnormal(self):
        """True if the entry is non-normal in the dirstatemap sense

        There is no reason for any code but the dirstatemap one to use this.
        """
        return self.v1_state() != b'n' or self.v1_mtime() == AMBIGUOUS_TIME

    @property
    def dm_otherparent(self):
        """True if the entry is `otherparent` in the dirstatemap sense

        There is no reason for any code but the dirstatemap one to use this.
        """
        return self.v1_size() == FROM_P2

    def v1_state(self):
        """return a "state" suitable for v1 serialization"""
        if not (self._p1_tracked or self._p2_tracked or self._wc_tracked):
            # the object has no state to record, this is -currently-
            # unsupported
            raise RuntimeError('untracked item')
        elif self.removed:
            return b'r'
        elif self.merged:
            return b'm'
        elif self.added:
            return b'a'
        else:
            return b'n'

    def v1_mode(self):
        """return a "mode" suitable for v1 serialization"""
        return self._mode if self._mode is not None else 0

    def v1_size(self):
        """return a "size" suitable for v1 serialization"""
        if not (self._p1_tracked or self._p2_tracked or self._wc_tracked):
            # the object has no state to record, this is -currently-
            # unsupported
            raise RuntimeError('untracked item')
        elif self.merged_removed:
            return NONNORMAL
        elif self.from_p2_removed:
            return FROM_P2
        elif self.removed:
            return 0
        elif self.merged:
            return FROM_P2
        elif self.added:
            return NONNORMAL
        elif self.from_p2:
            return FROM_P2
        elif self._possibly_dirty:
            return self._size if self._size is not None else NONNORMAL
        else:
            return self._size

    def v1_mtime(self):
        """return a "mtime" suitable for v1 serialization"""
        if not (self._p1_tracked or self._p2_tracked or self._wc_tracked):
            # the object has no state to record, this is -currently-
            # unsupported
            raise RuntimeError('untracked item')
        elif self.removed:
            return 0
        elif self._possibly_dirty:
            return AMBIGUOUS_TIME
        elif self.merged:
            return AMBIGUOUS_TIME
        elif self.added:
            return AMBIGUOUS_TIME
        elif self.from_p2:
            return AMBIGUOUS_TIME
        else:
            return self._mtime if self._mtime is not None else 0

    def need_delay(self, now):
        """True if the stored mtime would be ambiguous with the current time"""
        return self.v1_state() == b'n' and self.v1_mtime() == now
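

# A minimal usage sketch of the class above; the helper name and the
# mode/size/mtime values are only illustrative.
def _dirstate_item_v1_example(now=1500000000):
    normal = DirstateItem.new_normal(0o644, 11, now)
    added = DirstateItem.new_added()
    merged = DirstateItem.new_merged()
    return [
        # (state, size, mtime) as the v1 serialization would record them
        (normal.v1_state(), normal.v1_size(), normal.v1_mtime()),  # (b'n', 11, now)
        (added.v1_state(), added.v1_size(), added.v1_mtime()),  # (b'a', NONNORMAL, AMBIGUOUS_TIME)
        (merged.v1_state(), merged.v1_size(), merged.v1_mtime()),  # (b'm', FROM_P2, AMBIGUOUS_TIME)
        # an entry written in the same second as `now` must be delayed
        normal.need_delay(now),  # True
    ]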


def gettype(q):
    return int(q & 0xFFFF)


class BaseIndexObject(object):
    # Can I be passed to an algorithm implemented in Rust?
    rust_ext_compat = 0
    # Format of an index entry according to Python's `struct` language
    index_format = revlog_constants.INDEX_ENTRY_V1
    # Size of a C unsigned long long int, platform independent
    big_int_size = struct.calcsize(b'>Q')
    # Size of a C long int, platform independent
    int_size = struct.calcsize(b'>i')
    # An empty index entry, used as a default value to be overridden, or nullrev
    null_item = (
        0,
        0,
        0,
        -1,
        -1,
        -1,
        -1,
        sha1nodeconstants.nullid,
        0,
        0,
        revlog_constants.COMP_MODE_INLINE,
        revlog_constants.COMP_MODE_INLINE,
    )

    @util.propertycache
    def entry_size(self):
        return self.index_format.size

    @property
    def nodemap(self):
        msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
        util.nouideprecwarn(msg, b'5.3', stacklevel=2)
        return self._nodemap

    @util.propertycache
    def _nodemap(self):
        nodemap = nodemaputil.NodeMap({sha1nodeconstants.nullid: nullrev})
        for r in range(0, len(self)):
            n = self[r][7]
            nodemap[n] = r
        return nodemap

    def has_node(self, node):
        """return True if the node exists in the index"""
        return node in self._nodemap

    def rev(self, node):
        """return a revision for a node

        If the node is unknown, raise a RevlogError"""
        return self._nodemap[node]

    def get_rev(self, node):
        """return a revision for a node

        If the node is unknown, return None"""
        return self._nodemap.get(node)

    def _stripnodes(self, start):
        if '_nodemap' in vars(self):
            for r in range(start, len(self)):
                n = self[r][7]
                del self._nodemap[n]

    def clearcaches(self):
        self.__dict__.pop('_nodemap', None)

    def __len__(self):
        return self._lgt + len(self._extra)

    def append(self, tup):
        if '_nodemap' in vars(self):
            self._nodemap[tup[7]] = len(self)
        data = self._pack_entry(len(self), tup)
        self._extra.append(data)

    def _pack_entry(self, rev, entry):
        assert entry[8] == 0
        assert entry[9] == 0
        return self.index_format.pack(*entry[:8])

    def _check_index(self, i):
        if not isinstance(i, int):
            raise TypeError(b"expecting int indexes")
        if i < 0 or i >= len(self):
            raise IndexError

    def __getitem__(self, i):
        if i == -1:
            return self.null_item
        self._check_index(i)
        if i >= self._lgt:
            data = self._extra[i - self._lgt]
        else:
            index = self._calculate_index(i)
            data = self._data[index : index + self.entry_size]
        r = self._unpack_entry(i, data)
        if self._lgt and i == 0:
            offset = revlogutils.offset_type(0, gettype(r[0]))
            r = (offset,) + r[1:]
        return r

    def _unpack_entry(self, rev, data):
        r = self.index_format.unpack(data)
        r = r + (
            0,
            0,
            revlog_constants.COMP_MODE_INLINE,
            revlog_constants.COMP_MODE_INLINE,
        )
        return r

    def pack_header(self, header):
        """pack header information as binary"""
        v_fmt = revlog_constants.INDEX_HEADER
        return v_fmt.pack(header)

    def entry_binary(self, rev):
        """return the raw binary string representing a revision"""
        entry = self[rev]
        p = revlog_constants.INDEX_ENTRY_V1.pack(*entry[:8])
        if rev == 0:
            p = p[revlog_constants.INDEX_HEADER.size :]
        return p
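

# A rough sketch of how the fixed-size v1 entries are consumed. It packs a
# single made-up entry with the same INDEX_ENTRY_V1 struct used above and
# reads it back through IndexObject (defined below); the helper is only
# illustrative.
def _index_v1_example():
    entry = revlog_constants.INDEX_ENTRY_V1.pack(
        0,  # offset and flags
        10,  # compressed length
        12,  # uncompressed length
        0,  # delta base revision
        0,  # link revision
        -1,  # p1 revision
        -1,  # p2 revision
        sha1nodeconstants.nullid,  # node
    )
    index = IndexObject(entry)
    return len(index), index[0][1], index[0][7]  # (1, 10, nullid)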


class IndexObject(BaseIndexObject):
    def __init__(self, data):
        assert len(data) % self.entry_size == 0, (
            len(data),
            self.entry_size,
            len(data) % self.entry_size,
        )
        self._data = data
        self._lgt = len(data) // self.entry_size
        self._extra = []

    def _calculate_index(self, i):
        return i * self.entry_size

    def __delitem__(self, i):
        if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
            raise ValueError(b"deleting slices only supports a:-1 with step 1")
        i = i.start
        self._check_index(i)
        self._stripnodes(i)
        if i < self._lgt:
            self._data = self._data[: i * self.entry_size]
            self._lgt = i
            self._extra = []
        else:
            self._extra = self._extra[: i - self._lgt]


class PersistentNodeMapIndexObject(IndexObject):
    """a debug-oriented class to test persistent nodemap

    We need a simple python object to test API and higher level behavior. See
    the Rust implementation for more serious usage. This should be used only
    through the dedicated `devel.persistent-nodemap` config.
    """

    def nodemap_data_all(self):
        """Return bytes containing a full serialization of a nodemap

        The nodemap should be valid for the full set of revisions in the
        index."""
        return nodemaputil.persistent_data(self)

    def nodemap_data_incremental(self):
        """Return bytes containing an incremental update to persistent nodemap

        This contains the data for an append-only update of the data provided
        in the last call to `update_nodemap_data`.
        """
        if self._nm_root is None:
            return None
        docket = self._nm_docket
        changed, data = nodemaputil.update_persistent_data(
            self, self._nm_root, self._nm_max_idx, self._nm_docket.tip_rev
        )

        self._nm_root = self._nm_max_idx = self._nm_docket = None
        return docket, changed, data

    def update_nodemap_data(self, docket, nm_data):
        """provide full block of persisted binary data for a nodemap

        The data are expected to come from disk. See `nodemap_data_all` for a
        producer of such data."""
        if nm_data is not None:
            self._nm_root, self._nm_max_idx = nodemaputil.parse_data(nm_data)
            if self._nm_root:
                self._nm_docket = docket
            else:
                self._nm_root = self._nm_max_idx = self._nm_docket = None


class InlinedIndexObject(BaseIndexObject):
    def __init__(self, data, inline=0):
        self._data = data
        self._lgt = self._inline_scan(None)
        self._inline_scan(self._lgt)
        self._extra = []

    def _inline_scan(self, lgt):
        off = 0
        if lgt is not None:
            self._offsets = [0] * lgt
        count = 0
        while off <= len(self._data) - self.entry_size:
            start = off + self.big_int_size
            (s,) = struct.unpack(
                b'>i',
                self._data[start : start + self.int_size],
            )
            if lgt is not None:
                self._offsets[count] = off
            count += 1
            off += self.entry_size + s
        if off != len(self._data):
            raise ValueError(b"corrupted data")
        return count

    def __delitem__(self, i):
        if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
            raise ValueError(b"deleting slices only supports a:-1 with step 1")
        i = i.start
        self._check_index(i)
        self._stripnodes(i)
        if i < self._lgt:
            self._offsets = self._offsets[:i]
            self._lgt = i
            self._extra = []
        else:
            self._extra = self._extra[: i - self._lgt]

    def _calculate_index(self, i):
        return self._offsets[i]


def parse_index2(data, inline, revlogv2=False):
    if not inline:
        cls = IndexObject2 if revlogv2 else IndexObject
        return cls(data), None
    cls = InlinedIndexObject
    return cls(data, inline), (0, data)
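

# parse_index2 only chooses the index class; a minimal sketch with an empty
# non-inline index (no revisions, so no cached chunk either).
def _parse_index2_example():
    index, cache = parse_index2(b'', False)
    return len(index), cache  # (0, None)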


def parse_index_cl_v2(data):
    return IndexChangelogV2(data), None


class IndexObject2(IndexObject):
    index_format = revlog_constants.INDEX_ENTRY_V2

    def replace_sidedata_info(
        self,
        rev,
        sidedata_offset,
        sidedata_length,
        offset_flags,
        compression_mode,
    ):
        """
        Replace an existing index entry's sidedata offset and length with new
        ones.
        This cannot be used outside of the context of sidedata rewriting,
        inside the transaction that creates the revision `rev`.
        """
        if rev < 0:
            raise KeyError
        self._check_index(rev)
        if rev < self._lgt:
            msg = b"cannot rewrite entries outside of this transaction"
            raise KeyError(msg)
        else:
            entry = list(self[rev])
            entry[0] = offset_flags
            entry[8] = sidedata_offset
            entry[9] = sidedata_length
            entry[11] = compression_mode
            entry = tuple(entry)
            new = self._pack_entry(rev, entry)
            self._extra[rev - self._lgt] = new

    def _unpack_entry(self, rev, data):
        data = self.index_format.unpack(data)
        entry = data[:10]
        data_comp = data[10] & 3
        sidedata_comp = (data[10] & (3 << 2)) >> 2
        return entry + (data_comp, sidedata_comp)

    def _pack_entry(self, rev, entry):
        data = entry[:10]
        data_comp = entry[10] & 3
        sidedata_comp = (entry[11] & 3) << 2
        data += (data_comp | sidedata_comp,)

        return self.index_format.pack(*data)

    def entry_binary(self, rev):
        """return the raw binary string representing a revision"""
        entry = self[rev]
        return self._pack_entry(rev, entry)

    def pack_header(self, header):
        """pack header information as binary"""
        msg = 'version header should go in the docket, not the index: %d'
        msg %= header
        raise error.ProgrammingError(msg)


class IndexChangelogV2(IndexObject2):
    index_format = revlog_constants.INDEX_ENTRY_CL_V2

    def _unpack_entry(self, rev, data, r=True):
        items = self.index_format.unpack(data)
        entry = items[:3] + (rev, rev) + items[3:8]
        data_comp = items[8] & 3
        sidedata_comp = (items[8] >> 2) & 3
        return entry + (data_comp, sidedata_comp)

    def _pack_entry(self, rev, entry):
        assert entry[3] == rev, entry[3]
        assert entry[4] == rev, entry[4]
        data = entry[:3] + entry[5:10]
        data_comp = entry[10] & 3
        sidedata_comp = (entry[11] & 3) << 2
        data += (data_comp | sidedata_comp,)
        return self.index_format.pack(*data)


def parse_index_devel_nodemap(data, inline):
    """like parse_index2, but always return a PersistentNodeMapIndexObject"""
    return PersistentNodeMapIndexObject(data), None


def parse_dirstate(dmap, copymap, st):
    parents = [st[:20], st[20:40]]
    # dereference fields so they will be local in loop
    format = b">cllll"
    e_size = struct.calcsize(format)
    pos1 = 40
    l = len(st)

    # the inner loop
    while pos1 < l:
        pos2 = pos1 + e_size
        e = _unpack(b">cllll", st[pos1:pos2])  # a literal here is faster
        pos1 = pos2 + e[4]
        f = st[pos2:pos1]
        if b'\0' in f:
            f, c = f.split(b'\0')
            copymap[f] = c
        dmap[f] = DirstateItem.from_v1_data(*e[:4])
    return parents
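

# A hand-built v1 dirstate blob for illustration: two 20-byte parent hashes,
# then one ">cllll" header followed by the filename. All values are arbitrary.
def _parse_dirstate_example():
    parents = [b'\x11' * 20, b'\x22' * 20]
    fname = b'a.txt'
    record = _pack(b">cllll", b'n', 0o644, 5, 1500000000, len(fname)) + fname
    dmap, copymap = {}, {}
    assert parse_dirstate(dmap, copymap, b''.join(parents) + record) == parents
    return dmap[fname].v1_size()  # 5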


def pack_dirstate(dmap, copymap, pl, now):
    now = int(now)
    cs = stringio()
    write = cs.write
    write(b"".join(pl))
    for f, e in pycompat.iteritems(dmap):
        if e.need_delay(now):
            # The file was last modified "simultaneously" with the current
            # write to dirstate (i.e. within the same second for file-
            # systems with a granularity of 1 sec). This commonly happens
            # for at least a couple of files on 'update'.
            # The user could change the file without changing its size
            # within the same second. Invalidate the file's mtime in
            # dirstate, forcing future 'status' calls to compare the
            # contents of the file if the size is the same. This prevents
            # mistakenly treating such files as clean.
            e.set_possibly_dirty()

        if f in copymap:
            f = b"%s\0%s" % (f, copymap[f])
        e = _pack(
            b">cllll",
            e.v1_state(),
            e.v1_mode(),
            e.v1_size(),
            e.v1_mtime(),
            len(f),
        )
        write(e)
        write(f)
    return cs.getvalue()
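

# Round-trip sketch for the two functions above (arbitrary values). The point
# of interest: an entry whose mtime equals `now` is invalidated through
# need_delay/set_possibly_dirty, so it comes back with AMBIGUOUS_TIME and the
# next status call will compare file contents instead of trusting the mtime.
def _pack_dirstate_example(now=1500000000):
    pl = [b'\x11' * 20, b'\x22' * 20]
    dmap = {b'a.txt': DirstateItem.new_normal(0o644, 5, now)}
    data = pack_dirstate(dmap, {}, pl, now)
    dmap2, copymap2 = {}, {}
    parse_dirstate(dmap2, copymap2, data)
    return dmap2[b'a.txt'].v1_mtime() == AMBIGUOUS_TIME  # True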