@@ -1,1098 +1,1114 @@
1 | # store.py - repository store handling for Mercurial |
|
1 | # store.py - repository store handling for Mercurial | |
2 | # |
|
2 | # | |
3 | # Copyright 2008 Olivia Mackall <olivia@selenic.com> |
|
3 | # Copyright 2008 Olivia Mackall <olivia@selenic.com> | |
4 | # |
|
4 | # | |
5 | # This software may be used and distributed according to the terms of the |
|
5 | # This software may be used and distributed according to the terms of the | |
6 | # GNU General Public License version 2 or any later version. |
|
6 | # GNU General Public License version 2 or any later version. | |
7 |
|
7 | |||
8 | import collections |
|
8 | import collections | |
9 | import functools |
|
9 | import functools | |
10 | import os |
|
10 | import os | |
11 | import re |
|
11 | import re | |
12 | import stat |
|
12 | import stat | |
13 | from typing import Generator |
|
13 | from typing import Generator | |
14 |
|
14 | |||
15 | from .i18n import _ |
|
15 | from .i18n import _ | |
16 | from .pycompat import getattr |
|
16 | from .pycompat import getattr | |
17 | from .thirdparty import attr |
|
17 | from .thirdparty import attr | |
18 | from .node import hex |
|
18 | from .node import hex | |
19 | from . import ( |
|
19 | from . import ( | |
20 | changelog, |
|
20 | changelog, | |
21 | error, |
|
21 | error, | |
|
22 | filelog, | |||
22 | manifest, |
|
23 | manifest, | |
23 | policy, |
|
24 | policy, | |
24 | pycompat, |
|
25 | pycompat, | |
25 | util, |
|
26 | util, | |
26 | vfs as vfsmod, |
|
27 | vfs as vfsmod, | |
27 | ) |
|
28 | ) | |
28 | from .utils import hashutil |
|
29 | from .utils import hashutil | |
29 |
|
30 | |||
30 | parsers = policy.importmod('parsers') |
|
31 | parsers = policy.importmod('parsers') | |
31 | # how many bytes should be read from fncache in one read |

32 | # how many bytes should be read from fncache in one read | |
32 | # It is done to prevent loading large fncache files into memory |
|
33 | # It is done to prevent loading large fncache files into memory | |
33 | fncache_chunksize = 10 ** 6 |
|
34 | fncache_chunksize = 10 ** 6 | |
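A minimal sketch (not part of store.py) of the chunked-read idiom that fncache._load() uses further down: iter() with a sentinel keeps calling fp.read(fncache_chunksize) until it returns b'', so a multi-megabyte fncache is consumed in bounded reads instead of one giant read(). The helper name is illustrative only.

    import functools

    def iter_fncache_chunks(fp, chunksize=10 ** 6):
        # iter(callable, sentinel) calls fp.read(chunksize) repeatedly and
        # stops as soon as it returns b'' (end of file).
        return iter(functools.partial(fp.read, chunksize), b'')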
34 |
|
35 | |||
35 |
|
36 | |||
36 | def _match_tracked_entry(entry, matcher): |
|
37 | def _match_tracked_entry(entry, matcher): | |
37 | """parses a fncache entry and returns whether the entry is tracking a path |
|
38 | """parses a fncache entry and returns whether the entry is tracking a path | |
38 | matched by matcher or not. |
|
39 | matched by matcher or not. | |
39 |
|
40 | |||
40 | If matcher is None, returns True""" |
|
41 | If matcher is None, returns True""" | |
41 |
|
42 | |||
42 | if matcher is None: |
|
43 | if matcher is None: | |
43 | return True |
|
44 | return True | |
44 | if entry.is_filelog: |
|
45 | if entry.is_filelog: | |
45 | return matcher(entry.target_id) |
|
46 | return matcher(entry.target_id) | |
46 | elif entry.is_manifestlog: |
|
47 | elif entry.is_manifestlog: | |
47 | return matcher.visitdir(entry.target_id.rstrip(b'/')) |
|
48 | return matcher.visitdir(entry.target_id.rstrip(b'/')) | |
48 | raise error.ProgrammingError(b"cannot process entry %r" % entry) |
|
49 | raise error.ProgrammingError(b"cannot process entry %r" % entry) | |
49 |
|
50 | |||
50 |
|
51 | |||
51 | # This avoids a collision between a file named foo and a dir named |
|
52 | # This avoids a collision between a file named foo and a dir named | |
52 | # foo.i or foo.d |
|
53 | # foo.i or foo.d | |
53 | def _encodedir(path): |
|
54 | def _encodedir(path): | |
54 | """ |
|
55 | """ | |
55 | >>> _encodedir(b'data/foo.i') |
|
56 | >>> _encodedir(b'data/foo.i') | |
56 | 'data/foo.i' |
|
57 | 'data/foo.i' | |
57 | >>> _encodedir(b'data/foo.i/bla.i') |
|
58 | >>> _encodedir(b'data/foo.i/bla.i') | |
58 | 'data/foo.i.hg/bla.i' |
|
59 | 'data/foo.i.hg/bla.i' | |
59 | >>> _encodedir(b'data/foo.i.hg/bla.i') |
|
60 | >>> _encodedir(b'data/foo.i.hg/bla.i') | |
60 | 'data/foo.i.hg.hg/bla.i' |
|
61 | 'data/foo.i.hg.hg/bla.i' | |
61 | >>> _encodedir(b'data/foo.i\\ndata/foo.i/bla.i\\ndata/foo.i.hg/bla.i\\n') |
|
62 | >>> _encodedir(b'data/foo.i\\ndata/foo.i/bla.i\\ndata/foo.i.hg/bla.i\\n') | |
62 | 'data/foo.i\\ndata/foo.i.hg/bla.i\\ndata/foo.i.hg.hg/bla.i\\n' |
|
63 | 'data/foo.i\\ndata/foo.i.hg/bla.i\\ndata/foo.i.hg.hg/bla.i\\n' | |
63 | """ |
|
64 | """ | |
64 | return ( |
|
65 | return ( | |
65 | path.replace(b".hg/", b".hg.hg/") |
|
66 | path.replace(b".hg/", b".hg.hg/") | |
66 | .replace(b".i/", b".i.hg/") |
|
67 | .replace(b".i/", b".i.hg/") | |
67 | .replace(b".d/", b".d.hg/") |
|
68 | .replace(b".d/", b".d.hg/") | |
68 | ) |
|
69 | ) | |
69 |
|
70 | |||
70 |
|
71 | |||
71 | encodedir = getattr(parsers, 'encodedir', _encodedir) |
|
72 | encodedir = getattr(parsers, 'encodedir', _encodedir) | |
72 |
|
73 | |||
73 |
|
74 | |||
74 | def decodedir(path): |
|
75 | def decodedir(path): | |
75 | """ |
|
76 | """ | |
76 | >>> decodedir(b'data/foo.i') |
|
77 | >>> decodedir(b'data/foo.i') | |
77 | 'data/foo.i' |
|
78 | 'data/foo.i' | |
78 | >>> decodedir(b'data/foo.i.hg/bla.i') |
|
79 | >>> decodedir(b'data/foo.i.hg/bla.i') | |
79 | 'data/foo.i/bla.i' |
|
80 | 'data/foo.i/bla.i' | |
80 | >>> decodedir(b'data/foo.i.hg.hg/bla.i') |
|
81 | >>> decodedir(b'data/foo.i.hg.hg/bla.i') | |
81 | 'data/foo.i.hg/bla.i' |
|
82 | 'data/foo.i.hg/bla.i' | |
82 | """ |
|
83 | """ | |
83 | if b".hg/" not in path: |
|
84 | if b".hg/" not in path: | |
84 | return path |
|
85 | return path | |
85 | return ( |
|
86 | return ( | |
86 | path.replace(b".d.hg/", b".d/") |
|
87 | path.replace(b".d.hg/", b".d/") | |
87 | .replace(b".i.hg/", b".i/") |
|
88 | .replace(b".i.hg/", b".i/") | |
88 | .replace(b".hg.hg/", b".hg/") |
|
89 | .replace(b".hg.hg/", b".hg/") | |
89 | ) |
|
90 | ) | |
90 |
|
91 | |||
91 |
|
92 | |||
92 | def _reserved(): |
|
93 | def _reserved(): | |
93 | """characters that are problematic for filesystems |
|
94 | """characters that are problematic for filesystems | |
94 |
|
95 | |||
95 | * ascii escapes (0..31) |
|
96 | * ascii escapes (0..31) | |
96 | * ascii hi (126..255) |
|
97 | * ascii hi (126..255) | |
97 | * windows specials |
|
98 | * windows specials | |
98 |
|
99 | |||
99 | these characters will be escaped by the encode functions |

100 | these characters will be escaped by the encode functions | |
100 | """ |
|
101 | """ | |
101 | winreserved = [ord(x) for x in u'\\:*?"<>|'] |
|
102 | winreserved = [ord(x) for x in u'\\:*?"<>|'] | |
102 | for x in range(32): |
|
103 | for x in range(32): | |
103 | yield x |
|
104 | yield x | |
104 | for x in range(126, 256): |
|
105 | for x in range(126, 256): | |
105 | yield x |
|
106 | yield x | |
106 | for x in winreserved: |
|
107 | for x in winreserved: | |
107 | yield x |
|
108 | yield x | |
108 |
|
109 | |||
109 |
|
110 | |||
110 | def _buildencodefun(): |
|
111 | def _buildencodefun(): | |
111 | """ |
|
112 | """ | |
112 | >>> enc, dec = _buildencodefun() |
|
113 | >>> enc, dec = _buildencodefun() | |
113 |
|
114 | |||
114 | >>> enc(b'nothing/special.txt') |
|
115 | >>> enc(b'nothing/special.txt') | |
115 | 'nothing/special.txt' |
|
116 | 'nothing/special.txt' | |
116 | >>> dec(b'nothing/special.txt') |
|
117 | >>> dec(b'nothing/special.txt') | |
117 | 'nothing/special.txt' |
|
118 | 'nothing/special.txt' | |
118 |
|
119 | |||
119 | >>> enc(b'HELLO') |
|
120 | >>> enc(b'HELLO') | |
120 | '_h_e_l_l_o' |
|
121 | '_h_e_l_l_o' | |
121 | >>> dec(b'_h_e_l_l_o') |
|
122 | >>> dec(b'_h_e_l_l_o') | |
122 | 'HELLO' |
|
123 | 'HELLO' | |
123 |
|
124 | |||
124 | >>> enc(b'hello:world?') |
|
125 | >>> enc(b'hello:world?') | |
125 | 'hello~3aworld~3f' |
|
126 | 'hello~3aworld~3f' | |
126 | >>> dec(b'hello~3aworld~3f') |
|
127 | >>> dec(b'hello~3aworld~3f') | |
127 | 'hello:world?' |
|
128 | 'hello:world?' | |
128 |
|
129 | |||
129 | >>> enc(b'the\\x07quick\\xADshot') |
|
130 | >>> enc(b'the\\x07quick\\xADshot') | |
130 | 'the~07quick~adshot' |
|
131 | 'the~07quick~adshot' | |
131 | >>> dec(b'the~07quick~adshot') |
|
132 | >>> dec(b'the~07quick~adshot') | |
132 | 'the\\x07quick\\xadshot' |
|
133 | 'the\\x07quick\\xadshot' | |
133 | """ |
|
134 | """ | |
134 | e = b'_' |
|
135 | e = b'_' | |
135 | xchr = pycompat.bytechr |
|
136 | xchr = pycompat.bytechr | |
136 | asciistr = list(map(xchr, range(127))) |
|
137 | asciistr = list(map(xchr, range(127))) | |
137 | capitals = list(range(ord(b"A"), ord(b"Z") + 1)) |
|
138 | capitals = list(range(ord(b"A"), ord(b"Z") + 1)) | |
138 |
|
139 | |||
139 | cmap = {x: x for x in asciistr} |
|
140 | cmap = {x: x for x in asciistr} | |
140 | for x in _reserved(): |
|
141 | for x in _reserved(): | |
141 | cmap[xchr(x)] = b"~%02x" % x |
|
142 | cmap[xchr(x)] = b"~%02x" % x | |
142 | for x in capitals + [ord(e)]: |
|
143 | for x in capitals + [ord(e)]: | |
143 | cmap[xchr(x)] = e + xchr(x).lower() |
|
144 | cmap[xchr(x)] = e + xchr(x).lower() | |
144 |
|
145 | |||
145 | dmap = {} |
|
146 | dmap = {} | |
146 | for k, v in cmap.items(): |
|
147 | for k, v in cmap.items(): | |
147 | dmap[v] = k |
|
148 | dmap[v] = k | |
148 |
|
149 | |||
149 | def decode(s): |
|
150 | def decode(s): | |
150 | i = 0 |
|
151 | i = 0 | |
151 | while i < len(s): |
|
152 | while i < len(s): | |
152 | for l in range(1, 4): |
|
153 | for l in range(1, 4): | |
153 | try: |
|
154 | try: | |
154 | yield dmap[s[i : i + l]] |
|
155 | yield dmap[s[i : i + l]] | |
155 | i += l |
|
156 | i += l | |
156 | break |
|
157 | break | |
157 | except KeyError: |
|
158 | except KeyError: | |
158 | pass |
|
159 | pass | |
159 | else: |
|
160 | else: | |
160 | raise KeyError |
|
161 | raise KeyError | |
161 |
|
162 | |||
162 | return ( |
|
163 | return ( | |
163 | lambda s: b''.join([cmap[s[c : c + 1]] for c in range(len(s))]), |
|
164 | lambda s: b''.join([cmap[s[c : c + 1]] for c in range(len(s))]), | |
164 | lambda s: b''.join(list(decode(s))), |
|
165 | lambda s: b''.join(list(decode(s))), | |
165 | ) |
|
166 | ) | |
166 |
|
167 | |||
167 |
|
168 | |||
168 | _encodefname, _decodefname = _buildencodefun() |
|
169 | _encodefname, _decodefname = _buildencodefun() | |
169 |
|
170 | |||
170 |
|
171 | |||
171 | def encodefilename(s): |
|
172 | def encodefilename(s): | |
172 | """ |
|
173 | """ | |
173 | >>> encodefilename(b'foo.i/bar.d/bla.hg/hi:world?/HELLO') |
|
174 | >>> encodefilename(b'foo.i/bar.d/bla.hg/hi:world?/HELLO') | |
174 | 'foo.i.hg/bar.d.hg/bla.hg.hg/hi~3aworld~3f/_h_e_l_l_o' |
|
175 | 'foo.i.hg/bar.d.hg/bla.hg.hg/hi~3aworld~3f/_h_e_l_l_o' | |
175 | """ |
|
176 | """ | |
176 | return _encodefname(encodedir(s)) |
|
177 | return _encodefname(encodedir(s)) | |
177 |
|
178 | |||
178 |
|
179 | |||
179 | def decodefilename(s): |
|
180 | def decodefilename(s): | |
180 | """ |
|
181 | """ | |
181 | >>> decodefilename(b'foo.i.hg/bar.d.hg/bla.hg.hg/hi~3aworld~3f/_h_e_l_l_o') |
|
182 | >>> decodefilename(b'foo.i.hg/bar.d.hg/bla.hg.hg/hi~3aworld~3f/_h_e_l_l_o') | |
182 | 'foo.i/bar.d/bla.hg/hi:world?/HELLO' |
|
183 | 'foo.i/bar.d/bla.hg/hi:world?/HELLO' | |
183 | """ |
|
184 | """ | |
184 | return decodedir(_decodefname(s)) |
|
185 | return decodedir(_decodefname(s)) | |
185 |
|
186 | |||
186 |
|
187 | |||
187 | def _buildlowerencodefun(): |
|
188 | def _buildlowerencodefun(): | |
188 | """ |
|
189 | """ | |
189 | >>> f = _buildlowerencodefun() |
|
190 | >>> f = _buildlowerencodefun() | |
190 | >>> f(b'nothing/special.txt') |
|
191 | >>> f(b'nothing/special.txt') | |
191 | 'nothing/special.txt' |
|
192 | 'nothing/special.txt' | |
192 | >>> f(b'HELLO') |
|
193 | >>> f(b'HELLO') | |
193 | 'hello' |
|
194 | 'hello' | |
194 | >>> f(b'hello:world?') |
|
195 | >>> f(b'hello:world?') | |
195 | 'hello~3aworld~3f' |
|
196 | 'hello~3aworld~3f' | |
196 | >>> f(b'the\\x07quick\\xADshot') |
|
197 | >>> f(b'the\\x07quick\\xADshot') | |
197 | 'the~07quick~adshot' |
|
198 | 'the~07quick~adshot' | |
198 | """ |
|
199 | """ | |
199 | xchr = pycompat.bytechr |
|
200 | xchr = pycompat.bytechr | |
200 | cmap = {xchr(x): xchr(x) for x in range(127)} |
|
201 | cmap = {xchr(x): xchr(x) for x in range(127)} | |
201 | for x in _reserved(): |
|
202 | for x in _reserved(): | |
202 | cmap[xchr(x)] = b"~%02x" % x |
|
203 | cmap[xchr(x)] = b"~%02x" % x | |
203 | for x in range(ord(b"A"), ord(b"Z") + 1): |
|
204 | for x in range(ord(b"A"), ord(b"Z") + 1): | |
204 | cmap[xchr(x)] = xchr(x).lower() |
|
205 | cmap[xchr(x)] = xchr(x).lower() | |
205 |
|
206 | |||
206 | def lowerencode(s): |
|
207 | def lowerencode(s): | |
207 | return b"".join([cmap[c] for c in pycompat.iterbytestr(s)]) |
|
208 | return b"".join([cmap[c] for c in pycompat.iterbytestr(s)]) | |
208 |
|
209 | |||
209 | return lowerencode |
|
210 | return lowerencode | |
210 |
|
211 | |||
211 |
|
212 | |||
212 | lowerencode = getattr(parsers, 'lowerencode', None) or _buildlowerencodefun() |
|
213 | lowerencode = getattr(parsers, 'lowerencode', None) or _buildlowerencodefun() | |
213 |
|
214 | |||
214 | # Windows reserved names: con, prn, aux, nul, com1..com9, lpt1..lpt9 |
|
215 | # Windows reserved names: con, prn, aux, nul, com1..com9, lpt1..lpt9 | |
215 | _winres3 = (b'aux', b'con', b'prn', b'nul') # length 3 |
|
216 | _winres3 = (b'aux', b'con', b'prn', b'nul') # length 3 | |
216 | _winres4 = (b'com', b'lpt') # length 4 (with trailing 1..9) |
|
217 | _winres4 = (b'com', b'lpt') # length 4 (with trailing 1..9) | |
217 |
|
218 | |||
218 |
|
219 | |||
219 | def _auxencode(path, dotencode): |
|
220 | def _auxencode(path, dotencode): | |
220 | """ |
|
221 | """ | |
221 | Encodes filenames containing names reserved by Windows or which end in |
|
222 | Encodes filenames containing names reserved by Windows or which end in | |
222 | period or space. Does not touch other single reserved characters c. |
|
223 | period or space. Does not touch other single reserved characters c. | |
223 | Specifically, c in '\\:*?"<>|' or ord(c) <= 31 are *not* encoded here. |
|
224 | Specifically, c in '\\:*?"<>|' or ord(c) <= 31 are *not* encoded here. | |
224 | Additionally encodes space or period at the beginning, if dotencode is |
|
225 | Additionally encodes space or period at the beginning, if dotencode is | |
225 | True. Parameter path is assumed to be all lowercase. |
|
226 | True. Parameter path is assumed to be all lowercase. | |
226 | A segment only needs encoding if a reserved name appears as a |
|
227 | A segment only needs encoding if a reserved name appears as a | |
227 | basename (e.g. "aux", "aux.foo"). A directory or file named "foo.aux" |
|
228 | basename (e.g. "aux", "aux.foo"). A directory or file named "foo.aux" | |
228 | doesn't need encoding. |
|
229 | doesn't need encoding. | |
229 |
|
230 | |||
230 | >>> s = b'.foo/aux.txt/txt.aux/con/prn/nul/foo.' |
|
231 | >>> s = b'.foo/aux.txt/txt.aux/con/prn/nul/foo.' | |
231 | >>> _auxencode(s.split(b'/'), True) |
|
232 | >>> _auxencode(s.split(b'/'), True) | |
232 | ['~2efoo', 'au~78.txt', 'txt.aux', 'co~6e', 'pr~6e', 'nu~6c', 'foo~2e'] |
|
233 | ['~2efoo', 'au~78.txt', 'txt.aux', 'co~6e', 'pr~6e', 'nu~6c', 'foo~2e'] | |
233 | >>> s = b'.com1com2/lpt9.lpt4.lpt1/conprn/com0/lpt0/foo.' |
|
234 | >>> s = b'.com1com2/lpt9.lpt4.lpt1/conprn/com0/lpt0/foo.' | |
234 | >>> _auxencode(s.split(b'/'), False) |
|
235 | >>> _auxencode(s.split(b'/'), False) | |
235 | ['.com1com2', 'lp~749.lpt4.lpt1', 'conprn', 'com0', 'lpt0', 'foo~2e'] |
|
236 | ['.com1com2', 'lp~749.lpt4.lpt1', 'conprn', 'com0', 'lpt0', 'foo~2e'] | |
236 | >>> _auxencode([b'foo. '], True) |
|
237 | >>> _auxencode([b'foo. '], True) | |
237 | ['foo.~20'] |
|
238 | ['foo.~20'] | |
238 | >>> _auxencode([b' .foo'], True) |
|
239 | >>> _auxencode([b' .foo'], True) | |
239 | ['~20.foo'] |
|
240 | ['~20.foo'] | |
240 | """ |
|
241 | """ | |
241 | for i, n in enumerate(path): |
|
242 | for i, n in enumerate(path): | |
242 | if not n: |
|
243 | if not n: | |
243 | continue |
|
244 | continue | |
244 | if dotencode and n[0] in b'. ': |
|
245 | if dotencode and n[0] in b'. ': | |
245 | n = b"~%02x" % ord(n[0:1]) + n[1:] |
|
246 | n = b"~%02x" % ord(n[0:1]) + n[1:] | |
246 | path[i] = n |
|
247 | path[i] = n | |
247 | else: |
|
248 | else: | |
248 | l = n.find(b'.') |
|
249 | l = n.find(b'.') | |
249 | if l == -1: |
|
250 | if l == -1: | |
250 | l = len(n) |
|
251 | l = len(n) | |
251 | if (l == 3 and n[:3] in _winres3) or ( |
|
252 | if (l == 3 and n[:3] in _winres3) or ( | |
252 | l == 4 |
|
253 | l == 4 | |
253 | and n[3:4] <= b'9' |
|
254 | and n[3:4] <= b'9' | |
254 | and n[3:4] >= b'1' |
|
255 | and n[3:4] >= b'1' | |
255 | and n[:3] in _winres4 |
|
256 | and n[:3] in _winres4 | |
256 | ): |
|
257 | ): | |
257 | # encode third letter ('aux' -> 'au~78') |
|
258 | # encode third letter ('aux' -> 'au~78') | |
258 | ec = b"~%02x" % ord(n[2:3]) |
|
259 | ec = b"~%02x" % ord(n[2:3]) | |
259 | n = n[0:2] + ec + n[3:] |
|
260 | n = n[0:2] + ec + n[3:] | |
260 | path[i] = n |
|
261 | path[i] = n | |
261 | if n[-1] in b'. ': |
|
262 | if n[-1] in b'. ': | |
262 | # encode last period or space ('foo...' -> 'foo..~2e') |
|
263 | # encode last period or space ('foo...' -> 'foo..~2e') | |
263 | path[i] = n[:-1] + b"~%02x" % ord(n[-1:]) |
|
264 | path[i] = n[:-1] + b"~%02x" % ord(n[-1:]) | |
264 | return path |
|
265 | return path | |
265 |
|
266 | |||
266 |
|
267 | |||
267 | _maxstorepathlen = 120 |
|
268 | _maxstorepathlen = 120 | |
268 | _dirprefixlen = 8 |
|
269 | _dirprefixlen = 8 | |
269 | _maxshortdirslen = 8 * (_dirprefixlen + 1) - 4 |
|
270 | _maxshortdirslen = 8 * (_dirprefixlen + 1) - 4 | |
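For reference, with the defaults above the shortened-directories budget works out to _maxshortdirslen = 8 * (8 + 1) - 4 = 68 characters; _hashencode() below stops adding directory prefixes once that budget would be exceeded.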
270 |
|
271 | |||
271 |
|
272 | |||
272 | def _hashencode(path, dotencode): |
|
273 | def _hashencode(path, dotencode): | |
273 | digest = hex(hashutil.sha1(path).digest()) |
|
274 | digest = hex(hashutil.sha1(path).digest()) | |
274 | le = lowerencode(path[5:]).split(b'/') # skips prefix 'data/' or 'meta/' |
|
275 | le = lowerencode(path[5:]).split(b'/') # skips prefix 'data/' or 'meta/' | |
275 | parts = _auxencode(le, dotencode) |
|
276 | parts = _auxencode(le, dotencode) | |
276 | basename = parts[-1] |
|
277 | basename = parts[-1] | |
277 | _root, ext = os.path.splitext(basename) |
|
278 | _root, ext = os.path.splitext(basename) | |
278 | sdirs = [] |
|
279 | sdirs = [] | |
279 | sdirslen = 0 |
|
280 | sdirslen = 0 | |
280 | for p in parts[:-1]: |
|
281 | for p in parts[:-1]: | |
281 | d = p[:_dirprefixlen] |
|
282 | d = p[:_dirprefixlen] | |
282 | if d[-1] in b'. ': |
|
283 | if d[-1] in b'. ': | |
283 | # Windows can't access dirs ending in period or space |
|
284 | # Windows can't access dirs ending in period or space | |
284 | d = d[:-1] + b'_' |
|
285 | d = d[:-1] + b'_' | |
285 | if sdirslen == 0: |
|
286 | if sdirslen == 0: | |
286 | t = len(d) |
|
287 | t = len(d) | |
287 | else: |
|
288 | else: | |
288 | t = sdirslen + 1 + len(d) |
|
289 | t = sdirslen + 1 + len(d) | |
289 | if t > _maxshortdirslen: |
|
290 | if t > _maxshortdirslen: | |
290 | break |
|
291 | break | |
291 | sdirs.append(d) |
|
292 | sdirs.append(d) | |
292 | sdirslen = t |
|
293 | sdirslen = t | |
293 | dirs = b'/'.join(sdirs) |
|
294 | dirs = b'/'.join(sdirs) | |
294 | if len(dirs) > 0: |
|
295 | if len(dirs) > 0: | |
295 | dirs += b'/' |
|
296 | dirs += b'/' | |
296 | res = b'dh/' + dirs + digest + ext |
|
297 | res = b'dh/' + dirs + digest + ext | |
297 | spaceleft = _maxstorepathlen - len(res) |
|
298 | spaceleft = _maxstorepathlen - len(res) | |
298 | if spaceleft > 0: |
|
299 | if spaceleft > 0: | |
299 | filler = basename[:spaceleft] |
|
300 | filler = basename[:spaceleft] | |
300 | res = b'dh/' + dirs + filler + digest + ext |
|
301 | res = b'dh/' + dirs + filler + digest + ext | |
301 | return res |
|
302 | return res | |
302 |
|
303 | |||
303 |
|
304 | |||
304 | def _hybridencode(path, dotencode): |
|
305 | def _hybridencode(path, dotencode): | |
305 | """encodes path with a length limit |
|
306 | """encodes path with a length limit | |
306 |
|
307 | |||
307 | Encodes all paths that begin with 'data/', according to the following. |
|
308 | Encodes all paths that begin with 'data/', according to the following. | |
308 |
|
309 | |||
309 | Default encoding (reversible): |
|
310 | Default encoding (reversible): | |
310 |
|
311 | |||
311 | Encodes all uppercase letters 'X' as '_x'. All reserved or illegal |
|
312 | Encodes all uppercase letters 'X' as '_x'. All reserved or illegal | |
312 | characters are encoded as '~xx', where xx is the two digit hex code |
|
313 | characters are encoded as '~xx', where xx is the two digit hex code | |
313 | of the character (see encodefilename). |
|
314 | of the character (see encodefilename). | |
314 | Relevant path components consisting of Windows reserved filenames are |
|
315 | Relevant path components consisting of Windows reserved filenames are | |
315 | masked by encoding the third character ('aux' -> 'au~78', see _auxencode). |
|
316 | masked by encoding the third character ('aux' -> 'au~78', see _auxencode). | |
316 |
|
317 | |||
317 | Hashed encoding (not reversible): |
|
318 | Hashed encoding (not reversible): | |
318 |
|
319 | |||
319 | If the default-encoded path is longer than _maxstorepathlen, a |
|
320 | If the default-encoded path is longer than _maxstorepathlen, a | |
320 | non-reversible hybrid hashing of the path is done instead. |
|
321 | non-reversible hybrid hashing of the path is done instead. | |
321 | This encoding uses up to _dirprefixlen characters of all directory |
|
322 | This encoding uses up to _dirprefixlen characters of all directory | |
322 | levels of the lowerencoded path, but not more levels than can fit into |
|
323 | levels of the lowerencoded path, but not more levels than can fit into | |
323 | _maxshortdirslen. |
|
324 | _maxshortdirslen. | |
324 | Then follows the filler followed by the sha digest of the full path. |
|
325 | Then follows the filler followed by the sha digest of the full path. | |
325 | The filler is the beginning of the basename of the lowerencoded path |
|
326 | The filler is the beginning of the basename of the lowerencoded path | |
326 | (the basename is everything after the last path separator). The filler |
|
327 | (the basename is everything after the last path separator). The filler | |
327 | is as long as possible, filling in characters from the basename until |
|
328 | is as long as possible, filling in characters from the basename until | |
328 | the encoded path has _maxstorepathlen characters (or all chars of the |
|
329 | the encoded path has _maxstorepathlen characters (or all chars of the | |
329 | basename have been taken). |
|
330 | basename have been taken). | |
330 | The extension (e.g. '.i' or '.d') is preserved. |
|
331 | The extension (e.g. '.i' or '.d') is preserved. | |
331 |
|
332 | |||
332 | The string 'data/' at the beginning is replaced with 'dh/', if the hashed |
|
333 | The string 'data/' at the beginning is replaced with 'dh/', if the hashed | |
333 | encoding was used. |
|
334 | encoding was used. | |
334 | """ |
|
335 | """ | |
335 | path = encodedir(path) |
|
336 | path = encodedir(path) | |
336 | ef = _encodefname(path).split(b'/') |
|
337 | ef = _encodefname(path).split(b'/') | |
337 | res = b'/'.join(_auxencode(ef, dotencode)) |
|
338 | res = b'/'.join(_auxencode(ef, dotencode)) | |
338 | if len(res) > _maxstorepathlen: |
|
339 | if len(res) > _maxstorepathlen: | |
339 | res = _hashencode(path, dotencode) |
|
340 | res = _hashencode(path, dotencode) | |
340 | return res |
|
341 | return res | |
341 |
|
342 | |||
342 |
|
343 | |||
343 | def _pathencode(path): |
|
344 | def _pathencode(path): | |
344 | de = encodedir(path) |
|
345 | de = encodedir(path) | |
345 | if len(path) > _maxstorepathlen: |
|
346 | if len(path) > _maxstorepathlen: | |
346 | return _hashencode(de, True) |
|
347 | return _hashencode(de, True) | |
347 | ef = _encodefname(de).split(b'/') |
|
348 | ef = _encodefname(de).split(b'/') | |
348 | res = b'/'.join(_auxencode(ef, True)) |
|
349 | res = b'/'.join(_auxencode(ef, True)) | |
349 | if len(res) > _maxstorepathlen: |
|
350 | if len(res) > _maxstorepathlen: | |
350 | return _hashencode(de, True) |
|
351 | return _hashencode(de, True) | |
351 | return res |
|
352 | return res | |
352 |
|
353 | |||
353 |
|
354 | |||
354 | _pathencode = getattr(parsers, 'pathencode', _pathencode) |
|
355 | _pathencode = getattr(parsers, 'pathencode', _pathencode) | |
355 |
|
356 | |||
356 |
|
357 | |||
357 | def _plainhybridencode(f): |
|
358 | def _plainhybridencode(f): | |
358 | return _hybridencode(f, False) |
|
359 | return _hybridencode(f, False) | |
359 |
|
360 | |||
360 |
|
361 | |||
361 | def _calcmode(vfs): |
|
362 | def _calcmode(vfs): | |
362 | try: |
|
363 | try: | |
363 | # files in .hg/ will be created using this mode |
|
364 | # files in .hg/ will be created using this mode | |
364 | mode = vfs.stat().st_mode |
|
365 | mode = vfs.stat().st_mode | |
365 | # avoid some useless chmods |
|
366 | # avoid some useless chmods | |
366 | if (0o777 & ~util.umask) == (0o777 & mode): |
|
367 | if (0o777 & ~util.umask) == (0o777 & mode): | |
367 | mode = None |
|
368 | mode = None | |
368 | except OSError: |
|
369 | except OSError: | |
369 | mode = None |
|
370 | mode = None | |
370 | return mode |
|
371 | return mode | |
371 |
|
372 | |||
372 |
|
373 | |||
373 | _data = [ |
|
374 | _data = [ | |
374 | b'bookmarks', |
|
375 | b'bookmarks', | |
375 | b'narrowspec', |
|
376 | b'narrowspec', | |
376 | b'data', |
|
377 | b'data', | |
377 | b'meta', |
|
378 | b'meta', | |
378 | b'00manifest.d', |
|
379 | b'00manifest.d', | |
379 | b'00manifest.i', |
|
380 | b'00manifest.i', | |
380 | b'00changelog.d', |
|
381 | b'00changelog.d', | |
381 | b'00changelog.i', |
|
382 | b'00changelog.i', | |
382 | b'phaseroots', |
|
383 | b'phaseroots', | |
383 | b'obsstore', |
|
384 | b'obsstore', | |
384 | b'requires', |
|
385 | b'requires', | |
385 | ] |
|
386 | ] | |
386 |
|
387 | |||
387 | REVLOG_FILES_MAIN_EXT = (b'.i',) |
|
388 | REVLOG_FILES_MAIN_EXT = (b'.i',) | |
388 | REVLOG_FILES_OTHER_EXT = ( |
|
389 | REVLOG_FILES_OTHER_EXT = ( | |
389 | b'.idx', |
|
390 | b'.idx', | |
390 | b'.d', |
|
391 | b'.d', | |
391 | b'.dat', |
|
392 | b'.dat', | |
392 | b'.n', |
|
393 | b'.n', | |
393 | b'.nd', |
|
394 | b'.nd', | |
394 | b'.sda', |
|
395 | b'.sda', | |
395 | ) |
|
396 | ) | |
396 | # file extensions that also use a `-SOMELONGIDHASH.ext` form |

397 | # file extensions that also use a `-SOMELONGIDHASH.ext` form | |
397 | REVLOG_FILES_LONG_EXT = ( |
|
398 | REVLOG_FILES_LONG_EXT = ( | |
398 | b'.nd', |
|
399 | b'.nd', | |
399 | b'.idx', |
|
400 | b'.idx', | |
400 | b'.dat', |
|
401 | b'.dat', | |
401 | b'.sda', |
|
402 | b'.sda', | |
402 | ) |
|
403 | ) | |
403 | # files that are "volatile" and might change between listing and streaming |
|
404 | # files that are "volatile" and might change between listing and streaming | |
404 | # |
|
405 | # | |
405 | # note: the ".nd" files are nodemap data and won't "change" but they might be |

406 | # note: the ".nd" files are nodemap data and won't "change" but they might be | |
406 | # deleted. |
|
407 | # deleted. | |
407 | REVLOG_FILES_VOLATILE_EXT = (b'.n', b'.nd') |
|
408 | REVLOG_FILES_VOLATILE_EXT = (b'.n', b'.nd') | |
408 |
|
409 | |||
409 | # some exceptions to the above matching |

410 | # some exceptions to the above matching | |
410 | # |
|
411 | # | |
411 | # XXX This is currently not in use because of issue6542 |
|
412 | # XXX This is currently not in use because of issue6542 | |
412 | EXCLUDED = re.compile(br'.*undo\.[^/]+\.(nd?|i)$') |
|
413 | EXCLUDED = re.compile(br'.*undo\.[^/]+\.(nd?|i)$') | |
413 |
|
414 | |||
414 |
|
415 | |||
415 | def is_revlog(f, kind, st): |
|
416 | def is_revlog(f, kind, st): | |
416 | if kind != stat.S_IFREG: |
|
417 | if kind != stat.S_IFREG: | |
417 | return None |
|
418 | return None | |
418 | return revlog_type(f) |
|
419 | return revlog_type(f) | |
419 |
|
420 | |||
420 |
|
421 | |||
421 | def revlog_type(f): |
|
422 | def revlog_type(f): | |
422 | # XXX we need to filter `undo.` created by the transaction here, however |
|
423 | # XXX we need to filter `undo.` created by the transaction here, however | |
423 | # being naive about it would also filter revlogs for `undo.*` files, leading to |

424 | # being naive about it would also filter revlogs for `undo.*` files, leading to | |
424 | # issue6542. So we no longer use EXCLUDED. |
|
425 | # issue6542. So we no longer use EXCLUDED. | |
425 | if f.endswith(REVLOG_FILES_MAIN_EXT): |
|
426 | if f.endswith(REVLOG_FILES_MAIN_EXT): | |
426 | return FILEFLAGS_REVLOG_MAIN |
|
427 | return FILEFLAGS_REVLOG_MAIN | |
427 | elif f.endswith(REVLOG_FILES_OTHER_EXT): |
|
428 | elif f.endswith(REVLOG_FILES_OTHER_EXT): | |
428 | t = FILETYPE_FILELOG_OTHER |
|
429 | t = FILETYPE_FILELOG_OTHER | |
429 | if f.endswith(REVLOG_FILES_VOLATILE_EXT): |
|
430 | if f.endswith(REVLOG_FILES_VOLATILE_EXT): | |
430 | t |= FILEFLAGS_VOLATILE |
|
431 | t |= FILEFLAGS_VOLATILE | |
431 | return t |
|
432 | return t | |
432 | return None |
|
433 | return None | |
433 |
|
434 | |||
434 |
|
435 | |||
435 | # the file is part of changelog data |
|
436 | # the file is part of changelog data | |
436 | FILEFLAGS_CHANGELOG = 1 << 13 |
|
437 | FILEFLAGS_CHANGELOG = 1 << 13 | |
437 | # the file is part of manifest data |
|
438 | # the file is part of manifest data | |
438 | FILEFLAGS_MANIFESTLOG = 1 << 12 |
|
439 | FILEFLAGS_MANIFESTLOG = 1 << 12 | |
439 | # the file is part of filelog data |
|
440 | # the file is part of filelog data | |
440 | FILEFLAGS_FILELOG = 1 << 11 |
|
441 | FILEFLAGS_FILELOG = 1 << 11 | |
441 | # files that are not directly part of a revlog |

442 | # files that are not directly part of a revlog | |
442 | FILEFLAGS_OTHER = 1 << 10 |
|
443 | FILEFLAGS_OTHER = 1 << 10 | |
443 |
|
444 | |||
444 | # the main entry point for a revlog |
|
445 | # the main entry point for a revlog | |
445 | FILEFLAGS_REVLOG_MAIN = 1 << 1 |
|
446 | FILEFLAGS_REVLOG_MAIN = 1 << 1 | |
446 | # a secondary file for a revlog |
|
447 | # a secondary file for a revlog | |
447 | FILEFLAGS_REVLOG_OTHER = 1 << 0 |
|
448 | FILEFLAGS_REVLOG_OTHER = 1 << 0 | |
448 |
|
449 | |||
449 | # files that are "volatile" and might change between listing and streaming |
|
450 | # files that are "volatile" and might change between listing and streaming | |
450 | FILEFLAGS_VOLATILE = 1 << 20 |
|
451 | FILEFLAGS_VOLATILE = 1 << 20 | |
451 |
|
452 | |||
452 | FILETYPE_CHANGELOG_MAIN = FILEFLAGS_CHANGELOG | FILEFLAGS_REVLOG_MAIN |
|
453 | FILETYPE_CHANGELOG_MAIN = FILEFLAGS_CHANGELOG | FILEFLAGS_REVLOG_MAIN | |
453 | FILETYPE_CHANGELOG_OTHER = FILEFLAGS_CHANGELOG | FILEFLAGS_REVLOG_OTHER |
|
454 | FILETYPE_CHANGELOG_OTHER = FILEFLAGS_CHANGELOG | FILEFLAGS_REVLOG_OTHER | |
454 | FILETYPE_MANIFESTLOG_MAIN = FILEFLAGS_MANIFESTLOG | FILEFLAGS_REVLOG_MAIN |
|
455 | FILETYPE_MANIFESTLOG_MAIN = FILEFLAGS_MANIFESTLOG | FILEFLAGS_REVLOG_MAIN | |
455 | FILETYPE_MANIFESTLOG_OTHER = FILEFLAGS_MANIFESTLOG | FILEFLAGS_REVLOG_OTHER |
|
456 | FILETYPE_MANIFESTLOG_OTHER = FILEFLAGS_MANIFESTLOG | FILEFLAGS_REVLOG_OTHER | |
456 | FILETYPE_FILELOG_MAIN = FILEFLAGS_FILELOG | FILEFLAGS_REVLOG_MAIN |
|
457 | FILETYPE_FILELOG_MAIN = FILEFLAGS_FILELOG | FILEFLAGS_REVLOG_MAIN | |
457 | FILETYPE_FILELOG_OTHER = FILEFLAGS_FILELOG | FILEFLAGS_REVLOG_OTHER |
|
458 | FILETYPE_FILELOG_OTHER = FILEFLAGS_FILELOG | FILEFLAGS_REVLOG_OTHER | |
458 | FILETYPE_OTHER = FILEFLAGS_OTHER |
|
459 | FILETYPE_OTHER = FILEFLAGS_OTHER | |
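Illustrative only, assuming the flag constants above are in scope: the FILETYPE_* values are plain OR-combinations of FILEFLAGS_* bits, so a classified file can be inspected with simple bitwise masks.

    t = FILETYPE_FILELOG_OTHER | FILEFLAGS_VOLATILE  # e.g. a filelog ".nd" file
    assert t & FILEFLAGS_FILELOG        # belongs to a filelog
    assert t & FILEFLAGS_REVLOG_OTHER   # a secondary file, not the ".i" index
    assert t & FILEFLAGS_VOLATILE       # may change or vanish while streaming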
459 |
|
460 | |||
460 |
|
461 | |||
461 | @attr.s(slots=True, init=False) |
|
462 | @attr.s(slots=True, init=False) | |
462 | class BaseStoreEntry: |
|
463 | class BaseStoreEntry: | |
463 | """An entry in the store |
|
464 | """An entry in the store | |
464 |
|
465 | |||
465 | This is returned by `store.walk` and represents some data in the store.""" |

466 | This is returned by `store.walk` and represents some data in the store.""" | |
466 |
|
467 | |||
467 |
|
468 | |||
468 | @attr.s(slots=True, init=False) |
|
469 | @attr.s(slots=True, init=False) | |
469 | class SimpleStoreEntry(BaseStoreEntry): |
|
470 | class SimpleStoreEntry(BaseStoreEntry): | |
470 | """A generic entry in the store""" |
|
471 | """A generic entry in the store""" | |
471 |
|
472 | |||
472 | is_revlog = False |
|
473 | is_revlog = False | |
473 |
|
474 | |||
474 | _entry_path = attr.ib() |
|
475 | _entry_path = attr.ib() | |
475 | _is_volatile = attr.ib(default=False) |
|
476 | _is_volatile = attr.ib(default=False) | |
476 | _file_size = attr.ib(default=None) |
|
477 | _file_size = attr.ib(default=None) | |
477 |
|
478 | |||
478 | def __init__( |
|
479 | def __init__( | |
479 | self, |
|
480 | self, | |
480 | entry_path, |
|
481 | entry_path, | |
481 | is_volatile=False, |
|
482 | is_volatile=False, | |
482 | file_size=None, |
|
483 | file_size=None, | |
483 | ): |
|
484 | ): | |
484 | super().__init__() |
|
485 | super().__init__() | |
485 | self._entry_path = entry_path |
|
486 | self._entry_path = entry_path | |
486 | self._is_volatile = is_volatile |
|
487 | self._is_volatile = is_volatile | |
487 | self._file_size = file_size |
|
488 | self._file_size = file_size | |
488 |
|
489 | |||
489 | def files(self): |
|
490 | def files(self): | |
490 | return [ |
|
491 | return [ | |
491 | StoreFile( |
|
492 | StoreFile( | |
492 | unencoded_path=self._entry_path, |
|
493 | unencoded_path=self._entry_path, | |
493 | file_size=self._file_size, |
|
494 | file_size=self._file_size, | |
494 | is_volatile=self._is_volatile, |
|
495 | is_volatile=self._is_volatile, | |
495 | ) |
|
496 | ) | |
496 | ] |
|
497 | ] | |
497 |
|
498 | |||
498 |
|
499 | |||
499 | @attr.s(slots=True, init=False) |
|
500 | @attr.s(slots=True, init=False) | |
500 | class RevlogStoreEntry(BaseStoreEntry): |
|
501 | class RevlogStoreEntry(BaseStoreEntry): | |
501 | """A revlog entry in the store""" |
|
502 | """A revlog entry in the store""" | |
502 |
|
503 | |||
503 | is_revlog = True |
|
504 | is_revlog = True | |
504 |
|
505 | |||
505 | revlog_type = attr.ib(default=None) |
|
506 | revlog_type = attr.ib(default=None) | |
506 | target_id = attr.ib(default=None) |
|
507 | target_id = attr.ib(default=None) | |
507 | _path_prefix = attr.ib(default=None) |
|
508 | _path_prefix = attr.ib(default=None) | |
508 | _details = attr.ib(default=None) |
|
509 | _details = attr.ib(default=None) | |
509 |
|
510 | |||
510 | def __init__( |
|
511 | def __init__( | |
511 | self, |
|
512 | self, | |
512 | revlog_type, |
|
513 | revlog_type, | |
513 | path_prefix, |
|
514 | path_prefix, | |
514 | target_id, |
|
515 | target_id, | |
515 | details, |
|
516 | details, | |
516 | ): |
|
517 | ): | |
517 | super().__init__() |
|
518 | super().__init__() | |
518 | self.revlog_type = revlog_type |
|
519 | self.revlog_type = revlog_type | |
519 | self.target_id = target_id |
|
520 | self.target_id = target_id | |
520 | self._path_prefix = path_prefix |
|
521 | self._path_prefix = path_prefix | |
521 | assert b'.i' in details, (path_prefix, details) |
|
522 | assert b'.i' in details, (path_prefix, details) | |
522 | self._details = details |
|
523 | self._details = details | |
523 |
|
524 | |||
524 | @property |
|
525 | @property | |
525 | def is_changelog(self): |
|
526 | def is_changelog(self): | |
526 | return self.revlog_type & FILEFLAGS_CHANGELOG |
|
527 | return self.revlog_type & FILEFLAGS_CHANGELOG | |
527 |
|
528 | |||
528 | @property |
|
529 | @property | |
529 | def is_manifestlog(self): |
|
530 | def is_manifestlog(self): | |
530 | return self.revlog_type & FILEFLAGS_MANIFESTLOG |
|
531 | return self.revlog_type & FILEFLAGS_MANIFESTLOG | |
531 |
|
532 | |||
532 | @property |
|
533 | @property | |
533 | def is_filelog(self): |
|
534 | def is_filelog(self): | |
534 | return self.revlog_type & FILEFLAGS_FILELOG |
|
535 | return self.revlog_type & FILEFLAGS_FILELOG | |
535 |
|
536 | |||
536 | def main_file_path(self): |
|
537 | def main_file_path(self): | |
537 | """unencoded path of the main revlog file""" |
|
538 | """unencoded path of the main revlog file""" | |
538 | return self._path_prefix + b'.i' |
|
539 | return self._path_prefix + b'.i' | |
539 |
|
540 | |||
540 | def files(self): |
|
541 | def files(self): | |
541 | files = [] |
|
542 | files = [] | |
542 | for ext in sorted(self._details, key=_ext_key): |
|
543 | for ext in sorted(self._details, key=_ext_key): | |
543 | path = self._path_prefix + ext |
|
544 | path = self._path_prefix + ext | |
544 | data = self._details[ext] |
|
545 | data = self._details[ext] | |
545 | files.append(StoreFile(unencoded_path=path, **data)) |
|
546 | files.append(StoreFile(unencoded_path=path, **data)) | |
546 | return files |
|
547 | return files | |
547 |
|
548 | |||
|
549 | def get_revlog_instance(self, repo): | |||
|
550 | """Obtain a revlog instance from this store entry | |||
|
551 | ||||
|
552 | An instance of the appropriate class is returned. | |||
|
553 | """ | |||
|
554 | if self.is_changelog: | |||
|
555 | return changelog.changelog(repo.svfs) | |||
|
556 | elif self.is_manifestlog: | |||
|
557 | mandir = self.target_id.rstrip(b'/') | |||
|
558 | return manifest.manifestrevlog( | |||
|
559 | repo.nodeconstants, repo.svfs, tree=mandir | |||
|
560 | ) | |||
|
561 | else: | |||
|
562 | return filelog.filelog(repo.svfs, self.target_id) | |||
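A hypothetical usage sketch, not part of this change: walking the store and opening each revlog through the new helper, assuming `repo` is an ordinary local repository object.

    for entry in repo.store.walk():
        if entry.is_revlog:
            rl = entry.get_revlog_instance(repo)
            # e.g. report how many revisions each revlog holds
            print(entry.main_file_path(), len(rl))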
|
563 | ||||
548 |
|
564 | |||
549 | @attr.s(slots=True) |
|
565 | @attr.s(slots=True) | |
550 | class StoreFile: |
|
566 | class StoreFile: | |
551 | """a file matching an entry""" |
|
567 | """a file matching an entry""" | |
552 |
|
568 | |||
553 | unencoded_path = attr.ib() |
|
569 | unencoded_path = attr.ib() | |
554 | _file_size = attr.ib(default=None) |
|
570 | _file_size = attr.ib(default=None) | |
555 | is_volatile = attr.ib(default=False) |
|
571 | is_volatile = attr.ib(default=False) | |
556 |
|
572 | |||
557 | def file_size(self, vfs): |
|
573 | def file_size(self, vfs): | |
558 | if self._file_size is not None: |
|
574 | if self._file_size is not None: | |
559 | return self._file_size |
|
575 | return self._file_size | |
560 | try: |
|
576 | try: | |
561 | return vfs.stat(self.unencoded_path).st_size |
|
577 | return vfs.stat(self.unencoded_path).st_size | |
562 | except FileNotFoundError: |
|
578 | except FileNotFoundError: | |
563 | return 0 |
|
579 | return 0 | |
564 |
|
580 | |||
565 |
|
581 | |||
566 | def _gather_revlog(files_data): |
|
582 | def _gather_revlog(files_data): | |
567 | """group files per revlog prefix |
|
583 | """group files per revlog prefix | |
568 |
|
584 | |||
569 | This returns a two-level nested dict. The top level key is the revlog prefix |

585 | This returns a two-level nested dict. The top level key is the revlog prefix | |
570 | without extension; the second level maps every file "suffix" seen for this |

586 | without extension; the second level maps every file "suffix" seen for this | |
571 | revlog to arbitrary file data. |

587 | revlog to arbitrary file data. | |
572 | """ |
|
588 | """ | |
573 | revlogs = collections.defaultdict(dict) |
|
589 | revlogs = collections.defaultdict(dict) | |
574 | for u, value in files_data: |
|
590 | for u, value in files_data: | |
575 | name, ext = _split_revlog_ext(u) |
|
591 | name, ext = _split_revlog_ext(u) | |
576 | revlogs[name][ext] = value |
|
592 | revlogs[name][ext] = value | |
577 | return sorted(revlogs.items()) |
|
593 | return sorted(revlogs.items()) | |
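An illustration, not taken from the file, of the grouping this produces for a single filelog (module-level FILETYPE_* constants assumed in scope):

    files = [
        (b'data/foo.py.i', (FILETYPE_FILELOG_MAIN, 64)),
        (b'data/foo.py.d', (FILETYPE_FILELOG_OTHER, 1024)),
    ]
    assert _gather_revlog(files) == [
        (b'data/foo.py', {b'.i': (FILETYPE_FILELOG_MAIN, 64),
                          b'.d': (FILETYPE_FILELOG_OTHER, 1024)}),
    ]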
578 |
|
594 | |||
579 |
|
595 | |||
580 | def _split_revlog_ext(filename): |
|
596 | def _split_revlog_ext(filename): | |
581 | """split the revlog file prefix from the variable extension""" |
|
597 | """split the revlog file prefix from the variable extension""" | |
582 | if filename.endswith(REVLOG_FILES_LONG_EXT): |
|
598 | if filename.endswith(REVLOG_FILES_LONG_EXT): | |
583 | char = b'-' |
|
599 | char = b'-' | |
584 | else: |
|
600 | else: | |
585 | char = b'.' |
|
601 | char = b'.' | |
586 | idx = filename.rfind(char) |
|
602 | idx = filename.rfind(char) | |
587 | return filename[:idx], filename[idx:] |
|
603 | return filename[:idx], filename[idx:] | |
588 |
|
604 | |||
589 |
|
605 | |||
590 | def _ext_key(ext): |
|
606 | def _ext_key(ext): | |
591 | """a key to order revlog suffix |
|
607 | """a key to order revlog suffix | |
592 |
|
608 | |||
593 | important to issue .i after other entries.""" |

609 | important to issue .i after other entries.""" | |
594 | # the only important part of this order is to keep the `.i` last. |
|
610 | # the only important part of this order is to keep the `.i` last. | |
595 | if ext.endswith(b'.n'): |
|
611 | if ext.endswith(b'.n'): | |
596 | return (0, ext) |
|
612 | return (0, ext) | |
597 | elif ext.endswith(b'.nd'): |
|
613 | elif ext.endswith(b'.nd'): | |
598 | return (10, ext) |
|
614 | return (10, ext) | |
599 | elif ext.endswith(b'.d'): |
|
615 | elif ext.endswith(b'.d'): | |
600 | return (20, ext) |
|
616 | return (20, ext) | |
601 | elif ext.endswith(b'.i'): |
|
617 | elif ext.endswith(b'.i'): | |
602 | return (50, ext) |
|
618 | return (50, ext) | |
603 | else: |
|
619 | else: | |
604 | return (40, ext) |
|
620 | return (40, ext) | |
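For illustration (not in the original file): sorting a typical suffix set with this key keeps the ".i" index last, so it is always the final file emitted for a revlog.

    exts = [b'.i', b'.d', b'.nd', b'.n']
    assert sorted(exts, key=_ext_key) == [b'.n', b'.nd', b'.d', b'.i']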
605 |
|
621 | |||
606 |
|
622 | |||
607 | class basicstore: |
|
623 | class basicstore: | |
608 | '''base class for local repository stores''' |
|
624 | '''base class for local repository stores''' | |
609 |
|
625 | |||
610 | def __init__(self, path, vfstype): |
|
626 | def __init__(self, path, vfstype): | |
611 | vfs = vfstype(path) |
|
627 | vfs = vfstype(path) | |
612 | self.path = vfs.base |
|
628 | self.path = vfs.base | |
613 | self.createmode = _calcmode(vfs) |
|
629 | self.createmode = _calcmode(vfs) | |
614 | vfs.createmode = self.createmode |
|
630 | vfs.createmode = self.createmode | |
615 | self.rawvfs = vfs |
|
631 | self.rawvfs = vfs | |
616 | self.vfs = vfsmod.filtervfs(vfs, encodedir) |
|
632 | self.vfs = vfsmod.filtervfs(vfs, encodedir) | |
617 | self.opener = self.vfs |
|
633 | self.opener = self.vfs | |
618 |
|
634 | |||
619 | def join(self, f): |
|
635 | def join(self, f): | |
620 | return self.path + b'/' + encodedir(f) |
|
636 | return self.path + b'/' + encodedir(f) | |
621 |
|
637 | |||
622 | def _walk(self, relpath, recurse, undecodable=None): |
|
638 | def _walk(self, relpath, recurse, undecodable=None): | |
623 | '''yields (revlog_type, unencoded, size)''' |
|
639 | '''yields (revlog_type, unencoded, size)''' | |
624 | path = self.path |
|
640 | path = self.path | |
625 | if relpath: |
|
641 | if relpath: | |
626 | path += b'/' + relpath |
|
642 | path += b'/' + relpath | |
627 | striplen = len(self.path) + 1 |
|
643 | striplen = len(self.path) + 1 | |
628 | l = [] |
|
644 | l = [] | |
629 | if self.rawvfs.isdir(path): |
|
645 | if self.rawvfs.isdir(path): | |
630 | visit = [path] |
|
646 | visit = [path] | |
631 | readdir = self.rawvfs.readdir |
|
647 | readdir = self.rawvfs.readdir | |
632 | while visit: |
|
648 | while visit: | |
633 | p = visit.pop() |
|
649 | p = visit.pop() | |
634 | for f, kind, st in readdir(p, stat=True): |
|
650 | for f, kind, st in readdir(p, stat=True): | |
635 | fp = p + b'/' + f |
|
651 | fp = p + b'/' + f | |
636 | rl_type = is_revlog(f, kind, st) |
|
652 | rl_type = is_revlog(f, kind, st) | |
637 | if rl_type is not None: |
|
653 | if rl_type is not None: | |
638 | n = util.pconvert(fp[striplen:]) |
|
654 | n = util.pconvert(fp[striplen:]) | |
639 | l.append((decodedir(n), (rl_type, st.st_size))) |
|
655 | l.append((decodedir(n), (rl_type, st.st_size))) | |
640 | elif kind == stat.S_IFDIR and recurse: |
|
656 | elif kind == stat.S_IFDIR and recurse: | |
641 | visit.append(fp) |
|
657 | visit.append(fp) | |
642 |
|
658 | |||
643 | l.sort() |
|
659 | l.sort() | |
644 | return l |
|
660 | return l | |
645 |
|
661 | |||
646 | def changelog(self, trypending, concurrencychecker=None): |
|
662 | def changelog(self, trypending, concurrencychecker=None): | |
647 | return changelog.changelog( |
|
663 | return changelog.changelog( | |
648 | self.vfs, |
|
664 | self.vfs, | |
649 | trypending=trypending, |
|
665 | trypending=trypending, | |
650 | concurrencychecker=concurrencychecker, |
|
666 | concurrencychecker=concurrencychecker, | |
651 | ) |
|
667 | ) | |
652 |
|
668 | |||
653 | def manifestlog(self, repo, storenarrowmatch): |
|
669 | def manifestlog(self, repo, storenarrowmatch): | |
654 | rootstore = manifest.manifestrevlog(repo.nodeconstants, self.vfs) |
|
670 | rootstore = manifest.manifestrevlog(repo.nodeconstants, self.vfs) | |
655 | return manifest.manifestlog(self.vfs, repo, rootstore, storenarrowmatch) |
|
671 | return manifest.manifestlog(self.vfs, repo, rootstore, storenarrowmatch) | |
656 |
|
672 | |||
657 | def data_entries( |
|
673 | def data_entries( | |
658 | self, matcher=None, undecodable=None |
|
674 | self, matcher=None, undecodable=None | |
659 | ) -> Generator[BaseStoreEntry, None, None]: |
|
675 | ) -> Generator[BaseStoreEntry, None, None]: | |
660 | """Like walk, but excluding the changelog and root manifest. |
|
676 | """Like walk, but excluding the changelog and root manifest. | |
661 |
|
677 | |||
662 | When [undecodable] is None, revlog names that can't be |

678 | When [undecodable] is None, revlog names that can't be | |
663 | decoded cause an exception. When it is provided, it should |
|
679 | decoded cause an exception. When it is provided, it should | |
664 | be a list and the filenames that can't be decoded are added |
|
680 | be a list and the filenames that can't be decoded are added | |
665 | to it instead. This is very rarely needed.""" |
|
681 | to it instead. This is very rarely needed.""" | |
666 | dirs = [ |
|
682 | dirs = [ | |
667 | (b'data', FILEFLAGS_FILELOG), |
|
683 | (b'data', FILEFLAGS_FILELOG), | |
668 | (b'meta', FILEFLAGS_MANIFESTLOG), |
|
684 | (b'meta', FILEFLAGS_MANIFESTLOG), | |
669 | ] |
|
685 | ] | |
670 | for base_dir, rl_type in dirs: |
|
686 | for base_dir, rl_type in dirs: | |
671 | files = self._walk(base_dir, True, undecodable=undecodable) |
|
687 | files = self._walk(base_dir, True, undecodable=undecodable) | |
672 | files = (f for f in files if f[1][0] is not None) |
|
688 | files = (f for f in files if f[1][0] is not None) | |
673 | for revlog, details in _gather_revlog(files): |
|
689 | for revlog, details in _gather_revlog(files): | |
674 | file_details = {} |
|
690 | file_details = {} | |
675 | revlog_target_id = revlog.split(b'/', 1)[1] |
|
691 | revlog_target_id = revlog.split(b'/', 1)[1] | |
676 | for ext, (t, s) in sorted(details.items()): |
|
692 | for ext, (t, s) in sorted(details.items()): | |
677 | file_details[ext] = { |
|
693 | file_details[ext] = { | |
678 | 'is_volatile': bool(t & FILEFLAGS_VOLATILE), |
|
694 | 'is_volatile': bool(t & FILEFLAGS_VOLATILE), | |
679 | 'file_size': s, |
|
695 | 'file_size': s, | |
680 | } |
|
696 | } | |
681 | yield RevlogStoreEntry( |
|
697 | yield RevlogStoreEntry( | |
682 | path_prefix=revlog, |
|
698 | path_prefix=revlog, | |
683 | revlog_type=rl_type, |
|
699 | revlog_type=rl_type, | |
684 | target_id=revlog_target_id, |
|
700 | target_id=revlog_target_id, | |
685 | details=file_details, |
|
701 | details=file_details, | |
686 | ) |
|
702 | ) | |
687 |
|
703 | |||
688 | def top_entries( |
|
704 | def top_entries( | |
689 | self, phase=False, obsolescence=False |
|
705 | self, phase=False, obsolescence=False | |
690 | ) -> Generator[BaseStoreEntry, None, None]: |
|
706 | ) -> Generator[BaseStoreEntry, None, None]: | |
691 | if phase and self.vfs.exists(b'phaseroots'): |
|
707 | if phase and self.vfs.exists(b'phaseroots'): | |
692 | yield SimpleStoreEntry( |
|
708 | yield SimpleStoreEntry( | |
693 | entry_path=b'phaseroots', |
|
709 | entry_path=b'phaseroots', | |
694 | is_volatile=True, |
|
710 | is_volatile=True, | |
695 | ) |
|
711 | ) | |
696 |
|
712 | |||
697 | if obsolescence and self.vfs.exists(b'obsstore'): |
|
713 | if obsolescence and self.vfs.exists(b'obsstore'): | |
698 | # XXX if we had the file size it could be non-volatile |
|
714 | # XXX if we had the file size it could be non-volatile | |
699 | yield SimpleStoreEntry( |
|
715 | yield SimpleStoreEntry( | |
700 | entry_path=b'obsstore', |
|
716 | entry_path=b'obsstore', | |
701 | is_volatile=True, |
|
717 | is_volatile=True, | |
702 | ) |
|
718 | ) | |
703 |
|
719 | |||
704 | files = reversed(self._walk(b'', False)) |
|
720 | files = reversed(self._walk(b'', False)) | |
705 |
|
721 | |||
706 | changelogs = collections.defaultdict(dict) |
|
722 | changelogs = collections.defaultdict(dict) | |
707 | manifestlogs = collections.defaultdict(dict) |
|
723 | manifestlogs = collections.defaultdict(dict) | |
708 |
|
724 | |||
709 | for u, (t, s) in files: |
|
725 | for u, (t, s) in files: | |
710 | if u.startswith(b'00changelog'): |
|
726 | if u.startswith(b'00changelog'): | |
711 | name, ext = _split_revlog_ext(u) |
|
727 | name, ext = _split_revlog_ext(u) | |
712 | changelogs[name][ext] = (t, s) |
|
728 | changelogs[name][ext] = (t, s) | |
713 | elif u.startswith(b'00manifest'): |
|
729 | elif u.startswith(b'00manifest'): | |
714 | name, ext = _split_revlog_ext(u) |
|
730 | name, ext = _split_revlog_ext(u) | |
715 | manifestlogs[name][ext] = (t, s) |
|
731 | manifestlogs[name][ext] = (t, s) | |
716 | else: |
|
732 | else: | |
717 | yield SimpleStoreEntry( |
|
733 | yield SimpleStoreEntry( | |
718 | entry_path=u, |
|
734 | entry_path=u, | |
719 | is_volatile=bool(t & FILEFLAGS_VOLATILE), |
|
735 | is_volatile=bool(t & FILEFLAGS_VOLATILE), | |
720 | file_size=s, |
|
736 | file_size=s, | |
721 | ) |
|
737 | ) | |
722 | # yield manifest before changelog |
|
738 | # yield manifest before changelog | |
723 | top_rl = [ |
|
739 | top_rl = [ | |
724 | (manifestlogs, FILEFLAGS_MANIFESTLOG), |
|
740 | (manifestlogs, FILEFLAGS_MANIFESTLOG), | |
725 | (changelogs, FILEFLAGS_CHANGELOG), |
|
741 | (changelogs, FILEFLAGS_CHANGELOG), | |
726 | ] |
|
742 | ] | |
727 | assert len(manifestlogs) <= 1 |
|
743 | assert len(manifestlogs) <= 1 | |
728 | assert len(changelogs) <= 1 |
|
744 | assert len(changelogs) <= 1 | |
729 | for data, revlog_type in top_rl: |
|
745 | for data, revlog_type in top_rl: | |
730 | for revlog, details in sorted(data.items()): |
|
746 | for revlog, details in sorted(data.items()): | |
731 | file_details = {} |
|
747 | file_details = {} | |
732 | for ext, (t, s) in details.items(): |
|
748 | for ext, (t, s) in details.items(): | |
733 | file_details[ext] = { |
|
749 | file_details[ext] = { | |
734 | 'is_volatile': bool(t & FILEFLAGS_VOLATILE), |
|
750 | 'is_volatile': bool(t & FILEFLAGS_VOLATILE), | |
735 | 'file_size': s, |
|
751 | 'file_size': s, | |
736 | } |
|
752 | } | |
737 | yield RevlogStoreEntry( |
|
753 | yield RevlogStoreEntry( | |
738 | path_prefix=revlog, |
|
754 | path_prefix=revlog, | |
739 | revlog_type=revlog_type, |
|
755 | revlog_type=revlog_type, | |
740 | target_id=b'', |
|
756 | target_id=b'', | |
741 | details=file_details, |
|
757 | details=file_details, | |
742 | ) |
|
758 | ) | |
743 |
|
759 | |||
744 | def walk( |
|
760 | def walk( | |
745 | self, matcher=None, phase=False, obsolescence=False |
|
761 | self, matcher=None, phase=False, obsolescence=False | |
746 | ) -> Generator[BaseStoreEntry, None, None]: |
|
762 | ) -> Generator[BaseStoreEntry, None, None]: | |
747 | """return files related to data storage (ie: revlogs) |
|
763 | """return files related to data storage (ie: revlogs) | |
748 |
|
764 | |||
749 | yields instances of BaseStoreEntry subclasses |

765 | yields instances of BaseStoreEntry subclasses | |
750 |
|
766 | |||
751 | if a matcher is passed, only storage files of tracked paths that |

767 | if a matcher is passed, only storage files of tracked paths that | |
752 | match the matcher are yielded |

768 | match the matcher are yielded | |
753 | """ |
|
769 | """ | |
754 | # yield data files first |
|
770 | # yield data files first | |
755 | for x in self.data_entries(matcher): |
|
771 | for x in self.data_entries(matcher): | |
756 | yield x |
|
772 | yield x | |
757 | for x in self.top_entries(phase=phase, obsolescence=obsolescence): |
|
773 | for x in self.top_entries(phase=phase, obsolescence=obsolescence): | |
758 | yield x |
|
774 | yield x | |
759 |
|
775 | |||
760 | def copylist(self): |
|
776 | def copylist(self): | |
761 | return _data |
|
777 | return _data | |
762 |
|
778 | |||
763 | def write(self, tr): |
|
779 | def write(self, tr): | |
764 | pass |
|
780 | pass | |
765 |
|
781 | |||
766 | def invalidatecaches(self): |
|
782 | def invalidatecaches(self): | |
767 | pass |
|
783 | pass | |
768 |
|
784 | |||
769 | def markremoved(self, fn): |
|
785 | def markremoved(self, fn): | |
770 | pass |
|
786 | pass | |
771 |
|
787 | |||
772 | def __contains__(self, path): |
|
788 | def __contains__(self, path): | |
773 | '''Checks if the store contains path''' |
|
789 | '''Checks if the store contains path''' | |
774 | path = b"/".join((b"data", path)) |
|
790 | path = b"/".join((b"data", path)) | |
775 | # file? |
|
791 | # file? | |
776 | if self.vfs.exists(path + b".i"): |
|
792 | if self.vfs.exists(path + b".i"): | |
777 | return True |
|
793 | return True | |
778 | # dir? |
|
794 | # dir? | |
779 | if not path.endswith(b"/"): |
|
795 | if not path.endswith(b"/"): | |
780 | path = path + b"/" |
|
796 | path = path + b"/" | |
781 | return self.vfs.exists(path) |
|
797 | return self.vfs.exists(path) | |
782 |
|
798 | |||
783 |
|
799 | |||
784 | class encodedstore(basicstore): |
|
800 | class encodedstore(basicstore): | |
785 | def __init__(self, path, vfstype): |
|
801 | def __init__(self, path, vfstype): | |
786 | vfs = vfstype(path + b'/store') |
|
802 | vfs = vfstype(path + b'/store') | |
787 | self.path = vfs.base |
|
803 | self.path = vfs.base | |
788 | self.createmode = _calcmode(vfs) |
|
804 | self.createmode = _calcmode(vfs) | |
789 | vfs.createmode = self.createmode |
|
805 | vfs.createmode = self.createmode | |
790 | self.rawvfs = vfs |
|
806 | self.rawvfs = vfs | |
791 | self.vfs = vfsmod.filtervfs(vfs, encodefilename) |
|
807 | self.vfs = vfsmod.filtervfs(vfs, encodefilename) | |
792 | self.opener = self.vfs |
|
808 | self.opener = self.vfs | |
793 |
|
809 | |||
794 | def _walk(self, relpath, recurse, undecodable=None): |
|
810 | def _walk(self, relpath, recurse, undecodable=None): | |
795 | old = super()._walk(relpath, recurse) |
|
811 | old = super()._walk(relpath, recurse) | |
796 | new = [] |
|
812 | new = [] | |
797 | for f1, value in old: |
|
813 | for f1, value in old: | |
798 | try: |
|
814 | try: | |
799 | f2 = decodefilename(f1) |
|
815 | f2 = decodefilename(f1) | |
800 | except KeyError: |
|
816 | except KeyError: | |
801 | if undecodable is None: |
|
817 | if undecodable is None: | |
802 | msg = _(b'undecodable revlog name %s') % f1 |
|
818 | msg = _(b'undecodable revlog name %s') % f1 | |
803 | raise error.StorageError(msg) |
|
819 | raise error.StorageError(msg) | |
804 | else: |
|
820 | else: | |
805 | undecodable.append(f1) |
|
821 | undecodable.append(f1) | |
806 | continue |
|
822 | continue | |
807 | new.append((f2, value)) |
|
823 | new.append((f2, value)) | |
808 | return new |
|
824 | return new | |
809 |
|
825 | |||
810 | def data_entries( |
|
826 | def data_entries( | |
811 | self, matcher=None, undecodable=None |
|
827 | self, matcher=None, undecodable=None | |
812 | ) -> Generator[BaseStoreEntry, None, None]: |
|
828 | ) -> Generator[BaseStoreEntry, None, None]: | |
813 | entries = super(encodedstore, self).data_entries( |
|
829 | entries = super(encodedstore, self).data_entries( | |
814 | undecodable=undecodable |
|
830 | undecodable=undecodable | |
815 | ) |
|
831 | ) | |
816 | for entry in entries: |
|
832 | for entry in entries: | |
817 | if _match_tracked_entry(entry, matcher): |
|
833 | if _match_tracked_entry(entry, matcher): | |
818 | yield entry |
|
834 | yield entry | |
819 |
|
835 | |||
820 | def join(self, f): |
|
836 | def join(self, f): | |
821 | return self.path + b'/' + encodefilename(f) |
|
837 | return self.path + b'/' + encodefilename(f) | |
822 |
|
838 | |||
823 | def copylist(self): |
|
839 | def copylist(self): | |
824 | return [b'requires', b'00changelog.i'] + [b'store/' + f for f in _data] |
|
840 | return [b'requires', b'00changelog.i'] + [b'store/' + f for f in _data] | |
825 |
|
841 | |||
826 |
|
842 | |||
827 | class fncache: |
|
843 | class fncache: | |
828 | # the filename used to be partially encoded |
|
844 | # the filename used to be partially encoded | |
829 | # hence the encodedir/decodedir dance |
|
845 | # hence the encodedir/decodedir dance | |
830 | def __init__(self, vfs): |
|
846 | def __init__(self, vfs): | |
831 | self.vfs = vfs |
|
847 | self.vfs = vfs | |
832 | self._ignores = set() |
|
848 | self._ignores = set() | |
833 | self.entries = None |
|
849 | self.entries = None | |
834 | self._dirty = False |
|
850 | self._dirty = False | |
835 | # set of new additions to fncache |
|
851 | # set of new additions to fncache | |
836 | self.addls = set() |
|
852 | self.addls = set() | |
837 |
|
853 | |||
838 | def ensureloaded(self, warn=None): |
|
854 | def ensureloaded(self, warn=None): | |
839 | """read the fncache file if not already read. |
|
855 | """read the fncache file if not already read. | |
840 |
|
856 | |||
841 | If the file on disk is corrupted, raise. If warn is provided, |
|
857 | If the file on disk is corrupted, raise. If warn is provided, | |
842 | warn and keep going instead.""" |
|
858 | warn and keep going instead.""" | |
843 | if self.entries is None: |
|
859 | if self.entries is None: | |
844 | self._load(warn) |
|
860 | self._load(warn) | |
845 |
|
861 | |||
846 | def _load(self, warn=None): |
|
862 | def _load(self, warn=None): | |
847 | '''fill the entries from the fncache file''' |
|
863 | '''fill the entries from the fncache file''' | |
848 | self._dirty = False |
|
864 | self._dirty = False | |
849 | try: |
|
865 | try: | |
850 | fp = self.vfs(b'fncache', mode=b'rb') |
|
866 | fp = self.vfs(b'fncache', mode=b'rb') | |
851 | except IOError: |
|
867 | except IOError: | |
852 | # skip nonexistent file |
|
868 | # skip nonexistent file | |
853 | self.entries = set() |
|
869 | self.entries = set() | |
854 | return |
|
870 | return | |
855 |
|
871 | |||
856 | self.entries = set() |
|
872 | self.entries = set() | |
857 | chunk = b'' |
|
873 | chunk = b'' | |
858 | for c in iter(functools.partial(fp.read, fncache_chunksize), b''): |
|
874 | for c in iter(functools.partial(fp.read, fncache_chunksize), b''): | |
859 | chunk += c |
|
875 | chunk += c | |
860 | try: |
|
876 | try: | |
861 | p = chunk.rindex(b'\n') |
|
877 | p = chunk.rindex(b'\n') | |
862 | self.entries.update(decodedir(chunk[: p + 1]).splitlines()) |
|
878 | self.entries.update(decodedir(chunk[: p + 1]).splitlines()) | |
863 | chunk = chunk[p + 1 :] |
|
879 | chunk = chunk[p + 1 :] | |
864 | except ValueError: |
|
880 | except ValueError: | |
865 | # substring '\n' not found, maybe the entry is bigger than the |
|
881 | # substring '\n' not found, maybe the entry is bigger than the | |
866 | # chunksize, so let's keep iterating |
|
882 | # chunksize, so let's keep iterating | |
867 | pass |
|
883 | pass | |
868 |
|
884 | |||
869 | if chunk: |
|
885 | if chunk: | |
870 | msg = _(b"fncache does not end with a newline") |
|
886 | msg = _(b"fncache does not end with a newline") | |
871 | if warn: |
|
887 | if warn: | |
872 | warn(msg + b'\n') |
|
888 | warn(msg + b'\n') | |
873 | else: |
|
889 | else: | |
874 | raise error.Abort( |
|
890 | raise error.Abort( | |
875 | msg, |
|
891 | msg, | |
876 | hint=_( |
|
892 | hint=_( | |
877 | b"use 'hg debugrebuildfncache' to " |
|
893 | b"use 'hg debugrebuildfncache' to " | |
878 | b"rebuild the fncache" |
|
894 | b"rebuild the fncache" | |
879 | ), |
|
895 | ), | |
880 | ) |
|
896 | ) | |
881 | self._checkentries(fp, warn) |
|
897 | self._checkentries(fp, warn) | |
882 | fp.close() |
|
898 | fp.close() | |
883 |
|
899 | |||
884 | def _checkentries(self, fp, warn): |
|
900 | def _checkentries(self, fp, warn): | |
885 | """make sure there is no empty string in entries""" |
|
901 | """make sure there is no empty string in entries""" | |
886 | if b'' in self.entries: |
|
902 | if b'' in self.entries: | |
887 | fp.seek(0) |
|
903 | fp.seek(0) | |
888 | for n, line in enumerate(fp): |
|
904 | for n, line in enumerate(fp): | |
889 | if not line.rstrip(b'\n'): |
|
905 | if not line.rstrip(b'\n'): | |
890 | t = _(b'invalid entry in fncache, line %d') % (n + 1) |
|
906 | t = _(b'invalid entry in fncache, line %d') % (n + 1) | |
891 | if warn: |
|
907 | if warn: | |
892 | warn(t + b'\n') |
|
908 | warn(t + b'\n') | |
893 | else: |
|
909 | else: | |
894 | raise error.Abort(t) |
|
910 | raise error.Abort(t) | |
895 |
|
911 | |||
896 | def write(self, tr): |
|
912 | def write(self, tr): | |
897 | if self._dirty: |
|
913 | if self._dirty: | |
898 | assert self.entries is not None |
|
914 | assert self.entries is not None | |
899 | self.entries = self.entries | self.addls |
|
915 | self.entries = self.entries | self.addls | |
900 | self.addls = set() |
|
916 | self.addls = set() | |
901 | tr.addbackup(b'fncache') |
|
917 | tr.addbackup(b'fncache') | |
902 | fp = self.vfs(b'fncache', mode=b'wb', atomictemp=True) |
|
918 | fp = self.vfs(b'fncache', mode=b'wb', atomictemp=True) | |
903 | if self.entries: |
|
919 | if self.entries: | |
904 | fp.write(encodedir(b'\n'.join(self.entries) + b'\n')) |
|
920 | fp.write(encodedir(b'\n'.join(self.entries) + b'\n')) | |
905 | fp.close() |
|
921 | fp.close() | |
906 | self._dirty = False |
|
922 | self._dirty = False | |
907 | if self.addls: |
|
923 | if self.addls: | |
908 | # if we have just new entries, let's append them to the fncache |
|
924 | # if we have just new entries, let's append them to the fncache | |
909 | tr.addbackup(b'fncache') |
|
925 | tr.addbackup(b'fncache') | |
910 | fp = self.vfs(b'fncache', mode=b'ab', atomictemp=True) |
|
926 | fp = self.vfs(b'fncache', mode=b'ab', atomictemp=True) | |
911 | if self.addls: |
|
927 | if self.addls: | |
912 | fp.write(encodedir(b'\n'.join(self.addls) + b'\n')) |
|
928 | fp.write(encodedir(b'\n'.join(self.addls) + b'\n')) | |
913 | fp.close() |
|
929 | fp.close() | |
914 | self.entries = None |
|
930 | self.entries = None | |
915 | self.addls = set() |
|
931 | self.addls = set() | |
916 |
|
932 | |||
917 | def addignore(self, fn): |
|
933 | def addignore(self, fn): | |
918 | self._ignores.add(fn) |
|
934 | self._ignores.add(fn) | |
919 |
|
935 | |||
920 | def add(self, fn): |
|
936 | def add(self, fn): | |
921 | if fn in self._ignores: |
|
937 | if fn in self._ignores: | |
922 | return |
|
938 | return | |
923 | if self.entries is None: |
|
939 | if self.entries is None: | |
924 | self._load() |
|
940 | self._load() | |
925 | if fn not in self.entries: |
|
941 | if fn not in self.entries: | |
926 | self.addls.add(fn) |
|
942 | self.addls.add(fn) | |
927 |
|
943 | |||
928 | def remove(self, fn): |
|
944 | def remove(self, fn): | |
929 | if self.entries is None: |
|
945 | if self.entries is None: | |
930 | self._load() |
|
946 | self._load() | |
931 | if fn in self.addls: |
|
947 | if fn in self.addls: | |
932 | self.addls.remove(fn) |
|
948 | self.addls.remove(fn) | |
933 | return |
|
949 | return | |
934 | try: |
|
950 | try: | |
935 | self.entries.remove(fn) |
|
951 | self.entries.remove(fn) | |
936 | self._dirty = True |
|
952 | self._dirty = True | |
937 | except KeyError: |
|
953 | except KeyError: | |
938 | pass |
|
954 | pass | |
939 |
|
955 | |||
940 | def __contains__(self, fn): |
|
956 | def __contains__(self, fn): | |
941 | if fn in self.addls: |
|
957 | if fn in self.addls: | |
942 | return True |
|
958 | return True | |
943 | if self.entries is None: |
|
959 | if self.entries is None: | |
944 | self._load() |
|
960 | self._load() | |
945 | return fn in self.entries |
|
961 | return fn in self.entries | |
946 |
|
962 | |||
947 | def __iter__(self): |
|
963 | def __iter__(self): | |
948 | if self.entries is None: |
|
964 | if self.entries is None: | |
949 | self._load() |
|
965 | self._load() | |
950 | return iter(self.entries | self.addls) |
|
966 | return iter(self.entries | self.addls) | |
951 |
|
967 | |||
952 |
|
968 | |||
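The chunked read in fncache._load above generalises to this standalone sketch (the decodedir step is omitted): it reads fixed-size pieces, splits off everything up to the last newline, and carries the unterminated tail into the next iteration:

    import functools

    def _read_entries_chunked(fp, chunksize=10 ** 6):
        # mirror of the loop in fncache._load: emit complete lines per chunk,
        # keep the partial last line for the next read
        entries = set()
        chunk = b''
        for piece in iter(functools.partial(fp.read, chunksize), b''):
            chunk += piece
            try:
                p = chunk.rindex(b'\n')
                entries.update(chunk[: p + 1].splitlines())
                chunk = chunk[p + 1 :]
            except ValueError:
                # no newline in the buffer yet; the entry spans chunks
                pass
        # a non-empty leftover means the file did not end with a newline
        return entries, chunk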
953 | class _fncachevfs(vfsmod.proxyvfs): |
|
969 | class _fncachevfs(vfsmod.proxyvfs): | |
954 | def __init__(self, vfs, fnc, encode): |
|
970 | def __init__(self, vfs, fnc, encode): | |
955 | vfsmod.proxyvfs.__init__(self, vfs) |
|
971 | vfsmod.proxyvfs.__init__(self, vfs) | |
956 | self.fncache = fnc |
|
972 | self.fncache = fnc | |
957 | self.encode = encode |
|
973 | self.encode = encode | |
958 |
|
974 | |||
959 | def __call__(self, path, mode=b'r', *args, **kw): |
|
975 | def __call__(self, path, mode=b'r', *args, **kw): | |
960 | encoded = self.encode(path) |
|
976 | encoded = self.encode(path) | |
961 | if ( |
|
977 | if ( | |
962 | mode not in (b'r', b'rb') |
|
978 | mode not in (b'r', b'rb') | |
963 | and (path.startswith(b'data/') or path.startswith(b'meta/')) |
|
979 | and (path.startswith(b'data/') or path.startswith(b'meta/')) | |
964 | and revlog_type(path) is not None |
|
980 | and revlog_type(path) is not None | |
965 | ): |
|
981 | ): | |
966 | # do not trigger a fncache load when adding a file that already is |
|
982 | # do not trigger a fncache load when adding a file that already is | |
967 | # known to exist. |
|
983 | # known to exist. | |
968 | notload = self.fncache.entries is None and self.vfs.exists(encoded) |
|
984 | notload = self.fncache.entries is None and self.vfs.exists(encoded) | |
969 | if notload and b'r+' in mode and not self.vfs.stat(encoded).st_size: |
|
985 | if notload and b'r+' in mode and not self.vfs.stat(encoded).st_size: | |
970 | # when appending to an existing file, if the file has size zero, |
|
986 | # when appending to an existing file, if the file has size zero, | |
971 | # it should be considered as missing. Such zero-size files are |
|
987 | # it should be considered as missing. Such zero-size files are | |
972 | # the result of truncation when a transaction is aborted. |
|
988 | # the result of truncation when a transaction is aborted. | |
973 | notload = False |
|
989 | notload = False | |
974 | if not notload: |
|
990 | if not notload: | |
975 | self.fncache.add(path) |
|
991 | self.fncache.add(path) | |
976 | return self.vfs(encoded, mode, *args, **kw) |
|
992 | return self.vfs(encoded, mode, *args, **kw) | |
977 |
|
993 | |||
978 | def join(self, path): |
|
994 | def join(self, path): | |
979 | if path: |
|
995 | if path: | |
980 | return self.vfs.join(self.encode(path)) |
|
996 | return self.vfs.join(self.encode(path)) | |
981 | else: |
|
997 | else: | |
982 | return self.vfs.join(path) |
|
998 | return self.vfs.join(path) | |
983 |
|
999 | |||
984 | def register_file(self, path): |
|
1000 | def register_file(self, path): | |
985 | """generic hook point to let fncache steer its stew""" |
|
1001 | """generic hook point to let fncache steer its stew""" | |
986 | if path.startswith(b'data/') or path.startswith(b'meta/'): |
|
1002 | if path.startswith(b'data/') or path.startswith(b'meta/'): | |
987 | self.fncache.add(path) |
|
1003 | self.fncache.add(path) | |
988 |
|
1004 | |||
989 |
|
1005 | |||
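The registration decision in _fncachevfs.__call__ can be restated as a pure predicate; this is an illustrative paraphrase only, with exists, size and is_revlog standing in for the vfs.exists, vfs.stat and revlog_type calls:

    def _should_register(path, mode, fncache_loaded, exists, size, is_revlog):
        # only write-mode access to revlog files under data/ or meta/ may
        # need to be recorded in the fncache
        if mode in (b'r', b'rb') or not is_revlog:
            return False
        if not (path.startswith(b'data/') or path.startswith(b'meta/')):
            return False
        # skip a potentially expensive fncache load when the file already
        # exists on disk...
        skip = not fncache_loaded and exists
        # ...unless it is a zero-size leftover of an aborted transaction,
        # which must be treated as missing
        if skip and b'r+' in mode and size == 0:
            skip = False
        return not skip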
990 | class fncachestore(basicstore): |
|
1006 | class fncachestore(basicstore): | |
991 | def __init__(self, path, vfstype, dotencode): |
|
1007 | def __init__(self, path, vfstype, dotencode): | |
992 | if dotencode: |
|
1008 | if dotencode: | |
993 | encode = _pathencode |
|
1009 | encode = _pathencode | |
994 | else: |
|
1010 | else: | |
995 | encode = _plainhybridencode |
|
1011 | encode = _plainhybridencode | |
996 | self.encode = encode |
|
1012 | self.encode = encode | |
997 | vfs = vfstype(path + b'/store') |
|
1013 | vfs = vfstype(path + b'/store') | |
998 | self.path = vfs.base |
|
1014 | self.path = vfs.base | |
999 | self.pathsep = self.path + b'/' |
|
1015 | self.pathsep = self.path + b'/' | |
1000 | self.createmode = _calcmode(vfs) |
|
1016 | self.createmode = _calcmode(vfs) | |
1001 | vfs.createmode = self.createmode |
|
1017 | vfs.createmode = self.createmode | |
1002 | self.rawvfs = vfs |
|
1018 | self.rawvfs = vfs | |
1003 | fnc = fncache(vfs) |
|
1019 | fnc = fncache(vfs) | |
1004 | self.fncache = fnc |
|
1020 | self.fncache = fnc | |
1005 | self.vfs = _fncachevfs(vfs, fnc, encode) |
|
1021 | self.vfs = _fncachevfs(vfs, fnc, encode) | |
1006 | self.opener = self.vfs |
|
1022 | self.opener = self.vfs | |
1007 |
|
1023 | |||
1008 | def join(self, f): |
|
1024 | def join(self, f): | |
1009 | return self.pathsep + self.encode(f) |
|
1025 | return self.pathsep + self.encode(f) | |
1010 |
|
1026 | |||
1011 | def getsize(self, path): |
|
1027 | def getsize(self, path): | |
1012 | return self.rawvfs.stat(path).st_size |
|
1028 | return self.rawvfs.stat(path).st_size | |
1013 |
|
1029 | |||
1014 | def data_entries( |
|
1030 | def data_entries( | |
1015 | self, matcher=None, undecodable=None |
|
1031 | self, matcher=None, undecodable=None | |
1016 | ) -> Generator[BaseStoreEntry, None, None]: |
|
1032 | ) -> Generator[BaseStoreEntry, None, None]: | |
1017 | files = ((f, revlog_type(f)) for f in self.fncache) |
|
1033 | files = ((f, revlog_type(f)) for f in self.fncache) | |
1018 | # Note: all files in fncache should be revlog related; however, the |
|
1034 | # Note: all files in fncache should be revlog related; however, the | |
1019 | # fncache might contain non-revlog files added by a previous version of |
|
1035 | # fncache might contain non-revlog files added by a previous version of | |
1020 | # Mercurial. |
|
1036 | # Mercurial. | |
1021 | files = (f for f in files if f[1] is not None) |
|
1037 | files = (f for f in files if f[1] is not None) | |
1022 | by_revlog = _gather_revlog(files) |
|
1038 | by_revlog = _gather_revlog(files) | |
1023 | for revlog, details in by_revlog: |
|
1039 | for revlog, details in by_revlog: | |
1024 | file_details = {} |
|
1040 | file_details = {} | |
1025 | if revlog.startswith(b'data/'): |
|
1041 | if revlog.startswith(b'data/'): | |
1026 | rl_type = FILEFLAGS_FILELOG |
|
1042 | rl_type = FILEFLAGS_FILELOG | |
1027 | revlog_target_id = revlog.split(b'/', 1)[1] |
|
1043 | revlog_target_id = revlog.split(b'/', 1)[1] | |
1028 | elif revlog.startswith(b'meta/'): |
|
1044 | elif revlog.startswith(b'meta/'): | |
1029 | rl_type = FILEFLAGS_MANIFESTLOG |
|
1045 | rl_type = FILEFLAGS_MANIFESTLOG | |
1030 | # drop the initial directory and the `00manifest` file part |
|
1046 | # drop the initial directory and the `00manifest` file part | |
1031 | tmp = revlog.split(b'/', 1)[1] |
|
1047 | tmp = revlog.split(b'/', 1)[1] | |
1032 | revlog_target_id = tmp.rsplit(b'/', 1)[0] + b'/' |
|
1048 | revlog_target_id = tmp.rsplit(b'/', 1)[0] + b'/' | |
1033 | else: |
|
1049 | else: | |
1034 | # unreachable |
|
1050 | # unreachable | |
1035 | assert False, revlog |
|
1051 | assert False, revlog | |
1036 | for ext, t in details.items(): |
|
1052 | for ext, t in details.items(): | |
1037 | file_details[ext] = { |
|
1053 | file_details[ext] = { | |
1038 | 'is_volatile': bool(t & FILEFLAGS_VOLATILE), |
|
1054 | 'is_volatile': bool(t & FILEFLAGS_VOLATILE), | |
1039 | } |
|
1055 | } | |
1040 | entry = RevlogStoreEntry( |
|
1056 | entry = RevlogStoreEntry( | |
1041 | path_prefix=revlog, |
|
1057 | path_prefix=revlog, | |
1042 | revlog_type=rl_type, |
|
1058 | revlog_type=rl_type, | |
1043 | target_id=revlog_target_id, |
|
1059 | target_id=revlog_target_id, | |
1044 | details=file_details, |
|
1060 | details=file_details, | |
1045 | ) |
|
1061 | ) | |
1046 | if _match_tracked_entry(entry, matcher): |
|
1062 | if _match_tracked_entry(entry, matcher): | |
1047 | yield entry |
|
1063 | yield entry | |
1048 |
|
1064 | |||
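The target_id derivation in data_entries above reduces to two branches; a minimal restatement, assuming the same data/ and meta/ layout:

    def _revlog_target_id(revlog_prefix):
        # b'data/foo/bar'        -> b'foo/bar'  (filelog: the tracked path)
        # b'meta/foo/00manifest' -> b'foo/'     (manifestlog: the directory)
        if revlog_prefix.startswith(b'data/'):
            return revlog_prefix.split(b'/', 1)[1]
        if revlog_prefix.startswith(b'meta/'):
            tmp = revlog_prefix.split(b'/', 1)[1]
            return tmp.rsplit(b'/', 1)[0] + b'/'
        raise ValueError(revlog_prefix)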
1049 | def copylist(self): |
|
1065 | def copylist(self): | |
1050 | d = ( |
|
1066 | d = ( | |
1051 | b'bookmarks', |
|
1067 | b'bookmarks', | |
1052 | b'narrowspec', |
|
1068 | b'narrowspec', | |
1053 | b'data', |
|
1069 | b'data', | |
1054 | b'meta', |
|
1070 | b'meta', | |
1055 | b'dh', |
|
1071 | b'dh', | |
1056 | b'fncache', |
|
1072 | b'fncache', | |
1057 | b'phaseroots', |
|
1073 | b'phaseroots', | |
1058 | b'obsstore', |
|
1074 | b'obsstore', | |
1059 | b'00manifest.d', |
|
1075 | b'00manifest.d', | |
1060 | b'00manifest.i', |
|
1076 | b'00manifest.i', | |
1061 | b'00changelog.d', |
|
1077 | b'00changelog.d', | |
1062 | b'00changelog.i', |
|
1078 | b'00changelog.i', | |
1063 | b'requires', |
|
1079 | b'requires', | |
1064 | ) |
|
1080 | ) | |
1065 | return [b'requires', b'00changelog.i'] + [b'store/' + f for f in d] |
|
1081 | return [b'requires', b'00changelog.i'] + [b'store/' + f for f in d] | |
1066 |
|
1082 | |||
1067 | def write(self, tr): |
|
1083 | def write(self, tr): | |
1068 | self.fncache.write(tr) |
|
1084 | self.fncache.write(tr) | |
1069 |
|
1085 | |||
1070 | def invalidatecaches(self): |
|
1086 | def invalidatecaches(self): | |
1071 | self.fncache.entries = None |
|
1087 | self.fncache.entries = None | |
1072 | self.fncache.addls = set() |
|
1088 | self.fncache.addls = set() | |
1073 |
|
1089 | |||
1074 | def markremoved(self, fn): |
|
1090 | def markremoved(self, fn): | |
1075 | self.fncache.remove(fn) |
|
1091 | self.fncache.remove(fn) | |
1076 |
|
1092 | |||
1077 | def _exists(self, f): |
|
1093 | def _exists(self, f): | |
1078 | ef = self.encode(f) |
|
1094 | ef = self.encode(f) | |
1079 | try: |
|
1095 | try: | |
1080 | self.getsize(ef) |
|
1096 | self.getsize(ef) | |
1081 | return True |
|
1097 | return True | |
1082 | except FileNotFoundError: |
|
1098 | except FileNotFoundError: | |
1083 | return False |
|
1099 | return False | |
1084 |
|
1100 | |||
1085 | def __contains__(self, path): |
|
1101 | def __contains__(self, path): | |
1086 | '''Checks if the store contains path''' |
|
1102 | '''Checks if the store contains path''' | |
1087 | path = b"/".join((b"data", path)) |
|
1103 | path = b"/".join((b"data", path)) | |
1088 | # check for files (exact match) |
|
1104 | # check for files (exact match) | |
1089 | e = path + b'.i' |
|
1105 | e = path + b'.i' | |
1090 | if e in self.fncache and self._exists(e): |
|
1106 | if e in self.fncache and self._exists(e): | |
1091 | return True |
|
1107 | return True | |
1092 | # now check for directories (prefix match) |
|
1108 | # now check for directories (prefix match) | |
1093 | if not path.endswith(b'/'): |
|
1109 | if not path.endswith(b'/'): | |
1094 | path += b'/' |
|
1110 | path += b'/' | |
1095 | for e in self.fncache: |
|
1111 | for e in self.fncache: | |
1096 | if e.startswith(path) and self._exists(e): |
|
1112 | if e.startswith(path) and self._exists(e): | |
1097 | return True |
|
1113 | return True | |
1098 | return False |
|
1114 | return False |
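Taken together, the membership test above amounts to the following sketch, where fncache_entries stands for the set of tracked names and exists for the on-disk check of the encoded path:

    def _store_contains(path, fncache_entries, exists):
        # exact match: `path` is a tracked file whose revlog index exists
        candidate = b'data/' + path + b'.i'
        if candidate in fncache_entries and exists(candidate):
            return True
        # prefix match: `path` is a directory containing tracked files
        prefix = b'data/' + path
        if not prefix.endswith(b'/'):
            prefix += b'/'
        return any(e.startswith(prefix) and exists(e) for e in fncache_entries)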
@@ -1,668 +1,649 b'' | |||||
1 | # upgrade.py - functions for in place upgrade of Mercurial repository |
|
1 | # upgrade.py - functions for in place upgrade of Mercurial repository | |
2 | # |
|
2 | # | |
3 | # Copyright (c) 2016-present, Gregory Szorc |
|
3 | # Copyright (c) 2016-present, Gregory Szorc | |
4 | # |
|
4 | # | |
5 | # This software may be used and distributed according to the terms of the |
|
5 | # This software may be used and distributed according to the terms of the | |
6 | # GNU General Public License version 2 or any later version. |
|
6 | # GNU General Public License version 2 or any later version. | |
7 |
|
7 | |||
8 |
|
8 | |||
9 | import stat |
|
9 | import stat | |
10 |
|
10 | |||
11 | from ..i18n import _ |
|
11 | from ..i18n import _ | |
12 | from ..pycompat import getattr |
|
12 | from ..pycompat import getattr | |
13 | from .. import ( |
|
13 | from .. import ( | |
14 | changelog, |
|
|||
15 | error, |
|
14 | error, | |
16 | filelog, |
|
|||
17 | manifest, |
|
|||
18 | metadata, |
|
15 | metadata, | |
19 | pycompat, |
|
16 | pycompat, | |
20 | requirements, |
|
17 | requirements, | |
21 | scmutil, |
|
18 | scmutil, | |
22 | store, |
|
19 | store, | |
23 | util, |
|
20 | util, | |
24 | vfs as vfsmod, |
|
21 | vfs as vfsmod, | |
25 | ) |
|
22 | ) | |
26 | from ..revlogutils import ( |
|
23 | from ..revlogutils import ( | |
27 | constants as revlogconst, |
|
24 | constants as revlogconst, | |
28 | flagutil, |
|
25 | flagutil, | |
29 | nodemap, |
|
26 | nodemap, | |
30 | sidedata as sidedatamod, |
|
27 | sidedata as sidedatamod, | |
31 | ) |
|
28 | ) | |
32 | from . import actions as upgrade_actions |
|
29 | from . import actions as upgrade_actions | |
33 |
|
30 | |||
34 |
|
31 | |||
35 | def get_sidedata_helpers(srcrepo, dstrepo): |
|
32 | def get_sidedata_helpers(srcrepo, dstrepo): | |
36 | use_w = srcrepo.ui.configbool(b'experimental', b'worker.repository-upgrade') |
|
33 | use_w = srcrepo.ui.configbool(b'experimental', b'worker.repository-upgrade') | |
37 | sequential = pycompat.iswindows or not use_w |
|
34 | sequential = pycompat.iswindows or not use_w | |
38 | if not sequential: |
|
35 | if not sequential: | |
39 | srcrepo.register_sidedata_computer( |
|
36 | srcrepo.register_sidedata_computer( | |
40 | revlogconst.KIND_CHANGELOG, |
|
37 | revlogconst.KIND_CHANGELOG, | |
41 | sidedatamod.SD_FILES, |
|
38 | sidedatamod.SD_FILES, | |
42 | (sidedatamod.SD_FILES,), |
|
39 | (sidedatamod.SD_FILES,), | |
43 | metadata._get_worker_sidedata_adder(srcrepo, dstrepo), |
|
40 | metadata._get_worker_sidedata_adder(srcrepo, dstrepo), | |
44 | flagutil.REVIDX_HASCOPIESINFO, |
|
41 | flagutil.REVIDX_HASCOPIESINFO, | |
45 | replace=True, |
|
42 | replace=True, | |
46 | ) |
|
43 | ) | |
47 | return sidedatamod.get_sidedata_helpers(srcrepo, dstrepo._wanted_sidedata) |
|
44 | return sidedatamod.get_sidedata_helpers(srcrepo, dstrepo._wanted_sidedata) | |
48 |
|
45 | |||
49 |
|
46 | |||
50 | def _revlog_from_store_entry(repo, entry): |
|
|||
51 | """Obtain a revlog from a repo store entry. |
|
|||
52 |
|
||||
53 | An instance of the appropriate class is returned. |
|
|||
54 | """ |
|
|||
55 | if entry.is_changelog: |
|
|||
56 | return changelog.changelog(repo.svfs) |
|
|||
57 | elif entry.is_manifestlog: |
|
|||
58 | mandir = entry.target_id.rstrip(b'/') |
|
|||
59 | return manifest.manifestrevlog( |
|
|||
60 | repo.nodeconstants, repo.svfs, tree=mandir |
|
|||
61 | ) |
|
|||
62 | else: |
|
|||
63 | return filelog.filelog(repo.svfs, entry.target_id) |
|
|||
64 |
|
||||
65 |
|
||||
66 | def _copyrevlog(tr, destrepo, oldrl, entry): |
|
47 | def _copyrevlog(tr, destrepo, oldrl, entry): | |
67 | """copy all relevant files for `oldrl` into `destrepo` store |
|
48 | """copy all relevant files for `oldrl` into `destrepo` store | |
68 |
|
49 | |||
69 | Files are copied "as is" without any transformation. The copy is performed |
|
50 | Files are copied "as is" without any transformation. The copy is performed | |
70 | without extra checks. Callers are responsible for making sure the copied |
|
51 | without extra checks. Callers are responsible for making sure the copied | |
71 | content is compatible with format of the destination repository. |
|
52 | content is compatible with format of the destination repository. | |
72 | """ |
|
53 | """ | |
73 | oldrl = getattr(oldrl, '_revlog', oldrl) |
|
54 | oldrl = getattr(oldrl, '_revlog', oldrl) | |
74 | newrl = _revlog_from_store_entry(destrepo, entry) |
|
55 | newrl = entry.get_revlog_instance(destrepo) | |
75 | newrl = getattr(newrl, '_revlog', newrl) |
|
56 | newrl = getattr(newrl, '_revlog', newrl) | |
76 |
|
57 | |||
77 | oldvfs = oldrl.opener |
|
58 | oldvfs = oldrl.opener | |
78 | newvfs = newrl.opener |
|
59 | newvfs = newrl.opener | |
79 | oldindex = oldvfs.join(oldrl._indexfile) |
|
60 | oldindex = oldvfs.join(oldrl._indexfile) | |
80 | newindex = newvfs.join(newrl._indexfile) |
|
61 | newindex = newvfs.join(newrl._indexfile) | |
81 | olddata = oldvfs.join(oldrl._datafile) |
|
62 | olddata = oldvfs.join(oldrl._datafile) | |
82 | newdata = newvfs.join(newrl._datafile) |
|
63 | newdata = newvfs.join(newrl._datafile) | |
83 |
|
64 | |||
84 | with newvfs(newrl._indexfile, b'w'): |
|
65 | with newvfs(newrl._indexfile, b'w'): | |
85 | pass # create all the directories |
|
66 | pass # create all the directories | |
86 |
|
67 | |||
87 | util.copyfile(oldindex, newindex) |
|
68 | util.copyfile(oldindex, newindex) | |
88 | copydata = oldrl.opener.exists(oldrl._datafile) |
|
69 | copydata = oldrl.opener.exists(oldrl._datafile) | |
89 | if copydata: |
|
70 | if copydata: | |
90 | util.copyfile(olddata, newdata) |
|
71 | util.copyfile(olddata, newdata) | |
91 |
|
72 | |||
92 | if entry.is_filelog: |
|
73 | if entry.is_filelog: | |
93 | unencodedname = entry.main_file_path() |
|
74 | unencodedname = entry.main_file_path() | |
94 | destrepo.svfs.fncache.add(unencodedname) |
|
75 | destrepo.svfs.fncache.add(unencodedname) | |
95 | if copydata: |
|
76 | if copydata: | |
96 | destrepo.svfs.fncache.add(unencodedname[:-2] + b'.d') |
|
77 | destrepo.svfs.fncache.add(unencodedname[:-2] + b'.d') | |
97 |
|
78 | |||
98 |
|
79 | |||
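The fncache bookkeeping at the end of _copyrevlog relies on the naming convention between a filelog index and its data file, for example:

    # the data file shares the index name, with .d substituted for .i
    unencodedname = b'data/some/file.txt.i'
    assert unencodedname[:-2] + b'.d' == b'data/some/file.txt.d'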
99 | UPGRADE_CHANGELOG = b"changelog" |
|
80 | UPGRADE_CHANGELOG = b"changelog" | |
100 | UPGRADE_MANIFEST = b"manifest" |
|
81 | UPGRADE_MANIFEST = b"manifest" | |
101 | UPGRADE_FILELOGS = b"all-filelogs" |
|
82 | UPGRADE_FILELOGS = b"all-filelogs" | |
102 |
|
83 | |||
103 | UPGRADE_ALL_REVLOGS = frozenset( |
|
84 | UPGRADE_ALL_REVLOGS = frozenset( | |
104 | [UPGRADE_CHANGELOG, UPGRADE_MANIFEST, UPGRADE_FILELOGS] |
|
85 | [UPGRADE_CHANGELOG, UPGRADE_MANIFEST, UPGRADE_FILELOGS] | |
105 | ) |
|
86 | ) | |
106 |
|
87 | |||
107 |
|
88 | |||
108 | def matchrevlog(revlogfilter, entry): |
|
89 | def matchrevlog(revlogfilter, entry): | |
109 | """check if a revlog is selected for cloning. |
|
90 | """check if a revlog is selected for cloning. | |
110 |
|
91 | |||
111 | In other words, are there any updates which need to be done on the revlog, |
|
92 | In other words, are there any updates which need to be done on the revlog, | |
112 | or can it be blindly copied? |
|
93 | or can it be blindly copied? | |
113 |
|
94 | |||
114 | The store entry is checked against the passed filter""" |
|
95 | The store entry is checked against the passed filter""" | |
115 | if entry.is_changelog: |
|
96 | if entry.is_changelog: | |
116 | return UPGRADE_CHANGELOG in revlogfilter |
|
97 | return UPGRADE_CHANGELOG in revlogfilter | |
117 | elif entry.is_manifestlog: |
|
98 | elif entry.is_manifestlog: | |
118 | return UPGRADE_MANIFEST in revlogfilter |
|
99 | return UPGRADE_MANIFEST in revlogfilter | |
119 | assert entry.is_filelog |
|
100 | assert entry.is_filelog | |
120 | return UPGRADE_FILELOGS in revlogfilter |
|
101 | return UPGRADE_FILELOGS in revlogfilter | |
121 |
|
102 | |||
122 |
|
103 | |||
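As a usage sketch (evaluated alongside the definitions above, with a hypothetical stand-in entry for illustration), matchrevlog simply maps the entry kind onto the filter set:

    class _FakeEntry:
        # hypothetical object exposing the same flags as a store entry
        def __init__(self, kind):
            self.is_changelog = kind == 'changelog'
            self.is_manifestlog = kind == 'manifest'
            self.is_filelog = kind == 'filelog'

    # with the full filter, every revlog is selected for cloning
    assert matchrevlog(UPGRADE_ALL_REVLOGS, _FakeEntry('filelog'))
    # with a narrower filter, filelogs would be blindly copied instead
    assert not matchrevlog(frozenset([UPGRADE_CHANGELOG]), _FakeEntry('filelog'))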
123 | def _perform_clone( |
|
104 | def _perform_clone( | |
124 | ui, |
|
105 | ui, | |
125 | dstrepo, |
|
106 | dstrepo, | |
126 | tr, |
|
107 | tr, | |
127 | old_revlog, |
|
108 | old_revlog, | |
128 | entry, |
|
109 | entry, | |
129 | upgrade_op, |
|
110 | upgrade_op, | |
130 | sidedata_helpers, |
|
111 | sidedata_helpers, | |
131 | oncopiedrevision, |
|
112 | oncopiedrevision, | |
132 | ): |
|
113 | ): | |
133 | """returns the new revlog object created""" |
|
114 | """returns the new revlog object created""" | |
134 | newrl = None |
|
115 | newrl = None | |
135 | revlog_path = entry.main_file_path() |
|
116 | revlog_path = entry.main_file_path() | |
136 | if matchrevlog(upgrade_op.revlogs_to_process, entry): |
|
117 | if matchrevlog(upgrade_op.revlogs_to_process, entry): | |
137 | ui.note( |
|
118 | ui.note( | |
138 | _(b'cloning %d revisions from %s\n') |
|
119 | _(b'cloning %d revisions from %s\n') | |
139 | % (len(old_revlog), revlog_path) |
|
120 | % (len(old_revlog), revlog_path) | |
140 | ) |
|
121 | ) | |
141 | newrl = _revlog_from_store_entry(dstrepo, entry) |
|
122 | newrl = entry.get_revlog_instance(dstrepo) | |
142 | old_revlog.clone( |
|
123 | old_revlog.clone( | |
143 | tr, |
|
124 | tr, | |
144 | newrl, |
|
125 | newrl, | |
145 | addrevisioncb=oncopiedrevision, |
|
126 | addrevisioncb=oncopiedrevision, | |
146 | deltareuse=upgrade_op.delta_reuse_mode, |
|
127 | deltareuse=upgrade_op.delta_reuse_mode, | |
147 | forcedeltabothparents=upgrade_op.force_re_delta_both_parents, |
|
128 | forcedeltabothparents=upgrade_op.force_re_delta_both_parents, | |
148 | sidedata_helpers=sidedata_helpers, |
|
129 | sidedata_helpers=sidedata_helpers, | |
149 | ) |
|
130 | ) | |
150 | else: |
|
131 | else: | |
151 | msg = _(b'blindly copying %s containing %i revisions\n') |
|
132 | msg = _(b'blindly copying %s containing %i revisions\n') | |
152 | ui.note(msg % (revlog_path, len(old_revlog))) |
|
133 | ui.note(msg % (revlog_path, len(old_revlog))) | |
153 | _copyrevlog(tr, dstrepo, old_revlog, entry) |
|
134 | _copyrevlog(tr, dstrepo, old_revlog, entry) | |
154 |
|
135 | |||
155 | newrl = _revlog_from_store_entry(dstrepo, entry) |
|
136 | newrl = entry.get_revlog_instance(dstrepo) | |
156 | return newrl |
|
137 | return newrl | |
157 |
|
138 | |||
158 |
|
139 | |||
159 | def _clonerevlogs( |
|
140 | def _clonerevlogs( | |
160 | ui, |
|
141 | ui, | |
161 | srcrepo, |
|
142 | srcrepo, | |
162 | dstrepo, |
|
143 | dstrepo, | |
163 | tr, |
|
144 | tr, | |
164 | upgrade_op, |
|
145 | upgrade_op, | |
165 | ): |
|
146 | ): | |
166 | """Copy revlogs between 2 repos.""" |
|
147 | """Copy revlogs between 2 repos.""" | |
167 | revcount = 0 |
|
148 | revcount = 0 | |
168 | srcsize = 0 |
|
149 | srcsize = 0 | |
169 | srcrawsize = 0 |
|
150 | srcrawsize = 0 | |
170 | dstsize = 0 |
|
151 | dstsize = 0 | |
171 | fcount = 0 |
|
152 | fcount = 0 | |
172 | frevcount = 0 |
|
153 | frevcount = 0 | |
173 | fsrcsize = 0 |
|
154 | fsrcsize = 0 | |
174 | frawsize = 0 |
|
155 | frawsize = 0 | |
175 | fdstsize = 0 |
|
156 | fdstsize = 0 | |
176 | mcount = 0 |
|
157 | mcount = 0 | |
177 | mrevcount = 0 |
|
158 | mrevcount = 0 | |
178 | msrcsize = 0 |
|
159 | msrcsize = 0 | |
179 | mrawsize = 0 |
|
160 | mrawsize = 0 | |
180 | mdstsize = 0 |
|
161 | mdstsize = 0 | |
181 | crevcount = 0 |
|
162 | crevcount = 0 | |
182 | csrcsize = 0 |
|
163 | csrcsize = 0 | |
183 | crawsize = 0 |
|
164 | crawsize = 0 | |
184 | cdstsize = 0 |
|
165 | cdstsize = 0 | |
185 |
|
166 | |||
186 | alldatafiles = list(srcrepo.store.walk()) |
|
167 | alldatafiles = list(srcrepo.store.walk()) | |
187 | # mapping of data files which need to be cloned |
|
168 | # mapping of data files which need to be cloned | |
188 | # key is unencoded filename |
|
169 | # key is unencoded filename | |
189 | # value is revlog_object_from_srcrepo |
|
170 | # value is revlog_object_from_srcrepo | |
190 | manifests = {} |
|
171 | manifests = {} | |
191 | changelogs = {} |
|
172 | changelogs = {} | |
192 | filelogs = {} |
|
173 | filelogs = {} | |
193 |
|
174 | |||
194 | # Perform a pass to collect metadata. This validates we can open all |
|
175 | # Perform a pass to collect metadata. This validates we can open all | |
195 | # source files and allows a unified progress bar to be displayed. |
|
176 | # source files and allows a unified progress bar to be displayed. | |
196 | for entry in alldatafiles: |
|
177 | for entry in alldatafiles: | |
197 | if not entry.is_revlog: |
|
178 | if not entry.is_revlog: | |
198 | continue |
|
179 | continue | |
199 |
|
180 | |||
200 | rl = _revlog_from_store_entry(srcrepo, entry) |
|
181 | rl = entry.get_revlog_instance(srcrepo) | |
201 |
|
182 | |||
202 | info = rl.storageinfo( |
|
183 | info = rl.storageinfo( | |
203 | exclusivefiles=True, |
|
184 | exclusivefiles=True, | |
204 | revisionscount=True, |
|
185 | revisionscount=True, | |
205 | trackedsize=True, |
|
186 | trackedsize=True, | |
206 | storedsize=True, |
|
187 | storedsize=True, | |
207 | ) |
|
188 | ) | |
208 |
|
189 | |||
209 | revcount += info[b'revisionscount'] or 0 |
|
190 | revcount += info[b'revisionscount'] or 0 | |
210 | datasize = info[b'storedsize'] or 0 |
|
191 | datasize = info[b'storedsize'] or 0 | |
211 | rawsize = info[b'trackedsize'] or 0 |
|
192 | rawsize = info[b'trackedsize'] or 0 | |
212 |
|
193 | |||
213 | srcsize += datasize |
|
194 | srcsize += datasize | |
214 | srcrawsize += rawsize |
|
195 | srcrawsize += rawsize | |
215 |
|
196 | |||
216 | # This is for the separate progress bars. |
|
197 | # This is for the separate progress bars. | |
217 | if entry.is_changelog: |
|
198 | if entry.is_changelog: | |
218 | changelogs[entry.target_id] = entry |
|
199 | changelogs[entry.target_id] = entry | |
219 | crevcount += len(rl) |
|
200 | crevcount += len(rl) | |
220 | csrcsize += datasize |
|
201 | csrcsize += datasize | |
221 | crawsize += rawsize |
|
202 | crawsize += rawsize | |
222 | elif entry.is_manifestlog: |
|
203 | elif entry.is_manifestlog: | |
223 | manifests[entry.target_id] = entry |
|
204 | manifests[entry.target_id] = entry | |
224 | mcount += 1 |
|
205 | mcount += 1 | |
225 | mrevcount += len(rl) |
|
206 | mrevcount += len(rl) | |
226 | msrcsize += datasize |
|
207 | msrcsize += datasize | |
227 | mrawsize += rawsize |
|
208 | mrawsize += rawsize | |
228 | elif entry.is_filelog: |
|
209 | elif entry.is_filelog: | |
229 | filelogs[entry.target_id] = entry |
|
210 | filelogs[entry.target_id] = entry | |
230 | fcount += 1 |
|
211 | fcount += 1 | |
231 | frevcount += len(rl) |
|
212 | frevcount += len(rl) | |
232 | fsrcsize += datasize |
|
213 | fsrcsize += datasize | |
233 | frawsize += rawsize |
|
214 | frawsize += rawsize | |
234 | else: |
|
215 | else: | |
235 | raise error.ProgrammingError(b'unknown revlog type') |
|
216 | raise error.ProgrammingError(b'unknown revlog type') | |
236 |
|
217 | |||
237 | if not revcount: |
|
218 | if not revcount: | |
238 | return |
|
219 | return | |
239 |
|
220 | |||
240 | ui.status( |
|
221 | ui.status( | |
241 | _( |
|
222 | _( | |
242 | b'migrating %d total revisions (%d in filelogs, %d in manifests, ' |
|
223 | b'migrating %d total revisions (%d in filelogs, %d in manifests, ' | |
243 | b'%d in changelog)\n' |
|
224 | b'%d in changelog)\n' | |
244 | ) |
|
225 | ) | |
245 | % (revcount, frevcount, mrevcount, crevcount) |
|
226 | % (revcount, frevcount, mrevcount, crevcount) | |
246 | ) |
|
227 | ) | |
247 | ui.status( |
|
228 | ui.status( | |
248 | _(b'migrating %s in store; %s tracked data\n') |
|
229 | _(b'migrating %s in store; %s tracked data\n') | |
249 | % ((util.bytecount(srcsize), util.bytecount(srcrawsize))) |
|
230 | % ((util.bytecount(srcsize), util.bytecount(srcrawsize))) | |
250 | ) |
|
231 | ) | |
251 |
|
232 | |||
252 | # Used to keep track of progress. |
|
233 | # Used to keep track of progress. | |
253 | progress = None |
|
234 | progress = None | |
254 |
|
235 | |||
255 | def oncopiedrevision(rl, rev, node): |
|
236 | def oncopiedrevision(rl, rev, node): | |
256 | progress.increment() |
|
237 | progress.increment() | |
257 |
|
238 | |||
258 | sidedata_helpers = get_sidedata_helpers(srcrepo, dstrepo) |
|
239 | sidedata_helpers = get_sidedata_helpers(srcrepo, dstrepo) | |
259 |
|
240 | |||
260 | # Migrating filelogs |
|
241 | # Migrating filelogs | |
261 | ui.status( |
|
242 | ui.status( | |
262 | _( |
|
243 | _( | |
263 | b'migrating %d filelogs containing %d revisions ' |
|
244 | b'migrating %d filelogs containing %d revisions ' | |
264 | b'(%s in store; %s tracked data)\n' |
|
245 | b'(%s in store; %s tracked data)\n' | |
265 | ) |
|
246 | ) | |
266 | % ( |
|
247 | % ( | |
267 | fcount, |
|
248 | fcount, | |
268 | frevcount, |
|
249 | frevcount, | |
269 | util.bytecount(fsrcsize), |
|
250 | util.bytecount(fsrcsize), | |
270 | util.bytecount(frawsize), |
|
251 | util.bytecount(frawsize), | |
271 | ) |
|
252 | ) | |
272 | ) |
|
253 | ) | |
273 | progress = srcrepo.ui.makeprogress(_(b'file revisions'), total=frevcount) |
|
254 | progress = srcrepo.ui.makeprogress(_(b'file revisions'), total=frevcount) | |
274 | for target_id, entry in sorted(filelogs.items()): |
|
255 | for target_id, entry in sorted(filelogs.items()): | |
275 | oldrl = _revlog_from_store_entry(srcrepo, entry) |
|
256 | oldrl = entry.get_revlog_instance(srcrepo) | |
276 |
|
257 | |||
277 | newrl = _perform_clone( |
|
258 | newrl = _perform_clone( | |
278 | ui, |
|
259 | ui, | |
279 | dstrepo, |
|
260 | dstrepo, | |
280 | tr, |
|
261 | tr, | |
281 | oldrl, |
|
262 | oldrl, | |
282 | entry, |
|
263 | entry, | |
283 | upgrade_op, |
|
264 | upgrade_op, | |
284 | sidedata_helpers, |
|
265 | sidedata_helpers, | |
285 | oncopiedrevision, |
|
266 | oncopiedrevision, | |
286 | ) |
|
267 | ) | |
287 | info = newrl.storageinfo(storedsize=True) |
|
268 | info = newrl.storageinfo(storedsize=True) | |
288 | fdstsize += info[b'storedsize'] or 0 |
|
269 | fdstsize += info[b'storedsize'] or 0 | |
289 | ui.status( |
|
270 | ui.status( | |
290 | _( |
|
271 | _( | |
291 | b'finished migrating %d filelog revisions across %d ' |
|
272 | b'finished migrating %d filelog revisions across %d ' | |
292 | b'filelogs; change in size: %s\n' |
|
273 | b'filelogs; change in size: %s\n' | |
293 | ) |
|
274 | ) | |
294 | % (frevcount, fcount, util.bytecount(fdstsize - fsrcsize)) |
|
275 | % (frevcount, fcount, util.bytecount(fdstsize - fsrcsize)) | |
295 | ) |
|
276 | ) | |
296 |
|
277 | |||
297 | # Migrating manifests |
|
278 | # Migrating manifests | |
298 | ui.status( |
|
279 | ui.status( | |
299 | _( |
|
280 | _( | |
300 | b'migrating %d manifests containing %d revisions ' |
|
281 | b'migrating %d manifests containing %d revisions ' | |
301 | b'(%s in store; %s tracked data)\n' |
|
282 | b'(%s in store; %s tracked data)\n' | |
302 | ) |
|
283 | ) | |
303 | % ( |
|
284 | % ( | |
304 | mcount, |
|
285 | mcount, | |
305 | mrevcount, |
|
286 | mrevcount, | |
306 | util.bytecount(msrcsize), |
|
287 | util.bytecount(msrcsize), | |
307 | util.bytecount(mrawsize), |
|
288 | util.bytecount(mrawsize), | |
308 | ) |
|
289 | ) | |
309 | ) |
|
290 | ) | |
310 | if progress: |
|
291 | if progress: | |
311 | progress.complete() |
|
292 | progress.complete() | |
312 | progress = srcrepo.ui.makeprogress( |
|
293 | progress = srcrepo.ui.makeprogress( | |
313 | _(b'manifest revisions'), total=mrevcount |
|
294 | _(b'manifest revisions'), total=mrevcount | |
314 | ) |
|
295 | ) | |
315 | for target_id, entry in sorted(manifests.items()): |
|
296 | for target_id, entry in sorted(manifests.items()): | |
316 | oldrl = _revlog_from_store_entry(srcrepo, entry) |
|
297 | oldrl = entry.get_revlog_instance(srcrepo) | |
317 | newrl = _perform_clone( |
|
298 | newrl = _perform_clone( | |
318 | ui, |
|
299 | ui, | |
319 | dstrepo, |
|
300 | dstrepo, | |
320 | tr, |
|
301 | tr, | |
321 | oldrl, |
|
302 | oldrl, | |
322 | entry, |
|
303 | entry, | |
323 | upgrade_op, |
|
304 | upgrade_op, | |
324 | sidedata_helpers, |
|
305 | sidedata_helpers, | |
325 | oncopiedrevision, |
|
306 | oncopiedrevision, | |
326 | ) |
|
307 | ) | |
327 | info = newrl.storageinfo(storedsize=True) |
|
308 | info = newrl.storageinfo(storedsize=True) | |
328 | mdstsize += info[b'storedsize'] or 0 |
|
309 | mdstsize += info[b'storedsize'] or 0 | |
329 | ui.status( |
|
310 | ui.status( | |
330 | _( |
|
311 | _( | |
331 | b'finished migrating %d manifest revisions across %d ' |
|
312 | b'finished migrating %d manifest revisions across %d ' | |
332 | b'manifests; change in size: %s\n' |
|
313 | b'manifests; change in size: %s\n' | |
333 | ) |
|
314 | ) | |
334 | % (mrevcount, mcount, util.bytecount(mdstsize - msrcsize)) |
|
315 | % (mrevcount, mcount, util.bytecount(mdstsize - msrcsize)) | |
335 | ) |
|
316 | ) | |
336 |
|
317 | |||
337 | # Migrating changelog |
|
318 | # Migrating changelog | |
338 | ui.status( |
|
319 | ui.status( | |
339 | _( |
|
320 | _( | |
340 | b'migrating changelog containing %d revisions ' |
|
321 | b'migrating changelog containing %d revisions ' | |
341 | b'(%s in store; %s tracked data)\n' |
|
322 | b'(%s in store; %s tracked data)\n' | |
342 | ) |
|
323 | ) | |
343 | % ( |
|
324 | % ( | |
344 | crevcount, |
|
325 | crevcount, | |
345 | util.bytecount(csrcsize), |
|
326 | util.bytecount(csrcsize), | |
346 | util.bytecount(crawsize), |
|
327 | util.bytecount(crawsize), | |
347 | ) |
|
328 | ) | |
348 | ) |
|
329 | ) | |
349 | if progress: |
|
330 | if progress: | |
350 | progress.complete() |
|
331 | progress.complete() | |
351 | progress = srcrepo.ui.makeprogress( |
|
332 | progress = srcrepo.ui.makeprogress( | |
352 | _(b'changelog revisions'), total=crevcount |
|
333 | _(b'changelog revisions'), total=crevcount | |
353 | ) |
|
334 | ) | |
354 | for target_id, entry in sorted(changelogs.items()): |
|
335 | for target_id, entry in sorted(changelogs.items()): | |
355 | oldrl = _revlog_from_store_entry(srcrepo, entry) |
|
336 | oldrl = entry.get_revlog_instance(srcrepo) | |
356 | newrl = _perform_clone( |
|
337 | newrl = _perform_clone( | |
357 | ui, |
|
338 | ui, | |
358 | dstrepo, |
|
339 | dstrepo, | |
359 | tr, |
|
340 | tr, | |
360 | oldrl, |
|
341 | oldrl, | |
361 | entry, |
|
342 | entry, | |
362 | upgrade_op, |
|
343 | upgrade_op, | |
363 | sidedata_helpers, |
|
344 | sidedata_helpers, | |
364 | oncopiedrevision, |
|
345 | oncopiedrevision, | |
365 | ) |
|
346 | ) | |
366 | info = newrl.storageinfo(storedsize=True) |
|
347 | info = newrl.storageinfo(storedsize=True) | |
367 | cdstsize += info[b'storedsize'] or 0 |
|
348 | cdstsize += info[b'storedsize'] or 0 | |
368 | progress.complete() |
|
349 | progress.complete() | |
369 | ui.status( |
|
350 | ui.status( | |
370 | _( |
|
351 | _( | |
371 | b'finished migrating %d changelog revisions; change in size: ' |
|
352 | b'finished migrating %d changelog revisions; change in size: ' | |
372 | b'%s\n' |
|
353 | b'%s\n' | |
373 | ) |
|
354 | ) | |
374 | % (crevcount, util.bytecount(cdstsize - csrcsize)) |
|
355 | % (crevcount, util.bytecount(cdstsize - csrcsize)) | |
375 | ) |
|
356 | ) | |
376 |
|
357 | |||
377 | dstsize = fdstsize + mdstsize + cdstsize |
|
358 | dstsize = fdstsize + mdstsize + cdstsize | |
378 | ui.status( |
|
359 | ui.status( | |
379 | _( |
|
360 | _( | |
380 | b'finished migrating %d total revisions; total change in store ' |
|
361 | b'finished migrating %d total revisions; total change in store ' | |
381 | b'size: %s\n' |
|
362 | b'size: %s\n' | |
382 | ) |
|
363 | ) | |
383 | % (revcount, util.bytecount(dstsize - srcsize)) |
|
364 | % (revcount, util.bytecount(dstsize - srcsize)) | |
384 | ) |
|
365 | ) | |
385 |
|
366 | |||
386 |
|
367 | |||
387 | def _files_to_copy_post_revlog_clone(srcrepo): |
|
368 | def _files_to_copy_post_revlog_clone(srcrepo): | |
388 | """yields files which should be copied to destination after revlogs |
|
369 | """yields files which should be copied to destination after revlogs | |
389 | are cloned""" |
|
370 | are cloned""" | |
390 | for path, kind, st in sorted(srcrepo.store.vfs.readdir(b'', stat=True)): |
|
371 | for path, kind, st in sorted(srcrepo.store.vfs.readdir(b'', stat=True)): | |
391 | # don't copy revlogs as they are already cloned |
|
372 | # don't copy revlogs as they are already cloned | |
392 | if store.revlog_type(path) is not None: |
|
373 | if store.revlog_type(path) is not None: | |
393 | continue |
|
374 | continue | |
394 | # Skip transaction related files. |
|
375 | # Skip transaction related files. | |
395 | if path.startswith(b'undo'): |
|
376 | if path.startswith(b'undo'): | |
396 | continue |
|
377 | continue | |
397 | # Only copy regular files. |
|
378 | # Only copy regular files. | |
398 | if kind != stat.S_IFREG: |
|
379 | if kind != stat.S_IFREG: | |
399 | continue |
|
380 | continue | |
400 | # Skip other skipped files. |
|
381 | # Skip other skipped files. | |
401 | if path in (b'lock', b'fncache'): |
|
382 | if path in (b'lock', b'fncache'): | |
402 | continue |
|
383 | continue | |
403 | # TODO: should we skip cache too? |
|
384 | # TODO: should we skip cache too? | |
404 |
|
385 | |||
405 | yield path |
|
386 | yield path | |
406 |
|
387 | |||
407 |
|
388 | |||
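The filtering loop above is equivalent to the following predicate, with is_revlog standing in for store.revlog_type(path) being non-None:

    import stat

    def _should_copy_post_clone(path, kind, is_revlog):
        # revlogs were already cloned revision by revision
        if is_revlog(path):
            return False
        # transaction leftovers and control files are not copied either
        if path.startswith(b'undo') or path in (b'lock', b'fncache'):
            return False
        # only regular files are copied
        return kind == stat.S_IFREG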
408 | def _replacestores(currentrepo, upgradedrepo, backupvfs, upgrade_op): |
|
389 | def _replacestores(currentrepo, upgradedrepo, backupvfs, upgrade_op): | |
409 | """Replace the stores after current repository is upgraded |
|
390 | """Replace the stores after current repository is upgraded | |
410 |
|
391 | |||
411 | Creates a backup of current repository store at backup path |
|
392 | Creates a backup of current repository store at backup path | |
412 | Replaces upgraded store files in current repo from upgraded one |
|
393 | Replaces upgraded store files in current repo from upgraded one | |
413 |
|
394 | |||
414 | Arguments: |
|
395 | Arguments: | |
415 | currentrepo: repo object of current repository |
|
396 | currentrepo: repo object of current repository | |
416 | upgradedrepo: repo object of the upgraded data |
|
397 | upgradedrepo: repo object of the upgraded data | |
417 | backupvfs: vfs object for the backup path |
|
398 | backupvfs: vfs object for the backup path | |
418 | upgrade_op: upgrade operation object |
|
399 | upgrade_op: upgrade operation object | |
419 | to be used to decide what is upgraded |
|
400 | to be used to decide what is upgraded | |
420 | """ |
|
401 | """ | |
421 | # TODO: don't blindly rename everything in store |
|
402 | # TODO: don't blindly rename everything in store | |
422 | # There can be upgrades where store is not touched at all |
|
403 | # There can be upgrades where store is not touched at all | |
423 | if upgrade_op.backup_store: |
|
404 | if upgrade_op.backup_store: | |
424 | util.rename(currentrepo.spath, backupvfs.join(b'store')) |
|
405 | util.rename(currentrepo.spath, backupvfs.join(b'store')) | |
425 | else: |
|
406 | else: | |
426 | currentrepo.vfs.rmtree(b'store', forcibly=True) |
|
407 | currentrepo.vfs.rmtree(b'store', forcibly=True) | |
427 | util.rename(upgradedrepo.spath, currentrepo.spath) |
|
408 | util.rename(upgradedrepo.spath, currentrepo.spath) | |
428 |
|
409 | |||
429 |
|
410 | |||
430 | def finishdatamigration(ui, srcrepo, dstrepo, requirements): |
|
411 | def finishdatamigration(ui, srcrepo, dstrepo, requirements): | |
431 | """Hook point for extensions to perform additional actions during upgrade. |
|
412 | """Hook point for extensions to perform additional actions during upgrade. | |
432 |
|
413 | |||
433 | This function is called after revlogs and store files have been copied but |
|
414 | This function is called after revlogs and store files have been copied but | |
434 | before the new store is swapped into the original location. |
|
415 | before the new store is swapped into the original location. | |
435 | """ |
|
416 | """ | |
436 |
|
417 | |||
437 |
|
418 | |||
438 | def upgrade(ui, srcrepo, dstrepo, upgrade_op): |
|
419 | def upgrade(ui, srcrepo, dstrepo, upgrade_op): | |
439 | """Do the low-level work of upgrading a repository. |
|
420 | """Do the low-level work of upgrading a repository. | |
440 |
|
421 | |||
441 | The upgrade is effectively performed as a copy between a source |
|
422 | The upgrade is effectively performed as a copy between a source | |
442 | repository and a temporary destination repository. |
|
423 | repository and a temporary destination repository. | |
443 |
|
424 | |||
444 | The source repository is unmodified for as long as possible so the |
|
425 | The source repository is unmodified for as long as possible so the | |
445 | upgrade can abort at any time without causing loss of service for |
|
426 | upgrade can abort at any time without causing loss of service for | |
446 | readers and without corrupting the source repository. |
|
427 | readers and without corrupting the source repository. | |
447 | """ |
|
428 | """ | |
448 | assert srcrepo.currentwlock() |
|
429 | assert srcrepo.currentwlock() | |
449 | assert dstrepo.currentwlock() |
|
430 | assert dstrepo.currentwlock() | |
450 | backuppath = None |
|
431 | backuppath = None | |
451 | backupvfs = None |
|
432 | backupvfs = None | |
452 |
|
433 | |||
453 | ui.status( |
|
434 | ui.status( | |
454 | _( |
|
435 | _( | |
455 | b'(it is safe to interrupt this process any time before ' |
|
436 | b'(it is safe to interrupt this process any time before ' | |
456 | b'data migration completes)\n' |
|
437 | b'data migration completes)\n' | |
457 | ) |
|
438 | ) | |
458 | ) |
|
439 | ) | |
459 |
|
440 | |||
460 | if upgrade_actions.dirstatev2 in upgrade_op.upgrade_actions: |
|
441 | if upgrade_actions.dirstatev2 in upgrade_op.upgrade_actions: | |
461 | ui.status(_(b'upgrading to dirstate-v2 from v1\n')) |
|
442 | ui.status(_(b'upgrading to dirstate-v2 from v1\n')) | |
462 | upgrade_dirstate(ui, srcrepo, upgrade_op, b'v1', b'v2') |
|
443 | upgrade_dirstate(ui, srcrepo, upgrade_op, b'v1', b'v2') | |
463 | upgrade_op.upgrade_actions.remove(upgrade_actions.dirstatev2) |
|
444 | upgrade_op.upgrade_actions.remove(upgrade_actions.dirstatev2) | |
464 |
|
445 | |||
465 | if upgrade_actions.dirstatev2 in upgrade_op.removed_actions: |
|
446 | if upgrade_actions.dirstatev2 in upgrade_op.removed_actions: | |
466 | ui.status(_(b'downgrading from dirstate-v2 to v1\n')) |
|
447 | ui.status(_(b'downgrading from dirstate-v2 to v1\n')) | |
467 | upgrade_dirstate(ui, srcrepo, upgrade_op, b'v2', b'v1') |
|
448 | upgrade_dirstate(ui, srcrepo, upgrade_op, b'v2', b'v1') | |
468 | upgrade_op.removed_actions.remove(upgrade_actions.dirstatev2) |
|
449 | upgrade_op.removed_actions.remove(upgrade_actions.dirstatev2) | |
469 |
|
450 | |||
470 | if upgrade_actions.dirstatetrackedkey in upgrade_op.upgrade_actions: |
|
451 | if upgrade_actions.dirstatetrackedkey in upgrade_op.upgrade_actions: | |
471 | ui.status(_(b'create dirstate-tracked-hint file\n')) |
|
452 | ui.status(_(b'create dirstate-tracked-hint file\n')) | |
472 | upgrade_tracked_hint(ui, srcrepo, upgrade_op, add=True) |
|
453 | upgrade_tracked_hint(ui, srcrepo, upgrade_op, add=True) | |
473 | upgrade_op.upgrade_actions.remove(upgrade_actions.dirstatetrackedkey) |
|
454 | upgrade_op.upgrade_actions.remove(upgrade_actions.dirstatetrackedkey) | |
474 | elif upgrade_actions.dirstatetrackedkey in upgrade_op.removed_actions: |
|
455 | elif upgrade_actions.dirstatetrackedkey in upgrade_op.removed_actions: | |
475 | ui.status(_(b'remove dirstate-tracked-hint file\n')) |
|
456 | ui.status(_(b'remove dirstate-tracked-hint file\n')) | |
476 | upgrade_tracked_hint(ui, srcrepo, upgrade_op, add=False) |
|
457 | upgrade_tracked_hint(ui, srcrepo, upgrade_op, add=False) | |
477 | upgrade_op.removed_actions.remove(upgrade_actions.dirstatetrackedkey) |
|
458 | upgrade_op.removed_actions.remove(upgrade_actions.dirstatetrackedkey) | |
478 |
|
459 | |||
479 | if not (upgrade_op.upgrade_actions or upgrade_op.removed_actions): |
|
460 | if not (upgrade_op.upgrade_actions or upgrade_op.removed_actions): | |
480 | return |
|
461 | return | |
481 |
|
462 | |||
482 | if upgrade_op.requirements_only: |
|
463 | if upgrade_op.requirements_only: | |
483 | ui.status(_(b'upgrading repository requirements\n')) |
|
464 | ui.status(_(b'upgrading repository requirements\n')) | |
484 | scmutil.writereporequirements(srcrepo, upgrade_op.new_requirements) |
|
465 | scmutil.writereporequirements(srcrepo, upgrade_op.new_requirements) | |
485 | # if there is only one action and that is persistent nodemap upgrade |
|
466 | # if there is only one action and that is persistent nodemap upgrade | |
486 | # directly write the nodemap file and update requirements instead of going |
|
467 | # directly write the nodemap file and update requirements instead of going | |
487 | # through the whole cloning process |
|
468 | # through the whole cloning process | |
488 | elif ( |
|
469 | elif ( | |
489 | len(upgrade_op.upgrade_actions) == 1 |
|
470 | len(upgrade_op.upgrade_actions) == 1 | |
490 | and b'persistent-nodemap' in upgrade_op.upgrade_actions_names |
|
471 | and b'persistent-nodemap' in upgrade_op.upgrade_actions_names | |
491 | and not upgrade_op.removed_actions |
|
472 | and not upgrade_op.removed_actions | |
492 | ): |
|
473 | ): | |
493 | ui.status( |
|
474 | ui.status( | |
494 | _(b'upgrading repository to use persistent nodemap feature\n') |
|
475 | _(b'upgrading repository to use persistent nodemap feature\n') | |
495 | ) |
|
476 | ) | |
496 | with srcrepo.transaction(b'upgrade') as tr: |
|
477 | with srcrepo.transaction(b'upgrade') as tr: | |
497 | unfi = srcrepo.unfiltered() |
|
478 | unfi = srcrepo.unfiltered() | |
498 | cl = unfi.changelog |
|
479 | cl = unfi.changelog | |
499 | nodemap.persist_nodemap(tr, cl, force=True) |
|
480 | nodemap.persist_nodemap(tr, cl, force=True) | |
500 | # we want to directly operate on the underlying revlog to force |
|
481 | # we want to directly operate on the underlying revlog to force | |
501 | # create a nodemap file. This is fine since this is upgrade code |
|
482 | # create a nodemap file. This is fine since this is upgrade code | |
502 | # and it heavily relies on repository being revlog based |
|
483 | # and it heavily relies on repository being revlog based | |
503 | # hence accessing private attributes can be justified |
|
484 | # hence accessing private attributes can be justified | |
504 | nodemap.persist_nodemap( |
|
485 | nodemap.persist_nodemap( | |
505 | tr, unfi.manifestlog._rootstore._revlog, force=True |
|
486 | tr, unfi.manifestlog._rootstore._revlog, force=True | |
506 | ) |
|
487 | ) | |
507 | scmutil.writereporequirements(srcrepo, upgrade_op.new_requirements) |
|
488 | scmutil.writereporequirements(srcrepo, upgrade_op.new_requirements) | |
508 | elif ( |
|
489 | elif ( | |
509 | len(upgrade_op.removed_actions) == 1 |
|
490 | len(upgrade_op.removed_actions) == 1 | |
510 | and [ |
|
491 | and [ | |
511 | x |
|
492 | x | |
512 | for x in upgrade_op.removed_actions |
|
493 | for x in upgrade_op.removed_actions | |
513 | if x.name == b'persistent-nodemap' |
|
494 | if x.name == b'persistent-nodemap' | |
514 | ] |
|
495 | ] | |
515 | and not upgrade_op.upgrade_actions |
|
496 | and not upgrade_op.upgrade_actions | |
516 | ): |
|
497 | ): | |
517 | ui.status( |
|
498 | ui.status( | |
518 | _(b'downgrading repository to not use persistent nodemap feature\n') |
|
499 | _(b'downgrading repository to not use persistent nodemap feature\n') | |
519 | ) |
|
500 | ) | |
520 | with srcrepo.transaction(b'upgrade') as tr: |
|
501 | with srcrepo.transaction(b'upgrade') as tr: | |
521 | unfi = srcrepo.unfiltered() |
|
502 | unfi = srcrepo.unfiltered() | |
522 | cl = unfi.changelog |
|
503 | cl = unfi.changelog | |
523 | nodemap.delete_nodemap(tr, srcrepo, cl) |
|
504 | nodemap.delete_nodemap(tr, srcrepo, cl) | |
524 | # check comment 20 lines above for accessing private attributes |
|
505 | # check comment 20 lines above for accessing private attributes | |
525 | nodemap.delete_nodemap( |
|
506 | nodemap.delete_nodemap( | |
526 | tr, srcrepo, unfi.manifestlog._rootstore._revlog |
|
507 | tr, srcrepo, unfi.manifestlog._rootstore._revlog | |
527 | ) |
|
508 | ) | |
528 | scmutil.writereporequirements(srcrepo, upgrade_op.new_requirements) |
|
509 | scmutil.writereporequirements(srcrepo, upgrade_op.new_requirements) | |
529 | else: |
|
510 | else: | |
530 | with dstrepo.transaction(b'upgrade') as tr: |
|
511 | with dstrepo.transaction(b'upgrade') as tr: | |
531 | _clonerevlogs( |
|
512 | _clonerevlogs( | |
532 | ui, |
|
513 | ui, | |
533 | srcrepo, |
|
514 | srcrepo, | |
534 | dstrepo, |
|
515 | dstrepo, | |
535 | tr, |
|
516 | tr, | |
536 | upgrade_op, |
|
517 | upgrade_op, | |
537 | ) |
|
518 | ) | |
538 |
|
519 | |||
539 | # Now copy other files in the store directory. |
|
520 | # Now copy other files in the store directory. | |
540 | for p in _files_to_copy_post_revlog_clone(srcrepo): |
|
521 | for p in _files_to_copy_post_revlog_clone(srcrepo): | |
541 | srcrepo.ui.status(_(b'copying %s\n') % p) |
|
522 | srcrepo.ui.status(_(b'copying %s\n') % p) | |
542 | src = srcrepo.store.rawvfs.join(p) |
|
523 | src = srcrepo.store.rawvfs.join(p) | |
543 | dst = dstrepo.store.rawvfs.join(p) |
|
524 | dst = dstrepo.store.rawvfs.join(p) | |
544 | util.copyfile(src, dst, copystat=True) |
|
525 | util.copyfile(src, dst, copystat=True) | |
545 |
|
526 | |||
546 | finishdatamigration(ui, srcrepo, dstrepo, requirements) |
|
527 | finishdatamigration(ui, srcrepo, dstrepo, requirements) | |
547 |
|
528 | |||
548 | ui.status(_(b'data fully upgraded in a temporary repository\n')) |
|
529 | ui.status(_(b'data fully upgraded in a temporary repository\n')) | |
549 |
|
530 | |||
550 | if upgrade_op.backup_store: |
|
531 | if upgrade_op.backup_store: | |
551 | backuppath = pycompat.mkdtemp( |
|
532 | backuppath = pycompat.mkdtemp( | |
552 | prefix=b'upgradebackup.', dir=srcrepo.path |
|
533 | prefix=b'upgradebackup.', dir=srcrepo.path | |
553 | ) |
|
534 | ) | |
554 | backupvfs = vfsmod.vfs(backuppath) |
|
535 | backupvfs = vfsmod.vfs(backuppath) | |
555 |
|
536 | |||
556 | # Make a backup of requires file first, as it is the first to be modified. |
|
537 | # Make a backup of requires file first, as it is the first to be modified. | |
557 | util.copyfile( |
|
538 | util.copyfile( | |
558 | srcrepo.vfs.join(b'requires'), backupvfs.join(b'requires') |
|
539 | srcrepo.vfs.join(b'requires'), backupvfs.join(b'requires') | |
559 | ) |
|
540 | ) | |
560 |
|
541 | |||
561 | # We install an arbitrary requirement that clients must not support |
|
542 | # We install an arbitrary requirement that clients must not support | |
562 | # as a mechanism to lock out new clients during the data swap. This is |
|
543 | # as a mechanism to lock out new clients during the data swap. This is | |
563 | # better than allowing a client to continue while the repository is in |
|
544 | # better than allowing a client to continue while the repository is in | |
564 | # an inconsistent state. |
|
545 | # an inconsistent state. | |
565 | ui.status( |
|
546 | ui.status( | |
566 | _( |
|
547 | _( | |
567 | b'marking source repository as being upgraded; clients will be ' |
|
548 | b'marking source repository as being upgraded; clients will be ' | |
568 | b'unable to read from repository\n' |
|
549 | b'unable to read from repository\n' | |
569 | ) |
|
550 | ) | |
570 | ) |
|
551 | ) | |
571 | scmutil.writereporequirements( |
|
552 | scmutil.writereporequirements( | |
572 | srcrepo, srcrepo.requirements | {b'upgradeinprogress'} |
|
553 | srcrepo, srcrepo.requirements | {b'upgradeinprogress'} | |
573 | ) |
|
554 | ) | |
574 |
|
555 | |||
575 | ui.status(_(b'starting in-place swap of repository data\n')) |
|
556 | ui.status(_(b'starting in-place swap of repository data\n')) | |
576 | if upgrade_op.backup_store: |
|
557 | if upgrade_op.backup_store: | |
577 | ui.status( |
|
558 | ui.status( | |
578 | _(b'replaced files will be backed up at %s\n') % backuppath |
|
559 | _(b'replaced files will be backed up at %s\n') % backuppath | |
579 | ) |
|
560 | ) | |
580 |
|
561 | |||
581 | # Now swap in the new store directory. Doing it as a rename should make |
|
562 | # Now swap in the new store directory. Doing it as a rename should make | |
582 | # the operation nearly instantaneous and atomic (at least in well-behaved |
|
563 | # the operation nearly instantaneous and atomic (at least in well-behaved | |
583 | # environments). |
|
564 | # environments). | |
584 | ui.status(_(b'replacing store...\n')) |
|
565 | ui.status(_(b'replacing store...\n')) | |
585 | tstart = util.timer() |
|
566 | tstart = util.timer() | |
586 | _replacestores(srcrepo, dstrepo, backupvfs, upgrade_op) |
|
567 | _replacestores(srcrepo, dstrepo, backupvfs, upgrade_op) | |
587 | elapsed = util.timer() - tstart |
|
568 | elapsed = util.timer() - tstart | |
588 | ui.status( |
|
569 | ui.status( | |
589 | _( |
|
570 | _( | |
590 | b'store replacement complete; repository was inconsistent for ' |
|
571 | b'store replacement complete; repository was inconsistent for ' | |
591 | b'%0.1fs\n' |
|
572 | b'%0.1fs\n' | |
592 | ) |
|
573 | ) | |
593 | % elapsed |
|
574 | % elapsed | |
594 | ) |
|
575 | ) | |
595 |
|
576 | |||
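_replacestores (defined earlier in this module) moves the old store aside and renames the upgraded store into place; because a directory rename is a single metadata operation on most filesystems, the window in which the repository is inconsistent is only the short interval measured above. A hedged sketch of that swap with illustrative path names, not the actual helper:

    import os
    import time

    def swap_store(live_store, upgraded_store, backup_dir):
        # Move the old store aside, then rename the upgraded store into
        # place. Each rename is one metadata operation, so the repository
        # is inconsistent only for the window measured here.
        start = time.monotonic()
        os.rename(live_store, os.path.join(backup_dir, 'store'))
        os.rename(upgraded_store, live_store)
        return time.monotonic() - start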
596 | # We first write the requirements file. Any new requirements will lock |
|
577 | # We first write the requirements file. Any new requirements will lock | |
597 | # out legacy clients. |
|
578 | # out legacy clients. | |
598 | ui.status( |
|
579 | ui.status( | |
599 | _( |
|
580 | _( | |
600 | b'finalizing requirements file and making repository readable ' |
|
581 | b'finalizing requirements file and making repository readable ' | |
601 | b'again\n' |
|
582 | b'again\n' | |
602 | ) |
|
583 | ) | |
603 | ) |
|
584 | ) | |
604 | scmutil.writereporequirements(srcrepo, upgrade_op.new_requirements) |
|
585 | scmutil.writereporequirements(srcrepo, upgrade_op.new_requirements) | |
605 |
|
586 | |||
606 | if upgrade_op.backup_store: |
|
587 | if upgrade_op.backup_store: | |
607 | # The lock file from the old store won't be removed because nothing has a |
|
588 | # The lock file from the old store won't be removed because nothing has a | |
608 | # reference to its new location. So clean it up manually. Alternatively, we |
|
589 | # reference to its new location. So clean it up manually. Alternatively, we | |
609 | # could update srcrepo.svfs and other variables to point to the new |
|
590 | # could update srcrepo.svfs and other variables to point to the new | |
610 | # location. This is simpler. |
|
591 | # location. This is simpler. | |
611 | assert backupvfs is not None # help pytype |
|
592 | assert backupvfs is not None # help pytype | |
612 | backupvfs.unlink(b'store/lock') |
|
593 | backupvfs.unlink(b'store/lock') | |
613 |
|
594 | |||
614 | return backuppath |
|
595 | return backuppath | |
615 |
|
596 | |||
616 |
|
597 | |||
617 | def upgrade_dirstate(ui, srcrepo, upgrade_op, old, new): |
|
598 | def upgrade_dirstate(ui, srcrepo, upgrade_op, old, new): | |
618 | if upgrade_op.backup_store: |
|
599 | if upgrade_op.backup_store: | |
619 | backuppath = pycompat.mkdtemp( |
|
600 | backuppath = pycompat.mkdtemp( | |
620 | prefix=b'upgradebackup.', dir=srcrepo.path |
|
601 | prefix=b'upgradebackup.', dir=srcrepo.path | |
621 | ) |
|
602 | ) | |
622 | ui.status(_(b'replaced files will be backed up at %s\n') % backuppath) |
|
603 | ui.status(_(b'replaced files will be backed up at %s\n') % backuppath) | |
623 | backupvfs = vfsmod.vfs(backuppath) |
|
604 | backupvfs = vfsmod.vfs(backuppath) | |
624 | util.copyfile( |
|
605 | util.copyfile( | |
625 | srcrepo.vfs.join(b'requires'), backupvfs.join(b'requires') |
|
606 | srcrepo.vfs.join(b'requires'), backupvfs.join(b'requires') | |
626 | ) |
|
607 | ) | |
627 | try: |
|
608 | try: | |
628 | util.copyfile( |
|
609 | util.copyfile( | |
629 | srcrepo.vfs.join(b'dirstate'), backupvfs.join(b'dirstate') |
|
610 | srcrepo.vfs.join(b'dirstate'), backupvfs.join(b'dirstate') | |
630 | ) |
|
611 | ) | |
631 | except FileNotFoundError: |
|
612 | except FileNotFoundError: | |
632 | # The dirstate does not exist on an empty repo or a repo with no |
|
613 | # The dirstate does not exist on an empty repo or a repo with no | |
633 | # revision checked out |
|
614 | # revision checked out | |
634 | pass |
|
615 | pass | |
635 |
|
616 | |||
636 | assert srcrepo.dirstate._use_dirstate_v2 == (old == b'v2') |
|
617 | assert srcrepo.dirstate._use_dirstate_v2 == (old == b'v2') | |
637 | use_v2 = new == b'v2' |
|
618 | use_v2 = new == b'v2' | |
638 | if use_v2: |
|
619 | if use_v2: | |
639 | # Write the requirements *before* upgrading |
|
620 | # Write the requirements *before* upgrading | |
640 | scmutil.writereporequirements(srcrepo, upgrade_op.new_requirements) |
|
621 | scmutil.writereporequirements(srcrepo, upgrade_op.new_requirements) | |
641 |
|
622 | |||
642 | srcrepo.dirstate._map.preload() |
|
623 | srcrepo.dirstate._map.preload() | |
643 | srcrepo.dirstate._use_dirstate_v2 = use_v2 |
|
624 | srcrepo.dirstate._use_dirstate_v2 = use_v2 | |
644 | srcrepo.dirstate._map._use_dirstate_v2 = use_v2 |
|
625 | srcrepo.dirstate._map._use_dirstate_v2 = use_v2 | |
645 | srcrepo.dirstate._dirty = True |
|
626 | srcrepo.dirstate._dirty = True | |
646 | try: |
|
627 | try: | |
647 | srcrepo.vfs.unlink(b'dirstate') |
|
628 | srcrepo.vfs.unlink(b'dirstate') | |
648 | except FileNotFoundError: |
|
629 | except FileNotFoundError: | |
649 | # The dirstate does not exist on an empty repo or a repo with no |
|
630 | # The dirstate does not exist on an empty repo or a repo with no | |
650 | # revision checked out |
|
631 | # revision checked out | |
651 | pass |
|
632 | pass | |
652 |
|
633 | |||
653 | srcrepo.dirstate.write(None) |
|
634 | srcrepo.dirstate.write(None) | |
654 | if not use_v2: |
|
635 | if not use_v2: | |
655 | # Remove the v2 requirement *after* downgrading |
|
636 | # Remove the v2 requirement *after* downgrading | |
656 | scmutil.writereporequirements(srcrepo, upgrade_op.new_requirements) |
|
637 | scmutil.writereporequirements(srcrepo, upgrade_op.new_requirements) | |
657 |
|
638 | |||
658 |
|
639 | |||
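upgrade_dirstate above is careful about ordering: when moving to dirstate-v2 the new requirement is written before the dirstate file is rewritten, and when moving back to v1 the requirement is dropped only after the downgrade, so the on-disk requirements always cover whatever dirstate format may exist on disk. A compressed restatement of that control flow, with the two side effects abstracted as callables rather than Mercurial's internals:

    def change_dirstate_format(write_requirements, rewrite_dirstate, use_v2):
        # The v2 requirement must be on disk whenever a v2 dirstate might be.
        if use_v2:
            write_requirements()   # upgrade: advertise the new format first
        rewrite_dirstate(use_v2)   # drop the old file, write the new format
        if not use_v2:
            write_requirements()   # downgrade: remove the requirement after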
659 | def upgrade_tracked_hint(ui, srcrepo, upgrade_op, add): |
|
640 | def upgrade_tracked_hint(ui, srcrepo, upgrade_op, add): | |
660 | if add: |
|
641 | if add: | |
661 | srcrepo.dirstate._use_tracked_hint = True |
|
642 | srcrepo.dirstate._use_tracked_hint = True | |
662 | srcrepo.dirstate._dirty = True |
|
643 | srcrepo.dirstate._dirty = True | |
663 | srcrepo.dirstate._dirty_tracked_set = True |
|
644 | srcrepo.dirstate._dirty_tracked_set = True | |
664 | srcrepo.dirstate.write(None) |
|
645 | srcrepo.dirstate.write(None) | |
665 | if not add: |
|
646 | if not add: | |
666 | srcrepo.dirstate.delete_tracked_hint() |
|
647 | srcrepo.dirstate.delete_tracked_hint() | |
667 |
|
648 | |||
668 | scmutil.writereporequirements(srcrepo, upgrade_op.new_requirements) |
|
649 | scmutil.writereporequirements(srcrepo, upgrade_op.new_requirements) |
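upgrade_tracked_hint flips the tracked-hint feature by marking the dirstate dirty with _dirty_tracked_set so the hint is (re)written on the next write(), or by deleting the hint when the feature is removed; the requirements are rewritten last in both cases. As a usage idea only, a tool could watch the hint to detect changes to the set of tracked files cheaply; the file name below is an assumption for this sketch, not taken from the diff:

    import os

    def read_tracked_hint(repo_root, hint_name='dirstate-tracked-hint'):
        # Return the current hint value, or None when the feature is off.
        # The file name is an assumption made for this sketch.
        path = os.path.join(repo_root, '.hg', hint_name)
        try:
            with open(path, 'rb') as fp:
                return fp.read()
        except FileNotFoundError:
            return None

    # Usage idea: read the hint before and after an operation; if the value
    # changed (or the file appeared or disappeared), the set of tracked
    # files may have changed and caches keyed on it should be refreshed.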