store: don't read the whole fncache in memory...
Pulkit Goyal
r42144:a5648708 default
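The patch below replaces the fncache loader's single fp.read() with a bounded, chunked read so that large fncache files are never pulled into memory at once. As a rough, self-contained sketch of that technique (the helper name readlineschunked and the io.BytesIO input are invented for illustration; the committed code lives in fncache._load and additionally runs each slice through decodedir):

    from __future__ import absolute_import

    import functools
    import io

    # illustrative chunk size; the patch itself uses fncache_chunksize = 10 ** 6
    CHUNKSIZE = 10 ** 6

    def readlineschunked(fp, chunksize=CHUNKSIZE):
        """Return the set of newline-terminated entries in fp without
        reading the whole file into memory (sketch of the fncache approach)."""
        entries = set()
        chunk = b''
        for c in iter(functools.partial(fp.read, chunksize), b''):
            chunk += c
            try:
                # split only on complete lines; carry the trailing partial
                # line over into the next iteration
                p = chunk.rindex(b'\n')
                entries.update(chunk[:p + 1].splitlines())
                chunk = chunk[p + 1:]
            except ValueError:
                # no '\n' buffered yet: the entry is longer than one chunk,
                # so keep accumulating
                pass
        return entries

    # usage: readlineschunked(io.BytesIO(b'data/a.i\ndata/b.i\n'), chunksize=4)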
@@ -1,633 +1,650 @@
1 # store.py - repository store handling for Mercurial
1 # store.py - repository store handling for Mercurial
2 #
2 #
3 # Copyright 2008 Matt Mackall <mpm@selenic.com>
3 # Copyright 2008 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import functools
11 import hashlib
12 import hashlib
12 import os
13 import os
13 import stat
14 import stat
14
15
15 from .i18n import _
16 from .i18n import _
16 from . import (
17 from . import (
17 error,
18 error,
18 node,
19 node,
19 policy,
20 policy,
20 pycompat,
21 pycompat,
21 util,
22 util,
22 vfs as vfsmod,
23 vfs as vfsmod,
23 )
24 )
24
25
25 parsers = policy.importmod(r'parsers')
26 parsers = policy.importmod(r'parsers')
27 # how much bytes should be read from fncache in one read
28 # It is done to prevent loading large fncache files into memory
29 fncache_chunksize = 10 ** 6
26
30
27 def _matchtrackedpath(path, matcher):
31 def _matchtrackedpath(path, matcher):
28 """parses a fncache entry and returns whether the entry is tracking a path
32 """parses a fncache entry and returns whether the entry is tracking a path
29 matched by matcher or not.
33 matched by matcher or not.
30
34
31 If matcher is None, returns True"""
35 If matcher is None, returns True"""
32
36
33 if matcher is None:
37 if matcher is None:
34 return True
38 return True
35 path = decodedir(path)
39 path = decodedir(path)
36 if path.startswith('data/'):
40 if path.startswith('data/'):
37 return matcher(path[len('data/'):-len('.i')])
41 return matcher(path[len('data/'):-len('.i')])
38 elif path.startswith('meta/'):
42 elif path.startswith('meta/'):
39 return matcher.visitdir(path[len('meta/'):-len('/00manifest.i')] or '.')
43 return matcher.visitdir(path[len('meta/'):-len('/00manifest.i')] or '.')
40
44
41 raise error.ProgrammingError("cannot decode path %s" % path)
45 raise error.ProgrammingError("cannot decode path %s" % path)
42
46
43 # This avoids a collision between a file named foo and a dir named
47 # This avoids a collision between a file named foo and a dir named
44 # foo.i or foo.d
48 # foo.i or foo.d
45 def _encodedir(path):
49 def _encodedir(path):
46 '''
50 '''
47 >>> _encodedir(b'data/foo.i')
51 >>> _encodedir(b'data/foo.i')
48 'data/foo.i'
52 'data/foo.i'
49 >>> _encodedir(b'data/foo.i/bla.i')
53 >>> _encodedir(b'data/foo.i/bla.i')
50 'data/foo.i.hg/bla.i'
54 'data/foo.i.hg/bla.i'
51 >>> _encodedir(b'data/foo.i.hg/bla.i')
55 >>> _encodedir(b'data/foo.i.hg/bla.i')
52 'data/foo.i.hg.hg/bla.i'
56 'data/foo.i.hg.hg/bla.i'
53 >>> _encodedir(b'data/foo.i\\ndata/foo.i/bla.i\\ndata/foo.i.hg/bla.i\\n')
57 >>> _encodedir(b'data/foo.i\\ndata/foo.i/bla.i\\ndata/foo.i.hg/bla.i\\n')
54 'data/foo.i\\ndata/foo.i.hg/bla.i\\ndata/foo.i.hg.hg/bla.i\\n'
58 'data/foo.i\\ndata/foo.i.hg/bla.i\\ndata/foo.i.hg.hg/bla.i\\n'
55 '''
59 '''
56 return (path
60 return (path
57 .replace(".hg/", ".hg.hg/")
61 .replace(".hg/", ".hg.hg/")
58 .replace(".i/", ".i.hg/")
62 .replace(".i/", ".i.hg/")
59 .replace(".d/", ".d.hg/"))
63 .replace(".d/", ".d.hg/"))
60
64
61 encodedir = getattr(parsers, 'encodedir', _encodedir)
65 encodedir = getattr(parsers, 'encodedir', _encodedir)
62
66
63 def decodedir(path):
67 def decodedir(path):
64 '''
68 '''
65 >>> decodedir(b'data/foo.i')
69 >>> decodedir(b'data/foo.i')
66 'data/foo.i'
70 'data/foo.i'
67 >>> decodedir(b'data/foo.i.hg/bla.i')
71 >>> decodedir(b'data/foo.i.hg/bla.i')
68 'data/foo.i/bla.i'
72 'data/foo.i/bla.i'
69 >>> decodedir(b'data/foo.i.hg.hg/bla.i')
73 >>> decodedir(b'data/foo.i.hg.hg/bla.i')
70 'data/foo.i.hg/bla.i'
74 'data/foo.i.hg/bla.i'
71 '''
75 '''
72 if ".hg/" not in path:
76 if ".hg/" not in path:
73 return path
77 return path
74 return (path
78 return (path
75 .replace(".d.hg/", ".d/")
79 .replace(".d.hg/", ".d/")
76 .replace(".i.hg/", ".i/")
80 .replace(".i.hg/", ".i/")
77 .replace(".hg.hg/", ".hg/"))
81 .replace(".hg.hg/", ".hg/"))
78
82
79 def _reserved():
83 def _reserved():
80 ''' characters that are problematic for filesystems
84 ''' characters that are problematic for filesystems
81
85
82 * ascii escapes (0..31)
86 * ascii escapes (0..31)
83 * ascii hi (126..255)
87 * ascii hi (126..255)
84 * windows specials
88 * windows specials
85
89
86 these characters will be escaped by encodefunctions
90 these characters will be escaped by encodefunctions
87 '''
91 '''
88 winreserved = [ord(x) for x in u'\\:*?"<>|']
92 winreserved = [ord(x) for x in u'\\:*?"<>|']
89 for x in range(32):
93 for x in range(32):
90 yield x
94 yield x
91 for x in range(126, 256):
95 for x in range(126, 256):
92 yield x
96 yield x
93 for x in winreserved:
97 for x in winreserved:
94 yield x
98 yield x
95
99
96 def _buildencodefun():
100 def _buildencodefun():
97 '''
101 '''
98 >>> enc, dec = _buildencodefun()
102 >>> enc, dec = _buildencodefun()
99
103
100 >>> enc(b'nothing/special.txt')
104 >>> enc(b'nothing/special.txt')
101 'nothing/special.txt'
105 'nothing/special.txt'
102 >>> dec(b'nothing/special.txt')
106 >>> dec(b'nothing/special.txt')
103 'nothing/special.txt'
107 'nothing/special.txt'
104
108
105 >>> enc(b'HELLO')
109 >>> enc(b'HELLO')
106 '_h_e_l_l_o'
110 '_h_e_l_l_o'
107 >>> dec(b'_h_e_l_l_o')
111 >>> dec(b'_h_e_l_l_o')
108 'HELLO'
112 'HELLO'
109
113
110 >>> enc(b'hello:world?')
114 >>> enc(b'hello:world?')
111 'hello~3aworld~3f'
115 'hello~3aworld~3f'
112 >>> dec(b'hello~3aworld~3f')
116 >>> dec(b'hello~3aworld~3f')
113 'hello:world?'
117 'hello:world?'
114
118
115 >>> enc(b'the\\x07quick\\xADshot')
119 >>> enc(b'the\\x07quick\\xADshot')
116 'the~07quick~adshot'
120 'the~07quick~adshot'
117 >>> dec(b'the~07quick~adshot')
121 >>> dec(b'the~07quick~adshot')
118 'the\\x07quick\\xadshot'
122 'the\\x07quick\\xadshot'
119 '''
123 '''
120 e = '_'
124 e = '_'
121 xchr = pycompat.bytechr
125 xchr = pycompat.bytechr
122 asciistr = list(map(xchr, range(127)))
126 asciistr = list(map(xchr, range(127)))
123 capitals = list(range(ord("A"), ord("Z") + 1))
127 capitals = list(range(ord("A"), ord("Z") + 1))
124
128
125 cmap = dict((x, x) for x in asciistr)
129 cmap = dict((x, x) for x in asciistr)
126 for x in _reserved():
130 for x in _reserved():
127 cmap[xchr(x)] = "~%02x" % x
131 cmap[xchr(x)] = "~%02x" % x
128 for x in capitals + [ord(e)]:
132 for x in capitals + [ord(e)]:
129 cmap[xchr(x)] = e + xchr(x).lower()
133 cmap[xchr(x)] = e + xchr(x).lower()
130
134
131 dmap = {}
135 dmap = {}
132 for k, v in cmap.iteritems():
136 for k, v in cmap.iteritems():
133 dmap[v] = k
137 dmap[v] = k
134 def decode(s):
138 def decode(s):
135 i = 0
139 i = 0
136 while i < len(s):
140 while i < len(s):
137 for l in pycompat.xrange(1, 4):
141 for l in pycompat.xrange(1, 4):
138 try:
142 try:
139 yield dmap[s[i:i + l]]
143 yield dmap[s[i:i + l]]
140 i += l
144 i += l
141 break
145 break
142 except KeyError:
146 except KeyError:
143 pass
147 pass
144 else:
148 else:
145 raise KeyError
149 raise KeyError
146 return (lambda s: ''.join([cmap[s[c:c + 1]]
150 return (lambda s: ''.join([cmap[s[c:c + 1]]
147 for c in pycompat.xrange(len(s))]),
151 for c in pycompat.xrange(len(s))]),
148 lambda s: ''.join(list(decode(s))))
152 lambda s: ''.join(list(decode(s))))
149
153
150 _encodefname, _decodefname = _buildencodefun()
154 _encodefname, _decodefname = _buildencodefun()
151
155
152 def encodefilename(s):
156 def encodefilename(s):
153 '''
157 '''
154 >>> encodefilename(b'foo.i/bar.d/bla.hg/hi:world?/HELLO')
158 >>> encodefilename(b'foo.i/bar.d/bla.hg/hi:world?/HELLO')
155 'foo.i.hg/bar.d.hg/bla.hg.hg/hi~3aworld~3f/_h_e_l_l_o'
159 'foo.i.hg/bar.d.hg/bla.hg.hg/hi~3aworld~3f/_h_e_l_l_o'
156 '''
160 '''
157 return _encodefname(encodedir(s))
161 return _encodefname(encodedir(s))
158
162
159 def decodefilename(s):
163 def decodefilename(s):
160 '''
164 '''
161 >>> decodefilename(b'foo.i.hg/bar.d.hg/bla.hg.hg/hi~3aworld~3f/_h_e_l_l_o')
165 >>> decodefilename(b'foo.i.hg/bar.d.hg/bla.hg.hg/hi~3aworld~3f/_h_e_l_l_o')
162 'foo.i/bar.d/bla.hg/hi:world?/HELLO'
166 'foo.i/bar.d/bla.hg/hi:world?/HELLO'
163 '''
167 '''
164 return decodedir(_decodefname(s))
168 return decodedir(_decodefname(s))
165
169
166 def _buildlowerencodefun():
170 def _buildlowerencodefun():
167 '''
171 '''
168 >>> f = _buildlowerencodefun()
172 >>> f = _buildlowerencodefun()
169 >>> f(b'nothing/special.txt')
173 >>> f(b'nothing/special.txt')
170 'nothing/special.txt'
174 'nothing/special.txt'
171 >>> f(b'HELLO')
175 >>> f(b'HELLO')
172 'hello'
176 'hello'
173 >>> f(b'hello:world?')
177 >>> f(b'hello:world?')
174 'hello~3aworld~3f'
178 'hello~3aworld~3f'
175 >>> f(b'the\\x07quick\\xADshot')
179 >>> f(b'the\\x07quick\\xADshot')
176 'the~07quick~adshot'
180 'the~07quick~adshot'
177 '''
181 '''
178 xchr = pycompat.bytechr
182 xchr = pycompat.bytechr
179 cmap = dict([(xchr(x), xchr(x)) for x in pycompat.xrange(127)])
183 cmap = dict([(xchr(x), xchr(x)) for x in pycompat.xrange(127)])
180 for x in _reserved():
184 for x in _reserved():
181 cmap[xchr(x)] = "~%02x" % x
185 cmap[xchr(x)] = "~%02x" % x
182 for x in range(ord("A"), ord("Z") + 1):
186 for x in range(ord("A"), ord("Z") + 1):
183 cmap[xchr(x)] = xchr(x).lower()
187 cmap[xchr(x)] = xchr(x).lower()
184 def lowerencode(s):
188 def lowerencode(s):
185 return "".join([cmap[c] for c in pycompat.iterbytestr(s)])
189 return "".join([cmap[c] for c in pycompat.iterbytestr(s)])
186 return lowerencode
190 return lowerencode
187
191
188 lowerencode = getattr(parsers, 'lowerencode', None) or _buildlowerencodefun()
192 lowerencode = getattr(parsers, 'lowerencode', None) or _buildlowerencodefun()
189
193
190 # Windows reserved names: con, prn, aux, nul, com1..com9, lpt1..lpt9
194 # Windows reserved names: con, prn, aux, nul, com1..com9, lpt1..lpt9
191 _winres3 = ('aux', 'con', 'prn', 'nul') # length 3
195 _winres3 = ('aux', 'con', 'prn', 'nul') # length 3
192 _winres4 = ('com', 'lpt') # length 4 (with trailing 1..9)
196 _winres4 = ('com', 'lpt') # length 4 (with trailing 1..9)
193 def _auxencode(path, dotencode):
197 def _auxencode(path, dotencode):
194 '''
198 '''
195 Encodes filenames containing names reserved by Windows or which end in
199 Encodes filenames containing names reserved by Windows or which end in
196 period or space. Does not touch other single reserved characters c.
200 period or space. Does not touch other single reserved characters c.
197 Specifically, c in '\\:*?"<>|' or ord(c) <= 31 are *not* encoded here.
201 Specifically, c in '\\:*?"<>|' or ord(c) <= 31 are *not* encoded here.
198 Additionally encodes space or period at the beginning, if dotencode is
202 Additionally encodes space or period at the beginning, if dotencode is
199 True. Parameter path is assumed to be all lowercase.
203 True. Parameter path is assumed to be all lowercase.
200 A segment only needs encoding if a reserved name appears as a
204 A segment only needs encoding if a reserved name appears as a
201 basename (e.g. "aux", "aux.foo"). A directory or file named "foo.aux"
205 basename (e.g. "aux", "aux.foo"). A directory or file named "foo.aux"
202 doesn't need encoding.
206 doesn't need encoding.
203
207
204 >>> s = b'.foo/aux.txt/txt.aux/con/prn/nul/foo.'
208 >>> s = b'.foo/aux.txt/txt.aux/con/prn/nul/foo.'
205 >>> _auxencode(s.split(b'/'), True)
209 >>> _auxencode(s.split(b'/'), True)
206 ['~2efoo', 'au~78.txt', 'txt.aux', 'co~6e', 'pr~6e', 'nu~6c', 'foo~2e']
210 ['~2efoo', 'au~78.txt', 'txt.aux', 'co~6e', 'pr~6e', 'nu~6c', 'foo~2e']
207 >>> s = b'.com1com2/lpt9.lpt4.lpt1/conprn/com0/lpt0/foo.'
211 >>> s = b'.com1com2/lpt9.lpt4.lpt1/conprn/com0/lpt0/foo.'
208 >>> _auxencode(s.split(b'/'), False)
212 >>> _auxencode(s.split(b'/'), False)
209 ['.com1com2', 'lp~749.lpt4.lpt1', 'conprn', 'com0', 'lpt0', 'foo~2e']
213 ['.com1com2', 'lp~749.lpt4.lpt1', 'conprn', 'com0', 'lpt0', 'foo~2e']
210 >>> _auxencode([b'foo. '], True)
214 >>> _auxencode([b'foo. '], True)
211 ['foo.~20']
215 ['foo.~20']
212 >>> _auxencode([b' .foo'], True)
216 >>> _auxencode([b' .foo'], True)
213 ['~20.foo']
217 ['~20.foo']
214 '''
218 '''
215 for i, n in enumerate(path):
219 for i, n in enumerate(path):
216 if not n:
220 if not n:
217 continue
221 continue
218 if dotencode and n[0] in '. ':
222 if dotencode and n[0] in '. ':
219 n = "~%02x" % ord(n[0:1]) + n[1:]
223 n = "~%02x" % ord(n[0:1]) + n[1:]
220 path[i] = n
224 path[i] = n
221 else:
225 else:
222 l = n.find('.')
226 l = n.find('.')
223 if l == -1:
227 if l == -1:
224 l = len(n)
228 l = len(n)
225 if ((l == 3 and n[:3] in _winres3) or
229 if ((l == 3 and n[:3] in _winres3) or
226 (l == 4 and n[3:4] <= '9' and n[3:4] >= '1'
230 (l == 4 and n[3:4] <= '9' and n[3:4] >= '1'
227 and n[:3] in _winres4)):
231 and n[:3] in _winres4)):
228 # encode third letter ('aux' -> 'au~78')
232 # encode third letter ('aux' -> 'au~78')
229 ec = "~%02x" % ord(n[2:3])
233 ec = "~%02x" % ord(n[2:3])
230 n = n[0:2] + ec + n[3:]
234 n = n[0:2] + ec + n[3:]
231 path[i] = n
235 path[i] = n
232 if n[-1] in '. ':
236 if n[-1] in '. ':
233 # encode last period or space ('foo...' -> 'foo..~2e')
237 # encode last period or space ('foo...' -> 'foo..~2e')
234 path[i] = n[:-1] + "~%02x" % ord(n[-1:])
238 path[i] = n[:-1] + "~%02x" % ord(n[-1:])
235 return path
239 return path
236
240
237 _maxstorepathlen = 120
241 _maxstorepathlen = 120
238 _dirprefixlen = 8
242 _dirprefixlen = 8
239 _maxshortdirslen = 8 * (_dirprefixlen + 1) - 4
243 _maxshortdirslen = 8 * (_dirprefixlen + 1) - 4
240
244
241 def _hashencode(path, dotencode):
245 def _hashencode(path, dotencode):
242 digest = node.hex(hashlib.sha1(path).digest())
246 digest = node.hex(hashlib.sha1(path).digest())
243 le = lowerencode(path[5:]).split('/') # skips prefix 'data/' or 'meta/'
247 le = lowerencode(path[5:]).split('/') # skips prefix 'data/' or 'meta/'
244 parts = _auxencode(le, dotencode)
248 parts = _auxencode(le, dotencode)
245 basename = parts[-1]
249 basename = parts[-1]
246 _root, ext = os.path.splitext(basename)
250 _root, ext = os.path.splitext(basename)
247 sdirs = []
251 sdirs = []
248 sdirslen = 0
252 sdirslen = 0
249 for p in parts[:-1]:
253 for p in parts[:-1]:
250 d = p[:_dirprefixlen]
254 d = p[:_dirprefixlen]
251 if d[-1] in '. ':
255 if d[-1] in '. ':
252 # Windows can't access dirs ending in period or space
256 # Windows can't access dirs ending in period or space
253 d = d[:-1] + '_'
257 d = d[:-1] + '_'
254 if sdirslen == 0:
258 if sdirslen == 0:
255 t = len(d)
259 t = len(d)
256 else:
260 else:
257 t = sdirslen + 1 + len(d)
261 t = sdirslen + 1 + len(d)
258 if t > _maxshortdirslen:
262 if t > _maxshortdirslen:
259 break
263 break
260 sdirs.append(d)
264 sdirs.append(d)
261 sdirslen = t
265 sdirslen = t
262 dirs = '/'.join(sdirs)
266 dirs = '/'.join(sdirs)
263 if len(dirs) > 0:
267 if len(dirs) > 0:
264 dirs += '/'
268 dirs += '/'
265 res = 'dh/' + dirs + digest + ext
269 res = 'dh/' + dirs + digest + ext
266 spaceleft = _maxstorepathlen - len(res)
270 spaceleft = _maxstorepathlen - len(res)
267 if spaceleft > 0:
271 if spaceleft > 0:
268 filler = basename[:spaceleft]
272 filler = basename[:spaceleft]
269 res = 'dh/' + dirs + filler + digest + ext
273 res = 'dh/' + dirs + filler + digest + ext
270 return res
274 return res
271
275
272 def _hybridencode(path, dotencode):
276 def _hybridencode(path, dotencode):
273 '''encodes path with a length limit
277 '''encodes path with a length limit
274
278
275 Encodes all paths that begin with 'data/', according to the following.
279 Encodes all paths that begin with 'data/', according to the following.
276
280
277 Default encoding (reversible):
281 Default encoding (reversible):
278
282
279 Encodes all uppercase letters 'X' as '_x'. All reserved or illegal
283 Encodes all uppercase letters 'X' as '_x'. All reserved or illegal
280 characters are encoded as '~xx', where xx is the two digit hex code
284 characters are encoded as '~xx', where xx is the two digit hex code
281 of the character (see encodefilename).
285 of the character (see encodefilename).
282 Relevant path components consisting of Windows reserved filenames are
286 Relevant path components consisting of Windows reserved filenames are
283 masked by encoding the third character ('aux' -> 'au~78', see _auxencode).
287 masked by encoding the third character ('aux' -> 'au~78', see _auxencode).
284
288
285 Hashed encoding (not reversible):
289 Hashed encoding (not reversible):
286
290
287 If the default-encoded path is longer than _maxstorepathlen, a
291 If the default-encoded path is longer than _maxstorepathlen, a
288 non-reversible hybrid hashing of the path is done instead.
292 non-reversible hybrid hashing of the path is done instead.
289 This encoding uses up to _dirprefixlen characters of all directory
293 This encoding uses up to _dirprefixlen characters of all directory
290 levels of the lowerencoded path, but not more levels than can fit into
294 levels of the lowerencoded path, but not more levels than can fit into
291 _maxshortdirslen.
295 _maxshortdirslen.
292 Then follows the filler followed by the sha digest of the full path.
296 Then follows the filler followed by the sha digest of the full path.
293 The filler is the beginning of the basename of the lowerencoded path
297 The filler is the beginning of the basename of the lowerencoded path
294 (the basename is everything after the last path separator). The filler
298 (the basename is everything after the last path separator). The filler
295 is as long as possible, filling in characters from the basename until
299 is as long as possible, filling in characters from the basename until
296 the encoded path has _maxstorepathlen characters (or all chars of the
300 the encoded path has _maxstorepathlen characters (or all chars of the
297 basename have been taken).
301 basename have been taken).
298 The extension (e.g. '.i' or '.d') is preserved.
302 The extension (e.g. '.i' or '.d') is preserved.
299
303
300 The string 'data/' at the beginning is replaced with 'dh/', if the hashed
304 The string 'data/' at the beginning is replaced with 'dh/', if the hashed
301 encoding was used.
305 encoding was used.
302 '''
306 '''
303 path = encodedir(path)
307 path = encodedir(path)
304 ef = _encodefname(path).split('/')
308 ef = _encodefname(path).split('/')
305 res = '/'.join(_auxencode(ef, dotencode))
309 res = '/'.join(_auxencode(ef, dotencode))
306 if len(res) > _maxstorepathlen:
310 if len(res) > _maxstorepathlen:
307 res = _hashencode(path, dotencode)
311 res = _hashencode(path, dotencode)
308 return res
312 return res
309
313
310 def _pathencode(path):
314 def _pathencode(path):
311 de = encodedir(path)
315 de = encodedir(path)
312 if len(path) > _maxstorepathlen:
316 if len(path) > _maxstorepathlen:
313 return _hashencode(de, True)
317 return _hashencode(de, True)
314 ef = _encodefname(de).split('/')
318 ef = _encodefname(de).split('/')
315 res = '/'.join(_auxencode(ef, True))
319 res = '/'.join(_auxencode(ef, True))
316 if len(res) > _maxstorepathlen:
320 if len(res) > _maxstorepathlen:
317 return _hashencode(de, True)
321 return _hashencode(de, True)
318 return res
322 return res
319
323
320 _pathencode = getattr(parsers, 'pathencode', _pathencode)
324 _pathencode = getattr(parsers, 'pathencode', _pathencode)
321
325
322 def _plainhybridencode(f):
326 def _plainhybridencode(f):
323 return _hybridencode(f, False)
327 return _hybridencode(f, False)
324
328
325 def _calcmode(vfs):
329 def _calcmode(vfs):
326 try:
330 try:
327 # files in .hg/ will be created using this mode
331 # files in .hg/ will be created using this mode
328 mode = vfs.stat().st_mode
332 mode = vfs.stat().st_mode
329 # avoid some useless chmods
333 # avoid some useless chmods
330 if (0o777 & ~util.umask) == (0o777 & mode):
334 if (0o777 & ~util.umask) == (0o777 & mode):
331 mode = None
335 mode = None
332 except OSError:
336 except OSError:
333 mode = None
337 mode = None
334 return mode
338 return mode
335
339
336 _data = ('narrowspec data meta 00manifest.d 00manifest.i'
340 _data = ('narrowspec data meta 00manifest.d 00manifest.i'
337 ' 00changelog.d 00changelog.i phaseroots obsstore')
341 ' 00changelog.d 00changelog.i phaseroots obsstore')
338
342
339 def isrevlog(f, kind, st):
343 def isrevlog(f, kind, st):
340 return kind == stat.S_IFREG and f[-2:] in ('.i', '.d')
344 return kind == stat.S_IFREG and f[-2:] in ('.i', '.d')
341
345
342 class basicstore(object):
346 class basicstore(object):
343 '''base class for local repository stores'''
347 '''base class for local repository stores'''
344 def __init__(self, path, vfstype):
348 def __init__(self, path, vfstype):
345 vfs = vfstype(path)
349 vfs = vfstype(path)
346 self.path = vfs.base
350 self.path = vfs.base
347 self.createmode = _calcmode(vfs)
351 self.createmode = _calcmode(vfs)
348 vfs.createmode = self.createmode
352 vfs.createmode = self.createmode
349 self.rawvfs = vfs
353 self.rawvfs = vfs
350 self.vfs = vfsmod.filtervfs(vfs, encodedir)
354 self.vfs = vfsmod.filtervfs(vfs, encodedir)
351 self.opener = self.vfs
355 self.opener = self.vfs
352
356
353 def join(self, f):
357 def join(self, f):
354 return self.path + '/' + encodedir(f)
358 return self.path + '/' + encodedir(f)
355
359
356 def _walk(self, relpath, recurse, filefilter=isrevlog):
360 def _walk(self, relpath, recurse, filefilter=isrevlog):
357 '''yields (unencoded, encoded, size)'''
361 '''yields (unencoded, encoded, size)'''
358 path = self.path
362 path = self.path
359 if relpath:
363 if relpath:
360 path += '/' + relpath
364 path += '/' + relpath
361 striplen = len(self.path) + 1
365 striplen = len(self.path) + 1
362 l = []
366 l = []
363 if self.rawvfs.isdir(path):
367 if self.rawvfs.isdir(path):
364 visit = [path]
368 visit = [path]
365 readdir = self.rawvfs.readdir
369 readdir = self.rawvfs.readdir
366 while visit:
370 while visit:
367 p = visit.pop()
371 p = visit.pop()
368 for f, kind, st in readdir(p, stat=True):
372 for f, kind, st in readdir(p, stat=True):
369 fp = p + '/' + f
373 fp = p + '/' + f
370 if filefilter(f, kind, st):
374 if filefilter(f, kind, st):
371 n = util.pconvert(fp[striplen:])
375 n = util.pconvert(fp[striplen:])
372 l.append((decodedir(n), n, st.st_size))
376 l.append((decodedir(n), n, st.st_size))
373 elif kind == stat.S_IFDIR and recurse:
377 elif kind == stat.S_IFDIR and recurse:
374 visit.append(fp)
378 visit.append(fp)
375 l.sort()
379 l.sort()
376 return l
380 return l
377
381
378 def datafiles(self, matcher=None):
382 def datafiles(self, matcher=None):
379 return self._walk('data', True) + self._walk('meta', True)
383 return self._walk('data', True) + self._walk('meta', True)
380
384
381 def topfiles(self):
385 def topfiles(self):
382 # yield manifest before changelog
386 # yield manifest before changelog
383 return reversed(self._walk('', False))
387 return reversed(self._walk('', False))
384
388
385 def walk(self, matcher=None):
389 def walk(self, matcher=None):
386 '''yields (unencoded, encoded, size)
390 '''yields (unencoded, encoded, size)
387
391
388 if a matcher is passed, storage files of only those tracked paths
392 if a matcher is passed, storage files of only those tracked paths
389 are passed with matches the matcher
393 are passed with matches the matcher
390 '''
394 '''
391 # yield data files first
395 # yield data files first
392 for x in self.datafiles(matcher):
396 for x in self.datafiles(matcher):
393 yield x
397 yield x
394 for x in self.topfiles():
398 for x in self.topfiles():
395 yield x
399 yield x
396
400
397 def copylist(self):
401 def copylist(self):
398 return ['requires'] + _data.split()
402 return ['requires'] + _data.split()
399
403
400 def write(self, tr):
404 def write(self, tr):
401 pass
405 pass
402
406
403 def invalidatecaches(self):
407 def invalidatecaches(self):
404 pass
408 pass
405
409
406 def markremoved(self, fn):
410 def markremoved(self, fn):
407 pass
411 pass
408
412
409 def __contains__(self, path):
413 def __contains__(self, path):
410 '''Checks if the store contains path'''
414 '''Checks if the store contains path'''
411 path = "/".join(("data", path))
415 path = "/".join(("data", path))
412 # file?
416 # file?
413 if self.vfs.exists(path + ".i"):
417 if self.vfs.exists(path + ".i"):
414 return True
418 return True
415 # dir?
419 # dir?
416 if not path.endswith("/"):
420 if not path.endswith("/"):
417 path = path + "/"
421 path = path + "/"
418 return self.vfs.exists(path)
422 return self.vfs.exists(path)
419
423
420 class encodedstore(basicstore):
424 class encodedstore(basicstore):
421 def __init__(self, path, vfstype):
425 def __init__(self, path, vfstype):
422 vfs = vfstype(path + '/store')
426 vfs = vfstype(path + '/store')
423 self.path = vfs.base
427 self.path = vfs.base
424 self.createmode = _calcmode(vfs)
428 self.createmode = _calcmode(vfs)
425 vfs.createmode = self.createmode
429 vfs.createmode = self.createmode
426 self.rawvfs = vfs
430 self.rawvfs = vfs
427 self.vfs = vfsmod.filtervfs(vfs, encodefilename)
431 self.vfs = vfsmod.filtervfs(vfs, encodefilename)
428 self.opener = self.vfs
432 self.opener = self.vfs
429
433
430 def datafiles(self, matcher=None):
434 def datafiles(self, matcher=None):
431 for a, b, size in super(encodedstore, self).datafiles():
435 for a, b, size in super(encodedstore, self).datafiles():
432 try:
436 try:
433 a = decodefilename(a)
437 a = decodefilename(a)
434 except KeyError:
438 except KeyError:
435 a = None
439 a = None
436 if a is not None and not _matchtrackedpath(a, matcher):
440 if a is not None and not _matchtrackedpath(a, matcher):
437 continue
441 continue
438 yield a, b, size
442 yield a, b, size
439
443
440 def join(self, f):
444 def join(self, f):
441 return self.path + '/' + encodefilename(f)
445 return self.path + '/' + encodefilename(f)
442
446
443 def copylist(self):
447 def copylist(self):
444 return (['requires', '00changelog.i'] +
448 return (['requires', '00changelog.i'] +
445 ['store/' + f for f in _data.split()])
449 ['store/' + f for f in _data.split()])
446
450
447 class fncache(object):
451 class fncache(object):
448 # the filename used to be partially encoded
452 # the filename used to be partially encoded
449 # hence the encodedir/decodedir dance
453 # hence the encodedir/decodedir dance
450 def __init__(self, vfs):
454 def __init__(self, vfs):
451 self.vfs = vfs
455 self.vfs = vfs
452 self.entries = None
456 self.entries = None
453 self._dirty = False
457 self._dirty = False
454 # set of new additions to fncache
458 # set of new additions to fncache
455 self.addls = set()
459 self.addls = set()
456
460
457 def _load(self):
461 def _load(self):
458 '''fill the entries from the fncache file'''
462 '''fill the entries from the fncache file'''
459 self._dirty = False
463 self._dirty = False
460 try:
464 try:
461 fp = self.vfs('fncache', mode='rb')
465 fp = self.vfs('fncache', mode='rb')
462 except IOError:
466 except IOError:
463 # skip nonexistent file
467 # skip nonexistent file
464 self.entries = set()
468 self.entries = set()
465 return
469 return
466 self.entries = set(decodedir(fp.read()).splitlines())
470
471 self.entries = set()
472 chunk = b''
473 for c in iter(functools.partial(fp.read, fncache_chunksize), b''):
474 chunk += c
475 try:
476 p = chunk.rindex(b'\n')
477 self.entries.update(decodedir(chunk[:p + 1]).splitlines())
478 chunk = chunk[p + 1:]
479 except ValueError:
480 # substring '\n' not found, maybe the entry is bigger than the
481 # chunksize, so let's keep iterating
482 pass
483
467 self._checkentries(fp)
484 self._checkentries(fp)
468 fp.close()
485 fp.close()
469
486
470 def _checkentries(self, fp):
487 def _checkentries(self, fp):
471 """ make sure there is no empty string in entries """
488 """ make sure there is no empty string in entries """
472 if '' in self.entries:
489 if '' in self.entries:
473 fp.seek(0)
490 fp.seek(0)
474 for n, line in enumerate(util.iterfile(fp)):
491 for n, line in enumerate(util.iterfile(fp)):
475 if not line.rstrip('\n'):
492 if not line.rstrip('\n'):
476 t = _('invalid entry in fncache, line %d') % (n + 1)
493 t = _('invalid entry in fncache, line %d') % (n + 1)
477 raise error.Abort(t)
494 raise error.Abort(t)
478
495
479 def write(self, tr):
496 def write(self, tr):
480 if self._dirty:
497 if self._dirty:
481 assert self.entries is not None
498 assert self.entries is not None
482 self.entries = self.entries | self.addls
499 self.entries = self.entries | self.addls
483 self.addls = set()
500 self.addls = set()
484 tr.addbackup('fncache')
501 tr.addbackup('fncache')
485 fp = self.vfs('fncache', mode='wb', atomictemp=True)
502 fp = self.vfs('fncache', mode='wb', atomictemp=True)
486 if self.entries:
503 if self.entries:
487 fp.write(encodedir('\n'.join(self.entries) + '\n'))
504 fp.write(encodedir('\n'.join(self.entries) + '\n'))
488 fp.close()
505 fp.close()
489 self._dirty = False
506 self._dirty = False
490 if self.addls:
507 if self.addls:
491 # if we have just new entries, let's append them to the fncache
508 # if we have just new entries, let's append them to the fncache
492 tr.addbackup('fncache')
509 tr.addbackup('fncache')
493 fp = self.vfs('fncache', mode='ab', atomictemp=True)
510 fp = self.vfs('fncache', mode='ab', atomictemp=True)
494 if self.addls:
511 if self.addls:
495 fp.write(encodedir('\n'.join(self.addls) + '\n'))
512 fp.write(encodedir('\n'.join(self.addls) + '\n'))
496 fp.close()
513 fp.close()
497 self.entries = None
514 self.entries = None
498 self.addls = set()
515 self.addls = set()
499
516
500 def add(self, fn):
517 def add(self, fn):
501 if self.entries is None:
518 if self.entries is None:
502 self._load()
519 self._load()
503 if fn not in self.entries:
520 if fn not in self.entries:
504 self.addls.add(fn)
521 self.addls.add(fn)
505
522
506 def remove(self, fn):
523 def remove(self, fn):
507 if self.entries is None:
524 if self.entries is None:
508 self._load()
525 self._load()
509 if fn in self.addls:
526 if fn in self.addls:
510 self.addls.remove(fn)
527 self.addls.remove(fn)
511 return
528 return
512 try:
529 try:
513 self.entries.remove(fn)
530 self.entries.remove(fn)
514 self._dirty = True
531 self._dirty = True
515 except KeyError:
532 except KeyError:
516 pass
533 pass
517
534
518 def __contains__(self, fn):
535 def __contains__(self, fn):
519 if fn in self.addls:
536 if fn in self.addls:
520 return True
537 return True
521 if self.entries is None:
538 if self.entries is None:
522 self._load()
539 self._load()
523 return fn in self.entries
540 return fn in self.entries
524
541
525 def __iter__(self):
542 def __iter__(self):
526 if self.entries is None:
543 if self.entries is None:
527 self._load()
544 self._load()
528 return iter(self.entries | self.addls)
545 return iter(self.entries | self.addls)
529
546
530 class _fncachevfs(vfsmod.proxyvfs):
547 class _fncachevfs(vfsmod.proxyvfs):
531 def __init__(self, vfs, fnc, encode):
548 def __init__(self, vfs, fnc, encode):
532 vfsmod.proxyvfs.__init__(self, vfs)
549 vfsmod.proxyvfs.__init__(self, vfs)
533 self.fncache = fnc
550 self.fncache = fnc
534 self.encode = encode
551 self.encode = encode
535
552
536 def __call__(self, path, mode='r', *args, **kw):
553 def __call__(self, path, mode='r', *args, **kw):
537 encoded = self.encode(path)
554 encoded = self.encode(path)
538 if mode not in ('r', 'rb') and (path.startswith('data/') or
555 if mode not in ('r', 'rb') and (path.startswith('data/') or
539 path.startswith('meta/')):
556 path.startswith('meta/')):
540 # do not trigger a fncache load when adding a file that already is
557 # do not trigger a fncache load when adding a file that already is
541 # known to exist.
558 # known to exist.
542 notload = self.fncache.entries is None and self.vfs.exists(encoded)
559 notload = self.fncache.entries is None and self.vfs.exists(encoded)
543 if notload and 'a' in mode and not self.vfs.stat(encoded).st_size:
560 if notload and 'a' in mode and not self.vfs.stat(encoded).st_size:
544 # when appending to an existing file, if the file has size zero,
561 # when appending to an existing file, if the file has size zero,
545 # it should be considered as missing. Such zero-size files are
562 # it should be considered as missing. Such zero-size files are
546 # the result of truncation when a transaction is aborted.
563 # the result of truncation when a transaction is aborted.
547 notload = False
564 notload = False
548 if not notload:
565 if not notload:
549 self.fncache.add(path)
566 self.fncache.add(path)
550 return self.vfs(encoded, mode, *args, **kw)
567 return self.vfs(encoded, mode, *args, **kw)
551
568
552 def join(self, path):
569 def join(self, path):
553 if path:
570 if path:
554 return self.vfs.join(self.encode(path))
571 return self.vfs.join(self.encode(path))
555 else:
572 else:
556 return self.vfs.join(path)
573 return self.vfs.join(path)
557
574
558 class fncachestore(basicstore):
575 class fncachestore(basicstore):
559 def __init__(self, path, vfstype, dotencode):
576 def __init__(self, path, vfstype, dotencode):
560 if dotencode:
577 if dotencode:
561 encode = _pathencode
578 encode = _pathencode
562 else:
579 else:
563 encode = _plainhybridencode
580 encode = _plainhybridencode
564 self.encode = encode
581 self.encode = encode
565 vfs = vfstype(path + '/store')
582 vfs = vfstype(path + '/store')
566 self.path = vfs.base
583 self.path = vfs.base
567 self.pathsep = self.path + '/'
584 self.pathsep = self.path + '/'
568 self.createmode = _calcmode(vfs)
585 self.createmode = _calcmode(vfs)
569 vfs.createmode = self.createmode
586 vfs.createmode = self.createmode
570 self.rawvfs = vfs
587 self.rawvfs = vfs
571 fnc = fncache(vfs)
588 fnc = fncache(vfs)
572 self.fncache = fnc
589 self.fncache = fnc
573 self.vfs = _fncachevfs(vfs, fnc, encode)
590 self.vfs = _fncachevfs(vfs, fnc, encode)
574 self.opener = self.vfs
591 self.opener = self.vfs
575
592
576 def join(self, f):
593 def join(self, f):
577 return self.pathsep + self.encode(f)
594 return self.pathsep + self.encode(f)
578
595
579 def getsize(self, path):
596 def getsize(self, path):
580 return self.rawvfs.stat(path).st_size
597 return self.rawvfs.stat(path).st_size
581
598
582 def datafiles(self, matcher=None):
599 def datafiles(self, matcher=None):
583 for f in sorted(self.fncache):
600 for f in sorted(self.fncache):
584 if not _matchtrackedpath(f, matcher):
601 if not _matchtrackedpath(f, matcher):
585 continue
602 continue
586 ef = self.encode(f)
603 ef = self.encode(f)
587 try:
604 try:
588 yield f, ef, self.getsize(ef)
605 yield f, ef, self.getsize(ef)
589 except OSError as err:
606 except OSError as err:
590 if err.errno != errno.ENOENT:
607 if err.errno != errno.ENOENT:
591 raise
608 raise
592
609
593 def copylist(self):
610 def copylist(self):
594 d = ('narrowspec data meta dh fncache phaseroots obsstore'
611 d = ('narrowspec data meta dh fncache phaseroots obsstore'
595 ' 00manifest.d 00manifest.i 00changelog.d 00changelog.i')
612 ' 00manifest.d 00manifest.i 00changelog.d 00changelog.i')
596 return (['requires', '00changelog.i'] +
613 return (['requires', '00changelog.i'] +
597 ['store/' + f for f in d.split()])
614 ['store/' + f for f in d.split()])
598
615
599 def write(self, tr):
616 def write(self, tr):
600 self.fncache.write(tr)
617 self.fncache.write(tr)
601
618
602 def invalidatecaches(self):
619 def invalidatecaches(self):
603 self.fncache.entries = None
620 self.fncache.entries = None
604 self.fncache.addls = set()
621 self.fncache.addls = set()
605
622
606 def markremoved(self, fn):
623 def markremoved(self, fn):
607 self.fncache.remove(fn)
624 self.fncache.remove(fn)
608
625
609 def _exists(self, f):
626 def _exists(self, f):
610 ef = self.encode(f)
627 ef = self.encode(f)
611 try:
628 try:
612 self.getsize(ef)
629 self.getsize(ef)
613 return True
630 return True
614 except OSError as err:
631 except OSError as err:
615 if err.errno != errno.ENOENT:
632 if err.errno != errno.ENOENT:
616 raise
633 raise
617 # nonexistent entry
634 # nonexistent entry
618 return False
635 return False
619
636
620 def __contains__(self, path):
637 def __contains__(self, path):
621 '''Checks if the store contains path'''
638 '''Checks if the store contains path'''
622 path = "/".join(("data", path))
639 path = "/".join(("data", path))
623 # check for files (exact match)
640 # check for files (exact match)
624 e = path + '.i'
641 e = path + '.i'
625 if e in self.fncache and self._exists(e):
642 if e in self.fncache and self._exists(e):
626 return True
643 return True
627 # now check for directories (prefix match)
644 # now check for directories (prefix match)
628 if not path.endswith('/'):
645 if not path.endswith('/'):
629 path += '/'
646 path += '/'
630 for e in self.fncache:
647 for e in self.fncache:
631 if e.startswith(path) and self._exists(e):
648 if e.startswith(path) and self._exists(e):
632 return True
649 return True
633 return False
650 return False
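Between the module change above and the test changes below, a minimal hedged illustration of the boundary case the carry-over buffer exists for (the payload and variable names here are made up for the example): when the read size is smaller than an entry, no complete line is available until a later read, and the buffered remainder is what keeps entries from being split. The test file below forces exactly this situation by shrinking store.fncache_chunksize to a single byte.

    from __future__ import absolute_import

    import io

    payload = b'data/a.i\ndata/a.i.hg/b.i\n'
    fp = io.BytesIO(payload)

    entries = set()
    chunk = b''
    for c in iter(lambda: fp.read(1), b''):  # force one-byte reads
        chunk += c
        if b'\n' in chunk:
            # flush every complete line, keep any trailing partial line
            p = chunk.rindex(b'\n')
            entries.update(chunk[:p + 1].splitlines())
            chunk = chunk[p + 1:]

    assert entries == {b'data/a.i', b'data/a.i.hg/b.i'}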
@@ -1,518 +1,532 @@
1 #require repofncache
1 #require repofncache
2
2
3 An extension which will set fncache chunksize to 1 byte to make sure that logic
4 does not break
5
6 $ cat > chunksize.py <<EOF
7 > from __future__ import absolute_import
8 > from mercurial import store
9 > store.fncache_chunksize = 1
10 > EOF
11
12 $ cat >> $HGRCPATH <<EOF
13 > [extensions]
14 > chunksize = $TESTTMP/chunksize.py
15 > EOF
16
3 Init repo1:
17 Init repo1:
4
18
5 $ hg init repo1
19 $ hg init repo1
6 $ cd repo1
20 $ cd repo1
7 $ echo "some text" > a
21 $ echo "some text" > a
8 $ hg add
22 $ hg add
9 adding a
23 adding a
10 $ hg ci -m first
24 $ hg ci -m first
11 $ cat .hg/store/fncache | sort
25 $ cat .hg/store/fncache | sort
12 data/a.i
26 data/a.i
13
27
14 Testing a.i/b:
28 Testing a.i/b:
15
29
16 $ mkdir a.i
30 $ mkdir a.i
17 $ echo "some other text" > a.i/b
31 $ echo "some other text" > a.i/b
18 $ hg add
32 $ hg add
19 adding a.i/b
33 adding a.i/b
20 $ hg ci -m second
34 $ hg ci -m second
21 $ cat .hg/store/fncache | sort
35 $ cat .hg/store/fncache | sort
22 data/a.i
36 data/a.i
23 data/a.i.hg/b.i
37 data/a.i.hg/b.i
24
38
25 Testing a.i.hg/c:
39 Testing a.i.hg/c:
26
40
27 $ mkdir a.i.hg
41 $ mkdir a.i.hg
28 $ echo "yet another text" > a.i.hg/c
42 $ echo "yet another text" > a.i.hg/c
29 $ hg add
43 $ hg add
30 adding a.i.hg/c
44 adding a.i.hg/c
31 $ hg ci -m third
45 $ hg ci -m third
32 $ cat .hg/store/fncache | sort
46 $ cat .hg/store/fncache | sort
33 data/a.i
47 data/a.i
34 data/a.i.hg.hg/c.i
48 data/a.i.hg.hg/c.i
35 data/a.i.hg/b.i
49 data/a.i.hg/b.i
36
50
37 Testing verify:
51 Testing verify:
38
52
39 $ hg verify
53 $ hg verify
40 checking changesets
54 checking changesets
41 checking manifests
55 checking manifests
42 crosschecking files in changesets and manifests
56 crosschecking files in changesets and manifests
43 checking files
57 checking files
44 checked 3 changesets with 3 changes to 3 files
58 checked 3 changesets with 3 changes to 3 files
45
59
46 $ rm .hg/store/fncache
60 $ rm .hg/store/fncache
47
61
48 $ hg verify
62 $ hg verify
49 checking changesets
63 checking changesets
50 checking manifests
64 checking manifests
51 crosschecking files in changesets and manifests
65 crosschecking files in changesets and manifests
52 checking files
66 checking files
53 warning: revlog 'data/a.i' not in fncache!
67 warning: revlog 'data/a.i' not in fncache!
54 warning: revlog 'data/a.i.hg/c.i' not in fncache!
68 warning: revlog 'data/a.i.hg/c.i' not in fncache!
55 warning: revlog 'data/a.i/b.i' not in fncache!
69 warning: revlog 'data/a.i/b.i' not in fncache!
56 checked 3 changesets with 3 changes to 3 files
70 checked 3 changesets with 3 changes to 3 files
57 3 warnings encountered!
71 3 warnings encountered!
58 hint: run "hg debugrebuildfncache" to recover from corrupt fncache
72 hint: run "hg debugrebuildfncache" to recover from corrupt fncache
59
73
60 Follow the hint to make sure it works
74 Follow the hint to make sure it works
61
75
62 $ hg debugrebuildfncache
76 $ hg debugrebuildfncache
63 adding data/a.i
77 adding data/a.i
64 adding data/a.i.hg/c.i
78 adding data/a.i.hg/c.i
65 adding data/a.i/b.i
79 adding data/a.i/b.i
66 3 items added, 0 removed from fncache
80 3 items added, 0 removed from fncache
67
81
68 $ hg verify
82 $ hg verify
69 checking changesets
83 checking changesets
70 checking manifests
84 checking manifests
71 crosschecking files in changesets and manifests
85 crosschecking files in changesets and manifests
72 checking files
86 checking files
73 checked 3 changesets with 3 changes to 3 files
87 checked 3 changesets with 3 changes to 3 files
74
88
75 $ cd ..
89 $ cd ..
76
90
77 Non store repo:
91 Non store repo:
78
92
79 $ hg --config format.usestore=False init foo
93 $ hg --config format.usestore=False init foo
80 $ cd foo
94 $ cd foo
81 $ mkdir tst.d
95 $ mkdir tst.d
82 $ echo foo > tst.d/foo
96 $ echo foo > tst.d/foo
83 $ hg ci -Amfoo
97 $ hg ci -Amfoo
84 adding tst.d/foo
98 adding tst.d/foo
85 $ find .hg | sort
99 $ find .hg | sort
86 .hg
100 .hg
87 .hg/00changelog.i
101 .hg/00changelog.i
88 .hg/00manifest.i
102 .hg/00manifest.i
89 .hg/cache
103 .hg/cache
90 .hg/cache/branch2-served
104 .hg/cache/branch2-served
91 .hg/cache/rbc-names-v1
105 .hg/cache/rbc-names-v1
92 .hg/cache/rbc-revs-v1
106 .hg/cache/rbc-revs-v1
93 .hg/data
107 .hg/data
94 .hg/data/tst.d.hg
108 .hg/data/tst.d.hg
95 .hg/data/tst.d.hg/foo.i
109 .hg/data/tst.d.hg/foo.i
96 .hg/dirstate
110 .hg/dirstate
97 .hg/fsmonitor.state (fsmonitor !)
111 .hg/fsmonitor.state (fsmonitor !)
98 .hg/last-message.txt
112 .hg/last-message.txt
99 .hg/phaseroots
113 .hg/phaseroots
100 .hg/requires
114 .hg/requires
101 .hg/undo
115 .hg/undo
102 .hg/undo.backup.dirstate
116 .hg/undo.backup.dirstate
103 .hg/undo.backupfiles
117 .hg/undo.backupfiles
104 .hg/undo.bookmarks
118 .hg/undo.bookmarks
105 .hg/undo.branch
119 .hg/undo.branch
106 .hg/undo.desc
120 .hg/undo.desc
107 .hg/undo.dirstate
121 .hg/undo.dirstate
108 .hg/undo.phaseroots
122 .hg/undo.phaseroots
109 .hg/wcache
123 .hg/wcache
110 .hg/wcache/checkisexec (execbit !)
124 .hg/wcache/checkisexec (execbit !)
111 .hg/wcache/checklink (symlink !)
125 .hg/wcache/checklink (symlink !)
112 .hg/wcache/checklink-target (symlink !)
126 .hg/wcache/checklink-target (symlink !)
113 .hg/wcache/manifestfulltextcache (reporevlogstore !)
127 .hg/wcache/manifestfulltextcache (reporevlogstore !)
114 $ cd ..
128 $ cd ..
115
129
116 Non fncache repo:
130 Non fncache repo:
117
131
118 $ hg --config format.usefncache=False init bar
132 $ hg --config format.usefncache=False init bar
119 $ cd bar
133 $ cd bar
120 $ mkdir tst.d
134 $ mkdir tst.d
121 $ echo foo > tst.d/Foo
135 $ echo foo > tst.d/Foo
122 $ hg ci -Amfoo
136 $ hg ci -Amfoo
123 adding tst.d/Foo
137 adding tst.d/Foo
124 $ find .hg | sort
138 $ find .hg | sort
125 .hg
139 .hg
126 .hg/00changelog.i
140 .hg/00changelog.i
127 .hg/cache
141 .hg/cache
128 .hg/cache/branch2-served
142 .hg/cache/branch2-served
129 .hg/cache/rbc-names-v1
143 .hg/cache/rbc-names-v1
130 .hg/cache/rbc-revs-v1
144 .hg/cache/rbc-revs-v1
131 .hg/dirstate
145 .hg/dirstate
132 .hg/fsmonitor.state (fsmonitor !)
146 .hg/fsmonitor.state (fsmonitor !)
133 .hg/last-message.txt
147 .hg/last-message.txt
134 .hg/requires
148 .hg/requires
135 .hg/store
149 .hg/store
136 .hg/store/00changelog.i
150 .hg/store/00changelog.i
137 .hg/store/00manifest.i
151 .hg/store/00manifest.i
138 .hg/store/data
152 .hg/store/data
139 .hg/store/data/tst.d.hg
153 .hg/store/data/tst.d.hg
140 .hg/store/data/tst.d.hg/_foo.i
154 .hg/store/data/tst.d.hg/_foo.i
141 .hg/store/phaseroots
155 .hg/store/phaseroots
142 .hg/store/undo
156 .hg/store/undo
143 .hg/store/undo.backupfiles
157 .hg/store/undo.backupfiles
144 .hg/store/undo.phaseroots
158 .hg/store/undo.phaseroots
145 .hg/undo.backup.dirstate
159 .hg/undo.backup.dirstate
146 .hg/undo.bookmarks
160 .hg/undo.bookmarks
147 .hg/undo.branch
161 .hg/undo.branch
148 .hg/undo.desc
162 .hg/undo.desc
149 .hg/undo.dirstate
163 .hg/undo.dirstate
150 .hg/wcache
164 .hg/wcache
151 .hg/wcache/checkisexec (execbit !)
165 .hg/wcache/checkisexec (execbit !)
152 .hg/wcache/checklink (symlink !)
166 .hg/wcache/checklink (symlink !)
153 .hg/wcache/checklink-target (symlink !)
167 .hg/wcache/checklink-target (symlink !)
154 .hg/wcache/manifestfulltextcache (reporevlogstore !)
168 .hg/wcache/manifestfulltextcache (reporevlogstore !)
155 $ cd ..
169 $ cd ..
156
170
157 Encoding of reserved / long paths in the store
171 Encoding of reserved / long paths in the store
158
172
159 $ hg init r2
173 $ hg init r2
160 $ cd r2
174 $ cd r2
161 $ cat <<EOF > .hg/hgrc
175 $ cat <<EOF > .hg/hgrc
162 > [ui]
176 > [ui]
163 > portablefilenames = ignore
177 > portablefilenames = ignore
164 > EOF
178 > EOF
165
179
166 $ hg import -q --bypass - <<EOF
180 $ hg import -q --bypass - <<EOF
167 > # HG changeset patch
181 > # HG changeset patch
168 > # User test
182 > # User test
169 > # Date 0 0
183 > # Date 0 0
170 > # Node ID 1c7a2f7cb77be1a0def34e4c7cabc562ad98fbd7
184 > # Node ID 1c7a2f7cb77be1a0def34e4c7cabc562ad98fbd7
171 > # Parent 0000000000000000000000000000000000000000
185 > # Parent 0000000000000000000000000000000000000000
172 > 1
186 > 1
173 >
187 >
174 > diff --git a/12345678/12345678/12345678/12345678/12345678/12345678/12345678/12345/xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-12.3456789-12345-ABCDEFGHIJKLMNOPRSTUVWXYZ-abcdefghjiklmnopqrstuvwxyz b/12345678/12345678/12345678/12345678/12345678/12345678/12345678/12345/xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-12.3456789-12345-ABCDEFGHIJKLMNOPRSTUVWXYZ-abcdefghjiklmnopqrstuvwxyz
188 > diff --git a/12345678/12345678/12345678/12345678/12345678/12345678/12345678/12345/xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-12.3456789-12345-ABCDEFGHIJKLMNOPRSTUVWXYZ-abcdefghjiklmnopqrstuvwxyz b/12345678/12345678/12345678/12345678/12345678/12345678/12345678/12345/xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-12.3456789-12345-ABCDEFGHIJKLMNOPRSTUVWXYZ-abcdefghjiklmnopqrstuvwxyz
175 > new file mode 100644
189 > new file mode 100644
176 > --- /dev/null
190 > --- /dev/null
177 > +++ b/12345678/12345678/12345678/12345678/12345678/12345678/12345678/12345/xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-12.3456789-12345-ABCDEFGHIJKLMNOPRSTUVWXYZ-abcdefghjiklmnopqrstuvwxyz
191 > +++ b/12345678/12345678/12345678/12345678/12345678/12345678/12345678/12345/xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-12.3456789-12345-ABCDEFGHIJKLMNOPRSTUVWXYZ-abcdefghjiklmnopqrstuvwxyz
178 > @@ -0,0 +1,1 @@
192 > @@ -0,0 +1,1 @@
179 > +foo
193 > +foo
180 > diff --git a/AUX/SECOND/X.PRN/FOURTH/FI:FTH/SIXTH/SEVENTH/EIGHTH/NINETH/TENTH/ELEVENTH/LOREMIPSUM.TXT b/AUX/SECOND/X.PRN/FOURTH/FI:FTH/SIXTH/SEVENTH/EIGHTH/NINETH/TENTH/ELEVENTH/LOREMIPSUM.TXT
194 > diff --git a/AUX/SECOND/X.PRN/FOURTH/FI:FTH/SIXTH/SEVENTH/EIGHTH/NINETH/TENTH/ELEVENTH/LOREMIPSUM.TXT b/AUX/SECOND/X.PRN/FOURTH/FI:FTH/SIXTH/SEVENTH/EIGHTH/NINETH/TENTH/ELEVENTH/LOREMIPSUM.TXT
181 > new file mode 100644
195 > new file mode 100644
182 > --- /dev/null
196 > --- /dev/null
183 > +++ b/AUX/SECOND/X.PRN/FOURTH/FI:FTH/SIXTH/SEVENTH/EIGHTH/NINETH/TENTH/ELEVENTH/LOREMIPSUM.TXT
197 > +++ b/AUX/SECOND/X.PRN/FOURTH/FI:FTH/SIXTH/SEVENTH/EIGHTH/NINETH/TENTH/ELEVENTH/LOREMIPSUM.TXT
184 > @@ -0,0 +1,1 @@
198 > @@ -0,0 +1,1 @@
185 > +foo
199 > +foo
186 > diff --git a/Project Planning/Resources/AnotherLongDirectoryName/Followedbyanother/AndAnother/AndThenAnExtremelyLongFileName.txt b/Project Planning/Resources/AnotherLongDirectoryName/Followedbyanother/AndAnother/AndThenAnExtremelyLongFileName.txt
200 > diff --git a/Project Planning/Resources/AnotherLongDirectoryName/Followedbyanother/AndAnother/AndThenAnExtremelyLongFileName.txt b/Project Planning/Resources/AnotherLongDirectoryName/Followedbyanother/AndAnother/AndThenAnExtremelyLongFileName.txt
187 > new file mode 100644
201 > new file mode 100644
188 > --- /dev/null
202 > --- /dev/null
189 > +++ b/Project Planning/Resources/AnotherLongDirectoryName/Followedbyanother/AndAnother/AndThenAnExtremelyLongFileName.txt
203 > +++ b/Project Planning/Resources/AnotherLongDirectoryName/Followedbyanother/AndAnother/AndThenAnExtremelyLongFileName.txt
190 > @@ -0,0 +1,1 @@
204 > @@ -0,0 +1,1 @@
191 > +foo
205 > +foo
192 > diff --git a/bla.aux/prn/PRN/lpt/com3/nul/coma/foo.NUL/normal.c b/bla.aux/prn/PRN/lpt/com3/nul/coma/foo.NUL/normal.c
206 > diff --git a/bla.aux/prn/PRN/lpt/com3/nul/coma/foo.NUL/normal.c b/bla.aux/prn/PRN/lpt/com3/nul/coma/foo.NUL/normal.c
193 > new file mode 100644
207 > new file mode 100644
194 > --- /dev/null
208 > --- /dev/null
195 > +++ b/bla.aux/prn/PRN/lpt/com3/nul/coma/foo.NUL/normal.c
209 > +++ b/bla.aux/prn/PRN/lpt/com3/nul/coma/foo.NUL/normal.c
196 > @@ -0,0 +1,1 @@
210 > @@ -0,0 +1,1 @@
197 > +foo
211 > +foo
198 > diff --git a/enterprise/openesbaddons/contrib-imola/corba-bc/netbeansplugin/wsdlExtension/src/main/java/META-INF/services/org.netbeans.modules.xml.wsdl.bindingsupport.spi.ExtensibilityElementTemplateProvider b/enterprise/openesbaddons/contrib-imola/corba-bc/netbeansplugin/wsdlExtension/src/main/java/META-INF/services/org.netbeans.modules.xml.wsdl.bindingsupport.spi.ExtensibilityElementTemplateProvider
212 > diff --git a/enterprise/openesbaddons/contrib-imola/corba-bc/netbeansplugin/wsdlExtension/src/main/java/META-INF/services/org.netbeans.modules.xml.wsdl.bindingsupport.spi.ExtensibilityElementTemplateProvider b/enterprise/openesbaddons/contrib-imola/corba-bc/netbeansplugin/wsdlExtension/src/main/java/META-INF/services/org.netbeans.modules.xml.wsdl.bindingsupport.spi.ExtensibilityElementTemplateProvider
199 > new file mode 100644
213 > new file mode 100644
200 > --- /dev/null
214 > --- /dev/null
201 > +++ b/enterprise/openesbaddons/contrib-imola/corba-bc/netbeansplugin/wsdlExtension/src/main/java/META-INF/services/org.netbeans.modules.xml.wsdl.bindingsupport.spi.ExtensibilityElementTemplateProvider
215 > +++ b/enterprise/openesbaddons/contrib-imola/corba-bc/netbeansplugin/wsdlExtension/src/main/java/META-INF/services/org.netbeans.modules.xml.wsdl.bindingsupport.spi.ExtensibilityElementTemplateProvider
202 > @@ -0,0 +1,1 @@
216 > @@ -0,0 +1,1 @@
203 > +foo
217 > +foo
204 > EOF
218 > EOF
205
219
206 $ find .hg/store -name *.i | sort
220 $ find .hg/store -name *.i | sort
207 .hg/store/00changelog.i
221 .hg/store/00changelog.i
208 .hg/store/00manifest.i
222 .hg/store/00manifest.i
209 .hg/store/data/bla.aux/pr~6e/_p_r_n/lpt/co~6d3/nu~6c/coma/foo._n_u_l/normal.c.i
223 .hg/store/data/bla.aux/pr~6e/_p_r_n/lpt/co~6d3/nu~6c/coma/foo._n_u_l/normal.c.i
210 .hg/store/dh/12345678/12345678/12345678/12345678/12345678/12345678/12345678/12345/xxxxxx168e07b38e65eff86ab579afaaa8e30bfbe0f35f.i
224 .hg/store/dh/12345678/12345678/12345678/12345678/12345678/12345678/12345678/12345/xxxxxx168e07b38e65eff86ab579afaaa8e30bfbe0f35f.i
211 .hg/store/dh/au~78/second/x.prn/fourth/fi~3afth/sixth/seventh/eighth/nineth/tenth/loremia20419e358ddff1bf8751e38288aff1d7c32ec05.i
225 .hg/store/dh/au~78/second/x.prn/fourth/fi~3afth/sixth/seventh/eighth/nineth/tenth/loremia20419e358ddff1bf8751e38288aff1d7c32ec05.i
212 .hg/store/dh/enterpri/openesba/contrib-/corba-bc/netbeans/wsdlexte/src/main/java/org.net7018f27961fdf338a598a40c4683429e7ffb9743.i
226 .hg/store/dh/enterpri/openesba/contrib-/corba-bc/netbeans/wsdlexte/src/main/java/org.net7018f27961fdf338a598a40c4683429e7ffb9743.i
213 .hg/store/dh/project_/resource/anotherl/followed/andanoth/andthenanextremelylongfilename0d8e1f4187c650e2f1fdca9fd90f786bc0976b6b.i
227 .hg/store/dh/project_/resource/anotherl/followed/andanoth/andthenanextremelylongfilename0d8e1f4187c650e2f1fdca9fd90f786bc0976b6b.i
214
228
215 $ cd ..
229 $ cd ..
216
230
217 Aborting lock does not prevent fncache writes
231 Aborting lock does not prevent fncache writes
218
232
219 $ cat > exceptionext.py <<EOF
233 $ cat > exceptionext.py <<EOF
220 > from __future__ import absolute_import
234 > from __future__ import absolute_import
221 > import os
235 > import os
222 > from mercurial import commands, error, extensions
236 > from mercurial import commands, error, extensions
223 >
237 >
224 > def lockexception(orig, vfs, lockname, wait, releasefn, *args, **kwargs):
238 > def lockexception(orig, vfs, lockname, wait, releasefn, *args, **kwargs):
225 > def releasewrap():
239 > def releasewrap():
226 > l.held = False # ensure __del__ is a noop
240 > l.held = False # ensure __del__ is a noop
227 > raise error.Abort("forced lock failure")
241 > raise error.Abort("forced lock failure")
228 > l = orig(vfs, lockname, wait, releasewrap, *args, **kwargs)
242 > l = orig(vfs, lockname, wait, releasewrap, *args, **kwargs)
229 > return l
243 > return l
230 >
244 >
231 > def reposetup(ui, repo):
245 > def reposetup(ui, repo):
232 > extensions.wrapfunction(repo, '_lock', lockexception)
246 > extensions.wrapfunction(repo, '_lock', lockexception)
233 >
247 >
234 > cmdtable = {}
248 > cmdtable = {}
235 >
249 >
236 > # wrap "commit" command to prevent wlock from being '__del__()'-ed
250 > # wrap "commit" command to prevent wlock from being '__del__()'-ed
237 > # at the end of dispatching (for intentional "forced lcok failure")
251 > # at the end of dispatching (for intentional "forced lcok failure")
238 > def commitwrap(orig, ui, repo, *pats, **opts):
252 > def commitwrap(orig, ui, repo, *pats, **opts):
239 > repo = repo.unfiltered() # to use replaced repo._lock certainly
253 > repo = repo.unfiltered() # to use replaced repo._lock certainly
240 > wlock = repo.wlock()
254 > wlock = repo.wlock()
241 > try:
255 > try:
242 > return orig(ui, repo, *pats, **opts)
256 > return orig(ui, repo, *pats, **opts)
243 > finally:
257 > finally:
244 > # multiple 'relase()' is needed for complete releasing wlock,
258 > # multiple 'relase()' is needed for complete releasing wlock,
245 > # because "forced" abort at last releasing store lock
259 > # because "forced" abort at last releasing store lock
246 > # prevents wlock from being released at same 'lockmod.release()'
260 > # prevents wlock from being released at same 'lockmod.release()'
247 > for i in range(wlock.held):
261 > for i in range(wlock.held):
248 > wlock.release()
262 > wlock.release()
249 >
263 >
250 > def extsetup(ui):
264 > def extsetup(ui):
251 > extensions.wrapcommand(commands.table, b"commit", commitwrap)
265 > extensions.wrapcommand(commands.table, b"commit", commitwrap)
252 > EOF
266 > EOF
253 $ extpath=`pwd`/exceptionext.py
267 $ extpath=`pwd`/exceptionext.py
254 $ hg init fncachetxn
268 $ hg init fncachetxn
255 $ cd fncachetxn
269 $ cd fncachetxn
256 $ printf "[extensions]\nexceptionext=$extpath\n" >> .hg/hgrc
270 $ printf "[extensions]\nexceptionext=$extpath\n" >> .hg/hgrc
257 $ touch y
271 $ touch y
258 $ hg ci -qAm y
272 $ hg ci -qAm y
259 abort: forced lock failure
273 abort: forced lock failure
260 [255]
274 [255]
261 $ cat .hg/store/fncache
275 $ cat .hg/store/fncache
262 data/y.i
276 data/y.i
263
277
264 Aborting transaction prevents fncache change
278 Aborting transaction prevents fncache change
265
279
266 $ cat > ../exceptionext.py <<EOF
280 $ cat > ../exceptionext.py <<EOF
267 > from __future__ import absolute_import
281 > from __future__ import absolute_import
268 > import os
282 > import os
269 > from mercurial import commands, error, extensions, localrepo
283 > from mercurial import commands, error, extensions, localrepo
270 >
284 >
271 > def wrapper(orig, self, *args, **kwargs):
285 > def wrapper(orig, self, *args, **kwargs):
272 > tr = orig(self, *args, **kwargs)
286 > tr = orig(self, *args, **kwargs)
273 > def fail(tr):
287 > def fail(tr):
274 > raise error.Abort(b"forced transaction failure")
288 > raise error.Abort(b"forced transaction failure")
275 > # zzz prefix to ensure it sorted after store.write
289 > # zzz prefix to ensure it sorted after store.write
276 > tr.addfinalize(b'zzz-forcefails', fail)
290 > tr.addfinalize(b'zzz-forcefails', fail)
277 > return tr
291 > return tr
278 >
292 >
279 > def uisetup(ui):
293 > def uisetup(ui):
280 > extensions.wrapfunction(
294 > extensions.wrapfunction(
281 > localrepo.localrepository, b'transaction', wrapper)
295 > localrepo.localrepository, b'transaction', wrapper)
282 >
296 >
283 > cmdtable = {}
297 > cmdtable = {}
284 >
298 >
285 > EOF
299 > EOF

Clean cached version
  $ rm -f "${extpath}c"
  $ rm -Rf "`dirname $extpath`/__pycache__"

  $ touch z
  $ hg ci -qAm z
  transaction abort!
  rollback completed
  abort: forced transaction failure
  [255]
  $ cat .hg/store/fncache
  data/y.i

Aborted transactions can be recovered later

  $ cat > ../exceptionext.py <<EOF
  > from __future__ import absolute_import
  > import os
  > from mercurial import (
  >     commands,
  >     error,
  >     extensions,
  >     localrepo,
  >     transaction,
  > )
  >
  > def trwrapper(orig, self, *args, **kwargs):
  >     tr = orig(self, *args, **kwargs)
  >     def fail(tr):
  >         raise error.Abort(b"forced transaction failure")
  >     # zzz prefix to ensure it sorts after store.write
  >     tr.addfinalize(b'zzz-forcefails', fail)
  >     return tr
  >
  > def abortwrapper(orig, self, *args, **kwargs):
  >     raise error.Abort(b"forced transaction failure")
  >
  > def uisetup(ui):
  >     extensions.wrapfunction(localrepo.localrepository, 'transaction',
  >         trwrapper)
  >     extensions.wrapfunction(transaction.transaction, '_abort',
  >         abortwrapper)
  >
  > cmdtable = {}
  >
  > EOF

Clean cached versions
  $ rm -f "${extpath}c"
  $ rm -Rf "`dirname $extpath`/__pycache__"

  $ hg up -q 1
  $ touch z
  $ hg ci -qAm z 2>/dev/null
  [255]
  $ cat .hg/store/fncache | sort
  data/y.i
  data/z.i
  $ hg recover
  rolling back interrupted transaction
  checking changesets
  checking manifests
  crosschecking files in changesets and manifests
  checking files
  checked 1 changesets with 1 changes to 1 files
  $ cat .hg/store/fncache
  data/y.i

  $ cd ..

debugrebuildfncache does nothing unless repo has fncache requirement

  $ hg --config format.usefncache=false init nofncache
  $ cd nofncache
  $ hg debugrebuildfncache
  (not rebuilding fncache because repository does not support fncache)

  $ cd ..

debugrebuildfncache works on empty repository

  $ hg init empty
  $ cd empty
  $ hg debugrebuildfncache
  fncache already up to date
  $ cd ..

debugrebuildfncache on an up to date repository no-ops

  $ hg init repo
  $ cd repo
  $ echo initial > foo
  $ echo initial > .bar
  $ hg commit -A -m initial
  adding .bar
  adding foo

  $ cat .hg/store/fncache | sort
  data/.bar.i
  data/foo.i

  $ hg debugrebuildfncache
  fncache already up to date

debugrebuildfncache restores deleted fncache file

  $ rm -f .hg/store/fncache
  $ hg debugrebuildfncache
  adding data/.bar.i
  adding data/foo.i
  2 items added, 0 removed from fncache

  $ cat .hg/store/fncache | sort
  data/.bar.i
  data/foo.i

Rebuild after rebuild should no-op

  $ hg debugrebuildfncache
  fncache already up to date

A single missing file should get restored, an extra file should be removed

  $ cat > .hg/store/fncache << EOF
  > data/foo.i
  > data/bad-entry.i
  > EOF

  $ hg debugrebuildfncache
  removing data/bad-entry.i
  adding data/.bar.i
  1 items added, 1 removed from fncache

  $ cat .hg/store/fncache | sort
  data/.bar.i
  data/foo.i

  $ cd ..
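
The "adding"/"removing" lines above come from reconciling what the store
actually contains with what the fncache currently lists. A rough sketch of that
reconciliation step, assuming only a set of expected entries walked from the
revlogs on disk and the set of lines read from .hg/store/fncache (the names are
illustrative, not the debugrebuildfncache implementation):

    def reconcile(expected, current):
        """Return (toadd, toremove) so that current ends up equal to expected."""
        toadd = sorted(expected - current)
        toremove = sorted(current - expected)
        return toadd, toremove

    expected = {b'data/foo.i', b'data/.bar.i'}      # walked from the store
    current = {b'data/foo.i', b'data/bad-entry.i'}  # read from the fncache file
    toadd, toremove = reconcile(expected, current)
    # toadd == [b'data/.bar.i'], toremove == [b'data/bad-entry.i'], matching
    # the "1 items added, 1 removed from fncache" output above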

Try a simple variation without dotencode to ensure fncache is ignorant of encoding

  $ hg --config format.dotencode=false init nodotencode
  $ cd nodotencode
  $ echo initial > foo
  $ echo initial > .bar
  $ hg commit -A -m initial
  adding .bar
  adding foo

  $ cat .hg/store/fncache | sort
  data/.bar.i
  data/foo.i

  $ rm .hg/store/fncache
  $ hg debugrebuildfncache
  adding data/.bar.i
  adding data/foo.i
  2 items added, 0 removed from fncache

  $ cat .hg/store/fncache | sort
  data/.bar.i
  data/foo.i

  $ cd ..
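
The point of this variation is that the fncache always records the logical
store path; any filesystem encoding (dotencode or not) is applied only when
that path is mapped to an on-disk filename, which is why the cache contents are
identical in both repositories. A hedged sketch of that split, using a
deliberately simplified stand-in for the real encoding (the actual hybrid
encoding also handles case folding, reserved names and long paths):

    def toydotencode(path):
        # escape a leading '.' in each component so 'data/.bar.i' does not
        # become a hidden file on disk; simplified stand-in, not store.py code
        return b'/'.join(b'~2e' + p[1:] if p.startswith(b'.') else p
                         for p in path.split(b'/'))

    entry = b'data/.bar.i'        # what both repos record in the fncache
    ondisk = toydotencode(entry)  # b'data/~2ebar.i' with this toy encoding
    # without dotencode the on-disk name simply stays b'data/.bar.i'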

In repositories that have accumulated a large number of files over time, the
fncache file is going to be large. If we can avoid loading it, so much the
better. The cache should not be loaded when committing changes to existing
files, or when unbundling changesets that only contain changes to existing
files:

  $ cat > fncacheloadwarn.py << EOF
  > from __future__ import absolute_import
  > from mercurial import extensions, localrepo
  >
  > def extsetup(ui):
  >     def wrapstore(orig, requirements, *args):
  >         store = orig(requirements, *args)
  >         if b'store' in requirements and b'fncache' in requirements:
  >             instrumentfncachestore(store, ui)
  >         return store
  >     extensions.wrapfunction(localrepo, 'makestore', wrapstore)
  >
  > def instrumentfncachestore(fncachestore, ui):
  >     class instrumentedfncache(type(fncachestore.fncache)):
  >         def _load(self):
  >             ui.warn(b'fncache load triggered!\n')
  >             super(instrumentedfncache, self)._load()
  >     fncachestore.fncache.__class__ = instrumentedfncache
  > EOF

  $ fncachextpath=`pwd`/fncacheloadwarn.py
  $ hg init nofncacheload
  $ cd nofncacheload
  $ printf "[extensions]\nfncacheloadwarn=$fncachextpath\n" >> .hg/hgrc

A new file should trigger a load, as we'd want to update the fncache set in that case:

  $ touch foo
  $ hg ci -qAm foo
  fncache load triggered!

But modifying that file should not:

  $ echo bar >> foo
  $ hg ci -qm foo

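One way to read the two results above: only the creation of a new store file
can require a new fncache entry, so a write that merely appends to an existing
revlog never needs to consult the cache at all. A minimal sketch of that gate,
with hypothetical names (this is not Mercurial's store or vfs code, just the
shape of the behaviour the instrumented _load is probing):

    import os

    def needsfncacheentry(storepath, relpath):
        """Return True if writing relpath may introduce a new tracked file."""
        if not relpath.endswith(('.i', '.d')):
            return False                     # not a revlog file at all
        return not os.path.exists(os.path.join(storepath, relpath))

    def writestorefile(storepath, relpath, registerfn, data):
        # registerfn is the expensive path: it loads the fncache and records
        # relpath; appends to already-known files skip it entirely
        if needsfncacheentry(storepath, relpath):
            registerfn(relpath)
        with open(os.path.join(storepath, relpath), 'ab') as fp:
            fp.write(data)
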
If a transaction has been aborted, the zero-size truncated index file will
not prevent the fncache from being loaded; rather than actually abort
a transaction, we simulate the situation by creating a zero-size index file:

  $ touch .hg/store/data/bar.i
  $ touch bar
  $ hg ci -qAm bar
  fncache load triggered!

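The zero-size index above is why a plain existence check is not enough: an
aborted transaction can leave behind an empty data/bar.i, and the file still
has to be treated as new so its fncache entry is (re)added. In terms of the
earlier sketch, the gate has to be tightened roughly like this (again a
hypothetical helper, not the real implementation):

    import os

    def needsfncacheentry(storepath, relpath):
        """Treat a missing *or* zero-size index as a brand-new tracked file."""
        if not relpath.endswith(('.i', '.d')):
            return False
        try:
            # an empty leftover from an aborted transaction counts as new
            return os.path.getsize(os.path.join(storepath, relpath)) == 0
        except OSError:
            return True                      # a missing file counts as new too
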
Unbundling should follow the same rules; existing files should not cause a load:

  $ hg clone -q . tobundle
  $ echo 'new line' > tobundle/bar
  $ hg -R tobundle ci -qm bar
  $ hg -R tobundle bundle -q barupdated.hg
  $ hg unbundle -q barupdated.hg

but adding new files should:

  $ touch tobundle/newfile
  $ hg -R tobundle ci -qAm newfile
  $ hg -R tobundle bundle -q newfile.hg
  $ hg unbundle -q newfile.hg
  fncache load triggered!

  $ cd ..