shallowutil: narrow scope of try/except block...
Augie Fackler
r48345:db31bafa default
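
This commit narrows the try/except in parsesizeflags() (see the hunk below): only raw.index(b'\0'), the lookup that is expected to raise ValueError, stays guarded, while the header parsing moves out from under the handler into an `if True:` block that preserves the old indentation. A ValueError raised by the later int() conversions now propagates instead of being re-reported as a header error. A minimal, self-contained sketch of the pattern, simplified to str input with a stand-in exception (illustrative, not the patch itself):

class BadHeader(Exception):
    """stand-in for BadRemotefilelogHeader"""


def parsesize_before(raw):
    # old shape: the whole parse sits inside the try, so a ValueError
    # from int() is also masked as a bad-header error
    try:
        index = raw.index('\0')
        size = int(raw[:index])
    except ValueError:
        raise BadHeader('illegal format')
    return index + 1, size


def parsesize_after(raw):
    # new shape: only the index lookup is guarded; `if True:` keeps the
    # body at its old indentation
    try:
        index = raw.index('\0')
    except ValueError:
        raise BadHeader('illegal format')
    if True:
        size = int(raw[:index])  # a ValueError here now propagates as-is
    return index + 1, size
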
@@ -1,544 +1,545 @@
# shallowutil.py -- remotefilelog utilities
#
# Copyright 2014 Facebook, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from __future__ import absolute_import

import collections
import errno
import os
import stat
import struct
import tempfile

from mercurial.i18n import _
from mercurial.pycompat import open
from mercurial.node import hex
from mercurial import (
    error,
    pycompat,
    revlog,
    util,
)
from mercurial.utils import (
    hashutil,
    storageutil,
    stringutil,
)
from . import constants

if not pycompat.iswindows:
    import grp


def isenabled(repo):
    """returns whether the repository is remotefilelog enabled or not"""
    return constants.SHALLOWREPO_REQUIREMENT in repo.requirements


def getcachekey(reponame, file, id):
    pathhash = hex(hashutil.sha1(file).digest())
    return os.path.join(reponame, pathhash[:2], pathhash[2:], id)


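# Illustrative note (not part of the upstream file): getcachekey shards
# the shared cache by the sha1 of the file path.  For file=b'foo', the
# sha1 hex is "0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33", so the key is
# "<reponame>/0b/eec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33/<id>"; the
# two-character prefix fans the cache out across at most 256
# subdirectories.
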
def getlocalkey(file, id):
    pathhash = hex(hashutil.sha1(file).digest())
    return os.path.join(pathhash, id)


def getcachepath(ui, allowempty=False):
    cachepath = ui.config(b"remotefilelog", b"cachepath")
    if not cachepath:
        if allowempty:
            return None
        else:
            raise error.Abort(
                _(b"could not find config option remotefilelog.cachepath")
            )
    return util.expandpath(cachepath)


def getcachepackpath(repo, category):
    cachepath = getcachepath(repo.ui)
    if category != constants.FILEPACK_CATEGORY:
        return os.path.join(cachepath, repo.name, b'packs', category)
    else:
        return os.path.join(cachepath, repo.name, b'packs')


def getlocalpackpath(base, category):
    return os.path.join(base, b'packs', category)


def createrevlogtext(text, copyfrom=None, copyrev=None):
    """returns a string that matches the revlog contents in a
    traditional revlog
    """
    meta = {}
    if copyfrom or text.startswith(b'\1\n'):
        if copyfrom:
            meta[b'copy'] = copyfrom
        meta[b'copyrev'] = copyrev
        text = storageutil.packmeta(meta, text)

    return text


def parsemeta(text):
    """parse mercurial filelog metadata"""
    meta, size = storageutil.parsemeta(text)
    if text.startswith(b'\1\n'):
        s = text.index(b'\1\n', 2)
        text = text[s + 2 :]
    return meta or {}, text


def sumdicts(*dicts):
    """Adds all the values of *dicts together into one dictionary. This assumes
    the values in *dicts are all summable.

    e.g. [{'a': 4, 'b': 2}, {'b': 3, 'c': 1}] -> {'a': 4, 'b': 5, 'c': 1}
    """
    result = collections.defaultdict(lambda: 0)
    for dict in dicts:
        for k, v in pycompat.iteritems(dict):
            result[k] += v
    return result


def prefixkeys(dict, prefix):
    """Returns ``dict`` with ``prefix`` prepended to all its keys."""
    result = {}
    for k, v in pycompat.iteritems(dict):
        result[prefix + k] = v
    return result


def reportpackmetrics(ui, prefix, *stores):
    dicts = [s.getmetrics() for s in stores]
    dict = prefixkeys(sumdicts(*dicts), prefix + b'_')
    ui.log(prefix + b"_packsizes", b"\n", **pycompat.strkwargs(dict))


def _parsepackmeta(metabuf):
    """parse datapack meta, bytes (<metadata-list>) -> dict

    The dict contains raw content - both keys and values are strings.
    Upper-level business may want to convert some of them to other types like
    integers, on their own.

    raise ValueError if the data is corrupted
    """
    metadict = {}
    offset = 0
    buflen = len(metabuf)
    while buflen - offset >= 3:
        key = metabuf[offset : offset + 1]
        offset += 1
        metalen = struct.unpack_from(b'!H', metabuf, offset)[0]
        offset += 2
        if offset + metalen > buflen:
            raise ValueError(b'corrupted metadata: incomplete buffer')
        value = metabuf[offset : offset + metalen]
        metadict[key] = value
        offset += metalen
    if offset != buflen:
        raise ValueError(b'corrupted metadata: redundant data')
    return metadict


def _buildpackmeta(metadict):
    """reverse of _parsepackmeta, dict -> bytes (<metadata-list>)

    The dict contains raw content - both keys and values are strings.
    Upper-level business may want to serialize some of other types (like
    integers) to strings before calling this function.

    raise ProgrammingError when metadata key is illegal, or ValueError if
    length limit is exceeded
    """
    metabuf = b''
    for k, v in sorted(pycompat.iteritems((metadict or {}))):
        if len(k) != 1:
            raise error.ProgrammingError(b'packmeta: illegal key: %s' % k)
        if len(v) > 0xFFFE:
            raise ValueError(
                b'metadata value is too long: 0x%x > 0xfffe' % len(v)
            )
        metabuf += k
        metabuf += struct.pack(b'!H', len(v))
        metabuf += v
    # len(metabuf) is guaranteed representable in 4 bytes, because there are
    # only 256 keys, and for each value, len(value) <= 0xfffe.
    return metabuf


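# Illustrative example (not part of the upstream file) of the metadata
# wire format handled by _buildpackmeta/_parsepackmeta above: each entry
# is a 1-byte key, a 2-byte big-endian length, then the value, e.g.
#
#   _buildpackmeta({b's': b'42'}) == b's' + struct.pack(b'!H', 2) + b'42'
#                                 == b's\x00\x0242'
#   _parsepackmeta(b's\x00\x0242') == {b's': b'42'}
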
_metaitemtypes = {
    constants.METAKEYFLAG: (int, pycompat.long),
    constants.METAKEYSIZE: (int, pycompat.long),
}


def buildpackmeta(metadict):
    """like _buildpackmeta, but typechecks metadict and normalizes it.

    This means, METAKEYFLAG and METAKEYSIZE should have integers as values,
    and METAKEYFLAG will be dropped if its value is 0.
    """
    newmeta = {}
    for k, v in pycompat.iteritems(metadict or {}):
        expectedtype = _metaitemtypes.get(k, (bytes,))
        if not isinstance(v, expectedtype):
            raise error.ProgrammingError(b'packmeta: wrong type of key %s' % k)
        # normalize int to binary buffer
        if int in expectedtype:
            # optimization: remove flag if it's 0 to save space
            if k == constants.METAKEYFLAG and v == 0:
                continue
            v = int2bin(v)
        newmeta[k] = v
    return _buildpackmeta(newmeta)


def parsepackmeta(metabuf):
    """like _parsepackmeta, but convert fields to desired types automatically.

    This means, METAKEYFLAG and METAKEYSIZE fields will be converted to
    integers.
    """
    metadict = _parsepackmeta(metabuf)
    for k, v in pycompat.iteritems(metadict):
        if k in _metaitemtypes and int in _metaitemtypes[k]:
            metadict[k] = bin2int(v)
    return metadict


def int2bin(n):
    """convert a non-negative integer to raw binary buffer"""
    buf = bytearray()
    while n > 0:
        buf.insert(0, n & 0xFF)
        n >>= 8
    return bytes(buf)


def bin2int(buf):
    """the reverse of int2bin, convert a binary buffer to an integer"""
    x = 0
    for b in bytearray(buf):
        x <<= 8
        x |= b
    return x


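# Illustrative round trips (not part of the upstream file):
#   int2bin(0x1234) == b'\x12\x34'   # big-endian, no leading zeros
#   bin2int(b'\x12\x34') == 0x1234
#   int2bin(0) == b''                # zero encodes to the empty buffer
#   bin2int(b'') == 0
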
class BadRemotefilelogHeader(error.StorageError):
    """Exception raised when parsing a remotefilelog blob header fails."""


def parsesizeflags(raw):
    """given a remotefilelog blob, return (headersize, rawtextsize, flags)

    see remotefilelogserver.createfileblob for the format.
    raise BadRemotefilelogHeader if the content is ill-formed.
    """
    flags = revlog.REVIDX_DEFAULT_FLAGS
    size = None
    try:
        index = raw.index(b'\0')
+    except ValueError:
+        raise BadRemotefilelogHeader(
+            "unexpected remotefilelog header: illegal format"
+        )
+    if True:
        header = raw[:index]
        if header.startswith(b'v'):
            # v1 and above, header starts with 'v'
            if header.startswith(b'v1\n'):
                for s in header.split(b'\n'):
                    if s.startswith(constants.METAKEYSIZE):
                        size = int(s[len(constants.METAKEYSIZE) :])
                    elif s.startswith(constants.METAKEYFLAG):
                        flags = int(s[len(constants.METAKEYFLAG) :])
            else:
                raise BadRemotefilelogHeader(
                    b'unsupported remotefilelog header: %s' % header
                )
        else:
            # v0, str(int(size)) is the header
            size = int(header)
-    except ValueError:
-        raise BadRemotefilelogHeader(
-            "unexpected remotefilelog header: illegal format"
-        )
    if size is None:
        raise BadRemotefilelogHeader(
            "unexpected remotefilelog header: no size found"
        )
    return index + 1, size, flags


def buildfileblobheader(size, flags, version=None):
    """return the header of a remotefilelog blob.

    see remotefilelogserver.createfileblob for the format.
    approximately the reverse of parsesizeflags.

    version could be 0 or 1, or None (auto decide).
    """
    # choose v0 if flags is empty, otherwise v1
    if version is None:
        version = int(bool(flags))
    if version == 1:
        header = b'v1\n%s%d\n%s%d' % (
            constants.METAKEYSIZE,
            size,
            constants.METAKEYFLAG,
            flags,
        )
    elif version == 0:
        if flags:
            raise error.ProgrammingError(b'fileblob v0 does not support flag')
        header = b'%d' % size
    else:
        raise error.ProgrammingError(b'unknown fileblob version %d' % version)
    return header


def ancestormap(raw):
    offset, size, flags = parsesizeflags(raw)
    start = offset + size

    mapping = {}
    while start < len(raw):
        divider = raw.index(b'\0', start + 80)

        currentnode = raw[start : (start + 20)]
        p1 = raw[(start + 20) : (start + 40)]
        p2 = raw[(start + 40) : (start + 60)]
        linknode = raw[(start + 60) : (start + 80)]
        copyfrom = raw[(start + 80) : divider]

        mapping[currentnode] = (p1, p2, linknode, copyfrom)
        start = divider + 1

    return mapping


def readfile(path):
    f = open(path, b'rb')
    try:
        result = f.read()

        # we should never have empty files
        if not result:
            os.remove(path)
            raise IOError(b"empty file: %s" % path)

        return result
    finally:
        f.close()


def unlinkfile(filepath):
    if pycompat.iswindows:
        # On Windows, os.unlink cannot delete readonly files
        os.chmod(filepath, stat.S_IWUSR)
    os.unlink(filepath)


def renamefile(source, destination):
    if pycompat.iswindows:
        # On Windows, os.rename cannot rename readonly files
        # and cannot overwrite destination if it exists
        os.chmod(source, stat.S_IWUSR)
        if os.path.isfile(destination):
            os.chmod(destination, stat.S_IWUSR)
            os.unlink(destination)

    os.rename(source, destination)


def writefile(path, content, readonly=False):
    dirname, filename = os.path.split(path)
    if not os.path.exists(dirname):
        try:
            os.makedirs(dirname)
        except OSError as ex:
            if ex.errno != errno.EEXIST:
                raise

    fd, temp = tempfile.mkstemp(prefix=b'.%s-' % filename, dir=dirname)
    os.close(fd)

    try:
        f = util.posixfile(temp, b'wb')
        f.write(content)
        f.close()

        if readonly:
            mode = 0o444
        else:
            # tempfiles are created with 0o600, so we need to manually set the
            # mode.
            oldumask = os.umask(0)
            # there's no way to get the umask without modifying it, so set it
            # back
            os.umask(oldumask)
            mode = ~oldumask

        renamefile(temp, path)
        os.chmod(path, mode)
    except Exception:
        try:
            unlinkfile(temp)
        except OSError:
            pass
        raise


def sortnodes(nodes, parentfunc):
    """Topologically sorts the nodes, using the parentfunc to find
    the parents of nodes."""
    nodes = set(nodes)
    childmap = {}
    parentmap = {}
    roots = []

    # Build a child and parent map
    for n in nodes:
        parents = [p for p in parentfunc(n) if p in nodes]
        parentmap[n] = set(parents)
        for p in parents:
            childmap.setdefault(p, set()).add(n)
        if not parents:
            roots.append(n)

    roots.sort()
    # Process roots, adding children to the queue as they become roots
    results = []
    while roots:
        n = roots.pop(0)
        results.append(n)
        if n in childmap:
            children = childmap[n]
            for c in children:
                childparents = parentmap[c]
                childparents.remove(n)
                if len(childparents) == 0:
                    # insert at the beginning, that way child nodes
                    # are likely to be output immediately after their
                    # parents. This gives better compression results.
                    roots.insert(0, c)

    return results


def readexactly(stream, n):
    '''read n bytes from stream.read and abort if less was available'''
    s = stream.read(n)
    if len(s) < n:
        raise error.Abort(
            _(b"stream ended unexpectedly (got %d bytes, expected %d)")
            % (len(s), n)
        )
    return s


def readunpack(stream, fmt):
    data = readexactly(stream, struct.calcsize(fmt))
    return struct.unpack(fmt, data)


def readpath(stream):
    rawlen = readexactly(stream, constants.FILENAMESIZE)
    pathlen = struct.unpack(constants.FILENAMESTRUCT, rawlen)[0]
    return readexactly(stream, pathlen)


def readnodelist(stream):
    rawlen = readexactly(stream, constants.NODECOUNTSIZE)
    nodecount = struct.unpack(constants.NODECOUNTSTRUCT, rawlen)[0]
    for i in pycompat.xrange(nodecount):
        yield readexactly(stream, constants.NODESIZE)


def readpathlist(stream):
    rawlen = readexactly(stream, constants.PATHCOUNTSIZE)
    pathcount = struct.unpack(constants.PATHCOUNTSTRUCT, rawlen)[0]
    for i in pycompat.xrange(pathcount):
        yield readpath(stream)


def getgid(groupname):
    try:
        gid = grp.getgrnam(pycompat.fsdecode(groupname)).gr_gid
        return gid
    except KeyError:
        return None


def setstickygroupdir(path, gid, warn=None):
    if gid is None:
        return
    try:
        os.chown(path, -1, gid)
        os.chmod(path, 0o2775)
    except (IOError, OSError) as ex:
        if warn:
            warn(_(b'unable to chown/chmod on %s: %s\n') % (path, ex))


def mkstickygroupdir(ui, path):
    """Creates the given directory (if it doesn't exist) and gives it a
    particular group with setgid enabled."""
    gid = None
    groupname = ui.config(b"remotefilelog", b"cachegroup")
    if groupname:
        gid = getgid(groupname)
        if gid is None:
            ui.warn(_(b'unable to resolve group name: %s\n') % groupname)

    # we use a single stat syscall to test the existence and mode / group bit
    st = None
    try:
        st = os.stat(path)
    except OSError:
        pass

    if st:
        # exists
        if (st.st_mode & 0o2775) != 0o2775 or st.st_gid != gid:
            # permission needs to be fixed
            setstickygroupdir(path, gid, ui.warn)
        return

    oldumask = os.umask(0o002)
    try:
        missingdirs = [path]
        path = os.path.dirname(path)
        while path and not os.path.exists(path):
            missingdirs.append(path)
            path = os.path.dirname(path)

        for path in reversed(missingdirs):
            try:
                os.mkdir(path)
            except OSError as ex:
                if ex.errno != errno.EEXIST:
                    raise

        for path in missingdirs:
            setstickygroupdir(path, gid, ui.warn)
    finally:
        os.umask(oldumask)


def getusername(ui):
    try:
        return stringutil.shortuser(ui.username())
    except Exception:
        return b'unknown'


def getreponame(ui):
    reponame = ui.config(b'paths', b'default')
    if reponame:
        return os.path.basename(reponame)
    return b"unknown"