shallowutil: dedent code after the previous change...
Augie Fackler
r48340:e2888ebb default draft
@@ -1,545 +1,544 b''
# shallowutil.py -- remotefilelog utilities
#
# Copyright 2014 Facebook, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from __future__ import absolute_import

import collections
import errno
import os
import stat
import struct
import tempfile

from mercurial.i18n import _
from mercurial.pycompat import open
from mercurial.node import hex
from mercurial import (
    error,
    pycompat,
    revlog,
    util,
)
from mercurial.utils import (
    hashutil,
    storageutil,
    stringutil,
)
from . import constants

if not pycompat.iswindows:
    import grp


def isenabled(repo):
    """returns whether the repository is remotefilelog enabled or not"""
    return constants.SHALLOWREPO_REQUIREMENT in repo.requirements

def getcachekey(reponame, file, id):
    pathhash = hex(hashutil.sha1(file).digest())
    return os.path.join(reponame, pathhash[:2], pathhash[2:], id)


def getlocalkey(file, id):
    pathhash = hex(hashutil.sha1(file).digest())
    return os.path.join(pathhash, id)

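# NOTE (illustrative, not part of this changeset): getcachekey() shards the
# shared cache by the sha1 of the file path, so a blob lands at
#
#     <reponame>/<pathhash[:2]>/<pathhash[2:]>/<id>
#
# while getlocalkey() keeps the full path hash as a single directory and
# omits the repo name.
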
def getcachepath(ui, allowempty=False):
    cachepath = ui.config(b"remotefilelog", b"cachepath")
    if not cachepath:
        if allowempty:
            return None
        else:
            raise error.Abort(
                _(b"could not find config option remotefilelog.cachepath")
            )
    return util.expandpath(cachepath)


def getcachepackpath(repo, category):
    cachepath = getcachepath(repo.ui)
    if category != constants.FILEPACK_CATEGORY:
        return os.path.join(cachepath, repo.name, b'packs', category)
    else:
        return os.path.join(cachepath, repo.name, b'packs')


def getlocalpackpath(base, category):
    return os.path.join(base, b'packs', category)

def createrevlogtext(text, copyfrom=None, copyrev=None):
    """returns a string that matches the revlog contents in a
    traditional revlog
    """
    meta = {}
    if copyfrom or text.startswith(b'\1\n'):
        if copyfrom:
            meta[b'copy'] = copyfrom
            meta[b'copyrev'] = copyrev
        text = storageutil.packmeta(meta, text)

    return text


def parsemeta(text):
    """parse mercurial filelog metadata"""
    meta, size = storageutil.parsemeta(text)
    if text.startswith(b'\1\n'):
        s = text.index(b'\1\n', 2)
        text = text[s + 2 :]
    return meta or {}, text

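# NOTE (illustrative, not part of this changeset): filelog metadata is framed
# between two b'\1\n' markers ahead of the file text, e.g. a copied file's
# raw text begins
#
#     \1\ncopy: foo/bar\ncopyrev: <40-hex node>\n\1\n<file contents>
#
# parsemeta() above strips that envelope; createrevlogtext() re-adds it.
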
def sumdicts(*dicts):
    """Adds all the values of *dicts together into one dictionary. This assumes
    the values in *dicts are all summable.

    e.g. [{'a': 4, 'b': 2}, {'b': 3, 'c': 1}] -> {'a': 4, 'b': 5, 'c': 1}
    """
    result = collections.defaultdict(lambda: 0)
    for dict in dicts:
        for k, v in pycompat.iteritems(dict):
            result[k] += v
    return result


def prefixkeys(dict, prefix):
    """Returns ``dict`` with ``prefix`` prepended to all its keys."""
    result = {}
    for k, v in pycompat.iteritems(dict):
        result[prefix + k] = v
    return result


def reportpackmetrics(ui, prefix, *stores):
    dicts = [s.getmetrics() for s in stores]
    dict = prefixkeys(sumdicts(*dicts), prefix + b'_')
    ui.log(prefix + b"_packsizes", b"\n", **pycompat.strkwargs(dict))

def _parsepackmeta(metabuf):
    """parse datapack meta, bytes (<metadata-list>) -> dict

    The dict contains raw content - both keys and values are strings.
    Upper-level business may want to convert some of them to other types like
    integers, on their own.

    raise ValueError if the data is corrupted
    """
    metadict = {}
    offset = 0
    buflen = len(metabuf)
    while buflen - offset >= 3:
        key = metabuf[offset : offset + 1]
        offset += 1
        metalen = struct.unpack_from(b'!H', metabuf, offset)[0]
        offset += 2
        if offset + metalen > buflen:
            raise ValueError(b'corrupted metadata: incomplete buffer')
        value = metabuf[offset : offset + metalen]
        metadict[key] = value
        offset += metalen
    if offset != buflen:
        raise ValueError(b'corrupted metadata: redundant data')
    return metadict

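# NOTE (illustrative sketch, not part of this changeset): each pack-meta
# entry is a 1-byte key, a big-endian uint16 length, then the value, back to
# back. Assuming METAKEYSIZE is b's', a size value of b'123' packs as
#
#     b's' + struct.pack(b'!H', 3) + b'123'  ==  b's\x00\x03123'
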
def _buildpackmeta(metadict):
    """reverse of _parsepackmeta, dict -> bytes (<metadata-list>)

    The dict contains raw content - both keys and values are strings.
    Upper-level business may want to serialize some of other types (like
    integers) to strings before calling this function.

    raise ProgrammingError when metadata key is illegal, or ValueError if
    length limit is exceeded
    """
    metabuf = b''
    for k, v in sorted(pycompat.iteritems((metadict or {}))):
        if len(k) != 1:
            raise error.ProgrammingError(b'packmeta: illegal key: %s' % k)
        if len(v) > 0xFFFE:
            raise ValueError(
                b'metadata value is too long: 0x%x > 0xfffe' % len(v)
            )
        metabuf += k
        metabuf += struct.pack(b'!H', len(v))
        metabuf += v
    # len(metabuf) is guaranteed representable in 4 bytes, because there are
    # only 256 keys, and for each value, len(value) <= 0xfffe.
    return metabuf


_metaitemtypes = {
    constants.METAKEYFLAG: (int, pycompat.long),
    constants.METAKEYSIZE: (int, pycompat.long),
}

def buildpackmeta(metadict):
    """like _buildpackmeta, but typechecks metadict and normalizes it.

    This means, METAKEYSIZE and METAKEYFLAG should have integers as values,
    and METAKEYFLAG will be dropped if its value is 0.
    """
    newmeta = {}
    for k, v in pycompat.iteritems(metadict or {}):
        expectedtype = _metaitemtypes.get(k, (bytes,))
        if not isinstance(v, expectedtype):
            raise error.ProgrammingError(b'packmeta: wrong type of key %s' % k)
        # normalize int to binary buffer
        if int in expectedtype:
            # optimization: remove flag if it's 0 to save space
            if k == constants.METAKEYFLAG and v == 0:
                continue
            v = int2bin(v)
        newmeta[k] = v
    return _buildpackmeta(newmeta)


def parsepackmeta(metabuf):
    """like _parsepackmeta, but convert fields to desired types automatically.

    This means, METAKEYFLAG and METAKEYSIZE fields will be converted to
    integers.
    """
    metadict = _parsepackmeta(metabuf)
    for k, v in pycompat.iteritems(metadict):
        if k in _metaitemtypes and int in _metaitemtypes[k]:
            metadict[k] = bin2int(v)
    return metadict

def int2bin(n):
    """convert a non-negative integer to raw binary buffer"""
    buf = bytearray()
    while n > 0:
        buf.insert(0, n & 0xFF)
        n >>= 8
    return bytes(buf)


def bin2int(buf):
    """the reverse of int2bin, convert a binary buffer to an integer"""
    x = 0
    for b in bytearray(buf):
        x <<= 8
        x |= b
    return x

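# NOTE (illustrative, not part of this changeset): int2bin/bin2int are
# variable-width big-endian codecs, e.g.
#
#     int2bin(0x1234) == b'\x12\x34'   and   bin2int(b'\x12\x34') == 0x1234
#     int2bin(0)      == b''           and   bin2int(b'')         == 0
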
class BadRemotefilelogHeader(error.StorageError):
    """Exception raised when parsing a remotefilelog blob header fails."""


def parsesizeflags(raw):
    """given a remotefilelog blob, return (headersize, rawtextsize, flags)

    see remotefilelogserver.createfileblob for the format.
    raise BadRemotefilelogHeader if the content is ill-formed.
    """
    flags = revlog.REVIDX_DEFAULT_FLAGS
    size = None
    try:
        index = raw.index(b'\0')
    except ValueError:
        raise BadRemotefilelogHeader(
            "unexpected remotefilelog header: illegal format"
        )
-    if True:
-        header = raw[:index]
-        if header.startswith(b'v'):
-            # v1 and above, header starts with 'v'
-            if header.startswith(b'v1\n'):
-                for s in header.split(b'\n'):
-                    if s.startswith(constants.METAKEYSIZE):
-                        size = int(s[len(constants.METAKEYSIZE) :])
-                    elif s.startswith(constants.METAKEYFLAG):
-                        flags = int(s[len(constants.METAKEYFLAG) :])
-            else:
-                raise BadRemotefilelogHeader(
-                    b'unsupported remotefilelog header: %s' % header
-                )
-        else:
-            # v0, str(int(size)) is the header
-            size = int(header)
+    header = raw[:index]
+    if header.startswith(b'v'):
+        # v1 and above, header starts with 'v'
+        if header.startswith(b'v1\n'):
+            for s in header.split(b'\n'):
+                if s.startswith(constants.METAKEYSIZE):
+                    size = int(s[len(constants.METAKEYSIZE) :])
+                elif s.startswith(constants.METAKEYFLAG):
+                    flags = int(s[len(constants.METAKEYFLAG) :])
+        else:
+            raise BadRemotefilelogHeader(
+                b'unsupported remotefilelog header: %s' % header
+            )
+    else:
+        # v0, str(int(size)) is the header
+        size = int(header)
    if size is None:
        raise BadRemotefilelogHeader(
            "unexpected remotefilelog header: no size found"
        )
    return index + 1, size, flags

def buildfileblobheader(size, flags, version=None):
    """return the header of a remotefilelog blob.

    see remotefilelogserver.createfileblob for the format.
    approximately the reverse of parsesizeflags.

    version could be 0 or 1, or None (auto decide).
    """
    # choose v0 if flags is empty, otherwise v1
    if version is None:
        version = int(bool(flags))
    if version == 1:
        header = b'v1\n%s%d\n%s%d' % (
            constants.METAKEYSIZE,
            size,
            constants.METAKEYFLAG,
            flags,
        )
    elif version == 0:
        if flags:
            raise error.ProgrammingError(b'fileblob v0 does not support flag')
        header = b'%d' % size
    else:
        raise error.ProgrammingError(b'unknown fileblob version %d' % version)
    return header

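# NOTE (illustrative sketch, not part of this changeset): assuming
# METAKEYSIZE is b's' and METAKEYFLAG is b'f', buildfileblobheader(1234, 0)
# picks v0 and returns b'1234', while buildfileblobheader(1234, 2) picks v1
# and returns b'v1\ns1234\nf2'. Both header forms are what parsesizeflags()
# above parses back out of a blob.
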
def ancestormap(raw):
    offset, size, flags = parsesizeflags(raw)
    start = offset + size

    mapping = {}
    while start < len(raw):
        divider = raw.index(b'\0', start + 80)

        currentnode = raw[start : (start + 20)]
        p1 = raw[(start + 20) : (start + 40)]
        p2 = raw[(start + 40) : (start + 60)]
        linknode = raw[(start + 60) : (start + 80)]
        copyfrom = raw[(start + 80) : divider]

        mapping[currentnode] = (p1, p2, linknode, copyfrom)
        start = divider + 1

    return mapping

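# NOTE (illustrative, not part of this changeset): after the file text, each
# ancestor record is 80 bytes of binary nodes plus a NUL-terminated copy
# source, i.e.
#
#     node(20) | p1(20) | p2(20) | linknode(20) | copyfrom(variable) | \0
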
def readfile(path):
    f = open(path, b'rb')
    try:
        result = f.read()

        # we should never have empty files
        if not result:
            os.remove(path)
            raise IOError(b"empty file: %s" % path)

        return result
    finally:
        f.close()


def unlinkfile(filepath):
    if pycompat.iswindows:
        # On Windows, os.unlink cannot delete readonly files
        os.chmod(filepath, stat.S_IWUSR)
    os.unlink(filepath)


def renamefile(source, destination):
    if pycompat.iswindows:
        # On Windows, os.rename cannot rename readonly files
        # and cannot overwrite destination if it exists
        os.chmod(source, stat.S_IWUSR)
        if os.path.isfile(destination):
            os.chmod(destination, stat.S_IWUSR)
            os.unlink(destination)

    os.rename(source, destination)

def writefile(path, content, readonly=False):
    dirname, filename = os.path.split(path)
    if not os.path.exists(dirname):
        try:
            os.makedirs(dirname)
        except OSError as ex:
            if ex.errno != errno.EEXIST:
                raise

    fd, temp = tempfile.mkstemp(prefix=b'.%s-' % filename, dir=dirname)
    os.close(fd)

    try:
        f = util.posixfile(temp, b'wb')
        f.write(content)
        f.close()

        if readonly:
            mode = 0o444
        else:
            # tempfiles are created with 0o600, so we need to manually set the
            # mode.
            oldumask = os.umask(0)
            # there's no way to get the umask without modifying it, so set it
            # back
            os.umask(oldumask)
            mode = ~oldumask

        renamefile(temp, path)
        os.chmod(path, mode)
    except Exception:
        try:
            unlinkfile(temp)
        except OSError:
            pass
        raise

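# NOTE (illustrative, not part of this changeset): writefile() is the usual
# write-to-a-temp-file-then-rename pattern, so concurrent readers never
# observe a partially written blob; the final chmod widens mkstemp's 0o600
# default to either 0o444 (readonly=True) or the umask-derived mode computed
# above.
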
def sortnodes(nodes, parentfunc):
    """Topologically sorts the nodes, using the parentfunc to find
    the parents of nodes."""
    nodes = set(nodes)
    childmap = {}
    parentmap = {}
    roots = []

    # Build a child and parent map
    for n in nodes:
        parents = [p for p in parentfunc(n) if p in nodes]
        parentmap[n] = set(parents)
        for p in parents:
            childmap.setdefault(p, set()).add(n)
        if not parents:
            roots.append(n)

    roots.sort()
    # Process roots, adding children to the queue as they become roots
    results = []
    while roots:
        n = roots.pop(0)
        results.append(n)
        if n in childmap:
            children = childmap[n]
            for c in children:
                childparents = parentmap[c]
                childparents.remove(n)
                if len(childparents) == 0:
                    # insert at the beginning, that way child nodes
                    # are likely to be output immediately after their
                    # parents. This gives better compression results.
                    roots.insert(0, c)

    return results

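# NOTE (illustrative, not part of this changeset): sortnodes() is a
# Kahn-style topological sort that emits a child right after its last
# remaining parent. For a linear history a -> b -> c (parentfunc(b) == [a],
# parentfunc(c) == [b]), sortnodes([c, a, b], parentfunc) returns [a, b, c].
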
def readexactly(stream, n):
    '''read n bytes from stream.read and abort if less was available'''
    s = stream.read(n)
    if len(s) < n:
        raise error.Abort(
            _(b"stream ended unexpectedly (got %d bytes, expected %d)")
            % (len(s), n)
        )
    return s


def readunpack(stream, fmt):
    data = readexactly(stream, struct.calcsize(fmt))
    return struct.unpack(fmt, data)


def readpath(stream):
    rawlen = readexactly(stream, constants.FILENAMESIZE)
    pathlen = struct.unpack(constants.FILENAMESTRUCT, rawlen)[0]
    return readexactly(stream, pathlen)


def readnodelist(stream):
    rawlen = readexactly(stream, constants.NODECOUNTSIZE)
    nodecount = struct.unpack(constants.NODECOUNTSTRUCT, rawlen)[0]
    for i in pycompat.xrange(nodecount):
        yield readexactly(stream, constants.NODESIZE)


def readpathlist(stream):
    rawlen = readexactly(stream, constants.PATHCOUNTSIZE)
    pathcount = struct.unpack(constants.PATHCOUNTSTRUCT, rawlen)[0]
    for i in pycompat.xrange(pathcount):
        yield readpath(stream)

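# NOTE (illustrative sketch, not part of this changeset): these stream
# helpers all read length-prefixed records. Assuming FILENAMESTRUCT is a
# big-endian uint16 (b'!H'), a single path entry b'foo' arrives on the wire
# as b'\x00\x03foo', preceded by a count field sized per PATHCOUNTSTRUCT.
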
def getgid(groupname):
    try:
        gid = grp.getgrnam(pycompat.fsdecode(groupname)).gr_gid
        return gid
    except KeyError:
        return None


def setstickygroupdir(path, gid, warn=None):
    if gid is None:
        return
    try:
        os.chown(path, -1, gid)
        os.chmod(path, 0o2775)
    except (IOError, OSError) as ex:
        if warn:
            warn(_(b'unable to chown/chmod on %s: %s\n') % (path, ex))

def mkstickygroupdir(ui, path):
    """Creates the given directory (if it doesn't exist) and gives it a
    particular group with setgid enabled."""
    gid = None
    groupname = ui.config(b"remotefilelog", b"cachegroup")
    if groupname:
        gid = getgid(groupname)
        if gid is None:
            ui.warn(_(b'unable to resolve group name: %s\n') % groupname)

    # we use a single stat syscall to test the existence and mode / group bit
    st = None
    try:
        st = os.stat(path)
    except OSError:
        pass

    if st:
        # exists
        if (st.st_mode & 0o2775) != 0o2775 or st.st_gid != gid:
            # permission needs to be fixed
            setstickygroupdir(path, gid, ui.warn)
        return

    oldumask = os.umask(0o002)
    try:
        missingdirs = [path]
        path = os.path.dirname(path)
        while path and not os.path.exists(path):
            missingdirs.append(path)
            path = os.path.dirname(path)

        for path in reversed(missingdirs):
            try:
                os.mkdir(path)
            except OSError as ex:
                if ex.errno != errno.EEXIST:
                    raise

        for path in missingdirs:
            setstickygroupdir(path, gid, ui.warn)
    finally:
        os.umask(oldumask)

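# NOTE (illustrative, not part of this changeset): mode 0o2775 is the setgid
# bit (0o2000) plus rwxrwxr-x, so new entries under the cache directory
# inherit its group; the temporary 0o002 umask keeps freshly created parent
# directories group-writable.
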
def getusername(ui):
    try:
        return stringutil.shortuser(ui.username())
    except Exception:
        return b'unknown'


def getreponame(ui):
    reponame = ui.config(b'paths', b'default')
    if reponame:
        return os.path.basename(reponame)
    return b"unknown"