remotefilelog: remove pycompat.iteritems()...
Gregory Szorc
r49773:0fe00349 default
@@ -1,543 +1,543 @@
# shallowutil.py -- remotefilelog utilities
#
# Copyright 2014 Facebook, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

import collections
import errno
import os
import stat
import struct
import tempfile

from mercurial.i18n import _
from mercurial.pycompat import open
from mercurial.node import hex
from mercurial import (
    error,
    pycompat,
    revlog,
    util,
)
from mercurial.utils import (
    hashutil,
    storageutil,
    stringutil,
)
from . import constants

if not pycompat.iswindows:
    import grp


def isenabled(repo):
    """returns whether the repository is remotefilelog enabled or not"""
    return constants.SHALLOWREPO_REQUIREMENT in repo.requirements


def getcachekey(reponame, file, id):
    pathhash = hex(hashutil.sha1(file).digest())
    return os.path.join(reponame, pathhash[:2], pathhash[2:], id)


def getlocalkey(file, id):
    pathhash = hex(hashutil.sha1(file).digest())
    return os.path.join(pathhash, id)
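
# Illustrative note (not part of this change): getcachekey() fans entries
# out by the hex sha1 of the file path, so a key looks like
#   <reponame>/<first 2 hex digits>/<remaining 38 hex digits>/<id>
# which keeps any single cache directory from growing too large.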


def getcachepath(ui, allowempty=False):
    cachepath = ui.config(b"remotefilelog", b"cachepath")
    if not cachepath:
        if allowempty:
            return None
        else:
            raise error.Abort(
                _(b"could not find config option remotefilelog.cachepath")
            )
    return util.expandpath(cachepath)


def getcachepackpath(repo, category):
    cachepath = getcachepath(repo.ui)
    if category != constants.FILEPACK_CATEGORY:
        return os.path.join(cachepath, repo.name, b'packs', category)
    else:
        return os.path.join(cachepath, repo.name, b'packs')


def getlocalpackpath(base, category):
    return os.path.join(base, b'packs', category)


def createrevlogtext(text, copyfrom=None, copyrev=None):
    """returns a string that matches the revlog contents in a
    traditional revlog
    """
    meta = {}
    if copyfrom or text.startswith(b'\1\n'):
        if copyfrom:
            meta[b'copy'] = copyfrom
            meta[b'copyrev'] = copyrev
        text = storageutil.packmeta(meta, text)

    return text


def parsemeta(text):
    """parse mercurial filelog metadata"""
    meta, size = storageutil.parsemeta(text)
    if text.startswith(b'\1\n'):
        s = text.index(b'\1\n', 2)
        text = text[s + 2 :]
    return meta or {}, text
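
# Illustrative note (not part of this change): filelog metadata is framed
# by b'\1\n' markers at the front of the text, e.g.
#   b'\1\ncopy: other/path\ncopyrev: <40 hex digits>\n\1\n<file contents>'
# parsemeta() returns the metadata dict plus the text with that frame
# stripped; createrevlogtext() is the inverse direction.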


def sumdicts(*dicts):
    """Adds all the values of *dicts together into one dictionary. This assumes
    the values in *dicts are all summable.

    e.g. [{'a': 4, 'b': 2}, {'b': 3, 'c': 1}] -> {'a': 4, 'b': 5, 'c': 1}
    """
    result = collections.defaultdict(lambda: 0)
    for dict in dicts:
        for k, v in dict.items():
            result[k] += v
    return result


def prefixkeys(dict, prefix):
    """Returns ``dict`` with ``prefix`` prepended to all its keys."""
    result = {}
    for k, v in dict.items():
        result[prefix + k] = v
    return result


def reportpackmetrics(ui, prefix, *stores):
    dicts = [s.getmetrics() for s in stores]
    dict = prefixkeys(sumdicts(*dicts), prefix + b'_')
    ui.log(prefix + b"_packsizes", b"\n", **pycompat.strkwargs(dict))


def _parsepackmeta(metabuf):
    """parse datapack meta, bytes (<metadata-list>) -> dict

    The dict contains raw content - both keys and values are strings.
    Upper-level business may want to convert some of them to other types like
    integers, on their own.

    raise ValueError if the data is corrupted
    """
    metadict = {}
    offset = 0
    buflen = len(metabuf)
    while buflen - offset >= 3:
        key = metabuf[offset : offset + 1]
        offset += 1
        metalen = struct.unpack_from(b'!H', metabuf, offset)[0]
        offset += 2
        if offset + metalen > buflen:
            raise ValueError(b'corrupted metadata: incomplete buffer')
        value = metabuf[offset : offset + metalen]
        metadict[key] = value
        offset += metalen
    if offset != buflen:
        raise ValueError(b'corrupted metadata: redundant data')
    return metadict
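
# Illustrative example (not part of this change): each entry is a 1-byte
# key, a big-endian 16-bit length (b'!H'), then the value, so
#   _parsepackmeta(b's\x00\x02\x01\x00') == {b's': b'\x01\x00'}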


def _buildpackmeta(metadict):
    """reverse of _parsepackmeta, dict -> bytes (<metadata-list>)

    The dict contains raw content - both keys and values are strings.
    Upper-level business may want to serialize some of other types (like
    integers) to strings before calling this function.

    raise ProgrammingError when metadata key is illegal, or ValueError if
    length limit is exceeded
    """
    metabuf = b''
-    for k, v in sorted(pycompat.iteritems((metadict or {}))):
+    for k, v in sorted((metadict or {}).items()):
        if len(k) != 1:
            raise error.ProgrammingError(b'packmeta: illegal key: %s' % k)
        if len(v) > 0xFFFE:
            raise ValueError(
                b'metadata value is too long: 0x%x > 0xfffe' % len(v)
            )
        metabuf += k
        metabuf += struct.pack(b'!H', len(v))
        metabuf += v
    # len(metabuf) is guaranteed representable in 4 bytes, because there are
    # only 256 keys, and for each value, len(value) <= 0xfffe.
    return metabuf
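
# Illustrative example (not part of this change): for well-formed input
# _buildpackmeta() and _parsepackmeta() are exact inverses, e.g.
#   _parsepackmeta(_buildpackmeta({b'f': b'\x01', b's': b'\x02'}))
# returns the same dict; entries are emitted in sorted key order, so the
# serialization is deterministic.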


_metaitemtypes = {
    constants.METAKEYFLAG: (int, pycompat.long),
    constants.METAKEYSIZE: (int, pycompat.long),
}


def buildpackmeta(metadict):
    """like _buildpackmeta, but typechecks metadict and normalizes it.

    This means, METAKEYFLAG and METAKEYSIZE should have integers as values,
    and METAKEYFLAG will be dropped if its value is 0.
    """
    newmeta = {}
-    for k, v in pycompat.iteritems(metadict or {}):
+    for k, v in (metadict or {}).items():
        expectedtype = _metaitemtypes.get(k, (bytes,))
        if not isinstance(v, expectedtype):
            raise error.ProgrammingError(b'packmeta: wrong type of key %s' % k)
        # normalize int to binary buffer
        if int in expectedtype:
            # optimization: remove flag if it's 0 to save space
            if k == constants.METAKEYFLAG and v == 0:
                continue
            v = int2bin(v)
        newmeta[k] = v
    return _buildpackmeta(newmeta)


def parsepackmeta(metabuf):
    """like _parsepackmeta, but convert fields to desired types automatically.

    This means, METAKEYFLAG and METAKEYSIZE fields will be converted to
    integers.
    """
    metadict = _parsepackmeta(metabuf)
    for k, v in metadict.items():
        if k in _metaitemtypes and int in _metaitemtypes[k]:
            metadict[k] = bin2int(v)
    return metadict
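
# Illustrative example (not part of this change): the typed wrappers
# convert integer fields through int2bin()/bin2int(), so
#   parsepackmeta(buildpackmeta({constants.METAKEYSIZE: 5}))
# round-trips to {constants.METAKEYSIZE: 5}, while a zero METAKEYFLAG is
# dropped entirely by buildpackmeta().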


def int2bin(n):
    """convert a non-negative integer to raw binary buffer"""
    buf = bytearray()
    while n > 0:
        buf.insert(0, n & 0xFF)
        n >>= 8
    return bytes(buf)


def bin2int(buf):
    """the reverse of int2bin, convert a binary buffer to an integer"""
    x = 0
    for b in bytearray(buf):
        x <<= 8
        x |= b
    return x
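
# Illustrative example (not part of this change): the encoding is
# big-endian with no fixed width or length prefix, e.g.
#   int2bin(0x1234) == b'\x12\x34' and bin2int(b'\x12\x34') == 0x1234
# and the degenerate case int2bin(0) == b'', which bin2int() maps back
# to 0.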


class BadRemotefilelogHeader(error.StorageError):
    """Exception raised when parsing a remotefilelog blob header fails."""


def parsesizeflags(raw):
    """given a remotefilelog blob, return (headersize, rawtextsize, flags)

    see remotefilelogserver.createfileblob for the format.
    raise BadRemotefilelogHeader if the content is ill-formed.
    """
    flags = revlog.REVIDX_DEFAULT_FLAGS
    size = None
    try:
        index = raw.index(b'\0')
    except ValueError:
        raise BadRemotefilelogHeader(
            "unexpected remotefilelog header: illegal format"
        )
    header = raw[:index]
    if header.startswith(b'v'):
        # v1 and above, header starts with 'v'
        if header.startswith(b'v1\n'):
            for s in header.split(b'\n'):
                if s.startswith(constants.METAKEYSIZE):
                    size = int(s[len(constants.METAKEYSIZE) :])
                elif s.startswith(constants.METAKEYFLAG):
                    flags = int(s[len(constants.METAKEYFLAG) :])
        else:
            raise BadRemotefilelogHeader(
                b'unsupported remotefilelog header: %s' % header
            )
    else:
        # v0, str(int(size)) is the header
        size = int(header)
    if size is None:
        raise BadRemotefilelogHeader(
            "unexpected remotefilelog header: no size found"
        )
    return index + 1, size, flags
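
# Illustrative note (not part of this change), assuming for the sake of
# the example that METAKEYSIZE == b's' and METAKEYFLAG == b'f': a v0
# header is just the decimal size (b'13\0...'), while a v1 header looks
# like b'v1\ns13\nf0\0...'; both forms yield (headersize, size, flags).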


def buildfileblobheader(size, flags, version=None):
    """return the header of a remotefilelog blob.

    see remotefilelogserver.createfileblob for the format.
    approximately the reverse of parsesizeflags.

    version could be 0 or 1, or None (auto decide).
    """
    # choose v0 if flags is empty, otherwise v1
    if version is None:
        version = int(bool(flags))
    if version == 1:
        header = b'v1\n%s%d\n%s%d' % (
            constants.METAKEYSIZE,
            size,
            constants.METAKEYFLAG,
            flags,
        )
    elif version == 0:
        if flags:
            raise error.ProgrammingError(b'fileblob v0 does not support flag')
        header = b'%d' % size
    else:
        raise error.ProgrammingError(b'unknown fileblob version %d' % version)
    return header
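
# Illustrative example (not part of this change): with no flags set,
# buildfileblobheader(13, 0) auto-selects v0 and returns b'13'; with any
# flag bit set it emits the multi-line v1 form instead. The b'\0' that
# parsesizeflags() searches for is appended by the caller, not here.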


def ancestormap(raw):
    offset, size, flags = parsesizeflags(raw)
    start = offset + size

    mapping = {}
    while start < len(raw):
        divider = raw.index(b'\0', start + 80)

        currentnode = raw[start : (start + 20)]
        p1 = raw[(start + 20) : (start + 40)]
        p2 = raw[(start + 40) : (start + 60)]
        linknode = raw[(start + 60) : (start + 80)]
        copyfrom = raw[(start + 80) : divider]

        mapping[currentnode] = (p1, p2, linknode, copyfrom)
        start = divider + 1

    return mapping
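
# Illustrative note (not part of this change): after the header and raw
# text, each ancestor record is four 20-byte sha1 nodes (node, p1, p2,
# linknode) packed into a fixed 80-byte block, then a variable-length
# copyfrom path terminated by b'\0'.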


def readfile(path):
    f = open(path, b'rb')
    try:
        result = f.read()

        # we should never have empty files
        if not result:
            os.remove(path)
            raise IOError(b"empty file: %s" % path)

        return result
    finally:
        f.close()


def unlinkfile(filepath):
    if pycompat.iswindows:
        # On Windows, os.unlink cannot delete readonly files
        os.chmod(filepath, stat.S_IWUSR)
    os.unlink(filepath)


def renamefile(source, destination):
    if pycompat.iswindows:
        # On Windows, os.rename cannot rename readonly files
        # and cannot overwrite destination if it exists
        os.chmod(source, stat.S_IWUSR)
        if os.path.isfile(destination):
            os.chmod(destination, stat.S_IWUSR)
            os.unlink(destination)

    os.rename(source, destination)


def writefile(path, content, readonly=False):
    dirname, filename = os.path.split(path)
    if not os.path.exists(dirname):
        try:
            os.makedirs(dirname)
        except OSError as ex:
            if ex.errno != errno.EEXIST:
                raise

    fd, temp = tempfile.mkstemp(prefix=b'.%s-' % filename, dir=dirname)
    os.close(fd)

    try:
        f = util.posixfile(temp, b'wb')
        f.write(content)
        f.close()

        if readonly:
            mode = 0o444
        else:
            # tempfiles are created with 0o600, so we need to manually set the
            # mode.
            oldumask = os.umask(0)
            # there's no way to get the umask without modifying it, so set it
            # back
            os.umask(oldumask)
            mode = ~oldumask

        renamefile(temp, path)
        os.chmod(path, mode)
    except Exception:
        try:
            unlinkfile(temp)
        except OSError:
            pass
        raise
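
# Illustrative note (not part of this change): writefile() uses the
# classic write-to-temp-then-rename pattern, so readers never observe a
# partially written file; on any failure the temp file is removed and the
# destination is left untouched.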


def sortnodes(nodes, parentfunc):
    """Topologically sorts the nodes, using the parentfunc to find
    the parents of nodes."""
    nodes = set(nodes)
    childmap = {}
    parentmap = {}
    roots = []

    # Build a child and parent map
    for n in nodes:
        parents = [p for p in parentfunc(n) if p in nodes]
        parentmap[n] = set(parents)
        for p in parents:
            childmap.setdefault(p, set()).add(n)
        if not parents:
            roots.append(n)

    roots.sort()
    # Process roots, adding children to the queue as they become roots
    results = []
    while roots:
        n = roots.pop(0)
        results.append(n)
        if n in childmap:
            children = childmap[n]
            for c in children:
                childparents = parentmap[c]
                childparents.remove(n)
                if len(childparents) == 0:
                    # insert at the beginning, that way child nodes
                    # are likely to be output immediately after their
                    # parents. This gives better compression results.
                    roots.insert(0, c)

    return results
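
# Illustrative example (not part of this change): for a linear history
# a -> b -> c (so parentfunc(b) == [a], etc.),
#   sortnodes([c, a, b], parentfunc) == [a, b, c]
# parents always precede their children, and freed children are pushed to
# the front of the queue so they land right after their parents.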


def readexactly(stream, n):
    '''read n bytes from stream.read and abort if less was available'''
    s = stream.read(n)
    if len(s) < n:
        raise error.Abort(
            _(b"stream ended unexpectedly (got %d bytes, expected %d)")
            % (len(s), n)
        )
    return s


def readunpack(stream, fmt):
    data = readexactly(stream, struct.calcsize(fmt))
    return struct.unpack(fmt, data)


def readpath(stream):
    rawlen = readexactly(stream, constants.FILENAMESIZE)
    pathlen = struct.unpack(constants.FILENAMESTRUCT, rawlen)[0]
    return readexactly(stream, pathlen)


def readnodelist(stream):
    rawlen = readexactly(stream, constants.NODECOUNTSIZE)
    nodecount = struct.unpack(constants.NODECOUNTSTRUCT, rawlen)[0]
    for i in pycompat.xrange(nodecount):
        yield readexactly(stream, constants.NODESIZE)


def readpathlist(stream):
    rawlen = readexactly(stream, constants.PATHCOUNTSIZE)
    pathcount = struct.unpack(constants.PATHCOUNTSTRUCT, rawlen)[0]
    for i in pycompat.xrange(pathcount):
        yield readpath(stream)
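
# Illustrative note (not part of this change): every list on the wire is
# length-prefixed: a fixed-size count or length field (the struct formats
# come from constants) followed by exactly that many bytes, which is why
# all of the readers above are built on readexactly().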


def getgid(groupname):
    try:
        gid = grp.getgrnam(pycompat.fsdecode(groupname)).gr_gid
        return gid
    except KeyError:
        return None


def setstickygroupdir(path, gid, warn=None):
    if gid is None:
        return
    try:
        os.chown(path, -1, gid)
        os.chmod(path, 0o2775)
    except (IOError, OSError) as ex:
        if warn:
            warn(_(b'unable to chown/chmod on %s: %s\n') % (path, ex))


def mkstickygroupdir(ui, path):
    """Creates the given directory (if it doesn't exist) and gives it a
    particular group with setgid enabled."""
    gid = None
    groupname = ui.config(b"remotefilelog", b"cachegroup")
    if groupname:
        gid = getgid(groupname)
        if gid is None:
            ui.warn(_(b'unable to resolve group name: %s\n') % groupname)

    # we use a single stat syscall to test the existence and mode / group bit
    st = None
    try:
        st = os.stat(path)
    except OSError:
        pass

    if st:
        # exists
        if (st.st_mode & 0o2775) != 0o2775 or st.st_gid != gid:
            # permission needs to be fixed
            setstickygroupdir(path, gid, ui.warn)
        return

    oldumask = os.umask(0o002)
    try:
        missingdirs = [path]
        path = os.path.dirname(path)
        while path and not os.path.exists(path):
            missingdirs.append(path)
            path = os.path.dirname(path)

        for path in reversed(missingdirs):
            try:
                os.mkdir(path)
            except OSError as ex:
                if ex.errno != errno.EEXIST:
                    raise

        for path in missingdirs:
            setstickygroupdir(path, gid, ui.warn)
    finally:
        os.umask(oldumask)


def getusername(ui):
    try:
        return stringutil.shortuser(ui.username())
    except Exception:
        return b'unknown'


def getreponame(ui):
    reponame = ui.config(b'paths', b'default')
    if reponame:
        return os.path.basename(reponame)
    return b"unknown"