py3: replace os.sep with pycompat.ossep (part 2 of 4)...
Pulkit Goyal
r30614:cfe66dcf default
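This changeset is part of a series porting Mercurial to Python 3 by replacing direct uses of os.sep (a unicode str on Python 3) with pycompat.ossep, a bytes-valued path separator, so that path handling stays in bytes throughout. The change visible in the hunk below is at line 289 of mercurial/dirstate.py, where "rootsep += os.sep" becomes "rootsep += pycompat.ossep". A minimal sketch of the idea behind pycompat.ossep, for context only (simplified; this is not Mercurial's actual pycompat module, and the names and fallback logic here are assumptions):

# sketch of a bytes path-separator compatibility shim (assumed, simplified)
import os
import sys

if sys.version_info[0] >= 3:
    # os.sep is a str on Python 3; code that works with bytes paths needs
    # a bytes-valued separator to avoid mixing bytes and str
    ossep = os.sep.encode('ascii')
else:
    # on Python 2, os.sep is already a byte string
    ossep = os.sep

def addtrailingsep(rootsep):
    # mirrors the pattern touched in this hunk: append the separator to a
    # bytes path (hypothetical helper, used only for illustration)
    if not rootsep.endswith(ossep):
        rootsep += ossep
    return rootsep

print(addtrailingsep(b'/repo'))  # b'/repo/' on POSIX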
@@ -1,1260 +1,1260 @@
1 # dirstate.py - working directory tracking for mercurial
1 # dirstate.py - working directory tracking for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import collections
10 import collections
11 import errno
11 import errno
12 import os
12 import os
13 import stat
13 import stat
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import nullid
16 from .node import nullid
17 from . import (
17 from . import (
18 encoding,
18 encoding,
19 error,
19 error,
20 match as matchmod,
20 match as matchmod,
21 osutil,
21 osutil,
22 parsers,
22 parsers,
23 pathutil,
23 pathutil,
24 pycompat,
24 pycompat,
25 scmutil,
25 scmutil,
26 util,
26 util,
27 )
27 )
28
28
29 propertycache = util.propertycache
29 propertycache = util.propertycache
30 filecache = scmutil.filecache
30 filecache = scmutil.filecache
31 _rangemask = 0x7fffffff
31 _rangemask = 0x7fffffff
32
32
33 dirstatetuple = parsers.dirstatetuple
33 dirstatetuple = parsers.dirstatetuple
34
34
35 class repocache(filecache):
35 class repocache(filecache):
36 """filecache for files in .hg/"""
36 """filecache for files in .hg/"""
37 def join(self, obj, fname):
37 def join(self, obj, fname):
38 return obj._opener.join(fname)
38 return obj._opener.join(fname)
39
39
40 class rootcache(filecache):
40 class rootcache(filecache):
41 """filecache for files in the repository root"""
41 """filecache for files in the repository root"""
42 def join(self, obj, fname):
42 def join(self, obj, fname):
43 return obj._join(fname)
43 return obj._join(fname)
44
44
45 def _getfsnow(vfs):
45 def _getfsnow(vfs):
46 '''Get "now" timestamp on filesystem'''
46 '''Get "now" timestamp on filesystem'''
47 tmpfd, tmpname = vfs.mkstemp()
47 tmpfd, tmpname = vfs.mkstemp()
48 try:
48 try:
49 return os.fstat(tmpfd).st_mtime
49 return os.fstat(tmpfd).st_mtime
50 finally:
50 finally:
51 os.close(tmpfd)
51 os.close(tmpfd)
52 vfs.unlink(tmpname)
52 vfs.unlink(tmpname)
53
53
54 def nonnormalentries(dmap):
54 def nonnormalentries(dmap):
55 '''Compute the nonnormal dirstate entries from the dmap'''
55 '''Compute the nonnormal dirstate entries from the dmap'''
56 try:
56 try:
57 return parsers.nonnormalentries(dmap)
57 return parsers.nonnormalentries(dmap)
58 except AttributeError:
58 except AttributeError:
59 return set(fname for fname, e in dmap.iteritems()
59 return set(fname for fname, e in dmap.iteritems()
60 if e[0] != 'n' or e[3] == -1)
60 if e[0] != 'n' or e[3] == -1)
61
61
62 def _trypending(root, vfs, filename):
62 def _trypending(root, vfs, filename):
63 '''Open file to be read according to HG_PENDING environment variable
63 '''Open file to be read according to HG_PENDING environment variable
64
64
65 This opens '.pending' of specified 'filename' only when HG_PENDING
65 This opens '.pending' of specified 'filename' only when HG_PENDING
66 is equal to 'root'.
66 is equal to 'root'.
67
67
68 This returns '(fp, is_pending_opened)' tuple.
68 This returns '(fp, is_pending_opened)' tuple.
69 '''
69 '''
70 if root == os.environ.get('HG_PENDING'):
70 if root == os.environ.get('HG_PENDING'):
71 try:
71 try:
72 return (vfs('%s.pending' % filename), True)
72 return (vfs('%s.pending' % filename), True)
73 except IOError as inst:
73 except IOError as inst:
74 if inst.errno != errno.ENOENT:
74 if inst.errno != errno.ENOENT:
75 raise
75 raise
76 return (vfs(filename), False)
76 return (vfs(filename), False)
77
77
78 class dirstate(object):
78 class dirstate(object):
79
79
80 def __init__(self, opener, ui, root, validate):
80 def __init__(self, opener, ui, root, validate):
81 '''Create a new dirstate object.
81 '''Create a new dirstate object.
82
82
83 opener is an open()-like callable that can be used to open the
83 opener is an open()-like callable that can be used to open the
84 dirstate file; root is the root of the directory tracked by
84 dirstate file; root is the root of the directory tracked by
85 the dirstate.
85 the dirstate.
86 '''
86 '''
87 self._opener = opener
87 self._opener = opener
88 self._validate = validate
88 self._validate = validate
89 self._root = root
89 self._root = root
90 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
90 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
91 # UNC path pointing to root share (issue4557)
91 # UNC path pointing to root share (issue4557)
92 self._rootdir = pathutil.normasprefix(root)
92 self._rootdir = pathutil.normasprefix(root)
93 # internal config: ui.forcecwd
93 # internal config: ui.forcecwd
94 forcecwd = ui.config('ui', 'forcecwd')
94 forcecwd = ui.config('ui', 'forcecwd')
95 if forcecwd:
95 if forcecwd:
96 self._cwd = forcecwd
96 self._cwd = forcecwd
97 self._dirty = False
97 self._dirty = False
98 self._dirtypl = False
98 self._dirtypl = False
99 self._lastnormaltime = 0
99 self._lastnormaltime = 0
100 self._ui = ui
100 self._ui = ui
101 self._filecache = {}
101 self._filecache = {}
102 self._parentwriters = 0
102 self._parentwriters = 0
103 self._filename = 'dirstate'
103 self._filename = 'dirstate'
104 self._pendingfilename = '%s.pending' % self._filename
104 self._pendingfilename = '%s.pending' % self._filename
105 self._plchangecallbacks = {}
105 self._plchangecallbacks = {}
106 self._origpl = None
106 self._origpl = None
107
107
108 # for consistent view between _pl() and _read() invocations
108 # for consistent view between _pl() and _read() invocations
109 self._pendingmode = None
109 self._pendingmode = None
110
110
111 def beginparentchange(self):
111 def beginparentchange(self):
112 '''Marks the beginning of a set of changes that involve changing
112 '''Marks the beginning of a set of changes that involve changing
113 the dirstate parents. If there is an exception during this time,
113 the dirstate parents. If there is an exception during this time,
114 the dirstate will not be written when the wlock is released. This
114 the dirstate will not be written when the wlock is released. This
115 prevents writing an incoherent dirstate where the parent doesn't
115 prevents writing an incoherent dirstate where the parent doesn't
116 match the contents.
116 match the contents.
117 '''
117 '''
118 self._parentwriters += 1
118 self._parentwriters += 1
119
119
120 def endparentchange(self):
120 def endparentchange(self):
121 '''Marks the end of a set of changes that involve changing the
121 '''Marks the end of a set of changes that involve changing the
122 dirstate parents. Once all parent changes have been marked done,
122 dirstate parents. Once all parent changes have been marked done,
123 the wlock will be free to write the dirstate on release.
123 the wlock will be free to write the dirstate on release.
124 '''
124 '''
125 if self._parentwriters > 0:
125 if self._parentwriters > 0:
126 self._parentwriters -= 1
126 self._parentwriters -= 1
127
127
128 def pendingparentchange(self):
128 def pendingparentchange(self):
129 '''Returns true if the dirstate is in the middle of a set of changes
129 '''Returns true if the dirstate is in the middle of a set of changes
130 that modify the dirstate parent.
130 that modify the dirstate parent.
131 '''
131 '''
132 return self._parentwriters > 0
132 return self._parentwriters > 0
133
133
134 @propertycache
134 @propertycache
135 def _map(self):
135 def _map(self):
136 '''Return the dirstate contents as a map from filename to
136 '''Return the dirstate contents as a map from filename to
137 (state, mode, size, time).'''
137 (state, mode, size, time).'''
138 self._read()
138 self._read()
139 return self._map
139 return self._map
140
140
141 @propertycache
141 @propertycache
142 def _copymap(self):
142 def _copymap(self):
143 self._read()
143 self._read()
144 return self._copymap
144 return self._copymap
145
145
146 @propertycache
146 @propertycache
147 def _nonnormalset(self):
147 def _nonnormalset(self):
148 return nonnormalentries(self._map)
148 return nonnormalentries(self._map)
149
149
150 @propertycache
150 @propertycache
151 def _filefoldmap(self):
151 def _filefoldmap(self):
152 try:
152 try:
153 makefilefoldmap = parsers.make_file_foldmap
153 makefilefoldmap = parsers.make_file_foldmap
154 except AttributeError:
154 except AttributeError:
155 pass
155 pass
156 else:
156 else:
157 return makefilefoldmap(self._map, util.normcasespec,
157 return makefilefoldmap(self._map, util.normcasespec,
158 util.normcasefallback)
158 util.normcasefallback)
159
159
160 f = {}
160 f = {}
161 normcase = util.normcase
161 normcase = util.normcase
162 for name, s in self._map.iteritems():
162 for name, s in self._map.iteritems():
163 if s[0] != 'r':
163 if s[0] != 'r':
164 f[normcase(name)] = name
164 f[normcase(name)] = name
165 f['.'] = '.' # prevents useless util.fspath() invocation
165 f['.'] = '.' # prevents useless util.fspath() invocation
166 return f
166 return f
167
167
168 @propertycache
168 @propertycache
169 def _dirfoldmap(self):
169 def _dirfoldmap(self):
170 f = {}
170 f = {}
171 normcase = util.normcase
171 normcase = util.normcase
172 for name in self._dirs:
172 for name in self._dirs:
173 f[normcase(name)] = name
173 f[normcase(name)] = name
174 return f
174 return f
175
175
176 @repocache('branch')
176 @repocache('branch')
177 def _branch(self):
177 def _branch(self):
178 try:
178 try:
179 return self._opener.read("branch").strip() or "default"
179 return self._opener.read("branch").strip() or "default"
180 except IOError as inst:
180 except IOError as inst:
181 if inst.errno != errno.ENOENT:
181 if inst.errno != errno.ENOENT:
182 raise
182 raise
183 return "default"
183 return "default"
184
184
185 @propertycache
185 @propertycache
186 def _pl(self):
186 def _pl(self):
187 try:
187 try:
188 fp = self._opendirstatefile()
188 fp = self._opendirstatefile()
189 st = fp.read(40)
189 st = fp.read(40)
190 fp.close()
190 fp.close()
191 l = len(st)
191 l = len(st)
192 if l == 40:
192 if l == 40:
193 return st[:20], st[20:40]
193 return st[:20], st[20:40]
194 elif l > 0 and l < 40:
194 elif l > 0 and l < 40:
195 raise error.Abort(_('working directory state appears damaged!'))
195 raise error.Abort(_('working directory state appears damaged!'))
196 except IOError as err:
196 except IOError as err:
197 if err.errno != errno.ENOENT:
197 if err.errno != errno.ENOENT:
198 raise
198 raise
199 return [nullid, nullid]
199 return [nullid, nullid]
200
200
201 @propertycache
201 @propertycache
202 def _dirs(self):
202 def _dirs(self):
203 return util.dirs(self._map, 'r')
203 return util.dirs(self._map, 'r')
204
204
205 def dirs(self):
205 def dirs(self):
206 return self._dirs
206 return self._dirs
207
207
208 @rootcache('.hgignore')
208 @rootcache('.hgignore')
209 def _ignore(self):
209 def _ignore(self):
210 files = self._ignorefiles()
210 files = self._ignorefiles()
211 if not files:
211 if not files:
212 return util.never
212 return util.never
213
213
214 pats = ['include:%s' % f for f in files]
214 pats = ['include:%s' % f for f in files]
215 return matchmod.match(self._root, '', [], pats, warn=self._ui.warn)
215 return matchmod.match(self._root, '', [], pats, warn=self._ui.warn)
216
216
217 @propertycache
217 @propertycache
218 def _slash(self):
218 def _slash(self):
219 return self._ui.configbool('ui', 'slash') and pycompat.ossep != '/'
219 return self._ui.configbool('ui', 'slash') and pycompat.ossep != '/'
220
220
221 @propertycache
221 @propertycache
222 def _checklink(self):
222 def _checklink(self):
223 return util.checklink(self._root)
223 return util.checklink(self._root)
224
224
225 @propertycache
225 @propertycache
226 def _checkexec(self):
226 def _checkexec(self):
227 return util.checkexec(self._root)
227 return util.checkexec(self._root)
228
228
229 @propertycache
229 @propertycache
230 def _checkcase(self):
230 def _checkcase(self):
231 return not util.fscasesensitive(self._join('.hg'))
231 return not util.fscasesensitive(self._join('.hg'))
232
232
233 def _join(self, f):
233 def _join(self, f):
234 # much faster than os.path.join()
234 # much faster than os.path.join()
235 # it's safe because f is always a relative path
235 # it's safe because f is always a relative path
236 return self._rootdir + f
236 return self._rootdir + f
237
237
238 def flagfunc(self, buildfallback):
238 def flagfunc(self, buildfallback):
239 if self._checklink and self._checkexec:
239 if self._checklink and self._checkexec:
240 def f(x):
240 def f(x):
241 try:
241 try:
242 st = os.lstat(self._join(x))
242 st = os.lstat(self._join(x))
243 if util.statislink(st):
243 if util.statislink(st):
244 return 'l'
244 return 'l'
245 if util.statisexec(st):
245 if util.statisexec(st):
246 return 'x'
246 return 'x'
247 except OSError:
247 except OSError:
248 pass
248 pass
249 return ''
249 return ''
250 return f
250 return f
251
251
252 fallback = buildfallback()
252 fallback = buildfallback()
253 if self._checklink:
253 if self._checklink:
254 def f(x):
254 def f(x):
255 if os.path.islink(self._join(x)):
255 if os.path.islink(self._join(x)):
256 return 'l'
256 return 'l'
257 if 'x' in fallback(x):
257 if 'x' in fallback(x):
258 return 'x'
258 return 'x'
259 return ''
259 return ''
260 return f
260 return f
261 if self._checkexec:
261 if self._checkexec:
262 def f(x):
262 def f(x):
263 if 'l' in fallback(x):
263 if 'l' in fallback(x):
264 return 'l'
264 return 'l'
265 if util.isexec(self._join(x)):
265 if util.isexec(self._join(x)):
266 return 'x'
266 return 'x'
267 return ''
267 return ''
268 return f
268 return f
269 else:
269 else:
270 return fallback
270 return fallback
271
271
272 @propertycache
272 @propertycache
273 def _cwd(self):
273 def _cwd(self):
274 return pycompat.getcwd()
274 return pycompat.getcwd()
275
275
276 def getcwd(self):
276 def getcwd(self):
277 '''Return the path from which a canonical path is calculated.
277 '''Return the path from which a canonical path is calculated.
278
278
279 This path should be used to resolve file patterns or to convert
279 This path should be used to resolve file patterns or to convert
280 canonical paths back to file paths for display. It shouldn't be
280 canonical paths back to file paths for display. It shouldn't be
281 used to get real file paths. Use vfs functions instead.
281 used to get real file paths. Use vfs functions instead.
282 '''
282 '''
283 cwd = self._cwd
283 cwd = self._cwd
284 if cwd == self._root:
284 if cwd == self._root:
285 return ''
285 return ''
286 # self._root ends with a path separator if self._root is '/' or 'C:\'
286 # self._root ends with a path separator if self._root is '/' or 'C:\'
287 rootsep = self._root
287 rootsep = self._root
288 if not util.endswithsep(rootsep):
288 if not util.endswithsep(rootsep):
289 rootsep += os.sep
289 rootsep += pycompat.ossep
290 if cwd.startswith(rootsep):
290 if cwd.startswith(rootsep):
291 return cwd[len(rootsep):]
291 return cwd[len(rootsep):]
292 else:
292 else:
293 # we're outside the repo. return an absolute path.
293 # we're outside the repo. return an absolute path.
294 return cwd
294 return cwd
295
295
296 def pathto(self, f, cwd=None):
296 def pathto(self, f, cwd=None):
297 if cwd is None:
297 if cwd is None:
298 cwd = self.getcwd()
298 cwd = self.getcwd()
299 path = util.pathto(self._root, cwd, f)
299 path = util.pathto(self._root, cwd, f)
300 if self._slash:
300 if self._slash:
301 return util.pconvert(path)
301 return util.pconvert(path)
302 return path
302 return path
303
303
304 def __getitem__(self, key):
304 def __getitem__(self, key):
305 '''Return the current state of key (a filename) in the dirstate.
305 '''Return the current state of key (a filename) in the dirstate.
306
306
307 States are:
307 States are:
308 n normal
308 n normal
309 m needs merging
309 m needs merging
310 r marked for removal
310 r marked for removal
311 a marked for addition
311 a marked for addition
312 ? not tracked
312 ? not tracked
313 '''
313 '''
314 return self._map.get(key, ("?",))[0]
314 return self._map.get(key, ("?",))[0]
315
315
316 def __contains__(self, key):
316 def __contains__(self, key):
317 return key in self._map
317 return key in self._map
318
318
319 def __iter__(self):
319 def __iter__(self):
320 for x in sorted(self._map):
320 for x in sorted(self._map):
321 yield x
321 yield x
322
322
323 def iteritems(self):
323 def iteritems(self):
324 return self._map.iteritems()
324 return self._map.iteritems()
325
325
326 def parents(self):
326 def parents(self):
327 return [self._validate(p) for p in self._pl]
327 return [self._validate(p) for p in self._pl]
328
328
329 def p1(self):
329 def p1(self):
330 return self._validate(self._pl[0])
330 return self._validate(self._pl[0])
331
331
332 def p2(self):
332 def p2(self):
333 return self._validate(self._pl[1])
333 return self._validate(self._pl[1])
334
334
335 def branch(self):
335 def branch(self):
336 return encoding.tolocal(self._branch)
336 return encoding.tolocal(self._branch)
337
337
338 def setparents(self, p1, p2=nullid):
338 def setparents(self, p1, p2=nullid):
339 """Set dirstate parents to p1 and p2.
339 """Set dirstate parents to p1 and p2.
340
340
341 When moving from two parents to one, 'm' merged entries are
341 When moving from two parents to one, 'm' merged entries are
342 adjusted to normal and previous copy records discarded and
342 adjusted to normal and previous copy records discarded and
343 returned by the call.
343 returned by the call.
344
344
345 See localrepo.setparents()
345 See localrepo.setparents()
346 """
346 """
347 if self._parentwriters == 0:
347 if self._parentwriters == 0:
348 raise ValueError("cannot set dirstate parent without "
348 raise ValueError("cannot set dirstate parent without "
349 "calling dirstate.beginparentchange")
349 "calling dirstate.beginparentchange")
350
350
351 self._dirty = self._dirtypl = True
351 self._dirty = self._dirtypl = True
352 oldp2 = self._pl[1]
352 oldp2 = self._pl[1]
353 if self._origpl is None:
353 if self._origpl is None:
354 self._origpl = self._pl
354 self._origpl = self._pl
355 self._pl = p1, p2
355 self._pl = p1, p2
356 copies = {}
356 copies = {}
357 if oldp2 != nullid and p2 == nullid:
357 if oldp2 != nullid and p2 == nullid:
358 for f, s in self._map.iteritems():
358 for f, s in self._map.iteritems():
359 # Discard 'm' markers when moving away from a merge state
359 # Discard 'm' markers when moving away from a merge state
360 if s[0] == 'm':
360 if s[0] == 'm':
361 if f in self._copymap:
361 if f in self._copymap:
362 copies[f] = self._copymap[f]
362 copies[f] = self._copymap[f]
363 self.normallookup(f)
363 self.normallookup(f)
364 # Also fix up otherparent markers
364 # Also fix up otherparent markers
365 elif s[0] == 'n' and s[2] == -2:
365 elif s[0] == 'n' and s[2] == -2:
366 if f in self._copymap:
366 if f in self._copymap:
367 copies[f] = self._copymap[f]
367 copies[f] = self._copymap[f]
368 self.add(f)
368 self.add(f)
369 return copies
369 return copies
370
370
371 def setbranch(self, branch):
371 def setbranch(self, branch):
372 self._branch = encoding.fromlocal(branch)
372 self._branch = encoding.fromlocal(branch)
373 f = self._opener('branch', 'w', atomictemp=True, checkambig=True)
373 f = self._opener('branch', 'w', atomictemp=True, checkambig=True)
374 try:
374 try:
375 f.write(self._branch + '\n')
375 f.write(self._branch + '\n')
376 f.close()
376 f.close()
377
377
378 # make sure filecache has the correct stat info for _branch after
378 # make sure filecache has the correct stat info for _branch after
379 # replacing the underlying file
379 # replacing the underlying file
380 ce = self._filecache['_branch']
380 ce = self._filecache['_branch']
381 if ce:
381 if ce:
382 ce.refresh()
382 ce.refresh()
383 except: # re-raises
383 except: # re-raises
384 f.discard()
384 f.discard()
385 raise
385 raise
386
386
387 def _opendirstatefile(self):
387 def _opendirstatefile(self):
388 fp, mode = _trypending(self._root, self._opener, self._filename)
388 fp, mode = _trypending(self._root, self._opener, self._filename)
389 if self._pendingmode is not None and self._pendingmode != mode:
389 if self._pendingmode is not None and self._pendingmode != mode:
390 fp.close()
390 fp.close()
391 raise error.Abort(_('working directory state may be '
391 raise error.Abort(_('working directory state may be '
392 'changed parallelly'))
392 'changed parallelly'))
393 self._pendingmode = mode
393 self._pendingmode = mode
394 return fp
394 return fp
395
395
396 def _read(self):
396 def _read(self):
397 self._map = {}
397 self._map = {}
398 self._copymap = {}
398 self._copymap = {}
399 try:
399 try:
400 fp = self._opendirstatefile()
400 fp = self._opendirstatefile()
401 try:
401 try:
402 st = fp.read()
402 st = fp.read()
403 finally:
403 finally:
404 fp.close()
404 fp.close()
405 except IOError as err:
405 except IOError as err:
406 if err.errno != errno.ENOENT:
406 if err.errno != errno.ENOENT:
407 raise
407 raise
408 return
408 return
409 if not st:
409 if not st:
410 return
410 return
411
411
412 if util.safehasattr(parsers, 'dict_new_presized'):
412 if util.safehasattr(parsers, 'dict_new_presized'):
413 # Make an estimate of the number of files in the dirstate based on
413 # Make an estimate of the number of files in the dirstate based on
414 # its size. From a linear regression on a set of real-world repos,
414 # its size. From a linear regression on a set of real-world repos,
415 # all over 10,000 files, the size of a dirstate entry is 85
415 # all over 10,000 files, the size of a dirstate entry is 85
416 # bytes. The cost of resizing is significantly higher than the cost
416 # bytes. The cost of resizing is significantly higher than the cost
417 # of filling in a larger presized dict, so subtract 20% from the
417 # of filling in a larger presized dict, so subtract 20% from the
418 # size.
418 # size.
419 #
419 #
420 # This heuristic is imperfect in many ways, so in a future dirstate
420 # This heuristic is imperfect in many ways, so in a future dirstate
421 # format update it makes sense to just record the number of entries
421 # format update it makes sense to just record the number of entries
422 # on write.
422 # on write.
423 self._map = parsers.dict_new_presized(len(st) / 71)
423 self._map = parsers.dict_new_presized(len(st) / 71)
424
424
425 # Python's garbage collector triggers a GC each time a certain number
425 # Python's garbage collector triggers a GC each time a certain number
426 # of container objects (the number being defined by
426 # of container objects (the number being defined by
427 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
427 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
428 # for each file in the dirstate. The C version then immediately marks
428 # for each file in the dirstate. The C version then immediately marks
429 # them as not to be tracked by the collector. However, this has no
429 # them as not to be tracked by the collector. However, this has no
430 # effect on when GCs are triggered, only on what objects the GC looks
430 # effect on when GCs are triggered, only on what objects the GC looks
431 # into. This means that O(number of files) GCs are unavoidable.
431 # into. This means that O(number of files) GCs are unavoidable.
432 # Depending on when in the process's lifetime the dirstate is parsed,
432 # Depending on when in the process's lifetime the dirstate is parsed,
433 # this can get very expensive. As a workaround, disable GC while
433 # this can get very expensive. As a workaround, disable GC while
434 # parsing the dirstate.
434 # parsing the dirstate.
435 #
435 #
436 # (we cannot decorate the function directly since it is in a C module)
436 # (we cannot decorate the function directly since it is in a C module)
437 parse_dirstate = util.nogc(parsers.parse_dirstate)
437 parse_dirstate = util.nogc(parsers.parse_dirstate)
438 p = parse_dirstate(self._map, self._copymap, st)
438 p = parse_dirstate(self._map, self._copymap, st)
439 if not self._dirtypl:
439 if not self._dirtypl:
440 self._pl = p
440 self._pl = p
441
441
442 def invalidate(self):
442 def invalidate(self):
443 for a in ("_map", "_copymap", "_filefoldmap", "_dirfoldmap", "_branch",
443 for a in ("_map", "_copymap", "_filefoldmap", "_dirfoldmap", "_branch",
444 "_pl", "_dirs", "_ignore", "_nonnormalset"):
444 "_pl", "_dirs", "_ignore", "_nonnormalset"):
445 if a in self.__dict__:
445 if a in self.__dict__:
446 delattr(self, a)
446 delattr(self, a)
447 self._lastnormaltime = 0
447 self._lastnormaltime = 0
448 self._dirty = False
448 self._dirty = False
449 self._parentwriters = 0
449 self._parentwriters = 0
450 self._origpl = None
450 self._origpl = None
451
451
452 def copy(self, source, dest):
452 def copy(self, source, dest):
453 """Mark dest as a copy of source. Unmark dest if source is None."""
453 """Mark dest as a copy of source. Unmark dest if source is None."""
454 if source == dest:
454 if source == dest:
455 return
455 return
456 self._dirty = True
456 self._dirty = True
457 if source is not None:
457 if source is not None:
458 self._copymap[dest] = source
458 self._copymap[dest] = source
459 elif dest in self._copymap:
459 elif dest in self._copymap:
460 del self._copymap[dest]
460 del self._copymap[dest]
461
461
462 def copied(self, file):
462 def copied(self, file):
463 return self._copymap.get(file, None)
463 return self._copymap.get(file, None)
464
464
465 def copies(self):
465 def copies(self):
466 return self._copymap
466 return self._copymap
467
467
468 def _droppath(self, f):
468 def _droppath(self, f):
469 if self[f] not in "?r" and "_dirs" in self.__dict__:
469 if self[f] not in "?r" and "_dirs" in self.__dict__:
470 self._dirs.delpath(f)
470 self._dirs.delpath(f)
471
471
472 if "_filefoldmap" in self.__dict__:
472 if "_filefoldmap" in self.__dict__:
473 normed = util.normcase(f)
473 normed = util.normcase(f)
474 if normed in self._filefoldmap:
474 if normed in self._filefoldmap:
475 del self._filefoldmap[normed]
475 del self._filefoldmap[normed]
476
476
477 def _addpath(self, f, state, mode, size, mtime):
477 def _addpath(self, f, state, mode, size, mtime):
478 oldstate = self[f]
478 oldstate = self[f]
479 if state == 'a' or oldstate == 'r':
479 if state == 'a' or oldstate == 'r':
480 scmutil.checkfilename(f)
480 scmutil.checkfilename(f)
481 if f in self._dirs:
481 if f in self._dirs:
482 raise error.Abort(_('directory %r already in dirstate') % f)
482 raise error.Abort(_('directory %r already in dirstate') % f)
483 # shadows
483 # shadows
484 for d in util.finddirs(f):
484 for d in util.finddirs(f):
485 if d in self._dirs:
485 if d in self._dirs:
486 break
486 break
487 if d in self._map and self[d] != 'r':
487 if d in self._map and self[d] != 'r':
488 raise error.Abort(
488 raise error.Abort(
489 _('file %r in dirstate clashes with %r') % (d, f))
489 _('file %r in dirstate clashes with %r') % (d, f))
490 if oldstate in "?r" and "_dirs" in self.__dict__:
490 if oldstate in "?r" and "_dirs" in self.__dict__:
491 self._dirs.addpath(f)
491 self._dirs.addpath(f)
492 self._dirty = True
492 self._dirty = True
493 self._map[f] = dirstatetuple(state, mode, size, mtime)
493 self._map[f] = dirstatetuple(state, mode, size, mtime)
494 if state != 'n' or mtime == -1:
494 if state != 'n' or mtime == -1:
495 self._nonnormalset.add(f)
495 self._nonnormalset.add(f)
496
496
497 def normal(self, f):
497 def normal(self, f):
498 '''Mark a file normal and clean.'''
498 '''Mark a file normal and clean.'''
499 s = os.lstat(self._join(f))
499 s = os.lstat(self._join(f))
500 mtime = s.st_mtime
500 mtime = s.st_mtime
501 self._addpath(f, 'n', s.st_mode,
501 self._addpath(f, 'n', s.st_mode,
502 s.st_size & _rangemask, mtime & _rangemask)
502 s.st_size & _rangemask, mtime & _rangemask)
503 if f in self._copymap:
503 if f in self._copymap:
504 del self._copymap[f]
504 del self._copymap[f]
505 if f in self._nonnormalset:
505 if f in self._nonnormalset:
506 self._nonnormalset.remove(f)
506 self._nonnormalset.remove(f)
507 if mtime > self._lastnormaltime:
507 if mtime > self._lastnormaltime:
508 # Remember the most recent modification timeslot for status(),
508 # Remember the most recent modification timeslot for status(),
509 # to make sure we won't miss future size-preserving file content
509 # to make sure we won't miss future size-preserving file content
510 # modifications that happen within the same timeslot.
510 # modifications that happen within the same timeslot.
511 self._lastnormaltime = mtime
511 self._lastnormaltime = mtime
512
512
513 def normallookup(self, f):
513 def normallookup(self, f):
514 '''Mark a file normal, but possibly dirty.'''
514 '''Mark a file normal, but possibly dirty.'''
515 if self._pl[1] != nullid and f in self._map:
515 if self._pl[1] != nullid and f in self._map:
516 # if there is a merge going on and the file was either
516 # if there is a merge going on and the file was either
517 # in state 'm' (-1) or coming from other parent (-2) before
517 # in state 'm' (-1) or coming from other parent (-2) before
518 # being removed, restore that state.
518 # being removed, restore that state.
519 entry = self._map[f]
519 entry = self._map[f]
520 if entry[0] == 'r' and entry[2] in (-1, -2):
520 if entry[0] == 'r' and entry[2] in (-1, -2):
521 source = self._copymap.get(f)
521 source = self._copymap.get(f)
522 if entry[2] == -1:
522 if entry[2] == -1:
523 self.merge(f)
523 self.merge(f)
524 elif entry[2] == -2:
524 elif entry[2] == -2:
525 self.otherparent(f)
525 self.otherparent(f)
526 if source:
526 if source:
527 self.copy(source, f)
527 self.copy(source, f)
528 return
528 return
529 if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
529 if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
530 return
530 return
531 self._addpath(f, 'n', 0, -1, -1)
531 self._addpath(f, 'n', 0, -1, -1)
532 if f in self._copymap:
532 if f in self._copymap:
533 del self._copymap[f]
533 del self._copymap[f]
534 if f in self._nonnormalset:
534 if f in self._nonnormalset:
535 self._nonnormalset.remove(f)
535 self._nonnormalset.remove(f)
536
536
537 def otherparent(self, f):
537 def otherparent(self, f):
538 '''Mark as coming from the other parent, always dirty.'''
538 '''Mark as coming from the other parent, always dirty.'''
539 if self._pl[1] == nullid:
539 if self._pl[1] == nullid:
540 raise error.Abort(_("setting %r to other parent "
540 raise error.Abort(_("setting %r to other parent "
541 "only allowed in merges") % f)
541 "only allowed in merges") % f)
542 if f in self and self[f] == 'n':
542 if f in self and self[f] == 'n':
543 # merge-like
543 # merge-like
544 self._addpath(f, 'm', 0, -2, -1)
544 self._addpath(f, 'm', 0, -2, -1)
545 else:
545 else:
546 # add-like
546 # add-like
547 self._addpath(f, 'n', 0, -2, -1)
547 self._addpath(f, 'n', 0, -2, -1)
548
548
549 if f in self._copymap:
549 if f in self._copymap:
550 del self._copymap[f]
550 del self._copymap[f]
551
551
552 def add(self, f):
552 def add(self, f):
553 '''Mark a file added.'''
553 '''Mark a file added.'''
554 self._addpath(f, 'a', 0, -1, -1)
554 self._addpath(f, 'a', 0, -1, -1)
555 if f in self._copymap:
555 if f in self._copymap:
556 del self._copymap[f]
556 del self._copymap[f]
557
557
558 def remove(self, f):
558 def remove(self, f):
559 '''Mark a file removed.'''
559 '''Mark a file removed.'''
560 self._dirty = True
560 self._dirty = True
561 self._droppath(f)
561 self._droppath(f)
562 size = 0
562 size = 0
563 if self._pl[1] != nullid and f in self._map:
563 if self._pl[1] != nullid and f in self._map:
564 # backup the previous state
564 # backup the previous state
565 entry = self._map[f]
565 entry = self._map[f]
566 if entry[0] == 'm': # merge
566 if entry[0] == 'm': # merge
567 size = -1
567 size = -1
568 elif entry[0] == 'n' and entry[2] == -2: # other parent
568 elif entry[0] == 'n' and entry[2] == -2: # other parent
569 size = -2
569 size = -2
570 self._map[f] = dirstatetuple('r', 0, size, 0)
570 self._map[f] = dirstatetuple('r', 0, size, 0)
571 self._nonnormalset.add(f)
571 self._nonnormalset.add(f)
572 if size == 0 and f in self._copymap:
572 if size == 0 and f in self._copymap:
573 del self._copymap[f]
573 del self._copymap[f]
574
574
575 def merge(self, f):
575 def merge(self, f):
576 '''Mark a file merged.'''
576 '''Mark a file merged.'''
577 if self._pl[1] == nullid:
577 if self._pl[1] == nullid:
578 return self.normallookup(f)
578 return self.normallookup(f)
579 return self.otherparent(f)
579 return self.otherparent(f)
580
580
581 def drop(self, f):
581 def drop(self, f):
582 '''Drop a file from the dirstate'''
582 '''Drop a file from the dirstate'''
583 if f in self._map:
583 if f in self._map:
584 self._dirty = True
584 self._dirty = True
585 self._droppath(f)
585 self._droppath(f)
586 del self._map[f]
586 del self._map[f]
587 if f in self._nonnormalset:
587 if f in self._nonnormalset:
588 self._nonnormalset.remove(f)
588 self._nonnormalset.remove(f)
589 if f in self._copymap:
589 if f in self._copymap:
590 del self._copymap[f]
590 del self._copymap[f]
591
591
592 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
592 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
593 if exists is None:
593 if exists is None:
594 exists = os.path.lexists(os.path.join(self._root, path))
594 exists = os.path.lexists(os.path.join(self._root, path))
595 if not exists:
595 if not exists:
596 # Maybe a path component exists
596 # Maybe a path component exists
597 if not ignoremissing and '/' in path:
597 if not ignoremissing and '/' in path:
598 d, f = path.rsplit('/', 1)
598 d, f = path.rsplit('/', 1)
599 d = self._normalize(d, False, ignoremissing, None)
599 d = self._normalize(d, False, ignoremissing, None)
600 folded = d + "/" + f
600 folded = d + "/" + f
601 else:
601 else:
602 # No path components, preserve original case
602 # No path components, preserve original case
603 folded = path
603 folded = path
604 else:
604 else:
605 # recursively normalize leading directory components
605 # recursively normalize leading directory components
606 # against dirstate
606 # against dirstate
607 if '/' in normed:
607 if '/' in normed:
608 d, f = normed.rsplit('/', 1)
608 d, f = normed.rsplit('/', 1)
609 d = self._normalize(d, False, ignoremissing, True)
609 d = self._normalize(d, False, ignoremissing, True)
610 r = self._root + "/" + d
610 r = self._root + "/" + d
611 folded = d + "/" + util.fspath(f, r)
611 folded = d + "/" + util.fspath(f, r)
612 else:
612 else:
613 folded = util.fspath(normed, self._root)
613 folded = util.fspath(normed, self._root)
614 storemap[normed] = folded
614 storemap[normed] = folded
615
615
616 return folded
616 return folded
617
617
618 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
618 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
619 normed = util.normcase(path)
619 normed = util.normcase(path)
620 folded = self._filefoldmap.get(normed, None)
620 folded = self._filefoldmap.get(normed, None)
621 if folded is None:
621 if folded is None:
622 if isknown:
622 if isknown:
623 folded = path
623 folded = path
624 else:
624 else:
625 folded = self._discoverpath(path, normed, ignoremissing, exists,
625 folded = self._discoverpath(path, normed, ignoremissing, exists,
626 self._filefoldmap)
626 self._filefoldmap)
627 return folded
627 return folded
628
628
629 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
629 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
630 normed = util.normcase(path)
630 normed = util.normcase(path)
631 folded = self._filefoldmap.get(normed, None)
631 folded = self._filefoldmap.get(normed, None)
632 if folded is None:
632 if folded is None:
633 folded = self._dirfoldmap.get(normed, None)
633 folded = self._dirfoldmap.get(normed, None)
634 if folded is None:
634 if folded is None:
635 if isknown:
635 if isknown:
636 folded = path
636 folded = path
637 else:
637 else:
638 # store discovered result in dirfoldmap so that future
638 # store discovered result in dirfoldmap so that future
639 # normalizefile calls don't start matching directories
639 # normalizefile calls don't start matching directories
640 folded = self._discoverpath(path, normed, ignoremissing, exists,
640 folded = self._discoverpath(path, normed, ignoremissing, exists,
641 self._dirfoldmap)
641 self._dirfoldmap)
642 return folded
642 return folded
643
643
644 def normalize(self, path, isknown=False, ignoremissing=False):
644 def normalize(self, path, isknown=False, ignoremissing=False):
645 '''
645 '''
646 normalize the case of a pathname when on a casefolding filesystem
646 normalize the case of a pathname when on a casefolding filesystem
647
647
648 isknown specifies whether the filename came from walking the
648 isknown specifies whether the filename came from walking the
649 disk, to avoid extra filesystem access.
649 disk, to avoid extra filesystem access.
650
650
651 If ignoremissing is True, missing paths are returned
651 If ignoremissing is True, missing paths are returned
652 unchanged. Otherwise, we try harder to normalize possibly
652 unchanged. Otherwise, we try harder to normalize possibly
653 existing path components.
653 existing path components.
654
654
655 The normalized case is determined based on the following precedence:
655 The normalized case is determined based on the following precedence:
656
656
657 - version of name already stored in the dirstate
657 - version of name already stored in the dirstate
658 - version of name stored on disk
658 - version of name stored on disk
659 - version provided via command arguments
659 - version provided via command arguments
660 '''
660 '''
661
661
662 if self._checkcase:
662 if self._checkcase:
663 return self._normalize(path, isknown, ignoremissing)
663 return self._normalize(path, isknown, ignoremissing)
664 return path
664 return path
665
665
666 def clear(self):
666 def clear(self):
667 self._map = {}
667 self._map = {}
668 self._nonnormalset = set()
668 self._nonnormalset = set()
669 if "_dirs" in self.__dict__:
669 if "_dirs" in self.__dict__:
670 delattr(self, "_dirs")
670 delattr(self, "_dirs")
671 self._copymap = {}
671 self._copymap = {}
672 self._pl = [nullid, nullid]
672 self._pl = [nullid, nullid]
673 self._lastnormaltime = 0
673 self._lastnormaltime = 0
674 self._dirty = True
674 self._dirty = True
675
675
676 def rebuild(self, parent, allfiles, changedfiles=None):
676 def rebuild(self, parent, allfiles, changedfiles=None):
677 if changedfiles is None:
677 if changedfiles is None:
678 # Rebuild entire dirstate
678 # Rebuild entire dirstate
679 changedfiles = allfiles
679 changedfiles = allfiles
680 lastnormaltime = self._lastnormaltime
680 lastnormaltime = self._lastnormaltime
681 self.clear()
681 self.clear()
682 self._lastnormaltime = lastnormaltime
682 self._lastnormaltime = lastnormaltime
683
683
684 if self._origpl is None:
684 if self._origpl is None:
685 self._origpl = self._pl
685 self._origpl = self._pl
686 self._pl = (parent, nullid)
686 self._pl = (parent, nullid)
687 for f in changedfiles:
687 for f in changedfiles:
688 if f in allfiles:
688 if f in allfiles:
689 self.normallookup(f)
689 self.normallookup(f)
690 else:
690 else:
691 self.drop(f)
691 self.drop(f)
692
692
693 self._dirty = True
693 self._dirty = True
694
694
695 def write(self, tr):
695 def write(self, tr):
696 if not self._dirty:
696 if not self._dirty:
697 return
697 return
698
698
699 filename = self._filename
699 filename = self._filename
700 if tr:
700 if tr:
701 # 'dirstate.write()' is not only for writing in-memory
701 # 'dirstate.write()' is not only for writing in-memory
702 # changes out, but also for dropping ambiguous timestamp.
702 # changes out, but also for dropping ambiguous timestamp.
703 # delayed writing re-raise "ambiguous timestamp issue".
703 # delayed writing re-raise "ambiguous timestamp issue".
704 # See also the wiki page below for detail:
704 # See also the wiki page below for detail:
705 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
705 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
706
706
707 # emulate dropping timestamp in 'parsers.pack_dirstate'
707 # emulate dropping timestamp in 'parsers.pack_dirstate'
708 now = _getfsnow(self._opener)
708 now = _getfsnow(self._opener)
709 dmap = self._map
709 dmap = self._map
710 for f, e in dmap.iteritems():
710 for f, e in dmap.iteritems():
711 if e[0] == 'n' and e[3] == now:
711 if e[0] == 'n' and e[3] == now:
712 dmap[f] = dirstatetuple(e[0], e[1], e[2], -1)
712 dmap[f] = dirstatetuple(e[0], e[1], e[2], -1)
713 self._nonnormalset.add(f)
713 self._nonnormalset.add(f)
714
714
715 # emulate that all 'dirstate.normal' results are written out
715 # emulate that all 'dirstate.normal' results are written out
716 self._lastnormaltime = 0
716 self._lastnormaltime = 0
717
717
718 # delay writing in-memory changes out
718 # delay writing in-memory changes out
719 tr.addfilegenerator('dirstate', (self._filename,),
719 tr.addfilegenerator('dirstate', (self._filename,),
720 self._writedirstate, location='plain')
720 self._writedirstate, location='plain')
721 return
721 return
722
722
723 st = self._opener(filename, "w", atomictemp=True, checkambig=True)
723 st = self._opener(filename, "w", atomictemp=True, checkambig=True)
724 self._writedirstate(st)
724 self._writedirstate(st)
725
725
726 def addparentchangecallback(self, category, callback):
726 def addparentchangecallback(self, category, callback):
727 """add a callback to be called when the wd parents are changed
727 """add a callback to be called when the wd parents are changed
728
728
729 Callback will be called with the following arguments:
729 Callback will be called with the following arguments:
730 dirstate, (oldp1, oldp2), (newp1, newp2)
730 dirstate, (oldp1, oldp2), (newp1, newp2)
731
731
732 Category is a unique identifier to allow overwriting an old callback
732 Category is a unique identifier to allow overwriting an old callback
733 with a newer callback.
733 with a newer callback.
734 """
734 """
735 self._plchangecallbacks[category] = callback
735 self._plchangecallbacks[category] = callback
736
736
737 def _writedirstate(self, st):
737 def _writedirstate(self, st):
738 # notify callbacks about parents change
738 # notify callbacks about parents change
739 if self._origpl is not None and self._origpl != self._pl:
739 if self._origpl is not None and self._origpl != self._pl:
740 for c, callback in sorted(self._plchangecallbacks.iteritems()):
740 for c, callback in sorted(self._plchangecallbacks.iteritems()):
741 callback(self, self._origpl, self._pl)
741 callback(self, self._origpl, self._pl)
742 self._origpl = None
742 self._origpl = None
743 # use the modification time of the newly created temporary file as the
743 # use the modification time of the newly created temporary file as the
744 # filesystem's notion of 'now'
744 # filesystem's notion of 'now'
745 now = util.fstat(st).st_mtime & _rangemask
745 now = util.fstat(st).st_mtime & _rangemask
746
746
747 # enough 'delaywrite' prevents 'pack_dirstate' from dropping
747 # enough 'delaywrite' prevents 'pack_dirstate' from dropping
748 # timestamp of each entries in dirstate, because of 'now > mtime'
748 # timestamp of each entries in dirstate, because of 'now > mtime'
749 delaywrite = self._ui.configint('debug', 'dirstate.delaywrite', 0)
749 delaywrite = self._ui.configint('debug', 'dirstate.delaywrite', 0)
750 if delaywrite > 0:
750 if delaywrite > 0:
751 # do we have any files to delay for?
751 # do we have any files to delay for?
752 for f, e in self._map.iteritems():
752 for f, e in self._map.iteritems():
753 if e[0] == 'n' and e[3] == now:
753 if e[0] == 'n' and e[3] == now:
754 import time # to avoid useless import
754 import time # to avoid useless import
755 # rather than sleep n seconds, sleep until the next
755 # rather than sleep n seconds, sleep until the next
756 # multiple of n seconds
756 # multiple of n seconds
757 clock = time.time()
757 clock = time.time()
758 start = int(clock) - (int(clock) % delaywrite)
758 start = int(clock) - (int(clock) % delaywrite)
759 end = start + delaywrite
759 end = start + delaywrite
760 time.sleep(end - clock)
760 time.sleep(end - clock)
761 now = end # trust our estimate that the end is near now
761 now = end # trust our estimate that the end is near now
762 break
762 break
763
763
764 st.write(parsers.pack_dirstate(self._map, self._copymap, self._pl, now))
764 st.write(parsers.pack_dirstate(self._map, self._copymap, self._pl, now))
765 self._nonnormalset = nonnormalentries(self._map)
765 self._nonnormalset = nonnormalentries(self._map)
766 st.close()
766 st.close()
767 self._lastnormaltime = 0
767 self._lastnormaltime = 0
768 self._dirty = self._dirtypl = False
768 self._dirty = self._dirtypl = False
769
769
770 def _dirignore(self, f):
770 def _dirignore(self, f):
771 if f == '.':
771 if f == '.':
772 return False
772 return False
773 if self._ignore(f):
773 if self._ignore(f):
774 return True
774 return True
775 for p in util.finddirs(f):
775 for p in util.finddirs(f):
776 if self._ignore(p):
776 if self._ignore(p):
777 return True
777 return True
778 return False
778 return False
779
779
780 def _ignorefiles(self):
780 def _ignorefiles(self):
781 files = []
781 files = []
782 if os.path.exists(self._join('.hgignore')):
782 if os.path.exists(self._join('.hgignore')):
783 files.append(self._join('.hgignore'))
783 files.append(self._join('.hgignore'))
784 for name, path in self._ui.configitems("ui"):
784 for name, path in self._ui.configitems("ui"):
785 if name == 'ignore' or name.startswith('ignore.'):
785 if name == 'ignore' or name.startswith('ignore.'):
786 # we need to use os.path.join here rather than self._join
786 # we need to use os.path.join here rather than self._join
787 # because path is arbitrary and user-specified
787 # because path is arbitrary and user-specified
788 files.append(os.path.join(self._rootdir, util.expandpath(path)))
788 files.append(os.path.join(self._rootdir, util.expandpath(path)))
789 return files
789 return files
790
790
791 def _ignorefileandline(self, f):
791 def _ignorefileandline(self, f):
792 files = collections.deque(self._ignorefiles())
792 files = collections.deque(self._ignorefiles())
793 visited = set()
793 visited = set()
794 while files:
794 while files:
795 i = files.popleft()
795 i = files.popleft()
796 patterns = matchmod.readpatternfile(i, self._ui.warn,
796 patterns = matchmod.readpatternfile(i, self._ui.warn,
797 sourceinfo=True)
797 sourceinfo=True)
798 for pattern, lineno, line in patterns:
798 for pattern, lineno, line in patterns:
799 kind, p = matchmod._patsplit(pattern, 'glob')
799 kind, p = matchmod._patsplit(pattern, 'glob')
800 if kind == "subinclude":
800 if kind == "subinclude":
801 if p not in visited:
801 if p not in visited:
802 files.append(p)
802 files.append(p)
803 continue
803 continue
804 m = matchmod.match(self._root, '', [], [pattern],
804 m = matchmod.match(self._root, '', [], [pattern],
805 warn=self._ui.warn)
805 warn=self._ui.warn)
806 if m(f):
806 if m(f):
807 return (i, lineno, line)
807 return (i, lineno, line)
808 visited.add(i)
808 visited.add(i)
809 return (None, -1, "")
809 return (None, -1, "")
810
810
811 def _walkexplicit(self, match, subrepos):
811 def _walkexplicit(self, match, subrepos):
812 '''Get stat data about the files explicitly specified by match.
812 '''Get stat data about the files explicitly specified by match.
813
813
814 Return a triple (results, dirsfound, dirsnotfound).
814 Return a triple (results, dirsfound, dirsnotfound).
815 - results is a mapping from filename to stat result. It also contains
815 - results is a mapping from filename to stat result. It also contains
816 listings mapping subrepos and .hg to None.
816 listings mapping subrepos and .hg to None.
817 - dirsfound is a list of files found to be directories.
817 - dirsfound is a list of files found to be directories.
818 - dirsnotfound is a list of files that the dirstate thinks are
818 - dirsnotfound is a list of files that the dirstate thinks are
819 directories and that were not found.'''
819 directories and that were not found.'''
820
820
821 def badtype(mode):
821 def badtype(mode):
822 kind = _('unknown')
822 kind = _('unknown')
823 if stat.S_ISCHR(mode):
823 if stat.S_ISCHR(mode):
824 kind = _('character device')
824 kind = _('character device')
825 elif stat.S_ISBLK(mode):
825 elif stat.S_ISBLK(mode):
826 kind = _('block device')
826 kind = _('block device')
827 elif stat.S_ISFIFO(mode):
827 elif stat.S_ISFIFO(mode):
828 kind = _('fifo')
828 kind = _('fifo')
829 elif stat.S_ISSOCK(mode):
829 elif stat.S_ISSOCK(mode):
830 kind = _('socket')
830 kind = _('socket')
831 elif stat.S_ISDIR(mode):
831 elif stat.S_ISDIR(mode):
832 kind = _('directory')
832 kind = _('directory')
833 return _('unsupported file type (type is %s)') % kind
833 return _('unsupported file type (type is %s)') % kind
834
834
835 matchedir = match.explicitdir
835 matchedir = match.explicitdir
836 badfn = match.bad
836 badfn = match.bad
837 dmap = self._map
837 dmap = self._map
838 lstat = os.lstat
838 lstat = os.lstat
839 getkind = stat.S_IFMT
839 getkind = stat.S_IFMT
840 dirkind = stat.S_IFDIR
840 dirkind = stat.S_IFDIR
841 regkind = stat.S_IFREG
841 regkind = stat.S_IFREG
842 lnkkind = stat.S_IFLNK
842 lnkkind = stat.S_IFLNK
843 join = self._join
843 join = self._join
844 dirsfound = []
844 dirsfound = []
845 foundadd = dirsfound.append
845 foundadd = dirsfound.append
846 dirsnotfound = []
846 dirsnotfound = []
847 notfoundadd = dirsnotfound.append
847 notfoundadd = dirsnotfound.append
848
848
849 if not match.isexact() and self._checkcase:
849 if not match.isexact() and self._checkcase:
850 normalize = self._normalize
850 normalize = self._normalize
851 else:
851 else:
852 normalize = None
852 normalize = None
853
853
854 files = sorted(match.files())
854 files = sorted(match.files())
855 subrepos.sort()
855 subrepos.sort()
856 i, j = 0, 0
856 i, j = 0, 0
857 while i < len(files) and j < len(subrepos):
857 while i < len(files) and j < len(subrepos):
858 subpath = subrepos[j] + "/"
858 subpath = subrepos[j] + "/"
859 if files[i] < subpath:
859 if files[i] < subpath:
860 i += 1
860 i += 1
861 continue
861 continue
862 while i < len(files) and files[i].startswith(subpath):
862 while i < len(files) and files[i].startswith(subpath):
863 del files[i]
863 del files[i]
864 j += 1
864 j += 1
865
865
866 if not files or '.' in files:
866 if not files or '.' in files:
867 files = ['.']
867 files = ['.']
868 results = dict.fromkeys(subrepos)
868 results = dict.fromkeys(subrepos)
869 results['.hg'] = None
869 results['.hg'] = None
870
870
871 alldirs = None
871 alldirs = None
872 for ff in files:
872 for ff in files:
873 # constructing the foldmap is expensive, so don't do it for the
873 # constructing the foldmap is expensive, so don't do it for the
874 # common case where files is ['.']
874 # common case where files is ['.']
875 if normalize and ff != '.':
875 if normalize and ff != '.':
876 nf = normalize(ff, False, True)
876 nf = normalize(ff, False, True)
877 else:
877 else:
878 nf = ff
878 nf = ff
879 if nf in results:
879 if nf in results:
880 continue
880 continue
881
881
882 try:
882 try:
883 st = lstat(join(nf))
883 st = lstat(join(nf))
884 kind = getkind(st.st_mode)
884 kind = getkind(st.st_mode)
885 if kind == dirkind:
885 if kind == dirkind:
886 if nf in dmap:
886 if nf in dmap:
887 # file replaced by dir on disk but still in dirstate
887 # file replaced by dir on disk but still in dirstate
888 results[nf] = None
888 results[nf] = None
889 if matchedir:
889 if matchedir:
890 matchedir(nf)
890 matchedir(nf)
891 foundadd((nf, ff))
891 foundadd((nf, ff))
892 elif kind == regkind or kind == lnkkind:
892 elif kind == regkind or kind == lnkkind:
893 results[nf] = st
893 results[nf] = st
894 else:
894 else:
895 badfn(ff, badtype(kind))
895 badfn(ff, badtype(kind))
896 if nf in dmap:
896 if nf in dmap:
897 results[nf] = None
897 results[nf] = None
898 except OSError as inst: # nf not found on disk - it is dirstate only
898 except OSError as inst: # nf not found on disk - it is dirstate only
899 if nf in dmap: # does it exactly match a missing file?
899 if nf in dmap: # does it exactly match a missing file?
900 results[nf] = None
900 results[nf] = None
901 else: # does it match a missing directory?
901 else: # does it match a missing directory?
902 if alldirs is None:
902 if alldirs is None:
903 alldirs = util.dirs(dmap)
903 alldirs = util.dirs(dmap)
904 if nf in alldirs:
904 if nf in alldirs:
905 if matchedir:
905 if matchedir:
906 matchedir(nf)
906 matchedir(nf)
907 notfoundadd(nf)
907 notfoundadd(nf)
908 else:
908 else:
909 badfn(ff, inst.strerror)
909 badfn(ff, inst.strerror)
910
910
911 # Case insensitive filesystems cannot rely on lstat() failing to detect
911 # Case insensitive filesystems cannot rely on lstat() failing to detect
912 # a case-only rename. Prune the stat object for any file that does not
912 # a case-only rename. Prune the stat object for any file that does not
913 # match the case in the filesystem, if there are multiple files that
913 # match the case in the filesystem, if there are multiple files that
914 # normalize to the same path.
914 # normalize to the same path.
915 if match.isexact() and self._checkcase:
915 if match.isexact() and self._checkcase:
916 normed = {}
916 normed = {}
917
917
918 for f, st in results.iteritems():
918 for f, st in results.iteritems():
919 if st is None:
919 if st is None:
920 continue
920 continue
921
921
922 nc = util.normcase(f)
922 nc = util.normcase(f)
923 paths = normed.get(nc)
923 paths = normed.get(nc)
924
924
925 if paths is None:
925 if paths is None:
926 paths = set()
926 paths = set()
927 normed[nc] = paths
927 normed[nc] = paths
928
928
929 paths.add(f)
929 paths.add(f)
930
930
931 for norm, paths in normed.iteritems():
931 for norm, paths in normed.iteritems():
932 if len(paths) > 1:
932 if len(paths) > 1:
933 for path in paths:
933 for path in paths:
934 folded = self._discoverpath(path, norm, True, None,
934 folded = self._discoverpath(path, norm, True, None,
935 self._dirfoldmap)
935 self._dirfoldmap)
936 if path != folded:
936 if path != folded:
937 results[path] = None
937 results[path] = None
938
938
939 return results, dirsfound, dirsnotfound
939 return results, dirsfound, dirsnotfound
940
940
941 def walk(self, match, subrepos, unknown, ignored, full=True):
941 def walk(self, match, subrepos, unknown, ignored, full=True):
942 '''
942 '''
943 Walk recursively through the directory tree, finding all files
943 Walk recursively through the directory tree, finding all files
944 matched by match.
944 matched by match.
945
945
946 If full is False, maybe skip some known-clean files.
946 If full is False, maybe skip some known-clean files.
947
947
948 Return a dict mapping filename to stat-like object (either
948 Return a dict mapping filename to stat-like object (either
949 mercurial.osutil.stat instance or return value of os.stat()).
949 mercurial.osutil.stat instance or return value of os.stat()).
950
950
951 '''
951 '''
952 # full is a flag that extensions that hook into walk can use -- this
952 # full is a flag that extensions that hook into walk can use -- this
953 # implementation doesn't use it at all. This satisfies the contract
953 # implementation doesn't use it at all. This satisfies the contract
954 # because we only guarantee a "maybe".
954 # because we only guarantee a "maybe".
955
955
956 if ignored:
956 if ignored:
957 ignore = util.never
957 ignore = util.never
958 dirignore = util.never
958 dirignore = util.never
959 elif unknown:
959 elif unknown:
960 ignore = self._ignore
960 ignore = self._ignore
961 dirignore = self._dirignore
961 dirignore = self._dirignore
962 else:
962 else:
963 # if not unknown and not ignored, drop dir recursion and step 2
963 # if not unknown and not ignored, drop dir recursion and step 2
964 ignore = util.always
964 ignore = util.always
965 dirignore = util.always
965 dirignore = util.always
966
966
967 matchfn = match.matchfn
967 matchfn = match.matchfn
968 matchalways = match.always()
968 matchalways = match.always()
969 matchtdir = match.traversedir
969 matchtdir = match.traversedir
970 dmap = self._map
970 dmap = self._map
971 listdir = osutil.listdir
971 listdir = osutil.listdir
972 lstat = os.lstat
972 lstat = os.lstat
973 dirkind = stat.S_IFDIR
973 dirkind = stat.S_IFDIR
974 regkind = stat.S_IFREG
974 regkind = stat.S_IFREG
975 lnkkind = stat.S_IFLNK
975 lnkkind = stat.S_IFLNK
976 join = self._join
976 join = self._join
977
977
978 exact = skipstep3 = False
978 exact = skipstep3 = False
979 if match.isexact(): # match.exact
979 if match.isexact(): # match.exact
980 exact = True
980 exact = True
981 dirignore = util.always # skip step 2
981 dirignore = util.always # skip step 2
982 elif match.prefix(): # match.match, no patterns
982 elif match.prefix(): # match.match, no patterns
983 skipstep3 = True
983 skipstep3 = True
984
984
985 if not exact and self._checkcase:
985 if not exact and self._checkcase:
986 normalize = self._normalize
986 normalize = self._normalize
987 normalizefile = self._normalizefile
987 normalizefile = self._normalizefile
988 skipstep3 = False
988 skipstep3 = False
989 else:
989 else:
990 normalize = self._normalize
990 normalize = self._normalize
991 normalizefile = None
991 normalizefile = None
992
992
993 # step 1: find all explicit files
993 # step 1: find all explicit files
994 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
994 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
995
995
996 skipstep3 = skipstep3 and not (work or dirsnotfound)
996 skipstep3 = skipstep3 and not (work or dirsnotfound)
997 work = [d for d in work if not dirignore(d[0])]
997 work = [d for d in work if not dirignore(d[0])]
998
998
999 # step 2: visit subdirectories
999 # step 2: visit subdirectories
1000 def traverse(work, alreadynormed):
1000 def traverse(work, alreadynormed):
1001 wadd = work.append
1001 wadd = work.append
1002 while work:
1002 while work:
1003 nd = work.pop()
1003 nd = work.pop()
1004 skip = None
1004 skip = None
1005 if nd == '.':
1005 if nd == '.':
1006 nd = ''
1006 nd = ''
1007 else:
1007 else:
1008 skip = '.hg'
1008 skip = '.hg'
1009 try:
1009 try:
1010 entries = listdir(join(nd), stat=True, skip=skip)
1010 entries = listdir(join(nd), stat=True, skip=skip)
1011 except OSError as inst:
1011 except OSError as inst:
1012 if inst.errno in (errno.EACCES, errno.ENOENT):
1012 if inst.errno in (errno.EACCES, errno.ENOENT):
1013 match.bad(self.pathto(nd), inst.strerror)
1013 match.bad(self.pathto(nd), inst.strerror)
1014 continue
1014 continue
1015 raise
1015 raise
1016 for f, kind, st in entries:
1016 for f, kind, st in entries:
1017 if normalizefile:
1017 if normalizefile:
1018 # even though f might be a directory, we're only
1018 # even though f might be a directory, we're only
1019 # interested in comparing it to files currently in the
1019 # interested in comparing it to files currently in the
1020 # dmap -- therefore normalizefile is enough
1020 # dmap -- therefore normalizefile is enough
1021 nf = normalizefile(nd and (nd + "/" + f) or f, True,
1021 nf = normalizefile(nd and (nd + "/" + f) or f, True,
1022 True)
1022 True)
1023 else:
1023 else:
1024 nf = nd and (nd + "/" + f) or f
1024 nf = nd and (nd + "/" + f) or f
1025 if nf not in results:
1025 if nf not in results:
1026 if kind == dirkind:
1026 if kind == dirkind:
1027 if not ignore(nf):
1027 if not ignore(nf):
1028 if matchtdir:
1028 if matchtdir:
1029 matchtdir(nf)
1029 matchtdir(nf)
1030 wadd(nf)
1030 wadd(nf)
1031 if nf in dmap and (matchalways or matchfn(nf)):
1031 if nf in dmap and (matchalways or matchfn(nf)):
1032 results[nf] = None
1032 results[nf] = None
1033 elif kind == regkind or kind == lnkkind:
1033 elif kind == regkind or kind == lnkkind:
1034 if nf in dmap:
1034 if nf in dmap:
1035 if matchalways or matchfn(nf):
1035 if matchalways or matchfn(nf):
1036 results[nf] = st
1036 results[nf] = st
1037 elif ((matchalways or matchfn(nf))
1037 elif ((matchalways or matchfn(nf))
1038 and not ignore(nf)):
1038 and not ignore(nf)):
1039 # unknown file -- normalize if necessary
1039 # unknown file -- normalize if necessary
1040 if not alreadynormed:
1040 if not alreadynormed:
1041 nf = normalize(nf, False, True)
1041 nf = normalize(nf, False, True)
1042 results[nf] = st
1042 results[nf] = st
1043 elif nf in dmap and (matchalways or matchfn(nf)):
1043 elif nf in dmap and (matchalways or matchfn(nf)):
1044 results[nf] = None
1044 results[nf] = None
1045
1045
1046 for nd, d in work:
1046 for nd, d in work:
1047 # alreadynormed means that traverse doesn't have to do any
1047 # alreadynormed means that traverse doesn't have to do any
1048 # expensive directory normalization
1048 # expensive directory normalization
1049 alreadynormed = not normalize or nd == d
1049 alreadynormed = not normalize or nd == d
1050 traverse([d], alreadynormed)
1050 traverse([d], alreadynormed)
1051
1051
1052 for s in subrepos:
1052 for s in subrepos:
1053 del results[s]
1053 del results[s]
1054 del results['.hg']
1054 del results['.hg']
1055
1055
1056 # step 3: visit remaining files from dmap
1056 # step 3: visit remaining files from dmap
1057 if not skipstep3 and not exact:
1057 if not skipstep3 and not exact:
1058 # If a dmap file is not in results yet, it was either
1058 # If a dmap file is not in results yet, it was either
1059 # a) not matching matchfn, b) ignored, c) missing, or d) under a
1059 # a) not matching matchfn, b) ignored, c) missing, or d) under a
1060 # symlink directory.
1060 # symlink directory.
1061 if not results and matchalways:
1061 if not results and matchalways:
1062 visit = dmap.keys()
1062 visit = dmap.keys()
1063 else:
1063 else:
1064 visit = [f for f in dmap if f not in results and matchfn(f)]
1064 visit = [f for f in dmap if f not in results and matchfn(f)]
1065 visit.sort()
1065 visit.sort()
1066
1066
1067 if unknown:
1067 if unknown:
1068 # unknown == True means we walked all dirs under the roots
1068 # unknown == True means we walked all dirs under the roots
1069 # that weren't ignored, and everything that matched was stat'ed
1069 # that weren't ignored, and everything that matched was stat'ed
1070 # and is already in results.
1070 # and is already in results.
1071 # The rest must thus be ignored or under a symlink.
1071 # The rest must thus be ignored or under a symlink.
1072 audit_path = pathutil.pathauditor(self._root)
1072 audit_path = pathutil.pathauditor(self._root)
1073
1073
1074 for nf in iter(visit):
1074 for nf in iter(visit):
1075 # If a stat for the same file was already added with a
1075 # If a stat for the same file was already added with a
1076 # different case, don't add one for this, since that would
1076 # different case, don't add one for this, since that would
1077 # make it appear as if the file exists under both names
1077 # make it appear as if the file exists under both names
1078 # on disk.
1078 # on disk.
1079 if (normalizefile and
1079 if (normalizefile and
1080 normalizefile(nf, True, True) in results):
1080 normalizefile(nf, True, True) in results):
1081 results[nf] = None
1081 results[nf] = None
1082 # Report ignored items in the dmap as long as they are not
1082 # Report ignored items in the dmap as long as they are not
1083 # under a symlink directory.
1083 # under a symlink directory.
1084 elif audit_path.check(nf):
1084 elif audit_path.check(nf):
1085 try:
1085 try:
1086 results[nf] = lstat(join(nf))
1086 results[nf] = lstat(join(nf))
1087 # file was just ignored, no links, and exists
1087 # file was just ignored, no links, and exists
1088 except OSError:
1088 except OSError:
1089 # file doesn't exist
1089 # file doesn't exist
1090 results[nf] = None
1090 results[nf] = None
1091 else:
1091 else:
1092 # It's either missing or under a symlink directory
1092 # It's either missing or under a symlink directory
1093 # which we in this case report as missing
1093 # which we in this case report as missing
1094 results[nf] = None
1094 results[nf] = None
1095 else:
1095 else:
1096 # We may not have walked the full directory tree above,
1096 # We may not have walked the full directory tree above,
1097 # so stat and check everything we missed.
1097 # so stat and check everything we missed.
1098 nf = iter(visit).next
1098 nf = iter(visit).next
1099 for st in util.statfiles([join(i) for i in visit]):
1099 for st in util.statfiles([join(i) for i in visit]):
1100 results[nf()] = st
1100 results[nf()] = st
1101 return results
1101 return results
1102
1102
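walk() above works in three steps: stat the explicitly named files, traverse directories with an explicit work list, then sweep any dirstate entries that were never stat'ed. The toy walker below shows the same work-list traversal on an ordinary directory tree; it uses only the standard library and simple predicates instead of Mercurial's matcher and ignore machinery, so it is an illustration of the pattern rather than a drop-in replacement:

import os
import stat

def toywalk(root, matchfn=lambda f: True, ignore=lambda f: False):
    # Return {relative path: lstat result} for matched files under root,
    # using an explicit work list like the traverse() helper above.
    results = {}
    work = ['']
    while work:
        nd = work.pop()
        for name in sorted(os.listdir(os.path.join(root, nd))):
            nf = os.path.join(nd, name) if nd else name
            st = os.lstat(os.path.join(root, nf))
            if stat.S_ISDIR(st.st_mode):
                if not ignore(nf):
                    work.append(nf)       # visit subdirectory later
            elif matchfn(nf) and not ignore(nf):
                results[nf] = st          # record the stat for matched files
    return results
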
1103 def status(self, match, subrepos, ignored, clean, unknown):
1103 def status(self, match, subrepos, ignored, clean, unknown):
1104 '''Determine the status of the working copy relative to the
1104 '''Determine the status of the working copy relative to the
1105 dirstate and return a pair of (unsure, status), where status is of type
1105 dirstate and return a pair of (unsure, status), where status is of type
1106 scmutil.status and:
1106 scmutil.status and:
1107
1107
1108 unsure:
1108 unsure:
1109 files that might have been modified since the dirstate was
1109 files that might have been modified since the dirstate was
1110 written, but need to be read to be sure (size is the same
1110 written, but need to be read to be sure (size is the same
1111 but mtime differs)
1111 but mtime differs)
1112 status.modified:
1112 status.modified:
1113 files that have definitely been modified since the dirstate
1113 files that have definitely been modified since the dirstate
1114 was written (different size or mode)
1114 was written (different size or mode)
1115 status.clean:
1115 status.clean:
1116 files that have definitely not been modified since the
1116 files that have definitely not been modified since the
1117 dirstate was written
1117 dirstate was written
1118 '''
1118 '''
1119 listignored, listclean, listunknown = ignored, clean, unknown
1119 listignored, listclean, listunknown = ignored, clean, unknown
1120 lookup, modified, added, unknown, ignored = [], [], [], [], []
1120 lookup, modified, added, unknown, ignored = [], [], [], [], []
1121 removed, deleted, clean = [], [], []
1121 removed, deleted, clean = [], [], []
1122
1122
1123 dmap = self._map
1123 dmap = self._map
1124 ladd = lookup.append # aka "unsure"
1124 ladd = lookup.append # aka "unsure"
1125 madd = modified.append
1125 madd = modified.append
1126 aadd = added.append
1126 aadd = added.append
1127 uadd = unknown.append
1127 uadd = unknown.append
1128 iadd = ignored.append
1128 iadd = ignored.append
1129 radd = removed.append
1129 radd = removed.append
1130 dadd = deleted.append
1130 dadd = deleted.append
1131 cadd = clean.append
1131 cadd = clean.append
1132 mexact = match.exact
1132 mexact = match.exact
1133 dirignore = self._dirignore
1133 dirignore = self._dirignore
1134 checkexec = self._checkexec
1134 checkexec = self._checkexec
1135 copymap = self._copymap
1135 copymap = self._copymap
1136 lastnormaltime = self._lastnormaltime
1136 lastnormaltime = self._lastnormaltime
1137
1137
1138 # We need to do full walks when either
1138 # We need to do full walks when either
1139 # - we're listing all clean files, or
1139 # - we're listing all clean files, or
1140 # - match.traversedir does something, because match.traversedir should
1140 # - match.traversedir does something, because match.traversedir should
1141 # be called for every dir in the working dir
1141 # be called for every dir in the working dir
1142 full = listclean or match.traversedir is not None
1142 full = listclean or match.traversedir is not None
1143 for fn, st in self.walk(match, subrepos, listunknown, listignored,
1143 for fn, st in self.walk(match, subrepos, listunknown, listignored,
1144 full=full).iteritems():
1144 full=full).iteritems():
1145 if fn not in dmap:
1145 if fn not in dmap:
1146 if (listignored or mexact(fn)) and dirignore(fn):
1146 if (listignored or mexact(fn)) and dirignore(fn):
1147 if listignored:
1147 if listignored:
1148 iadd(fn)
1148 iadd(fn)
1149 else:
1149 else:
1150 uadd(fn)
1150 uadd(fn)
1151 continue
1151 continue
1152
1152
1153 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
1153 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
1154 # written like that for performance reasons. dmap[fn] is not a
1154 # written like that for performance reasons. dmap[fn] is not a
1155 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
1155 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
1156 # opcode has fast paths when the value to be unpacked is a tuple or
1156 # opcode has fast paths when the value to be unpacked is a tuple or
1157 # a list, but falls back to creating a full-fledged iterator in
1157 # a list, but falls back to creating a full-fledged iterator in
1158 # general. That is much slower than simply accessing and storing the
1158 # general. That is much slower than simply accessing and storing the
1159 # tuple members one by one.
1159 # tuple members one by one.
1160 t = dmap[fn]
1160 t = dmap[fn]
1161 state = t[0]
1161 state = t[0]
1162 mode = t[1]
1162 mode = t[1]
1163 size = t[2]
1163 size = t[2]
1164 time = t[3]
1164 time = t[3]
1165
1165
1166 if not st and state in "nma":
1166 if not st and state in "nma":
1167 dadd(fn)
1167 dadd(fn)
1168 elif state == 'n':
1168 elif state == 'n':
1169 if (size >= 0 and
1169 if (size >= 0 and
1170 ((size != st.st_size and size != st.st_size & _rangemask)
1170 ((size != st.st_size and size != st.st_size & _rangemask)
1171 or ((mode ^ st.st_mode) & 0o100 and checkexec))
1171 or ((mode ^ st.st_mode) & 0o100 and checkexec))
1172 or size == -2 # other parent
1172 or size == -2 # other parent
1173 or fn in copymap):
1173 or fn in copymap):
1174 madd(fn)
1174 madd(fn)
1175 elif time != st.st_mtime and time != st.st_mtime & _rangemask:
1175 elif time != st.st_mtime and time != st.st_mtime & _rangemask:
1176 ladd(fn)
1176 ladd(fn)
1177 elif st.st_mtime == lastnormaltime:
1177 elif st.st_mtime == lastnormaltime:
1178 # fn may have just been marked as normal and it may have
1178 # fn may have just been marked as normal and it may have
1179 # changed in the same second without changing its size.
1179 # changed in the same second without changing its size.
1180 # This can happen if we quickly do multiple commits.
1180 # This can happen if we quickly do multiple commits.
1181 # Force lookup, so we don't miss such a racy file change.
1181 # Force lookup, so we don't miss such a racy file change.
1182 ladd(fn)
1182 ladd(fn)
1183 elif listclean:
1183 elif listclean:
1184 cadd(fn)
1184 cadd(fn)
1185 elif state == 'm':
1185 elif state == 'm':
1186 madd(fn)
1186 madd(fn)
1187 elif state == 'a':
1187 elif state == 'a':
1188 aadd(fn)
1188 aadd(fn)
1189 elif state == 'r':
1189 elif state == 'r':
1190 radd(fn)
1190 radd(fn)
1191
1191
1192 return (lookup, scmutil.status(modified, added, removed, deleted,
1192 return (lookup, scmutil.status(modified, added, removed, deleted,
1193 unknown, ignored, clean))
1193 unknown, ignored, clean))
1194
1194
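For a normal ('n') entry, the loop above decides between modified, lookup (unsure) and clean by comparing the recorded size, mode and mtime against a fresh stat. The helper below restates that decision table with plain integers; it deliberately omits the _rangemask truncation and the lastnormaltime race check, and the hypothetical 'copied' flag stands in for the copymap lookup:

def classify_normal(size, mode, mtime, st, checkexec=True, copied=False):
    # decision table for a dirstate entry in state 'n' (normal)
    if size >= 0 and (size != st.st_size
                      or (checkexec and (mode ^ st.st_mode) & 0o100)):
        return 'modified'       # size or exec bit changed on disk
    if size == -2 or copied:
        return 'modified'       # from the other merge parent, or a copy
    if mtime != st.st_mtime:
        return 'lookup'         # same size, different mtime: re-read content
    return 'clean'
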
1195 def matches(self, match):
1195 def matches(self, match):
1196 '''
1196 '''
1197 return files in the dirstate (in whatever state) filtered by match
1197 return files in the dirstate (in whatever state) filtered by match
1198 '''
1198 '''
1199 dmap = self._map
1199 dmap = self._map
1200 if match.always():
1200 if match.always():
1201 return dmap.keys()
1201 return dmap.keys()
1202 files = match.files()
1202 files = match.files()
1203 if match.isexact():
1203 if match.isexact():
1204 # fast path -- filter the other way around, since typically files is
1204 # fast path -- filter the other way around, since typically files is
1205 # much smaller than dmap
1205 # much smaller than dmap
1206 return [f for f in files if f in dmap]
1206 return [f for f in files if f in dmap]
1207 if match.prefix() and all(fn in dmap for fn in files):
1207 if match.prefix() and all(fn in dmap for fn in files):
1208 # fast path -- all the values are known to be files, so just return
1208 # fast path -- all the values are known to be files, so just return
1209 # that
1209 # that
1210 return list(files)
1210 return list(files)
1211 return [f for f in dmap if match(f)]
1211 return [f for f in dmap if match(f)]
1212
1212
1213 def _actualfilename(self, tr):
1213 def _actualfilename(self, tr):
1214 if tr:
1214 if tr:
1215 return self._pendingfilename
1215 return self._pendingfilename
1216 else:
1216 else:
1217 return self._filename
1217 return self._filename
1218
1218
1219 def savebackup(self, tr, suffix='', prefix=''):
1219 def savebackup(self, tr, suffix='', prefix=''):
1220 '''Save current dirstate into backup file with suffix'''
1220 '''Save current dirstate into backup file with suffix'''
1221 assert len(suffix) > 0 or len(prefix) > 0
1221 assert len(suffix) > 0 or len(prefix) > 0
1222 filename = self._actualfilename(tr)
1222 filename = self._actualfilename(tr)
1223
1223
1224 # use '_writedirstate' instead of 'write' to make sure changes are written out,
1224 # use '_writedirstate' instead of 'write' to make sure changes are written out,
1225 # because the latter skips writing while a transaction is running.
1225 # because the latter skips writing while a transaction is running.
1226 # The output file will be used to create a backup of the dirstate at this point.
1226 # The output file will be used to create a backup of the dirstate at this point.
1227 self._writedirstate(self._opener(filename, "w", atomictemp=True,
1227 self._writedirstate(self._opener(filename, "w", atomictemp=True,
1228 checkambig=True))
1228 checkambig=True))
1229
1229
1230 if tr:
1230 if tr:
1231 # ensure that subsequent tr.writepending returns True for
1231 # ensure that subsequent tr.writepending returns True for
1232 # changes written out above, even if dirstate is never
1232 # changes written out above, even if dirstate is never
1233 # changed after this
1233 # changed after this
1234 tr.addfilegenerator('dirstate', (self._filename,),
1234 tr.addfilegenerator('dirstate', (self._filename,),
1235 self._writedirstate, location='plain')
1235 self._writedirstate, location='plain')
1236
1236
1237 # ensure that pending file written above is unlinked at
1237 # ensure that pending file written above is unlinked at
1238 # failure, even if tr.writepending isn't invoked until the
1238 # failure, even if tr.writepending isn't invoked until the
1239 # end of this transaction
1239 # end of this transaction
1240 tr.registertmp(filename, location='plain')
1240 tr.registertmp(filename, location='plain')
1241
1241
1242 self._opener.write(prefix + self._filename + suffix,
1242 self._opener.write(prefix + self._filename + suffix,
1243 self._opener.tryread(filename))
1243 self._opener.tryread(filename))
1244
1244
1245 def restorebackup(self, tr, suffix='', prefix=''):
1245 def restorebackup(self, tr, suffix='', prefix=''):
1246 '''Restore dirstate by backup file with suffix'''
1246 '''Restore dirstate by backup file with suffix'''
1247 assert len(suffix) > 0 or len(prefix) > 0
1247 assert len(suffix) > 0 or len(prefix) > 0
1248 # this "invalidate()" prevents "wlock.release()" from writing
1248 # this "invalidate()" prevents "wlock.release()" from writing
1249 # changes of dirstate out after restoring from backup file
1249 # changes of dirstate out after restoring from backup file
1250 self.invalidate()
1250 self.invalidate()
1251 filename = self._actualfilename(tr)
1251 filename = self._actualfilename(tr)
1252 # using self._filename to avoid having "pending" in the backup filename
1252 # using self._filename to avoid having "pending" in the backup filename
1253 self._opener.rename(prefix + self._filename + suffix, filename,
1253 self._opener.rename(prefix + self._filename + suffix, filename,
1254 checkambig=True)
1254 checkambig=True)
1255
1255
1256 def clearbackup(self, tr, suffix='', prefix=''):
1256 def clearbackup(self, tr, suffix='', prefix=''):
1257 '''Clear backup file with suffix'''
1257 '''Clear backup file with suffix'''
1258 assert len(suffix) > 0 or len(prefix) > 0
1258 assert len(suffix) > 0 or len(prefix) > 0
1259 # using self._filename to avoid having "pending" in the backup filename
1259 # using self._filename to avoid having "pending" in the backup filename
1260 self._opener.unlink(prefix + self._filename + suffix)
1260 self._opener.unlink(prefix + self._filename + suffix)
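savebackup/restorebackup/clearbackup implement a small copy-aside protocol: write the current dirstate to a sibling file, then either rename it back into place or unlink it. A bare-bones sketch of the same protocol for an ordinary file, using plain os/shutil calls and hypothetical names instead of the vfs and transaction hooks:

import os
import shutil

def savebackup(path, suffix='.backup'):
    shutil.copyfile(path, path + suffix)      # snapshot the current contents

def restorebackup(path, suffix='.backup'):
    os.rename(path + suffix, path)            # put the snapshot back in place

def clearbackup(path, suffix='.backup'):
    os.unlink(path + suffix)                  # snapshot no longer needed
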
@@ -1,214 +1,215 b''
1 from __future__ import absolute_import
1 from __future__ import absolute_import
2
2
3 import errno
3 import errno
4 import os
4 import os
5 import posixpath
5 import posixpath
6 import stat
6 import stat
7
7
8 from .i18n import _
8 from .i18n import _
9 from . import (
9 from . import (
10 encoding,
10 encoding,
11 error,
11 error,
12 pycompat,
12 util,
13 util,
13 )
14 )
14
15
15 def _lowerclean(s):
16 def _lowerclean(s):
16 return encoding.hfsignoreclean(s.lower())
17 return encoding.hfsignoreclean(s.lower())
17
18
18 class pathauditor(object):
19 class pathauditor(object):
19 '''ensure that a filesystem path contains no banned components.
20 '''ensure that a filesystem path contains no banned components.
20 the following properties of a path are checked:
21 the following properties of a path are checked:
21
22
22 - ends with a directory separator
23 - ends with a directory separator
23 - under top-level .hg
24 - under top-level .hg
24 - starts at the root of a windows drive
25 - starts at the root of a windows drive
25 - contains ".."
26 - contains ".."
26
27
27 More checks are also done on the file system state:
28 More checks are also done on the file system state:
28 - traverses a symlink (e.g. a/symlink_here/b)
29 - traverses a symlink (e.g. a/symlink_here/b)
29 - inside a nested repository (a callback can be used to approve
30 - inside a nested repository (a callback can be used to approve
30 some nested repositories, e.g., subrepositories)
31 some nested repositories, e.g., subrepositories)
31
32
32 The file system checks are only done when 'realfs' is set to True (the
33 The file system checks are only done when 'realfs' is set to True (the
33 default). They should be disabled when we are auditing paths for operations on
34 default). They should be disabled when we are auditing paths for operations on
34 stored history.
35 stored history.
35 '''
36 '''
36
37
37 def __init__(self, root, callback=None, realfs=True):
38 def __init__(self, root, callback=None, realfs=True):
38 self.audited = set()
39 self.audited = set()
39 self.auditeddir = set()
40 self.auditeddir = set()
40 self.root = root
41 self.root = root
41 self._realfs = realfs
42 self._realfs = realfs
42 self.callback = callback
43 self.callback = callback
43 if os.path.lexists(root) and not util.fscasesensitive(root):
44 if os.path.lexists(root) and not util.fscasesensitive(root):
44 self.normcase = util.normcase
45 self.normcase = util.normcase
45 else:
46 else:
46 self.normcase = lambda x: x
47 self.normcase = lambda x: x
47
48
48 def __call__(self, path):
49 def __call__(self, path):
49 '''Check the relative path.
50 '''Check the relative path.
50 path may contain a pattern (e.g. foodir/**.txt)'''
51 path may contain a pattern (e.g. foodir/**.txt)'''
51
52
52 path = util.localpath(path)
53 path = util.localpath(path)
53 normpath = self.normcase(path)
54 normpath = self.normcase(path)
54 if normpath in self.audited:
55 if normpath in self.audited:
55 return
56 return
56 # AIX ignores "/" at end of path, others raise EISDIR.
57 # AIX ignores "/" at end of path, others raise EISDIR.
57 if util.endswithsep(path):
58 if util.endswithsep(path):
58 raise error.Abort(_("path ends in directory separator: %s") % path)
59 raise error.Abort(_("path ends in directory separator: %s") % path)
59 parts = util.splitpath(path)
60 parts = util.splitpath(path)
60 if (os.path.splitdrive(path)[0]
61 if (os.path.splitdrive(path)[0]
61 or _lowerclean(parts[0]) in ('.hg', '.hg.', '')
62 or _lowerclean(parts[0]) in ('.hg', '.hg.', '')
62 or os.pardir in parts):
63 or os.pardir in parts):
63 raise error.Abort(_("path contains illegal component: %s") % path)
64 raise error.Abort(_("path contains illegal component: %s") % path)
64 # Windows shortname aliases
65 # Windows shortname aliases
65 for p in parts:
66 for p in parts:
66 if "~" in p:
67 if "~" in p:
67 first, last = p.split("~", 1)
68 first, last = p.split("~", 1)
68 if last.isdigit() and first.upper() in ["HG", "HG8B6C"]:
69 if last.isdigit() and first.upper() in ["HG", "HG8B6C"]:
69 raise error.Abort(_("path contains illegal component: %s")
70 raise error.Abort(_("path contains illegal component: %s")
70 % path)
71 % path)
71 if '.hg' in _lowerclean(path):
72 if '.hg' in _lowerclean(path):
72 lparts = [_lowerclean(p.lower()) for p in parts]
73 lparts = [_lowerclean(p.lower()) for p in parts]
73 for p in '.hg', '.hg.':
74 for p in '.hg', '.hg.':
74 if p in lparts[1:]:
75 if p in lparts[1:]:
75 pos = lparts.index(p)
76 pos = lparts.index(p)
76 base = os.path.join(*parts[:pos])
77 base = os.path.join(*parts[:pos])
77 raise error.Abort(_("path '%s' is inside nested repo %r")
78 raise error.Abort(_("path '%s' is inside nested repo %r")
78 % (path, base))
79 % (path, base))
79
80
80 normparts = util.splitpath(normpath)
81 normparts = util.splitpath(normpath)
81 assert len(parts) == len(normparts)
82 assert len(parts) == len(normparts)
82
83
83 parts.pop()
84 parts.pop()
84 normparts.pop()
85 normparts.pop()
85 prefixes = []
86 prefixes = []
86 # It's important that we check the path parts starting from the root.
87 # It's important that we check the path parts starting from the root.
87 # This means we won't accidentally traverse a symlink into some other
88 # This means we won't accidentally traverse a symlink into some other
88 # filesystem (which is potentially expensive to access).
89 # filesystem (which is potentially expensive to access).
89 for i in range(len(parts)):
90 for i in range(len(parts)):
90 prefix = os.sep.join(parts[:i + 1])
91 prefix = pycompat.ossep.join(parts[:i + 1])
91 normprefix = os.sep.join(normparts[:i + 1])
92 normprefix = pycompat.ossep.join(normparts[:i + 1])
92 if normprefix in self.auditeddir:
93 if normprefix in self.auditeddir:
93 continue
94 continue
94 if self._realfs:
95 if self._realfs:
95 self._checkfs(prefix, path)
96 self._checkfs(prefix, path)
96 prefixes.append(normprefix)
97 prefixes.append(normprefix)
97
98
98 self.audited.add(normpath)
99 self.audited.add(normpath)
99 # only add prefixes to the cache after checking everything: we don't
100 # only add prefixes to the cache after checking everything: we don't
100 # want to add "foo/bar/baz" before checking if there's a "foo/.hg"
101 # want to add "foo/bar/baz" before checking if there's a "foo/.hg"
101 self.auditeddir.update(prefixes)
102 self.auditeddir.update(prefixes)
102
103
103 def _checkfs(self, prefix, path):
104 def _checkfs(self, prefix, path):
104 """raise exception if a file system backed check fails"""
105 """raise exception if a file system backed check fails"""
105 curpath = os.path.join(self.root, prefix)
106 curpath = os.path.join(self.root, prefix)
106 try:
107 try:
107 st = os.lstat(curpath)
108 st = os.lstat(curpath)
108 except OSError as err:
109 except OSError as err:
109 # EINVAL can be raised as invalid path syntax under win32.
110 # EINVAL can be raised as invalid path syntax under win32.
110 # They must be ignored so that patterns can be checked too.
111 # They must be ignored so that patterns can be checked too.
111 if err.errno not in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL):
112 if err.errno not in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL):
112 raise
113 raise
113 else:
114 else:
114 if stat.S_ISLNK(st.st_mode):
115 if stat.S_ISLNK(st.st_mode):
115 msg = _('path %r traverses symbolic link %r') % (path, prefix)
116 msg = _('path %r traverses symbolic link %r') % (path, prefix)
116 raise error.Abort(msg)
117 raise error.Abort(msg)
117 elif (stat.S_ISDIR(st.st_mode) and
118 elif (stat.S_ISDIR(st.st_mode) and
118 os.path.isdir(os.path.join(curpath, '.hg'))):
119 os.path.isdir(os.path.join(curpath, '.hg'))):
119 if not self.callback or not self.callback(curpath):
120 if not self.callback or not self.callback(curpath):
120 msg = _("path '%s' is inside nested repo %r")
121 msg = _("path '%s' is inside nested repo %r")
121 raise error.Abort(msg % (path, prefix))
122 raise error.Abort(msg % (path, prefix))
122
123
123 def check(self, path):
124 def check(self, path):
124 try:
125 try:
125 self(path)
126 self(path)
126 return True
127 return True
127 except (OSError, error.Abort):
128 except (OSError, error.Abort):
128 return False
129 return False
129
130
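pathauditor raises error.Abort for a banned path and returns None otherwise, with check() as the boolean wrapper. The sketch below reimplements only the purely lexical rules from the docstring (trailing separator, "..", top-level ".hg", absolute or empty components); it skips the symlink and nested-repo filesystem checks and is not the real class:

import os

def lexically_ok(path):
    # True if a repo-relative, slash-separated path passes the lexical rules.
    if path.endswith('/'):
        return False                       # ends with a directory separator
    parts = path.split('/')
    if os.pardir in parts:
        return False                       # contains ".."
    if parts and parts[0].lower() in ('.hg', '.hg.', ''):
        return False                       # under top-level .hg, or absolute/empty
    return True

# lexically_ok('foo/bar.txt') -> True
# lexically_ok('.hg/hgrc')    -> False
# lexically_ok('a/../b')      -> False
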
130 def canonpath(root, cwd, myname, auditor=None):
131 def canonpath(root, cwd, myname, auditor=None):
131 '''return the canonical path of myname, given cwd and root'''
132 '''return the canonical path of myname, given cwd and root'''
132 if util.endswithsep(root):
133 if util.endswithsep(root):
133 rootsep = root
134 rootsep = root
134 else:
135 else:
135 rootsep = root + os.sep
136 rootsep = root + pycompat.ossep
136 name = myname
137 name = myname
137 if not os.path.isabs(name):
138 if not os.path.isabs(name):
138 name = os.path.join(root, cwd, name)
139 name = os.path.join(root, cwd, name)
139 name = os.path.normpath(name)
140 name = os.path.normpath(name)
140 if auditor is None:
141 if auditor is None:
141 auditor = pathauditor(root)
142 auditor = pathauditor(root)
142 if name != rootsep and name.startswith(rootsep):
143 if name != rootsep and name.startswith(rootsep):
143 name = name[len(rootsep):]
144 name = name[len(rootsep):]
144 auditor(name)
145 auditor(name)
145 return util.pconvert(name)
146 return util.pconvert(name)
146 elif name == root:
147 elif name == root:
147 return ''
148 return ''
148 else:
149 else:
149 # Determine whether `name' is in the hierarchy at or beneath `root',
150 # Determine whether `name' is in the hierarchy at or beneath `root',
150 # by iterating name=dirname(name) until that causes no change (can't
151 # by iterating name=dirname(name) until that causes no change (can't
151 # check name == '/', because that doesn't work on windows). The list
152 # check name == '/', because that doesn't work on windows). The list
152 # `rel' holds the reversed list of components making up the relative
153 # `rel' holds the reversed list of components making up the relative
153 # file name we want.
154 # file name we want.
154 rel = []
155 rel = []
155 while True:
156 while True:
156 try:
157 try:
157 s = util.samefile(name, root)
158 s = util.samefile(name, root)
158 except OSError:
159 except OSError:
159 s = False
160 s = False
160 if s:
161 if s:
161 if not rel:
162 if not rel:
162 # name was actually the same as root (maybe a symlink)
163 # name was actually the same as root (maybe a symlink)
163 return ''
164 return ''
164 rel.reverse()
165 rel.reverse()
165 name = os.path.join(*rel)
166 name = os.path.join(*rel)
166 auditor(name)
167 auditor(name)
167 return util.pconvert(name)
168 return util.pconvert(name)
168 dirname, basename = util.split(name)
169 dirname, basename = util.split(name)
169 rel.append(basename)
170 rel.append(basename)
170 if dirname == name:
171 if dirname == name:
171 break
172 break
172 name = dirname
173 name = dirname
173
174
174 # A common mistake is to use -R, but specify a file relative to the repo
175 # A common mistake is to use -R, but specify a file relative to the repo
175 # instead of cwd. Detect that case, and provide a hint to the user.
176 # instead of cwd. Detect that case, and provide a hint to the user.
176 hint = None
177 hint = None
177 try:
178 try:
178 if cwd != root:
179 if cwd != root:
179 canonpath(root, root, myname, auditor)
180 canonpath(root, root, myname, auditor)
180 hint = (_("consider using '--cwd %s'")
181 hint = (_("consider using '--cwd %s'")
181 % os.path.relpath(root, cwd))
182 % os.path.relpath(root, cwd))
182 except error.Abort:
183 except error.Abort:
183 pass
184 pass
184
185
185 raise error.Abort(_("%s not under root '%s'") % (myname, root),
186 raise error.Abort(_("%s not under root '%s'") % (myname, root),
186 hint=hint)
187 hint=hint)
187
188
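canonpath turns a user-supplied name into a root-relative path: normalize it, strip the root prefix when possible, and otherwise walk dirname() upwards comparing against root with samefile() before giving up with a hint. A compact sketch of just the prefix-stripping branch, assuming an already-audited path and ignoring the samefile() fallback:

import os

def relative_to_root(root, cwd, name):
    # Return name relative to root, or None if containment cannot be shown
    # lexically (the caller would then fall back to the samefile() walk).
    rootsep = root if root.endswith(os.sep) else root + os.sep
    if not os.path.isabs(name):
        name = os.path.join(root, cwd, name)
    name = os.path.normpath(name)
    if name == root:
        return ''
    if name.startswith(rootsep):
        return name[len(rootsep):]
    return None
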
188 def normasprefix(path):
189 def normasprefix(path):
189 '''normalize the specified path as path prefix
190 '''normalize the specified path as path prefix
190
191
191 Returned value can be used safely for "p.startswith(prefix)",
192 Returned value can be used safely for "p.startswith(prefix)",
192 "p[len(prefix):]", and so on.
193 "p[len(prefix):]", and so on.
193
194
194 For efficiency, this expects "path" argument to be already
195 For efficiency, this expects "path" argument to be already
195 normalized by "os.path.normpath", "os.path.realpath", and so on.
196 normalized by "os.path.normpath", "os.path.realpath", and so on.
196
197
197 See also issue3033 for detail about need of this function.
198 See also issue3033 for detail about need of this function.
198
199
199 >>> normasprefix('/foo/bar').replace(os.sep, '/')
200 >>> normasprefix('/foo/bar').replace(os.sep, '/')
200 '/foo/bar/'
201 '/foo/bar/'
201 >>> normasprefix('/').replace(os.sep, '/')
202 >>> normasprefix('/').replace(os.sep, '/')
202 '/'
203 '/'
203 '''
204 '''
204 d, p = os.path.splitdrive(path)
205 d, p = os.path.splitdrive(path)
205 if len(p) != len(os.sep):
206 if len(p) != len(pycompat.ossep):
206 return path + os.sep
207 return path + pycompat.ossep
207 else:
208 else:
208 return path
209 return path
209
210
210 # forward two methods from posixpath that do what we need, but we'd
211 # forward two methods from posixpath that do what we need, but we'd
211 # rather not let our internals know that we're thinking in posix terms
212 # rather not let our internals know that we're thinking in posix terms
212 # - instead we'll let them be oblivious.
213 # - instead we'll let them be oblivious.
213 join = posixpath.join
214 join = posixpath.join
214 dirname = posixpath.dirname
215 dirname = posixpath.dirname
@@ -1,652 +1,652 b''
1 # posix.py - Posix utility function implementations for Mercurial
1 # posix.py - Posix utility function implementations for Mercurial
2 #
2 #
3 # Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
3 # Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import fcntl
11 import fcntl
12 import getpass
12 import getpass
13 import grp
13 import grp
14 import os
14 import os
15 import pwd
15 import pwd
16 import re
16 import re
17 import select
17 import select
18 import stat
18 import stat
19 import sys
19 import sys
20 import tempfile
20 import tempfile
21 import unicodedata
21 import unicodedata
22
22
23 from .i18n import _
23 from .i18n import _
24 from . import (
24 from . import (
25 encoding,
25 encoding,
26 pycompat,
26 pycompat,
27 )
27 )
28
28
29 posixfile = open
29 posixfile = open
30 normpath = os.path.normpath
30 normpath = os.path.normpath
31 samestat = os.path.samestat
31 samestat = os.path.samestat
32 try:
32 try:
33 oslink = os.link
33 oslink = os.link
34 except AttributeError:
34 except AttributeError:
35 # Some platforms build Python without os.link on systems that are
35 # Some platforms build Python without os.link on systems that are
36 # vaguely unix-like but don't have hardlink support. For those
36 # vaguely unix-like but don't have hardlink support. For those
37 # poor souls, just say we tried and that it failed so we fall back
37 # poor souls, just say we tried and that it failed so we fall back
38 # to copies.
38 # to copies.
39 def oslink(src, dst):
39 def oslink(src, dst):
40 raise OSError(errno.EINVAL,
40 raise OSError(errno.EINVAL,
41 'hardlinks not supported: %s to %s' % (src, dst))
41 'hardlinks not supported: %s to %s' % (src, dst))
42 unlink = os.unlink
42 unlink = os.unlink
43 rename = os.rename
43 rename = os.rename
44 removedirs = os.removedirs
44 removedirs = os.removedirs
45 expandglobs = False
45 expandglobs = False
46
46
47 umask = os.umask(0)
47 umask = os.umask(0)
48 os.umask(umask)
48 os.umask(umask)
49
49
50 def split(p):
50 def split(p):
51 '''Same as posixpath.split, but faster
51 '''Same as posixpath.split, but faster
52
52
53 >>> import posixpath
53 >>> import posixpath
54 >>> for f in ['/absolute/path/to/file',
54 >>> for f in ['/absolute/path/to/file',
55 ... 'relative/path/to/file',
55 ... 'relative/path/to/file',
56 ... 'file_alone',
56 ... 'file_alone',
57 ... 'path/to/directory/',
57 ... 'path/to/directory/',
58 ... '/multiple/path//separators',
58 ... '/multiple/path//separators',
59 ... '/file_at_root',
59 ... '/file_at_root',
60 ... '///multiple_leading_separators_at_root',
60 ... '///multiple_leading_separators_at_root',
61 ... '']:
61 ... '']:
62 ... assert split(f) == posixpath.split(f), f
62 ... assert split(f) == posixpath.split(f), f
63 '''
63 '''
64 ht = p.rsplit('/', 1)
64 ht = p.rsplit('/', 1)
65 if len(ht) == 1:
65 if len(ht) == 1:
66 return '', p
66 return '', p
67 nh = ht[0].rstrip('/')
67 nh = ht[0].rstrip('/')
68 if nh:
68 if nh:
69 return nh, ht[1]
69 return nh, ht[1]
70 return ht[0] + '/', ht[1]
70 return ht[0] + '/', ht[1]
71
71
72 def openhardlinks():
72 def openhardlinks():
73 '''return true if it is safe to hold open file handles to hardlinks'''
73 '''return true if it is safe to hold open file handles to hardlinks'''
74 return True
74 return True
75
75
76 def nlinks(name):
76 def nlinks(name):
77 '''return number of hardlinks for the given file'''
77 '''return number of hardlinks for the given file'''
78 return os.lstat(name).st_nlink
78 return os.lstat(name).st_nlink
79
79
80 def parsepatchoutput(output_line):
80 def parsepatchoutput(output_line):
81 """parses the output produced by patch and returns the filename"""
81 """parses the output produced by patch and returns the filename"""
82 pf = output_line[14:]
82 pf = output_line[14:]
83 if os.sys.platform == 'OpenVMS':
83 if os.sys.platform == 'OpenVMS':
84 if pf[0] == '`':
84 if pf[0] == '`':
85 pf = pf[1:-1] # Remove the quotes
85 pf = pf[1:-1] # Remove the quotes
86 else:
86 else:
87 if pf.startswith("'") and pf.endswith("'") and " " in pf:
87 if pf.startswith("'") and pf.endswith("'") and " " in pf:
88 pf = pf[1:-1] # Remove the quotes
88 pf = pf[1:-1] # Remove the quotes
89 return pf
89 return pf
90
90
91 def sshargs(sshcmd, host, user, port):
91 def sshargs(sshcmd, host, user, port):
92 '''Build argument list for ssh'''
92 '''Build argument list for ssh'''
93 args = user and ("%s@%s" % (user, host)) or host
93 args = user and ("%s@%s" % (user, host)) or host
94 return port and ("%s -p %s" % (args, port)) or args
94 return port and ("%s -p %s" % (args, port)) or args
95
95
96 def isexec(f):
96 def isexec(f):
97 """check whether a file is executable"""
97 """check whether a file is executable"""
98 return (os.lstat(f).st_mode & 0o100 != 0)
98 return (os.lstat(f).st_mode & 0o100 != 0)
99
99
100 def setflags(f, l, x):
100 def setflags(f, l, x):
101 s = os.lstat(f).st_mode
101 s = os.lstat(f).st_mode
102 if l:
102 if l:
103 if not stat.S_ISLNK(s):
103 if not stat.S_ISLNK(s):
104 # switch file to link
104 # switch file to link
105 fp = open(f)
105 fp = open(f)
106 data = fp.read()
106 data = fp.read()
107 fp.close()
107 fp.close()
108 os.unlink(f)
108 os.unlink(f)
109 try:
109 try:
110 os.symlink(data, f)
110 os.symlink(data, f)
111 except OSError:
111 except OSError:
112 # failed to make a link, rewrite file
112 # failed to make a link, rewrite file
113 fp = open(f, "w")
113 fp = open(f, "w")
114 fp.write(data)
114 fp.write(data)
115 fp.close()
115 fp.close()
116 # no chmod needed at this point
116 # no chmod needed at this point
117 return
117 return
118 if stat.S_ISLNK(s):
118 if stat.S_ISLNK(s):
119 # switch link to file
119 # switch link to file
120 data = os.readlink(f)
120 data = os.readlink(f)
121 os.unlink(f)
121 os.unlink(f)
122 fp = open(f, "w")
122 fp = open(f, "w")
123 fp.write(data)
123 fp.write(data)
124 fp.close()
124 fp.close()
125 s = 0o666 & ~umask # avoid restatting for chmod
125 s = 0o666 & ~umask # avoid restatting for chmod
126
126
127 sx = s & 0o100
127 sx = s & 0o100
128 if x and not sx:
128 if x and not sx:
129 # Turn on +x for every +r bit when making a file executable
129 # Turn on +x for every +r bit when making a file executable
130 # and obey umask.
130 # and obey umask.
131 os.chmod(f, s | (s & 0o444) >> 2 & ~umask)
131 os.chmod(f, s | (s & 0o444) >> 2 & ~umask)
132 elif not x and sx:
132 elif not x and sx:
133 # Turn off all +x bits
133 # Turn off all +x bits
134 os.chmod(f, s & 0o666)
134 os.chmod(f, s & 0o666)
135
135
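setflags does two things: it converts a path between regular file and symlink, and it adjusts the exec bits while honouring the umask (every +r bit gains a matching +x bit). The exec-bit half in isolation, as a small sketch with a hypothetical default umask instead of the module-level one read from the process:

import os

def set_exec(path, executable, umask=0o022):
    s = os.lstat(path).st_mode
    if executable and not (s & 0o100):
        # copy every read bit down to the matching exec bit, minus umask
        os.chmod(path, s | ((s & 0o444) >> 2) & ~umask)
    elif not executable and (s & 0o100):
        os.chmod(path, s & 0o666)          # clear all exec bits
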
136 def copymode(src, dst, mode=None):
136 def copymode(src, dst, mode=None):
137 '''Copy the file mode from the file at path src to dst.
137 '''Copy the file mode from the file at path src to dst.
138 If src doesn't exist, we're using mode instead. If mode is None, we're
138 If src doesn't exist, we're using mode instead. If mode is None, we're
139 using umask.'''
139 using umask.'''
140 try:
140 try:
141 st_mode = os.lstat(src).st_mode & 0o777
141 st_mode = os.lstat(src).st_mode & 0o777
142 except OSError as inst:
142 except OSError as inst:
143 if inst.errno != errno.ENOENT:
143 if inst.errno != errno.ENOENT:
144 raise
144 raise
145 st_mode = mode
145 st_mode = mode
146 if st_mode is None:
146 if st_mode is None:
147 st_mode = ~umask
147 st_mode = ~umask
148 st_mode &= 0o666
148 st_mode &= 0o666
149 os.chmod(dst, st_mode)
149 os.chmod(dst, st_mode)
150
150
151 def checkexec(path):
151 def checkexec(path):
152 """
152 """
153 Check whether the given path is on a filesystem with UNIX-like exec flags
153 Check whether the given path is on a filesystem with UNIX-like exec flags
154
154
155 Requires a directory (like /foo/.hg)
155 Requires a directory (like /foo/.hg)
156 """
156 """
157
157
158 # VFAT on some Linux versions can flip mode but it doesn't persist
158 # VFAT on some Linux versions can flip mode but it doesn't persist
159 # across a FS remount. Frequently we can detect it if files are created
159 # across a FS remount. Frequently we can detect it if files are created
160 # with exec bit on.
160 # with exec bit on.
161
161
162 try:
162 try:
163 EXECFLAGS = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
163 EXECFLAGS = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
164 cachedir = os.path.join(path, '.hg', 'cache')
164 cachedir = os.path.join(path, '.hg', 'cache')
165 if os.path.isdir(cachedir):
165 if os.path.isdir(cachedir):
166 checkisexec = os.path.join(cachedir, 'checkisexec')
166 checkisexec = os.path.join(cachedir, 'checkisexec')
167 checknoexec = os.path.join(cachedir, 'checknoexec')
167 checknoexec = os.path.join(cachedir, 'checknoexec')
168
168
169 try:
169 try:
170 m = os.stat(checkisexec).st_mode
170 m = os.stat(checkisexec).st_mode
171 except OSError as e:
171 except OSError as e:
172 if e.errno != errno.ENOENT:
172 if e.errno != errno.ENOENT:
173 raise
173 raise
174 # checkisexec does not exist - fall through ...
174 # checkisexec does not exist - fall through ...
175 else:
175 else:
176 # checkisexec exists, check if it actually is exec
176 # checkisexec exists, check if it actually is exec
177 if m & EXECFLAGS != 0:
177 if m & EXECFLAGS != 0:
178 # ensure checknoexec exists, check it isn't exec
178 # ensure checknoexec exists, check it isn't exec
179 try:
179 try:
180 m = os.stat(checknoexec).st_mode
180 m = os.stat(checknoexec).st_mode
181 except OSError as e:
181 except OSError as e:
182 if e.errno != errno.ENOENT:
182 if e.errno != errno.ENOENT:
183 raise
183 raise
184 file(checknoexec, 'w').close() # might fail
184 file(checknoexec, 'w').close() # might fail
185 m = os.stat(checknoexec).st_mode
185 m = os.stat(checknoexec).st_mode
186 if m & EXECFLAGS == 0:
186 if m & EXECFLAGS == 0:
187 # check-exec is exec and check-no-exec is not exec
187 # check-exec is exec and check-no-exec is not exec
188 return True
188 return True
189 # checknoexec exists but is exec - delete it
189 # checknoexec exists but is exec - delete it
190 os.unlink(checknoexec)
190 os.unlink(checknoexec)
191 # checkisexec exists but is not exec - delete it
191 # checkisexec exists but is not exec - delete it
192 os.unlink(checkisexec)
192 os.unlink(checkisexec)
193
193
194 # check using one file, leave it as checkisexec
194 # check using one file, leave it as checkisexec
195 checkdir = cachedir
195 checkdir = cachedir
196 else:
196 else:
197 # check directly in path and don't leave checkisexec behind
197 # check directly in path and don't leave checkisexec behind
198 checkdir = path
198 checkdir = path
199 checkisexec = None
199 checkisexec = None
200 fh, fn = tempfile.mkstemp(dir=checkdir, prefix='hg-checkexec-')
200 fh, fn = tempfile.mkstemp(dir=checkdir, prefix='hg-checkexec-')
201 try:
201 try:
202 os.close(fh)
202 os.close(fh)
203 m = os.stat(fn).st_mode
203 m = os.stat(fn).st_mode
204 if m & EXECFLAGS == 0:
204 if m & EXECFLAGS == 0:
205 os.chmod(fn, m & 0o777 | EXECFLAGS)
205 os.chmod(fn, m & 0o777 | EXECFLAGS)
206 if os.stat(fn).st_mode & EXECFLAGS != 0:
206 if os.stat(fn).st_mode & EXECFLAGS != 0:
207 if checkisexec is not None:
207 if checkisexec is not None:
208 os.rename(fn, checkisexec)
208 os.rename(fn, checkisexec)
209 fn = None
209 fn = None
210 return True
210 return True
211 finally:
211 finally:
212 if fn is not None:
212 if fn is not None:
213 os.unlink(fn)
213 os.unlink(fn)
214 except (IOError, OSError):
214 except (IOError, OSError):
215 # we don't care, the user probably won't be able to commit anyway
215 # we don't care, the user probably won't be able to commit anyway
216 return False
216 return False
217
217
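checkexec caches its answer as a pair of files under .hg/cache: 'checkisexec' kept with the exec bit set and 'checknoexec' kept without it, so later runs can answer with two stat calls instead of creating a temporary file. A stripped-down sketch of that fast path only, returning None whenever the cache is missing or stale and a full probe would be needed:

import errno
import os
import stat

EXECFLAGS = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH

def cached_checkexec(cachedir):
    # Return True/False from the cache files, or None if inconclusive.
    try:
        isexec = os.stat(os.path.join(cachedir, 'checkisexec')).st_mode
        noexec = os.stat(os.path.join(cachedir, 'checknoexec')).st_mode
    except OSError as e:
        if e.errno != errno.ENOENT:
            raise
        return None                 # cache incomplete: fall back to the probe
    if (isexec & EXECFLAGS) and not (noexec & EXECFLAGS):
        return True                 # exec bits stick on this filesystem
    return None                     # stale cache: re-probe and rewrite it
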
218 def checklink(path):
218 def checklink(path):
219 """check whether the given path is on a symlink-capable filesystem"""
219 """check whether the given path is on a symlink-capable filesystem"""
220 # mktemp is not racy because symlink creation will fail if the
220 # mktemp is not racy because symlink creation will fail if the
221 # file already exists
221 # file already exists
222 while True:
222 while True:
223 cachedir = os.path.join(path, '.hg', 'cache')
223 cachedir = os.path.join(path, '.hg', 'cache')
224 checklink = os.path.join(cachedir, 'checklink')
224 checklink = os.path.join(cachedir, 'checklink')
225 # try fast path, read only
225 # try fast path, read only
226 if os.path.islink(checklink):
226 if os.path.islink(checklink):
227 return True
227 return True
228 if os.path.isdir(cachedir):
228 if os.path.isdir(cachedir):
229 checkdir = cachedir
229 checkdir = cachedir
230 else:
230 else:
231 checkdir = path
231 checkdir = path
232 cachedir = None
232 cachedir = None
233 name = tempfile.mktemp(dir=checkdir, prefix='checklink-')
233 name = tempfile.mktemp(dir=checkdir, prefix='checklink-')
234 try:
234 try:
235 fd = None
235 fd = None
236 if cachedir is None:
236 if cachedir is None:
237 fd = tempfile.NamedTemporaryFile(dir=checkdir,
237 fd = tempfile.NamedTemporaryFile(dir=checkdir,
238 prefix='hg-checklink-')
238 prefix='hg-checklink-')
239 target = os.path.basename(fd.name)
239 target = os.path.basename(fd.name)
240 else:
240 else:
241 # create a fixed file to link to; doesn't matter if it
241 # create a fixed file to link to; doesn't matter if it
242 # already exists.
242 # already exists.
243 target = 'checklink-target'
243 target = 'checklink-target'
244 open(os.path.join(cachedir, target), 'w').close()
244 open(os.path.join(cachedir, target), 'w').close()
245 try:
245 try:
246 os.symlink(target, name)
246 os.symlink(target, name)
247 if cachedir is None:
247 if cachedir is None:
248 os.unlink(name)
248 os.unlink(name)
249 else:
249 else:
250 try:
250 try:
251 os.rename(name, checklink)
251 os.rename(name, checklink)
252 except OSError:
252 except OSError:
253 os.unlink(name)
253 os.unlink(name)
254 return True
254 return True
255 except OSError as inst:
255 except OSError as inst:
256 # link creation might race, try again
256 # link creation might race, try again
257 if inst[0] == errno.EEXIST:
257 if inst[0] == errno.EEXIST:
258 continue
258 continue
259 raise
259 raise
260 finally:
260 finally:
261 if fd is not None:
261 if fd is not None:
262 fd.close()
262 fd.close()
263 except AttributeError:
263 except AttributeError:
264 return False
264 return False
265 except OSError as inst:
265 except OSError as inst:
266 # sshfs might report failure while successfully creating the link
266 # sshfs might report failure while successfully creating the link
267 if inst[0] == errno.EIO and os.path.exists(name):
267 if inst[0] == errno.EIO and os.path.exists(name):
268 os.unlink(name)
268 os.unlink(name)
269 return False
269 return False
270
270
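checklink probes symlink support by linking to a throwaway target and, when a cache directory exists, keeping the link as .hg/cache/checklink so the next call is a single islink() test. A condensed sketch of the probe alone, without the caching, the EEXIST retry loop or the sshfs workaround:

import os
import tempfile

def probe_symlinks(directory):
    # Return True if symlinks can be created inside 'directory'.
    target = tempfile.NamedTemporaryFile(dir=directory, prefix='hg-checklink-')
    linkname = os.path.join(directory, 'checklink-%d' % os.getpid())
    try:
        os.symlink(os.path.basename(target.name), linkname)
        os.unlink(linkname)
        return True
    except (OSError, AttributeError, NotImplementedError):
        return False                # no symlink support (or no permission)
    finally:
        target.close()
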
271 def checkosfilename(path):
271 def checkosfilename(path):
272 '''Check that the base-relative path is a valid filename on this platform.
272 '''Check that the base-relative path is a valid filename on this platform.
273 Returns None if the path is ok, or a UI string describing the problem.'''
273 Returns None if the path is ok, or a UI string describing the problem.'''
274 pass # on posix platforms, every path is ok
274 pass # on posix platforms, every path is ok
275
275
276 def setbinary(fd):
276 def setbinary(fd):
277 pass
277 pass
278
278
279 def pconvert(path):
279 def pconvert(path):
280 return path
280 return path
281
281
282 def localpath(path):
282 def localpath(path):
283 return path
283 return path
284
284
285 def samefile(fpath1, fpath2):
285 def samefile(fpath1, fpath2):
286 """Returns whether path1 and path2 refer to the same file. This is only
286 """Returns whether path1 and path2 refer to the same file. This is only
287 guaranteed to work for files, not directories."""
287 guaranteed to work for files, not directories."""
288 return os.path.samefile(fpath1, fpath2)
288 return os.path.samefile(fpath1, fpath2)
289
289
290 def samedevice(fpath1, fpath2):
290 def samedevice(fpath1, fpath2):
291 """Returns whether fpath1 and fpath2 are on the same device. This is only
291 """Returns whether fpath1 and fpath2 are on the same device. This is only
292 guaranteed to work for files, not directories."""
292 guaranteed to work for files, not directories."""
293 st1 = os.lstat(fpath1)
293 st1 = os.lstat(fpath1)
294 st2 = os.lstat(fpath2)
294 st2 = os.lstat(fpath2)
295 return st1.st_dev == st2.st_dev
295 return st1.st_dev == st2.st_dev
296
296
297 # os.path.normcase is a no-op, which doesn't help us on non-native filesystems
297 # os.path.normcase is a no-op, which doesn't help us on non-native filesystems
298 def normcase(path):
298 def normcase(path):
299 return path.lower()
299 return path.lower()
300
300
301 # what normcase does to ASCII strings
301 # what normcase does to ASCII strings
302 normcasespec = encoding.normcasespecs.lower
302 normcasespec = encoding.normcasespecs.lower
303 # fallback normcase function for non-ASCII strings
303 # fallback normcase function for non-ASCII strings
304 normcasefallback = normcase
304 normcasefallback = normcase
305
305
306 if sys.platform == 'darwin':
306 if sys.platform == 'darwin':
307
307
308 def normcase(path):
308 def normcase(path):
309 '''
309 '''
310 Normalize a filename for OS X-compatible comparison:
310 Normalize a filename for OS X-compatible comparison:
311 - escape-encode invalid characters
311 - escape-encode invalid characters
312 - decompose to NFD
312 - decompose to NFD
313 - lowercase
313 - lowercase
314 - omit ignored characters [200c-200f, 202a-202e, 206a-206f,feff]
314 - omit ignored characters [200c-200f, 202a-202e, 206a-206f,feff]
315
315
316 >>> normcase('UPPER')
316 >>> normcase('UPPER')
317 'upper'
317 'upper'
318 >>> normcase('Caf\xc3\xa9')
318 >>> normcase('Caf\xc3\xa9')
319 'cafe\\xcc\\x81'
319 'cafe\\xcc\\x81'
320 >>> normcase('\xc3\x89')
320 >>> normcase('\xc3\x89')
321 'e\\xcc\\x81'
321 'e\\xcc\\x81'
322 >>> normcase('\xb8\xca\xc3\xca\xbe\xc8.JPG') # issue3918
322 >>> normcase('\xb8\xca\xc3\xca\xbe\xc8.JPG') # issue3918
323 '%b8%ca%c3\\xca\\xbe%c8.jpg'
323 '%b8%ca%c3\\xca\\xbe%c8.jpg'
324 '''
324 '''
325
325
326 try:
326 try:
327 return encoding.asciilower(path) # exception for non-ASCII
327 return encoding.asciilower(path) # exception for non-ASCII
328 except UnicodeDecodeError:
328 except UnicodeDecodeError:
329 return normcasefallback(path)
329 return normcasefallback(path)
330
330
331 normcasespec = encoding.normcasespecs.lower
331 normcasespec = encoding.normcasespecs.lower
332
332
333 def normcasefallback(path):
333 def normcasefallback(path):
334 try:
334 try:
335 u = path.decode('utf-8')
335 u = path.decode('utf-8')
336 except UnicodeDecodeError:
336 except UnicodeDecodeError:
337 # OS X percent-encodes any bytes that aren't valid utf-8
337 # OS X percent-encodes any bytes that aren't valid utf-8
338 s = ''
338 s = ''
339 pos = 0
339 pos = 0
340 l = len(path)
340 l = len(path)
341 while pos < l:
341 while pos < l:
342 try:
342 try:
343 c = encoding.getutf8char(path, pos)
343 c = encoding.getutf8char(path, pos)
344 pos += len(c)
344 pos += len(c)
345 except ValueError:
345 except ValueError:
346 c = '%%%02X' % ord(path[pos])
346 c = '%%%02X' % ord(path[pos])
347 pos += 1
347 pos += 1
348 s += c
348 s += c
349
349
350 u = s.decode('utf-8')
350 u = s.decode('utf-8')
351
351
352 # Decompose then lowercase (HFS+ technote specifies lower)
352 # Decompose then lowercase (HFS+ technote specifies lower)
353 enc = unicodedata.normalize('NFD', u).lower().encode('utf-8')
353 enc = unicodedata.normalize('NFD', u).lower().encode('utf-8')
354 # drop HFS+ ignored characters
354 # drop HFS+ ignored characters
355 return encoding.hfsignoreclean(enc)
355 return encoding.hfsignoreclean(enc)
356
356
357 if sys.platform == 'cygwin':
357 if sys.platform == 'cygwin':
358 # workaround for cygwin, in which mount point part of path is
358 # workaround for cygwin, in which mount point part of path is
359 # treated as case sensitive, even though underlying NTFS is case
359 # treated as case sensitive, even though underlying NTFS is case
360 # insensitive.
360 # insensitive.
361
361
362 # default mount points
362 # default mount points
363 cygwinmountpoints = sorted([
363 cygwinmountpoints = sorted([
364 "/usr/bin",
364 "/usr/bin",
365 "/usr/lib",
365 "/usr/lib",
366 "/cygdrive",
366 "/cygdrive",
367 ], reverse=True)
367 ], reverse=True)
368
368
369 # use upper-casing for normcase, the same as the NTFS workaround
369 # use upper-casing for normcase, the same as the NTFS workaround
370 def normcase(path):
370 def normcase(path):
371 pathlen = len(path)
371 pathlen = len(path)
372 if (pathlen == 0) or (path[0] != os.sep):
372 if (pathlen == 0) or (path[0] != pycompat.ossep):
373 # treat as relative
373 # treat as relative
374 return encoding.upper(path)
374 return encoding.upper(path)
375
375
376 # to preserve case of mountpoint part
376 # to preserve case of mountpoint part
377 for mp in cygwinmountpoints:
377 for mp in cygwinmountpoints:
378 if not path.startswith(mp):
378 if not path.startswith(mp):
379 continue
379 continue
380
380
381 mplen = len(mp)
381 mplen = len(mp)
382 if mplen == pathlen: # mount point itself
382 if mplen == pathlen: # mount point itself
383 return mp
383 return mp
384 if path[mplen] == os.sep:
384 if path[mplen] == pycompat.ossep:
385 return mp + encoding.upper(path[mplen:])
385 return mp + encoding.upper(path[mplen:])
386
386
387 return encoding.upper(path)
387 return encoding.upper(path)
388
388
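The Cygwin normcase keeps the mount-point prefix untouched and upper-cases only the remainder, because the mount table is case sensitive even though the underlying NTFS is not. A tiny standalone version of that prefix-preserving fold, using str.upper in place of encoding.upper and '/' in place of pycompat.ossep:

def normcase_preserving_prefixes(path, prefixes, sep='/'):
    for mp in sorted(prefixes, reverse=True):   # reverse-sorted, as above
        if path == mp:
            return mp                            # the mount point itself
        if path.startswith(mp + sep):
            return mp + path[len(mp):].upper()   # keep the prefix's case
    return path.upper()

# normcase_preserving_prefixes('/cygdrive/c/Work', ['/cygdrive', '/usr/bin'])
#   -> '/cygdrive/C/WORK'
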
389 normcasespec = encoding.normcasespecs.other
389 normcasespec = encoding.normcasespecs.other
390 normcasefallback = normcase
390 normcasefallback = normcase
391
391
392 # Cygwin translates native ACLs to POSIX permissions,
392 # Cygwin translates native ACLs to POSIX permissions,
393 # but these translations are not supported by native
393 # but these translations are not supported by native
394 # tools, so the exec bit tends to be set erroneously.
394 # tools, so the exec bit tends to be set erroneously.
395 # Therefore, disable executable bit access on Cygwin.
395 # Therefore, disable executable bit access on Cygwin.
396 def checkexec(path):
396 def checkexec(path):
397 return False
397 return False
398
398
399 # Similarly, Cygwin's symlink emulation is likely to create
399 # Similarly, Cygwin's symlink emulation is likely to create
400 # problems when Mercurial is used from both Cygwin and native
400 # problems when Mercurial is used from both Cygwin and native
401 # Windows, with other native tools, or on shared volumes
401 # Windows, with other native tools, or on shared volumes
402 def checklink(path):
402 def checklink(path):
403 return False
403 return False
404
404
405 _needsshellquote = None
405 _needsshellquote = None
406 def shellquote(s):
406 def shellquote(s):
407 if os.sys.platform == 'OpenVMS':
407 if os.sys.platform == 'OpenVMS':
408 return '"%s"' % s
408 return '"%s"' % s
409 global _needsshellquote
409 global _needsshellquote
410 if _needsshellquote is None:
410 if _needsshellquote is None:
411 _needsshellquote = re.compile(r'[^a-zA-Z0-9._/+-]').search
411 _needsshellquote = re.compile(r'[^a-zA-Z0-9._/+-]').search
412 if s and not _needsshellquote(s):
412 if s and not _needsshellquote(s):
413 # "s" shouldn't have to be quoted
413 # "s" shouldn't have to be quoted
414 return s
414 return s
415 else:
415 else:
416 return "'%s'" % s.replace("'", "'\\''")
416 return "'%s'" % s.replace("'", "'\\''")
417
417
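For context, a sketch of how the shellquote above behaves on a non-OpenVMS POSIX system; the inputs are made up for illustration:

shellquote('rev-1.0_tag')   # only characters from the safe set, returned as-is
shellquote('two words')     # contains a space, so it comes back wrapped: 'two words'
shellquote("it's")          # each embedded ' is closed, escaped and reopened: 'it'\''s'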
418 def quotecommand(cmd):
418 def quotecommand(cmd):
419 return cmd
419 return cmd
420
420
421 def popen(command, mode='r'):
421 def popen(command, mode='r'):
422 return os.popen(command, mode)
422 return os.popen(command, mode)
423
423
424 def testpid(pid):
424 def testpid(pid):
425 '''return False if pid dead, True if running or not sure'''
425 '''return False if pid dead, True if running or not sure'''
426 if os.sys.platform == 'OpenVMS':
426 if os.sys.platform == 'OpenVMS':
427 return True
427 return True
428 try:
428 try:
429 os.kill(pid, 0)
429 os.kill(pid, 0)
430 return True
430 return True
431 except OSError as inst:
431 except OSError as inst:
432 return inst.errno != errno.ESRCH
432 return inst.errno != errno.ESRCH
433
433
434 def explainexit(code):
434 def explainexit(code):
435 """return a 2-tuple (desc, code) describing a subprocess status
435 """return a 2-tuple (desc, code) describing a subprocess status
436 (codes from kill are negative - not os.system/wait encoding)"""
436 (codes from kill are negative - not os.system/wait encoding)"""
437 if code >= 0:
437 if code >= 0:
438 return _("exited with status %d") % code, code
438 return _("exited with status %d") % code, code
439 return _("killed by signal %d") % -code, -code
439 return _("killed by signal %d") % -code, -code
440
440
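A minimal illustration of the two helpers above; the pid and exit codes are hypothetical, and the messages are shown untranslated:

testpid(os.getpid())   # -> True, the current process is certainly running
explainexit(0)         # -> ('exited with status 0', 0)
explainexit(-15)       # -> ('killed by signal 15', 15), e.g. after SIGTERM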
441 def isowner(st):
441 def isowner(st):
442 """Return True if the stat object st is from the current user."""
442 """Return True if the stat object st is from the current user."""
443 return st.st_uid == os.getuid()
443 return st.st_uid == os.getuid()
444
444
445 def findexe(command):
445 def findexe(command):
446 '''Find executable for command searching like which does.
446 '''Find executable for command searching like which does.
447 If command is a basename then PATH is searched for command.
447 If command is a basename then PATH is searched for command.
448 PATH isn't searched if command is an absolute or relative path.
448 PATH isn't searched if command is an absolute or relative path.
449 If command isn't found, None is returned.'''
449 If command isn't found, None is returned.'''
450 if sys.platform == 'OpenVMS':
450 if sys.platform == 'OpenVMS':
451 return command
451 return command
452
452
453 def findexisting(executable):
453 def findexisting(executable):
454 'Will return executable if existing file'
454 'Will return executable if existing file'
455 if os.path.isfile(executable) and os.access(executable, os.X_OK):
455 if os.path.isfile(executable) and os.access(executable, os.X_OK):
456 return executable
456 return executable
457 return None
457 return None
458
458
459 if os.sep in command:
459 if pycompat.ossep in command:
460 return findexisting(command)
460 return findexisting(command)
461
461
462 if sys.platform == 'plan9':
462 if sys.platform == 'plan9':
463 return findexisting(os.path.join('/bin', command))
463 return findexisting(os.path.join('/bin', command))
464
464
465 for path in os.environ.get('PATH', '').split(pycompat.ospathsep):
465 for path in os.environ.get('PATH', '').split(pycompat.ospathsep):
466 executable = findexisting(os.path.join(path, command))
466 executable = findexisting(os.path.join(path, command))
467 if executable is not None:
467 if executable is not None:
468 return executable
468 return executable
469 return None
469 return None
470
470
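As an illustration of the lookup rules in findexe above (actual results depend on the system and its PATH; the script names are hypothetical):

findexe('sh')           # bare name: PATH is searched, e.g. '/bin/sh' on many systems
findexe('./local.sh')   # contains a separator: only that exact path is checked
findexe('no-such-cmd')  # -> None when no matching executable file is found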
471 def setsignalhandler():
471 def setsignalhandler():
472 pass
472 pass
473
473
474 _wantedkinds = set([stat.S_IFREG, stat.S_IFLNK])
474 _wantedkinds = set([stat.S_IFREG, stat.S_IFLNK])
475
475
476 def statfiles(files):
476 def statfiles(files):
477 '''Stat each file in files. Yield each stat, or None if a file does not
477 '''Stat each file in files. Yield each stat, or None if a file does not
478 exist or has a type we don't care about.'''
478 exist or has a type we don't care about.'''
479 lstat = os.lstat
479 lstat = os.lstat
480 getkind = stat.S_IFMT
480 getkind = stat.S_IFMT
481 for nf in files:
481 for nf in files:
482 try:
482 try:
483 st = lstat(nf)
483 st = lstat(nf)
484 if getkind(st.st_mode) not in _wantedkinds:
484 if getkind(st.st_mode) not in _wantedkinds:
485 st = None
485 st = None
486 except OSError as err:
486 except OSError as err:
487 if err.errno not in (errno.ENOENT, errno.ENOTDIR):
487 if err.errno not in (errno.ENOENT, errno.ENOTDIR):
488 raise
488 raise
489 st = None
489 st = None
490 yield st
490 yield st
491
491
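A short sketch of how statfiles above behaves; the file names are illustrative:

results = list(statfiles(['hgrc', 'missing-file', '/dev/null']))
# results[0]: os.stat_result for a regular file or symlink
# results[1]: None, the file does not exist (ENOENT/ENOTDIR are swallowed)
# results[2]: None on most systems, a character device is not a wanted kind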
492 def getuser():
492 def getuser():
493 '''return name of current user'''
493 '''return name of current user'''
494 return getpass.getuser()
494 return getpass.getuser()
495
495
496 def username(uid=None):
496 def username(uid=None):
497 """Return the name of the user with the given uid.
497 """Return the name of the user with the given uid.
498
498
499 If uid is None, return the name of the current user."""
499 If uid is None, return the name of the current user."""
500
500
501 if uid is None:
501 if uid is None:
502 uid = os.getuid()
502 uid = os.getuid()
503 try:
503 try:
504 return pwd.getpwuid(uid)[0]
504 return pwd.getpwuid(uid)[0]
505 except KeyError:
505 except KeyError:
506 return str(uid)
506 return str(uid)
507
507
508 def groupname(gid=None):
508 def groupname(gid=None):
509 """Return the name of the group with the given gid.
509 """Return the name of the group with the given gid.
510
510
511 If gid is None, return the name of the current group."""
511 If gid is None, return the name of the current group."""
512
512
513 if gid is None:
513 if gid is None:
514 gid = os.getgid()
514 gid = os.getgid()
515 try:
515 try:
516 return grp.getgrgid(gid)[0]
516 return grp.getgrgid(gid)[0]
517 except KeyError:
517 except KeyError:
518 return str(gid)
518 return str(gid)
519
519
520 def groupmembers(name):
520 def groupmembers(name):
521 """Return the list of members of the group with the given
521 """Return the list of members of the group with the given
522 name, KeyError if the group does not exist.
522 name, KeyError if the group does not exist.
523 """
523 """
524 return list(grp.getgrnam(name).gr_mem)
524 return list(grp.getgrnam(name).gr_mem)
525
525
526 def spawndetached(args):
526 def spawndetached(args):
527 return os.spawnvp(os.P_NOWAIT | getattr(os, 'P_DETACH', 0),
527 return os.spawnvp(os.P_NOWAIT | getattr(os, 'P_DETACH', 0),
528 args[0], args)
528 args[0], args)
529
529
530 def gethgcmd():
530 def gethgcmd():
531 return sys.argv[:1]
531 return sys.argv[:1]
532
532
533 def makedir(path, notindexed):
533 def makedir(path, notindexed):
534 os.mkdir(path)
534 os.mkdir(path)
535
535
536 def unlinkpath(f, ignoremissing=False):
536 def unlinkpath(f, ignoremissing=False):
537 """unlink and remove the directory if it is empty"""
537 """unlink and remove the directory if it is empty"""
538 try:
538 try:
539 os.unlink(f)
539 os.unlink(f)
540 except OSError as e:
540 except OSError as e:
541 if not (ignoremissing and e.errno == errno.ENOENT):
541 if not (ignoremissing and e.errno == errno.ENOENT):
542 raise
542 raise
543 # try removing directories that might now be empty
543 # try removing directories that might now be empty
544 try:
544 try:
545 os.removedirs(os.path.dirname(f))
545 os.removedirs(os.path.dirname(f))
546 except OSError:
546 except OSError:
547 pass
547 pass
548
548
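To make the pruning behaviour of unlinkpath above concrete (hypothetical path):

unlinkpath('build/tmp/last-output.txt', ignoremissing=True)
# removes the file if present, never raises for a missing one, and then
# prunes 'build/tmp' and 'build' if deleting the file left them empty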
549 def lookupreg(key, name=None, scope=None):
549 def lookupreg(key, name=None, scope=None):
550 return None
550 return None
551
551
552 def hidewindow():
552 def hidewindow():
553 """Hide current shell window.
553 """Hide current shell window.
554
554
555 Used to hide the window opened when starting asynchronous
555 Used to hide the window opened when starting asynchronous
556 child process under Windows, unneeded on other systems.
556 child process under Windows, unneeded on other systems.
557 """
557 """
558 pass
558 pass
559
559
560 class cachestat(object):
560 class cachestat(object):
561 def __init__(self, path):
561 def __init__(self, path):
562 self.stat = os.stat(path)
562 self.stat = os.stat(path)
563
563
564 def cacheable(self):
564 def cacheable(self):
565 return bool(self.stat.st_ino)
565 return bool(self.stat.st_ino)
566
566
567 __hash__ = object.__hash__
567 __hash__ = object.__hash__
568
568
569 def __eq__(self, other):
569 def __eq__(self, other):
570 try:
570 try:
571 # Only dev, ino, size, mtime and atime are likely to change. Out
571 # Only dev, ino, size, mtime and atime are likely to change. Out
572 # of these, we shouldn't compare atime but should compare the
572 # of these, we shouldn't compare atime but should compare the
573 # rest. However, one of the other fields changing indicates
573 # rest. However, one of the other fields changing indicates
574 # something fishy going on, so return False if anything but atime
574 # something fishy going on, so return False if anything but atime
575 # changes.
575 # changes.
576 return (self.stat.st_mode == other.stat.st_mode and
576 return (self.stat.st_mode == other.stat.st_mode and
577 self.stat.st_ino == other.stat.st_ino and
577 self.stat.st_ino == other.stat.st_ino and
578 self.stat.st_dev == other.stat.st_dev and
578 self.stat.st_dev == other.stat.st_dev and
579 self.stat.st_nlink == other.stat.st_nlink and
579 self.stat.st_nlink == other.stat.st_nlink and
580 self.stat.st_uid == other.stat.st_uid and
580 self.stat.st_uid == other.stat.st_uid and
581 self.stat.st_gid == other.stat.st_gid and
581 self.stat.st_gid == other.stat.st_gid and
582 self.stat.st_size == other.stat.st_size and
582 self.stat.st_size == other.stat.st_size and
583 self.stat.st_mtime == other.stat.st_mtime and
583 self.stat.st_mtime == other.stat.st_mtime and
584 self.stat.st_ctime == other.stat.st_ctime)
584 self.stat.st_ctime == other.stat.st_ctime)
585 except AttributeError:
585 except AttributeError:
586 return False
586 return False
587
587
588 def __ne__(self, other):
588 def __ne__(self, other):
589 return not self == other
589 return not self == other
590
590
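cachestat above compares every relevant stat field except atime, which makes it usable as a cheap change detector. A sketch, with a hypothetical path:

before = cachestat('.hg/requires')      # hypothetical file to watch
# ... something may rewrite the file here ...
after = cachestat('.hg/requires')
if before != after:
    pass  # mode, inode, size, mtime, owner, ... changed; only atime is ignored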
591 def executablepath():
591 def executablepath():
592 return None # available on Windows only
592 return None # available on Windows only
593
593
594 def statislink(st):
594 def statislink(st):
595 '''check whether a stat result is a symlink'''
595 '''check whether a stat result is a symlink'''
596 return st and stat.S_ISLNK(st.st_mode)
596 return st and stat.S_ISLNK(st.st_mode)
597
597
598 def statisexec(st):
598 def statisexec(st):
599 '''check whether a stat result is an executable file'''
599 '''check whether a stat result is an executable file'''
600 return st and (st.st_mode & 0o100 != 0)
600 return st and (st.st_mode & 0o100 != 0)
601
601
602 def poll(fds):
602 def poll(fds):
603 """block until something happens on any file descriptor
603 """block until something happens on any file descriptor
604
604
605 This is a generic helper that will check for any activity
605 This is a generic helper that will check for any activity
606 (read, write, exception) and return the list of touched files.
606 (read, write, exception) and return the list of touched files.
607
607
608 In unsupported cases, it will raise a NotImplementedError"""
608 In unsupported cases, it will raise a NotImplementedError"""
609 try:
609 try:
610 res = select.select(fds, fds, fds)
610 res = select.select(fds, fds, fds)
611 except ValueError: # out of range file descriptor
611 except ValueError: # out of range file descriptor
612 raise NotImplementedError()
612 raise NotImplementedError()
613 return sorted(list(set(sum(res, []))))
613 return sorted(list(set(sum(res, []))))
614
614
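A usage sketch for poll above; child_stdout and child_stderr are placeholders for pipes of an already-spawned process:

# blocks until any descriptor is readable, writable, or in an exceptional
# state, then returns the sorted, de-duplicated list of touched descriptors
touched = poll([child_stdout.fileno(), child_stderr.fileno()])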
615 def readpipe(pipe):
615 def readpipe(pipe):
616 """Read all available data from a pipe."""
616 """Read all available data from a pipe."""
617 # We can't fstat() a pipe because Linux will always report 0.
617 # We can't fstat() a pipe because Linux will always report 0.
618 # So, we set the pipe to non-blocking mode and read everything
618 # So, we set the pipe to non-blocking mode and read everything
619 # that's available.
619 # that's available.
620 oldflags = fcntl.fcntl(pipe, fcntl.F_GETFL)
620 oldflags = fcntl.fcntl(pipe, fcntl.F_GETFL)
621 flags = oldflags | os.O_NONBLOCK
621 flags = oldflags | os.O_NONBLOCK
622 fcntl.fcntl(pipe, fcntl.F_SETFL, flags)
622 fcntl.fcntl(pipe, fcntl.F_SETFL, flags)
623
623
624 try:
624 try:
625 chunks = []
625 chunks = []
626 while True:
626 while True:
627 try:
627 try:
628 s = pipe.read()
628 s = pipe.read()
629 if not s:
629 if not s:
630 break
630 break
631 chunks.append(s)
631 chunks.append(s)
632 except IOError:
632 except IOError:
633 break
633 break
634
634
635 return ''.join(chunks)
635 return ''.join(chunks)
636 finally:
636 finally:
637 fcntl.fcntl(pipe, fcntl.F_SETFL, oldflags)
637 fcntl.fcntl(pipe, fcntl.F_SETFL, oldflags)
638
638
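A sketch of driving readpipe above with a pipe from subprocess; the command is illustrative, and how much data comes back depends on what the child has written so far:

import subprocess
proc = subprocess.Popen(['ls', '-l'], stdout=subprocess.PIPE)
data = readpipe(proc.stdout)   # drains whatever is currently buffered without blocking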
639 def bindunixsocket(sock, path):
639 def bindunixsocket(sock, path):
640 """Bind the UNIX domain socket to the specified path"""
640 """Bind the UNIX domain socket to the specified path"""
641 # use relative path instead of full path at bind() if possible, since
641 # use relative path instead of full path at bind() if possible, since
642 # AF_UNIX path has very small length limit (107 chars) on common
642 # AF_UNIX path has very small length limit (107 chars) on common
643 # platforms (see sys/un.h)
643 # platforms (see sys/un.h)
644 dirname, basename = os.path.split(path)
644 dirname, basename = os.path.split(path)
645 bakwdfd = None
645 bakwdfd = None
646 if dirname:
646 if dirname:
647 bakwdfd = os.open('.', os.O_DIRECTORY)
647 bakwdfd = os.open('.', os.O_DIRECTORY)
648 os.chdir(dirname)
648 os.chdir(dirname)
649 sock.bind(basename)
649 sock.bind(basename)
650 if bakwdfd:
650 if bakwdfd:
651 os.fchdir(bakwdfd)
651 os.fchdir(bakwdfd)
652 os.close(bakwdfd)
652 os.close(bakwdfd)
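A sketch of using bindunixsocket above; the path is hypothetical, and only its basename has to fit within the AF_UNIX path limit:

import socket
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
bindunixsocket(sock, '/some/deeply/nested/run/dir/cmdserver.sock')
# bind() happens after chdir-ing into the directory, so only the short
# basename is passed to the kernel; the original cwd is restored afterwards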