|
@@
-1,1518
+1,1518
b''
|
|
1
|
# dirstate.py - working directory tracking for mercurial
|
|
1
|
# dirstate.py - working directory tracking for mercurial
|
|
2
|
#
|
|
2
|
#
|
|
3
|
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
|
|
3
|
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
|
|
4
|
#
|
|
4
|
#
|
|
5
|
# This software may be used and distributed according to the terms of the
|
|
5
|
# This software may be used and distributed according to the terms of the
|
|
6
|
# GNU General Public License version 2 or any later version.
|
|
6
|
# GNU General Public License version 2 or any later version.
|
|
7
|
|
|
7
|
|
|
8
|
from __future__ import absolute_import
|
|
8
|
from __future__ import absolute_import
|
|
9
|
|
|
9
|
|
|
10
|
import collections
|
|
10
|
import collections
|
|
11
|
import contextlib
|
|
11
|
import contextlib
|
|
12
|
import errno
|
|
12
|
import errno
|
|
13
|
import os
|
|
13
|
import os
|
|
14
|
import stat
|
|
14
|
import stat
|
|
15
|
|
|
15
|
|
|
16
|
from .i18n import _
|
|
16
|
from .i18n import _
|
|
17
|
from .node import nullid
|
|
17
|
from .node import nullid
|
|
18
|
from . import (
|
|
18
|
from . import (
|
|
19
|
encoding,
|
|
19
|
encoding,
|
|
20
|
error,
|
|
20
|
error,
|
|
21
|
match as matchmod,
|
|
21
|
match as matchmod,
|
|
22
|
pathutil,
|
|
22
|
pathutil,
|
|
23
|
policy,
|
|
23
|
policy,
|
|
24
|
pycompat,
|
|
24
|
pycompat,
|
|
25
|
scmutil,
|
|
25
|
scmutil,
|
|
26
|
txnutil,
|
|
26
|
txnutil,
|
|
27
|
util,
|
|
27
|
util,
|
|
28
|
)
|
|
28
|
)
|
|
29
|
|
|
29
|
|
|
30
|
parsers = policy.importmod(r'parsers')
|
|
30
|
orig_parsers = policy.importmod(r'parsers')
|
|
31
|
dirstatemod = policy.importrust(r'dirstate', default=parsers)
|
|
31
|
parsers = policy.importrust(r'parsers', default=orig_parsers)
|
|
32
|
|
|
32
|
|
|
33
|
propertycache = util.propertycache
|
|
33
|
propertycache = util.propertycache
|
|
34
|
filecache = scmutil.filecache
|
|
34
|
filecache = scmutil.filecache
|
|
35
|
_rangemask = 0x7fffffff
|
|
35
|
_rangemask = 0x7fffffff
|
|
36
|
|
|
36
|
|
|
37
|
dirstatetuple = parsers.dirstatetuple
|
|
37
|
dirstatetuple = orig_parsers.dirstatetuple
|
|
38
|
|
|
38
|
|
|
39
|
class repocache(filecache):
|
|
39
|
class repocache(filecache):
|
|
40
|
"""filecache for files in .hg/"""
|
|
40
|
"""filecache for files in .hg/"""
|
|
41
|
def join(self, obj, fname):
|
|
41
|
def join(self, obj, fname):
|
|
42
|
return obj._opener.join(fname)
|
|
42
|
return obj._opener.join(fname)
|
|
43
|
|
|
43
|
|
|
44
|
class rootcache(filecache):
|
|
44
|
class rootcache(filecache):
|
|
45
|
"""filecache for files in the repository root"""
|
|
45
|
"""filecache for files in the repository root"""
|
|
46
|
def join(self, obj, fname):
|
|
46
|
def join(self, obj, fname):
|
|
47
|
return obj._join(fname)
|
|
47
|
return obj._join(fname)
|
|
48
|
|
|
48
|
|
|
49
|
def _getfsnow(vfs):
|
|
49
|
def _getfsnow(vfs):
|
|
50
|
'''Get "now" timestamp on filesystem'''
|
|
50
|
'''Get "now" timestamp on filesystem'''
|
|
51
|
tmpfd, tmpname = vfs.mkstemp()
|
|
51
|
tmpfd, tmpname = vfs.mkstemp()
|
|
52
|
try:
|
|
52
|
try:
|
|
53
|
return os.fstat(tmpfd)[stat.ST_MTIME]
|
|
53
|
return os.fstat(tmpfd)[stat.ST_MTIME]
|
|
54
|
finally:
|
|
54
|
finally:
|
|
55
|
os.close(tmpfd)
|
|
55
|
os.close(tmpfd)
|
|
56
|
vfs.unlink(tmpname)
|
|
56
|
vfs.unlink(tmpname)
|
|
57
|
|
|
57
|
|
|
58
|
class dirstate(object):
|
|
58
|
class dirstate(object):
|
|
59
|
|
|
59
|
|
|
60
|
def __init__(self, opener, ui, root, validate, sparsematchfn):
|
|
60
|
def __init__(self, opener, ui, root, validate, sparsematchfn):
|
|
61
|
'''Create a new dirstate object.
|
|
61
|
'''Create a new dirstate object.
|
|
62
|
|
|
62
|
|
|
63
|
opener is an open()-like callable that can be used to open the
|
|
63
|
opener is an open()-like callable that can be used to open the
|
|
64
|
dirstate file; root is the root of the directory tracked by
|
|
64
|
dirstate file; root is the root of the directory tracked by
|
|
65
|
the dirstate.
|
|
65
|
the dirstate.
|
|
66
|
'''
|
|
66
|
'''
|
|
67
|
self._opener = opener
|
|
67
|
self._opener = opener
|
|
68
|
self._validate = validate
|
|
68
|
self._validate = validate
|
|
69
|
self._root = root
|
|
69
|
self._root = root
|
|
70
|
self._sparsematchfn = sparsematchfn
|
|
70
|
self._sparsematchfn = sparsematchfn
|
|
71
|
# ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
|
|
71
|
# ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
|
|
72
|
# UNC path pointing to root share (issue4557)
|
|
72
|
# UNC path pointing to root share (issue4557)
|
|
73
|
self._rootdir = pathutil.normasprefix(root)
|
|
73
|
self._rootdir = pathutil.normasprefix(root)
|
|
74
|
self._dirty = False
|
|
74
|
self._dirty = False
|
|
75
|
self._lastnormaltime = 0
|
|
75
|
self._lastnormaltime = 0
|
|
76
|
self._ui = ui
|
|
76
|
self._ui = ui
|
|
77
|
self._filecache = {}
|
|
77
|
self._filecache = {}
|
|
78
|
self._parentwriters = 0
|
|
78
|
self._parentwriters = 0
|
|
79
|
self._filename = 'dirstate'
|
|
79
|
self._filename = 'dirstate'
|
|
80
|
self._pendingfilename = '%s.pending' % self._filename
|
|
80
|
self._pendingfilename = '%s.pending' % self._filename
|
|
81
|
self._plchangecallbacks = {}
|
|
81
|
self._plchangecallbacks = {}
|
|
82
|
self._origpl = None
|
|
82
|
self._origpl = None
|
|
83
|
self._updatedfiles = set()
|
|
83
|
self._updatedfiles = set()
|
|
84
|
self._mapcls = dirstatemap
|
|
84
|
self._mapcls = dirstatemap
|
|
85
|
# Access and cache cwd early, so we don't access it for the first time
|
|
85
|
# Access and cache cwd early, so we don't access it for the first time
|
|
86
|
# after a working-copy update caused it to not exist (accessing it then
|
|
86
|
# after a working-copy update caused it to not exist (accessing it then
|
|
87
|
# raises an exception).
|
|
87
|
# raises an exception).
|
|
88
|
self._cwd
|
|
88
|
self._cwd
|
|
89
|
|
|
89
|
|
|
90
|
@contextlib.contextmanager
|
|
90
|
@contextlib.contextmanager
|
|
91
|
def parentchange(self):
|
|
91
|
def parentchange(self):
|
|
92
|
'''Context manager for handling dirstate parents.
|
|
92
|
'''Context manager for handling dirstate parents.
|
|
93
|
|
|
93
|
|
|
94
|
If an exception occurs in the scope of the context manager,
|
|
94
|
If an exception occurs in the scope of the context manager,
|
|
95
|
the incoherent dirstate won't be written when wlock is
|
|
95
|
the incoherent dirstate won't be written when wlock is
|
|
96
|
released.
|
|
96
|
released.
|
|
97
|
'''
|
|
97
|
'''
|
|
98
|
self._parentwriters += 1
|
|
98
|
self._parentwriters += 1
|
|
99
|
yield
|
|
99
|
yield
|
|
100
|
# Typically we want the "undo" step of a context manager in a
|
|
100
|
# Typically we want the "undo" step of a context manager in a
|
|
101
|
# finally block so it happens even when an exception
|
|
101
|
# finally block so it happens even when an exception
|
|
102
|
# occurs. In this case, however, we only want to decrement
|
|
102
|
# occurs. In this case, however, we only want to decrement
|
|
103
|
# parentwriters if the code in the with statement exits
|
|
103
|
# parentwriters if the code in the with statement exits
|
|
104
|
# normally, so we don't have a try/finally here on purpose.
|
|
104
|
# normally, so we don't have a try/finally here on purpose.
|
|
105
|
self._parentwriters -= 1
|
|
105
|
self._parentwriters -= 1
|
|
106
|
|
|
106
|
|
|
107
|
def pendingparentchange(self):
|
|
107
|
def pendingparentchange(self):
|
|
108
|
'''Returns true if the dirstate is in the middle of a set of changes
|
|
108
|
'''Returns true if the dirstate is in the middle of a set of changes
|
|
109
|
that modify the dirstate parent.
|
|
109
|
that modify the dirstate parent.
|
|
110
|
'''
|
|
110
|
'''
|
|
111
|
return self._parentwriters > 0
|
|
111
|
return self._parentwriters > 0
|
|
112
|
|
|
112
|
|
|
113
|
@propertycache
|
|
113
|
@propertycache
|
|
114
|
def _map(self):
|
|
114
|
def _map(self):
|
|
115
|
"""Return the dirstate contents (see documentation for dirstatemap)."""
|
|
115
|
"""Return the dirstate contents (see documentation for dirstatemap)."""
|
|
116
|
self._map = self._mapcls(self._ui, self._opener, self._root)
|
|
116
|
self._map = self._mapcls(self._ui, self._opener, self._root)
|
|
117
|
return self._map
|
|
117
|
return self._map
|
|
118
|
|
|
118
|
|
|
119
|
@property
|
|
119
|
@property
|
|
120
|
def _sparsematcher(self):
|
|
120
|
def _sparsematcher(self):
|
|
121
|
"""The matcher for the sparse checkout.
|
|
121
|
"""The matcher for the sparse checkout.
|
|
122
|
|
|
122
|
|
|
123
|
The working directory may not include every file from a manifest. The
|
|
123
|
The working directory may not include every file from a manifest. The
|
|
124
|
matcher obtained by this property will match a path if it is to be
|
|
124
|
matcher obtained by this property will match a path if it is to be
|
|
125
|
included in the working directory.
|
|
125
|
included in the working directory.
|
|
126
|
"""
|
|
126
|
"""
|
|
127
|
# TODO there is potential to cache this property. For now, the matcher
|
|
127
|
# TODO there is potential to cache this property. For now, the matcher
|
|
128
|
# is resolved on every access. (But the called function does use a
|
|
128
|
# is resolved on every access. (But the called function does use a
|
|
129
|
# cache to keep the lookup fast.)
|
|
129
|
# cache to keep the lookup fast.)
|
|
130
|
return self._sparsematchfn()
|
|
130
|
return self._sparsematchfn()
|
|
131
|
|
|
131
|
|
|
132
|
@repocache('branch')
|
|
132
|
@repocache('branch')
|
|
133
|
def _branch(self):
|
|
133
|
def _branch(self):
|
|
134
|
try:
|
|
134
|
try:
|
|
135
|
return self._opener.read("branch").strip() or "default"
|
|
135
|
return self._opener.read("branch").strip() or "default"
|
|
136
|
except IOError as inst:
|
|
136
|
except IOError as inst:
|
|
137
|
if inst.errno != errno.ENOENT:
|
|
137
|
if inst.errno != errno.ENOENT:
|
|
138
|
raise
|
|
138
|
raise
|
|
139
|
return "default"
|
|
139
|
return "default"
|
|
140
|
|
|
140
|
|
|
141
|
@property
|
|
141
|
@property
|
|
142
|
def _pl(self):
|
|
142
|
def _pl(self):
|
|
143
|
return self._map.parents()
|
|
143
|
return self._map.parents()
|
|
144
|
|
|
144
|
|
|
145
|
def hasdir(self, d):
|
|
145
|
def hasdir(self, d):
|
|
146
|
return self._map.hastrackeddir(d)
|
|
146
|
return self._map.hastrackeddir(d)
|
|
147
|
|
|
147
|
|
|
148
|
@rootcache('.hgignore')
|
|
148
|
@rootcache('.hgignore')
|
|
149
|
def _ignore(self):
|
|
149
|
def _ignore(self):
|
|
150
|
files = self._ignorefiles()
|
|
150
|
files = self._ignorefiles()
|
|
151
|
if not files:
|
|
151
|
if not files:
|
|
152
|
return matchmod.never()
|
|
152
|
return matchmod.never()
|
|
153
|
|
|
153
|
|
|
154
|
pats = ['include:%s' % f for f in files]
|
|
154
|
pats = ['include:%s' % f for f in files]
|
|
155
|
return matchmod.match(self._root, '', [], pats, warn=self._ui.warn)
|
|
155
|
return matchmod.match(self._root, '', [], pats, warn=self._ui.warn)
|
|
156
|
|
|
156
|
|
|
157
|
@propertycache
|
|
157
|
@propertycache
|
|
158
|
def _slash(self):
|
|
158
|
def _slash(self):
|
|
159
|
return self._ui.configbool('ui', 'slash') and pycompat.ossep != '/'
|
|
159
|
return self._ui.configbool('ui', 'slash') and pycompat.ossep != '/'
|
|
160
|
|
|
160
|
|
|
161
|
@propertycache
|
|
161
|
@propertycache
|
|
162
|
def _checklink(self):
|
|
162
|
def _checklink(self):
|
|
163
|
return util.checklink(self._root)
|
|
163
|
return util.checklink(self._root)
|
|
164
|
|
|
164
|
|
|
165
|
@propertycache
|
|
165
|
@propertycache
|
|
166
|
def _checkexec(self):
|
|
166
|
def _checkexec(self):
|
|
167
|
return util.checkexec(self._root)
|
|
167
|
return util.checkexec(self._root)
|
|
168
|
|
|
168
|
|
|
169
|
@propertycache
|
|
169
|
@propertycache
|
|
170
|
def _checkcase(self):
|
|
170
|
def _checkcase(self):
|
|
171
|
return not util.fscasesensitive(self._join('.hg'))
|
|
171
|
return not util.fscasesensitive(self._join('.hg'))
|
|
172
|
|
|
172
|
|
|
173
|
def _join(self, f):
|
|
173
|
def _join(self, f):
|
|
174
|
# much faster than os.path.join()
|
|
174
|
# much faster than os.path.join()
|
|
175
|
# it's safe because f is always a relative path
|
|
175
|
# it's safe because f is always a relative path
|
|
176
|
return self._rootdir + f
|
|
176
|
return self._rootdir + f
|
|
177
|
|
|
177
|
|
|
178
|
def flagfunc(self, buildfallback):
|
|
178
|
def flagfunc(self, buildfallback):
|
|
179
|
if self._checklink and self._checkexec:
|
|
179
|
if self._checklink and self._checkexec:
|
|
180
|
def f(x):
|
|
180
|
def f(x):
|
|
181
|
try:
|
|
181
|
try:
|
|
182
|
st = os.lstat(self._join(x))
|
|
182
|
st = os.lstat(self._join(x))
|
|
183
|
if util.statislink(st):
|
|
183
|
if util.statislink(st):
|
|
184
|
return 'l'
|
|
184
|
return 'l'
|
|
185
|
if util.statisexec(st):
|
|
185
|
if util.statisexec(st):
|
|
186
|
return 'x'
|
|
186
|
return 'x'
|
|
187
|
except OSError:
|
|
187
|
except OSError:
|
|
188
|
pass
|
|
188
|
pass
|
|
189
|
return ''
|
|
189
|
return ''
|
|
190
|
return f
|
|
190
|
return f
|
|
191
|
|
|
191
|
|
|
192
|
fallback = buildfallback()
|
|
192
|
fallback = buildfallback()
|
|
193
|
if self._checklink:
|
|
193
|
if self._checklink:
|
|
194
|
def f(x):
|
|
194
|
def f(x):
|
|
195
|
if os.path.islink(self._join(x)):
|
|
195
|
if os.path.islink(self._join(x)):
|
|
196
|
return 'l'
|
|
196
|
return 'l'
|
|
197
|
if 'x' in fallback(x):
|
|
197
|
if 'x' in fallback(x):
|
|
198
|
return 'x'
|
|
198
|
return 'x'
|
|
199
|
return ''
|
|
199
|
return ''
|
|
200
|
return f
|
|
200
|
return f
|
|
201
|
if self._checkexec:
|
|
201
|
if self._checkexec:
|
|
202
|
def f(x):
|
|
202
|
def f(x):
|
|
203
|
if 'l' in fallback(x):
|
|
203
|
if 'l' in fallback(x):
|
|
204
|
return 'l'
|
|
204
|
return 'l'
|
|
205
|
if util.isexec(self._join(x)):
|
|
205
|
if util.isexec(self._join(x)):
|
|
206
|
return 'x'
|
|
206
|
return 'x'
|
|
207
|
return ''
|
|
207
|
return ''
|
|
208
|
return f
|
|
208
|
return f
|
|
209
|
else:
|
|
209
|
else:
|
|
210
|
return fallback
|
|
210
|
return fallback
|
|
211
|
|
|
211
|
|
|
212
|
@propertycache
|
|
212
|
@propertycache
|
|
213
|
def _cwd(self):
|
|
213
|
def _cwd(self):
|
|
214
|
# internal config: ui.forcecwd
|
|
214
|
# internal config: ui.forcecwd
|
|
215
|
forcecwd = self._ui.config('ui', 'forcecwd')
|
|
215
|
forcecwd = self._ui.config('ui', 'forcecwd')
|
|
216
|
if forcecwd:
|
|
216
|
if forcecwd:
|
|
217
|
return forcecwd
|
|
217
|
return forcecwd
|
|
218
|
return encoding.getcwd()
|
|
218
|
return encoding.getcwd()
|
|
219
|
|
|
219
|
|
|
220
|
def getcwd(self):
|
|
220
|
def getcwd(self):
|
|
221
|
'''Return the path from which a canonical path is calculated.
|
|
221
|
'''Return the path from which a canonical path is calculated.
|
|
222
|
|
|
222
|
|
|
223
|
This path should be used to resolve file patterns or to convert
|
|
223
|
This path should be used to resolve file patterns or to convert
|
|
224
|
canonical paths back to file paths for display. It shouldn't be
|
|
224
|
canonical paths back to file paths for display. It shouldn't be
|
|
225
|
used to get real file paths. Use vfs functions instead.
|
|
225
|
used to get real file paths. Use vfs functions instead.
|
|
226
|
'''
|
|
226
|
'''
|
|
227
|
cwd = self._cwd
|
|
227
|
cwd = self._cwd
|
|
228
|
if cwd == self._root:
|
|
228
|
if cwd == self._root:
|
|
229
|
return ''
|
|
229
|
return ''
|
|
230
|
# self._root ends with a path separator if self._root is '/' or 'C:\'
|
|
230
|
# self._root ends with a path separator if self._root is '/' or 'C:\'
|
|
231
|
rootsep = self._root
|
|
231
|
rootsep = self._root
|
|
232
|
if not util.endswithsep(rootsep):
|
|
232
|
if not util.endswithsep(rootsep):
|
|
233
|
rootsep += pycompat.ossep
|
|
233
|
rootsep += pycompat.ossep
|
|
234
|
if cwd.startswith(rootsep):
|
|
234
|
if cwd.startswith(rootsep):
|
|
235
|
return cwd[len(rootsep):]
|
|
235
|
return cwd[len(rootsep):]
|
|
236
|
else:
|
|
236
|
else:
|
|
237
|
# we're outside the repo. return an absolute path.
|
|
237
|
# we're outside the repo. return an absolute path.
|
|
238
|
return cwd
|
|
238
|
return cwd
|
|
239
|
|
|
239
|
|
|
240
|
def pathto(self, f, cwd=None):
|
|
240
|
def pathto(self, f, cwd=None):
|
|
241
|
if cwd is None:
|
|
241
|
if cwd is None:
|
|
242
|
cwd = self.getcwd()
|
|
242
|
cwd = self.getcwd()
|
|
243
|
path = util.pathto(self._root, cwd, f)
|
|
243
|
path = util.pathto(self._root, cwd, f)
|
|
244
|
if self._slash:
|
|
244
|
if self._slash:
|
|
245
|
return util.pconvert(path)
|
|
245
|
return util.pconvert(path)
|
|
246
|
return path
|
|
246
|
return path
|
|
247
|
|
|
247
|
|
|
248
|
def __getitem__(self, key):
|
|
248
|
def __getitem__(self, key):
|
|
249
|
'''Return the current state of key (a filename) in the dirstate.
|
|
249
|
'''Return the current state of key (a filename) in the dirstate.
|
|
250
|
|
|
250
|
|
|
251
|
States are:
|
|
251
|
States are:
|
|
252
|
n normal
|
|
252
|
n normal
|
|
253
|
m needs merging
|
|
253
|
m needs merging
|
|
254
|
r marked for removal
|
|
254
|
r marked for removal
|
|
255
|
a marked for addition
|
|
255
|
a marked for addition
|
|
256
|
? not tracked
|
|
256
|
? not tracked
|
|
257
|
'''
|
|
257
|
'''
|
|
258
|
return self._map.get(key, ("?",))[0]
|
|
258
|
return self._map.get(key, ("?",))[0]
|
|
259
|
|
|
259
|
|
|
260
|
def __contains__(self, key):
|
|
260
|
def __contains__(self, key):
|
|
261
|
return key in self._map
|
|
261
|
return key in self._map
|
|
262
|
|
|
262
|
|
|
263
|
def __iter__(self):
|
|
263
|
def __iter__(self):
|
|
264
|
return iter(sorted(self._map))
|
|
264
|
return iter(sorted(self._map))
|
|
265
|
|
|
265
|
|
|
266
|
def items(self):
|
|
266
|
def items(self):
|
|
267
|
return self._map.iteritems()
|
|
267
|
return self._map.iteritems()
|
|
268
|
|
|
268
|
|
|
269
|
iteritems = items
|
|
269
|
iteritems = items
|
|
270
|
|
|
270
|
|
|
271
|
def parents(self):
|
|
271
|
def parents(self):
|
|
272
|
return [self._validate(p) for p in self._pl]
|
|
272
|
return [self._validate(p) for p in self._pl]
|
|
273
|
|
|
273
|
|
|
274
|
def p1(self):
|
|
274
|
def p1(self):
|
|
275
|
return self._validate(self._pl[0])
|
|
275
|
return self._validate(self._pl[0])
|
|
276
|
|
|
276
|
|
|
277
|
def p2(self):
|
|
277
|
def p2(self):
|
|
278
|
return self._validate(self._pl[1])
|
|
278
|
return self._validate(self._pl[1])
|
|
279
|
|
|
279
|
|
|
280
|
def branch(self):
|
|
280
|
def branch(self):
|
|
281
|
return encoding.tolocal(self._branch)
|
|
281
|
return encoding.tolocal(self._branch)
|
|
282
|
|
|
282
|
|
|
283
|
def setparents(self, p1, p2=nullid):
|
|
283
|
def setparents(self, p1, p2=nullid):
|
|
284
|
"""Set dirstate parents to p1 and p2.
|
|
284
|
"""Set dirstate parents to p1 and p2.
|
|
285
|
|
|
285
|
|
|
286
|
When moving from two parents to one, 'm' merged entries a
|
|
286
|
When moving from two parents to one, 'm' merged entries a
|
|
287
|
adjusted to normal and previous copy records discarded and
|
|
287
|
adjusted to normal and previous copy records discarded and
|
|
288
|
returned by the call.
|
|
288
|
returned by the call.
|
|
289
|
|
|
289
|
|
|
290
|
See localrepo.setparents()
|
|
290
|
See localrepo.setparents()
|
|
291
|
"""
|
|
291
|
"""
|
|
292
|
if self._parentwriters == 0:
|
|
292
|
if self._parentwriters == 0:
|
|
293
|
raise ValueError("cannot set dirstate parent outside of "
|
|
293
|
raise ValueError("cannot set dirstate parent outside of "
|
|
294
|
"dirstate.parentchange context manager")
|
|
294
|
"dirstate.parentchange context manager")
|
|
295
|
|
|
295
|
|
|
296
|
self._dirty = True
|
|
296
|
self._dirty = True
|
|
297
|
oldp2 = self._pl[1]
|
|
297
|
oldp2 = self._pl[1]
|
|
298
|
if self._origpl is None:
|
|
298
|
if self._origpl is None:
|
|
299
|
self._origpl = self._pl
|
|
299
|
self._origpl = self._pl
|
|
300
|
self._map.setparents(p1, p2)
|
|
300
|
self._map.setparents(p1, p2)
|
|
301
|
copies = {}
|
|
301
|
copies = {}
|
|
302
|
if oldp2 != nullid and p2 == nullid:
|
|
302
|
if oldp2 != nullid and p2 == nullid:
|
|
303
|
candidatefiles = self._map.nonnormalset.union(
|
|
303
|
candidatefiles = self._map.nonnormalset.union(
|
|
304
|
self._map.otherparentset)
|
|
304
|
self._map.otherparentset)
|
|
305
|
for f in candidatefiles:
|
|
305
|
for f in candidatefiles:
|
|
306
|
s = self._map.get(f)
|
|
306
|
s = self._map.get(f)
|
|
307
|
if s is None:
|
|
307
|
if s is None:
|
|
308
|
continue
|
|
308
|
continue
|
|
309
|
|
|
309
|
|
|
310
|
# Discard 'm' markers when moving away from a merge state
|
|
310
|
# Discard 'm' markers when moving away from a merge state
|
|
311
|
if s[0] == 'm':
|
|
311
|
if s[0] == 'm':
|
|
312
|
source = self._map.copymap.get(f)
|
|
312
|
source = self._map.copymap.get(f)
|
|
313
|
if source:
|
|
313
|
if source:
|
|
314
|
copies[f] = source
|
|
314
|
copies[f] = source
|
|
315
|
self.normallookup(f)
|
|
315
|
self.normallookup(f)
|
|
316
|
# Also fix up otherparent markers
|
|
316
|
# Also fix up otherparent markers
|
|
317
|
elif s[0] == 'n' and s[2] == -2:
|
|
317
|
elif s[0] == 'n' and s[2] == -2:
|
|
318
|
source = self._map.copymap.get(f)
|
|
318
|
source = self._map.copymap.get(f)
|
|
319
|
if source:
|
|
319
|
if source:
|
|
320
|
copies[f] = source
|
|
320
|
copies[f] = source
|
|
321
|
self.add(f)
|
|
321
|
self.add(f)
|
|
322
|
return copies
|
|
322
|
return copies
|
|
323
|
|
|
323
|
|
|
324
|
def setbranch(self, branch):
|
|
324
|
def setbranch(self, branch):
|
|
325
|
self.__class__._branch.set(self, encoding.fromlocal(branch))
|
|
325
|
self.__class__._branch.set(self, encoding.fromlocal(branch))
|
|
326
|
f = self._opener('branch', 'w', atomictemp=True, checkambig=True)
|
|
326
|
f = self._opener('branch', 'w', atomictemp=True, checkambig=True)
|
|
327
|
try:
|
|
327
|
try:
|
|
328
|
f.write(self._branch + '\n')
|
|
328
|
f.write(self._branch + '\n')
|
|
329
|
f.close()
|
|
329
|
f.close()
|
|
330
|
|
|
330
|
|
|
331
|
# make sure filecache has the correct stat info for _branch after
|
|
331
|
# make sure filecache has the correct stat info for _branch after
|
|
332
|
# replacing the underlying file
|
|
332
|
# replacing the underlying file
|
|
333
|
ce = self._filecache['_branch']
|
|
333
|
ce = self._filecache['_branch']
|
|
334
|
if ce:
|
|
334
|
if ce:
|
|
335
|
ce.refresh()
|
|
335
|
ce.refresh()
|
|
336
|
except: # re-raises
|
|
336
|
except: # re-raises
|
|
337
|
f.discard()
|
|
337
|
f.discard()
|
|
338
|
raise
|
|
338
|
raise
|
|
339
|
|
|
339
|
|
|
340
|
def invalidate(self):
|
|
340
|
def invalidate(self):
|
|
341
|
'''Causes the next access to reread the dirstate.
|
|
341
|
'''Causes the next access to reread the dirstate.
|
|
342
|
|
|
342
|
|
|
343
|
This is different from localrepo.invalidatedirstate() because it always
|
|
343
|
This is different from localrepo.invalidatedirstate() because it always
|
|
344
|
rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
|
|
344
|
rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
|
|
345
|
check whether the dirstate has changed before rereading it.'''
|
|
345
|
check whether the dirstate has changed before rereading it.'''
|
|
346
|
|
|
346
|
|
|
347
|
for a in (r"_map", r"_branch", r"_ignore"):
|
|
347
|
for a in (r"_map", r"_branch", r"_ignore"):
|
|
348
|
if a in self.__dict__:
|
|
348
|
if a in self.__dict__:
|
|
349
|
delattr(self, a)
|
|
349
|
delattr(self, a)
|
|
350
|
self._lastnormaltime = 0
|
|
350
|
self._lastnormaltime = 0
|
|
351
|
self._dirty = False
|
|
351
|
self._dirty = False
|
|
352
|
self._updatedfiles.clear()
|
|
352
|
self._updatedfiles.clear()
|
|
353
|
self._parentwriters = 0
|
|
353
|
self._parentwriters = 0
|
|
354
|
self._origpl = None
|
|
354
|
self._origpl = None
|
|
355
|
|
|
355
|
|
|
356
|
def copy(self, source, dest):
|
|
356
|
def copy(self, source, dest):
|
|
357
|
"""Mark dest as a copy of source. Unmark dest if source is None."""
|
|
357
|
"""Mark dest as a copy of source. Unmark dest if source is None."""
|
|
358
|
if source == dest:
|
|
358
|
if source == dest:
|
|
359
|
return
|
|
359
|
return
|
|
360
|
self._dirty = True
|
|
360
|
self._dirty = True
|
|
361
|
if source is not None:
|
|
361
|
if source is not None:
|
|
362
|
self._map.copymap[dest] = source
|
|
362
|
self._map.copymap[dest] = source
|
|
363
|
self._updatedfiles.add(source)
|
|
363
|
self._updatedfiles.add(source)
|
|
364
|
self._updatedfiles.add(dest)
|
|
364
|
self._updatedfiles.add(dest)
|
|
365
|
elif self._map.copymap.pop(dest, None):
|
|
365
|
elif self._map.copymap.pop(dest, None):
|
|
366
|
self._updatedfiles.add(dest)
|
|
366
|
self._updatedfiles.add(dest)
|
|
367
|
|
|
367
|
|
|
368
|
def copied(self, file):
|
|
368
|
def copied(self, file):
|
|
369
|
return self._map.copymap.get(file, None)
|
|
369
|
return self._map.copymap.get(file, None)
|
|
370
|
|
|
370
|
|
|
371
|
def copies(self):
|
|
371
|
def copies(self):
|
|
372
|
return self._map.copymap
|
|
372
|
return self._map.copymap
|
|
373
|
|
|
373
|
|
|
374
|
def _addpath(self, f, state, mode, size, mtime):
|
|
374
|
def _addpath(self, f, state, mode, size, mtime):
|
|
375
|
oldstate = self[f]
|
|
375
|
oldstate = self[f]
|
|
376
|
if state == 'a' or oldstate == 'r':
|
|
376
|
if state == 'a' or oldstate == 'r':
|
|
377
|
scmutil.checkfilename(f)
|
|
377
|
scmutil.checkfilename(f)
|
|
378
|
if self._map.hastrackeddir(f):
|
|
378
|
if self._map.hastrackeddir(f):
|
|
379
|
raise error.Abort(_('directory %r already in dirstate') %
|
|
379
|
raise error.Abort(_('directory %r already in dirstate') %
|
|
380
|
pycompat.bytestr(f))
|
|
380
|
pycompat.bytestr(f))
|
|
381
|
# shadows
|
|
381
|
# shadows
|
|
382
|
for d in util.finddirs(f):
|
|
382
|
for d in util.finddirs(f):
|
|
383
|
if self._map.hastrackeddir(d):
|
|
383
|
if self._map.hastrackeddir(d):
|
|
384
|
break
|
|
384
|
break
|
|
385
|
entry = self._map.get(d)
|
|
385
|
entry = self._map.get(d)
|
|
386
|
if entry is not None and entry[0] != 'r':
|
|
386
|
if entry is not None and entry[0] != 'r':
|
|
387
|
raise error.Abort(
|
|
387
|
raise error.Abort(
|
|
388
|
_('file %r in dirstate clashes with %r') %
|
|
388
|
_('file %r in dirstate clashes with %r') %
|
|
389
|
(pycompat.bytestr(d), pycompat.bytestr(f)))
|
|
389
|
(pycompat.bytestr(d), pycompat.bytestr(f)))
|
|
390
|
self._dirty = True
|
|
390
|
self._dirty = True
|
|
391
|
self._updatedfiles.add(f)
|
|
391
|
self._updatedfiles.add(f)
|
|
392
|
self._map.addfile(f, oldstate, state, mode, size, mtime)
|
|
392
|
self._map.addfile(f, oldstate, state, mode, size, mtime)
|
|
393
|
|
|
393
|
|
|
394
|
def normal(self, f, parentfiledata=None):
|
|
394
|
def normal(self, f, parentfiledata=None):
|
|
395
|
'''Mark a file normal and clean.
|
|
395
|
'''Mark a file normal and clean.
|
|
396
|
|
|
396
|
|
|
397
|
parentfiledata: (mode, size, mtime) of the clean file
|
|
397
|
parentfiledata: (mode, size, mtime) of the clean file
|
|
398
|
|
|
398
|
|
|
399
|
parentfiledata should be computed from memory (for mode,
|
|
399
|
parentfiledata should be computed from memory (for mode,
|
|
400
|
size), as or close as possible from the point where we
|
|
400
|
size), as or close as possible from the point where we
|
|
401
|
determined the file was clean, to limit the risk of the
|
|
401
|
determined the file was clean, to limit the risk of the
|
|
402
|
file having been changed by an external process between the
|
|
402
|
file having been changed by an external process between the
|
|
403
|
moment where the file was determined to be clean and now.'''
|
|
403
|
moment where the file was determined to be clean and now.'''
|
|
404
|
if parentfiledata:
|
|
404
|
if parentfiledata:
|
|
405
|
(mode, size, mtime) = parentfiledata
|
|
405
|
(mode, size, mtime) = parentfiledata
|
|
406
|
else:
|
|
406
|
else:
|
|
407
|
s = os.lstat(self._join(f))
|
|
407
|
s = os.lstat(self._join(f))
|
|
408
|
mode = s.st_mode
|
|
408
|
mode = s.st_mode
|
|
409
|
size = s.st_size
|
|
409
|
size = s.st_size
|
|
410
|
mtime = s[stat.ST_MTIME]
|
|
410
|
mtime = s[stat.ST_MTIME]
|
|
411
|
self._addpath(f, 'n', mode, size & _rangemask, mtime & _rangemask)
|
|
411
|
self._addpath(f, 'n', mode, size & _rangemask, mtime & _rangemask)
|
|
412
|
self._map.copymap.pop(f, None)
|
|
412
|
self._map.copymap.pop(f, None)
|
|
413
|
if f in self._map.nonnormalset:
|
|
413
|
if f in self._map.nonnormalset:
|
|
414
|
self._map.nonnormalset.remove(f)
|
|
414
|
self._map.nonnormalset.remove(f)
|
|
415
|
if mtime > self._lastnormaltime:
|
|
415
|
if mtime > self._lastnormaltime:
|
|
416
|
# Remember the most recent modification timeslot for status(),
|
|
416
|
# Remember the most recent modification timeslot for status(),
|
|
417
|
# to make sure we won't miss future size-preserving file content
|
|
417
|
# to make sure we won't miss future size-preserving file content
|
|
418
|
# modifications that happen within the same timeslot.
|
|
418
|
# modifications that happen within the same timeslot.
|
|
419
|
self._lastnormaltime = mtime
|
|
419
|
self._lastnormaltime = mtime
|
|
420
|
|
|
420
|
|
|
421
|
def normallookup(self, f):
|
|
421
|
def normallookup(self, f):
|
|
422
|
'''Mark a file normal, but possibly dirty.'''
|
|
422
|
'''Mark a file normal, but possibly dirty.'''
|
|
423
|
if self._pl[1] != nullid:
|
|
423
|
if self._pl[1] != nullid:
|
|
424
|
# if there is a merge going on and the file was either
|
|
424
|
# if there is a merge going on and the file was either
|
|
425
|
# in state 'm' (-1) or coming from other parent (-2) before
|
|
425
|
# in state 'm' (-1) or coming from other parent (-2) before
|
|
426
|
# being removed, restore that state.
|
|
426
|
# being removed, restore that state.
|
|
427
|
entry = self._map.get(f)
|
|
427
|
entry = self._map.get(f)
|
|
428
|
if entry is not None:
|
|
428
|
if entry is not None:
|
|
429
|
if entry[0] == 'r' and entry[2] in (-1, -2):
|
|
429
|
if entry[0] == 'r' and entry[2] in (-1, -2):
|
|
430
|
source = self._map.copymap.get(f)
|
|
430
|
source = self._map.copymap.get(f)
|
|
431
|
if entry[2] == -1:
|
|
431
|
if entry[2] == -1:
|
|
432
|
self.merge(f)
|
|
432
|
self.merge(f)
|
|
433
|
elif entry[2] == -2:
|
|
433
|
elif entry[2] == -2:
|
|
434
|
self.otherparent(f)
|
|
434
|
self.otherparent(f)
|
|
435
|
if source:
|
|
435
|
if source:
|
|
436
|
self.copy(source, f)
|
|
436
|
self.copy(source, f)
|
|
437
|
return
|
|
437
|
return
|
|
438
|
if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
|
|
438
|
if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
|
|
439
|
return
|
|
439
|
return
|
|
440
|
self._addpath(f, 'n', 0, -1, -1)
|
|
440
|
self._addpath(f, 'n', 0, -1, -1)
|
|
441
|
self._map.copymap.pop(f, None)
|
|
441
|
self._map.copymap.pop(f, None)
|
|
442
|
|
|
442
|
|
|
443
|
def otherparent(self, f):
|
|
443
|
def otherparent(self, f):
|
|
444
|
'''Mark as coming from the other parent, always dirty.'''
|
|
444
|
'''Mark as coming from the other parent, always dirty.'''
|
|
445
|
if self._pl[1] == nullid:
|
|
445
|
if self._pl[1] == nullid:
|
|
446
|
raise error.Abort(_("setting %r to other parent "
|
|
446
|
raise error.Abort(_("setting %r to other parent "
|
|
447
|
"only allowed in merges") % f)
|
|
447
|
"only allowed in merges") % f)
|
|
448
|
if f in self and self[f] == 'n':
|
|
448
|
if f in self and self[f] == 'n':
|
|
449
|
# merge-like
|
|
449
|
# merge-like
|
|
450
|
self._addpath(f, 'm', 0, -2, -1)
|
|
450
|
self._addpath(f, 'm', 0, -2, -1)
|
|
451
|
else:
|
|
451
|
else:
|
|
452
|
# add-like
|
|
452
|
# add-like
|
|
453
|
self._addpath(f, 'n', 0, -2, -1)
|
|
453
|
self._addpath(f, 'n', 0, -2, -1)
|
|
454
|
self._map.copymap.pop(f, None)
|
|
454
|
self._map.copymap.pop(f, None)
|
|
455
|
|
|
455
|
|
|
456
|
def add(self, f):
|
|
456
|
def add(self, f):
|
|
457
|
'''Mark a file added.'''
|
|
457
|
'''Mark a file added.'''
|
|
458
|
self._addpath(f, 'a', 0, -1, -1)
|
|
458
|
self._addpath(f, 'a', 0, -1, -1)
|
|
459
|
self._map.copymap.pop(f, None)
|
|
459
|
self._map.copymap.pop(f, None)
|
|
460
|
|
|
460
|
|
|
461
|
def remove(self, f):
|
|
461
|
def remove(self, f):
|
|
462
|
'''Mark a file removed.'''
|
|
462
|
'''Mark a file removed.'''
|
|
463
|
self._dirty = True
|
|
463
|
self._dirty = True
|
|
464
|
oldstate = self[f]
|
|
464
|
oldstate = self[f]
|
|
465
|
size = 0
|
|
465
|
size = 0
|
|
466
|
if self._pl[1] != nullid:
|
|
466
|
if self._pl[1] != nullid:
|
|
467
|
entry = self._map.get(f)
|
|
467
|
entry = self._map.get(f)
|
|
468
|
if entry is not None:
|
|
468
|
if entry is not None:
|
|
469
|
# backup the previous state
|
|
469
|
# backup the previous state
|
|
470
|
if entry[0] == 'm': # merge
|
|
470
|
if entry[0] == 'm': # merge
|
|
471
|
size = -1
|
|
471
|
size = -1
|
|
472
|
elif entry[0] == 'n' and entry[2] == -2: # other parent
|
|
472
|
elif entry[0] == 'n' and entry[2] == -2: # other parent
|
|
473
|
size = -2
|
|
473
|
size = -2
|
|
474
|
self._map.otherparentset.add(f)
|
|
474
|
self._map.otherparentset.add(f)
|
|
475
|
self._updatedfiles.add(f)
|
|
475
|
self._updatedfiles.add(f)
|
|
476
|
self._map.removefile(f, oldstate, size)
|
|
476
|
self._map.removefile(f, oldstate, size)
|
|
477
|
if size == 0:
|
|
477
|
if size == 0:
|
|
478
|
self._map.copymap.pop(f, None)
|
|
478
|
self._map.copymap.pop(f, None)
|
|
479
|
|
|
479
|
|
|
480
|
def merge(self, f):
|
|
480
|
def merge(self, f):
|
|
481
|
'''Mark a file merged.'''
|
|
481
|
'''Mark a file merged.'''
|
|
482
|
if self._pl[1] == nullid:
|
|
482
|
if self._pl[1] == nullid:
|
|
483
|
return self.normallookup(f)
|
|
483
|
return self.normallookup(f)
|
|
484
|
return self.otherparent(f)
|
|
484
|
return self.otherparent(f)
|
|
485
|
|
|
485
|
|
|
486
|
def drop(self, f):
|
|
486
|
def drop(self, f):
|
|
487
|
'''Drop a file from the dirstate'''
|
|
487
|
'''Drop a file from the dirstate'''
|
|
488
|
oldstate = self[f]
|
|
488
|
oldstate = self[f]
|
|
489
|
if self._map.dropfile(f, oldstate):
|
|
489
|
if self._map.dropfile(f, oldstate):
|
|
490
|
self._dirty = True
|
|
490
|
self._dirty = True
|
|
491
|
self._updatedfiles.add(f)
|
|
491
|
self._updatedfiles.add(f)
|
|
492
|
self._map.copymap.pop(f, None)
|
|
492
|
self._map.copymap.pop(f, None)
|
|
493
|
|
|
493
|
|
|
494
|
def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
|
|
494
|
def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
|
|
495
|
if exists is None:
|
|
495
|
if exists is None:
|
|
496
|
exists = os.path.lexists(os.path.join(self._root, path))
|
|
496
|
exists = os.path.lexists(os.path.join(self._root, path))
|
|
497
|
if not exists:
|
|
497
|
if not exists:
|
|
498
|
# Maybe a path component exists
|
|
498
|
# Maybe a path component exists
|
|
499
|
if not ignoremissing and '/' in path:
|
|
499
|
if not ignoremissing and '/' in path:
|
|
500
|
d, f = path.rsplit('/', 1)
|
|
500
|
d, f = path.rsplit('/', 1)
|
|
501
|
d = self._normalize(d, False, ignoremissing, None)
|
|
501
|
d = self._normalize(d, False, ignoremissing, None)
|
|
502
|
folded = d + "/" + f
|
|
502
|
folded = d + "/" + f
|
|
503
|
else:
|
|
503
|
else:
|
|
504
|
# No path components, preserve original case
|
|
504
|
# No path components, preserve original case
|
|
505
|
folded = path
|
|
505
|
folded = path
|
|
506
|
else:
|
|
506
|
else:
|
|
507
|
# recursively normalize leading directory components
|
|
507
|
# recursively normalize leading directory components
|
|
508
|
# against dirstate
|
|
508
|
# against dirstate
|
|
509
|
if '/' in normed:
|
|
509
|
if '/' in normed:
|
|
510
|
d, f = normed.rsplit('/', 1)
|
|
510
|
d, f = normed.rsplit('/', 1)
|
|
511
|
d = self._normalize(d, False, ignoremissing, True)
|
|
511
|
d = self._normalize(d, False, ignoremissing, True)
|
|
512
|
r = self._root + "/" + d
|
|
512
|
r = self._root + "/" + d
|
|
513
|
folded = d + "/" + util.fspath(f, r)
|
|
513
|
folded = d + "/" + util.fspath(f, r)
|
|
514
|
else:
|
|
514
|
else:
|
|
515
|
folded = util.fspath(normed, self._root)
|
|
515
|
folded = util.fspath(normed, self._root)
|
|
516
|
storemap[normed] = folded
|
|
516
|
storemap[normed] = folded
|
|
517
|
|
|
517
|
|
|
518
|
return folded
|
|
518
|
return folded
|
|
519
|
|
|
519
|
|
|
520
|
def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
|
|
520
|
def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
|
|
521
|
normed = util.normcase(path)
|
|
521
|
normed = util.normcase(path)
|
|
522
|
folded = self._map.filefoldmap.get(normed, None)
|
|
522
|
folded = self._map.filefoldmap.get(normed, None)
|
|
523
|
if folded is None:
|
|
523
|
if folded is None:
|
|
524
|
if isknown:
|
|
524
|
if isknown:
|
|
525
|
folded = path
|
|
525
|
folded = path
|
|
526
|
else:
|
|
526
|
else:
|
|
527
|
folded = self._discoverpath(path, normed, ignoremissing, exists,
|
|
527
|
folded = self._discoverpath(path, normed, ignoremissing, exists,
|
|
528
|
self._map.filefoldmap)
|
|
528
|
self._map.filefoldmap)
|
|
529
|
return folded
|
|
529
|
return folded
|
|
530
|
|
|
530
|
|
|
531
|
def _normalize(self, path, isknown, ignoremissing=False, exists=None):
|
|
531
|
def _normalize(self, path, isknown, ignoremissing=False, exists=None):
|
|
532
|
normed = util.normcase(path)
|
|
532
|
normed = util.normcase(path)
|
|
533
|
folded = self._map.filefoldmap.get(normed, None)
|
|
533
|
folded = self._map.filefoldmap.get(normed, None)
|
|
534
|
if folded is None:
|
|
534
|
if folded is None:
|
|
535
|
folded = self._map.dirfoldmap.get(normed, None)
|
|
535
|
folded = self._map.dirfoldmap.get(normed, None)
|
|
536
|
if folded is None:
|
|
536
|
if folded is None:
|
|
537
|
if isknown:
|
|
537
|
if isknown:
|
|
538
|
folded = path
|
|
538
|
folded = path
|
|
539
|
else:
|
|
539
|
else:
|
|
540
|
# store discovered result in dirfoldmap so that future
|
|
540
|
# store discovered result in dirfoldmap so that future
|
|
541
|
# normalizefile calls don't start matching directories
|
|
541
|
# normalizefile calls don't start matching directories
|
|
542
|
folded = self._discoverpath(path, normed, ignoremissing, exists,
|
|
542
|
folded = self._discoverpath(path, normed, ignoremissing, exists,
|
|
543
|
self._map.dirfoldmap)
|
|
543
|
self._map.dirfoldmap)
|
|
544
|
return folded
|
|
544
|
return folded
|
|
545
|
|
|
545
|
|
|
546
|
def normalize(self, path, isknown=False, ignoremissing=False):
|
|
546
|
def normalize(self, path, isknown=False, ignoremissing=False):
|
|
547
|
'''
|
|
547
|
'''
|
|
548
|
normalize the case of a pathname when on a casefolding filesystem
|
|
548
|
normalize the case of a pathname when on a casefolding filesystem
|
|
549
|
|
|
549
|
|
|
550
|
isknown specifies whether the filename came from walking the
|
|
550
|
isknown specifies whether the filename came from walking the
|
|
551
|
disk, to avoid extra filesystem access.
|
|
551
|
disk, to avoid extra filesystem access.
|
|
552
|
|
|
552
|
|
|
553
|
If ignoremissing is True, missing path are returned
|
|
553
|
If ignoremissing is True, missing path are returned
|
|
554
|
unchanged. Otherwise, we try harder to normalize possibly
|
|
554
|
unchanged. Otherwise, we try harder to normalize possibly
|
|
555
|
existing path components.
|
|
555
|
existing path components.
|
|
556
|
|
|
556
|
|
|
557
|
The normalized case is determined based on the following precedence:
|
|
557
|
The normalized case is determined based on the following precedence:
|
|
558
|
|
|
558
|
|
|
559
|
- version of name already stored in the dirstate
|
|
559
|
- version of name already stored in the dirstate
|
|
560
|
- version of name stored on disk
|
|
560
|
- version of name stored on disk
|
|
561
|
- version provided via command arguments
|
|
561
|
- version provided via command arguments
|
|
562
|
'''
|
|
562
|
'''
|
|
563
|
|
|
563
|
|
|
564
|
if self._checkcase:
|
|
564
|
if self._checkcase:
|
|
565
|
return self._normalize(path, isknown, ignoremissing)
|
|
565
|
return self._normalize(path, isknown, ignoremissing)
|
|
566
|
return path
|
|
566
|
return path
|
|
567
|
|
|
567
|
|
|
568
|
def clear(self):
|
|
568
|
def clear(self):
|
|
569
|
self._map.clear()
|
|
569
|
self._map.clear()
|
|
570
|
self._lastnormaltime = 0
|
|
570
|
self._lastnormaltime = 0
|
|
571
|
self._updatedfiles.clear()
|
|
571
|
self._updatedfiles.clear()
|
|
572
|
self._dirty = True
|
|
572
|
self._dirty = True
|
|
573
|
|
|
573
|
|
|
574
|
def rebuild(self, parent, allfiles, changedfiles=None):
|
|
574
|
def rebuild(self, parent, allfiles, changedfiles=None):
|
|
575
|
if changedfiles is None:
|
|
575
|
if changedfiles is None:
|
|
576
|
# Rebuild entire dirstate
|
|
576
|
# Rebuild entire dirstate
|
|
577
|
changedfiles = allfiles
|
|
577
|
changedfiles = allfiles
|
|
578
|
lastnormaltime = self._lastnormaltime
|
|
578
|
lastnormaltime = self._lastnormaltime
|
|
579
|
self.clear()
|
|
579
|
self.clear()
|
|
580
|
self._lastnormaltime = lastnormaltime
|
|
580
|
self._lastnormaltime = lastnormaltime
|
|
581
|
|
|
581
|
|
|
582
|
if self._origpl is None:
|
|
582
|
if self._origpl is None:
|
|
583
|
self._origpl = self._pl
|
|
583
|
self._origpl = self._pl
|
|
584
|
self._map.setparents(parent, nullid)
|
|
584
|
self._map.setparents(parent, nullid)
|
|
585
|
for f in changedfiles:
|
|
585
|
for f in changedfiles:
|
|
586
|
if f in allfiles:
|
|
586
|
if f in allfiles:
|
|
587
|
self.normallookup(f)
|
|
587
|
self.normallookup(f)
|
|
588
|
else:
|
|
588
|
else:
|
|
589
|
self.drop(f)
|
|
589
|
self.drop(f)
|
|
590
|
|
|
590
|
|
|
591
|
self._dirty = True
|
|
591
|
self._dirty = True
|
|
592
|
|
|
592
|
|
|
593
|
def identity(self):
|
|
593
|
def identity(self):
|
|
594
|
'''Return identity of dirstate itself to detect changing in storage
|
|
594
|
'''Return identity of dirstate itself to detect changing in storage
|
|
595
|
|
|
595
|
|
|
596
|
If identity of previous dirstate is equal to this, writing
|
|
596
|
If identity of previous dirstate is equal to this, writing
|
|
597
|
changes based on the former dirstate out can keep consistency.
|
|
597
|
changes based on the former dirstate out can keep consistency.
|
|
598
|
'''
|
|
598
|
'''
|
|
599
|
return self._map.identity
|
|
599
|
return self._map.identity
|
|
600
|
|
|
600
|
|
|
601
|
def write(self, tr):
|
|
601
|
def write(self, tr):
|
|
602
|
if not self._dirty:
|
|
602
|
if not self._dirty:
|
|
603
|
return
|
|
603
|
return
|
|
604
|
|
|
604
|
|
|
605
|
filename = self._filename
|
|
605
|
filename = self._filename
|
|
606
|
if tr:
|
|
606
|
if tr:
|
|
607
|
# 'dirstate.write()' is not only for writing in-memory
|
|
607
|
# 'dirstate.write()' is not only for writing in-memory
|
|
608
|
# changes out, but also for dropping ambiguous timestamp.
|
|
608
|
# changes out, but also for dropping ambiguous timestamp.
|
|
609
|
# delayed writing re-raise "ambiguous timestamp issue".
|
|
609
|
# delayed writing re-raise "ambiguous timestamp issue".
|
|
610
|
# See also the wiki page below for detail:
|
|
610
|
# See also the wiki page below for detail:
|
|
611
|
# https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
|
|
611
|
# https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
|
|
612
|
|
|
612
|
|
|
613
|
# emulate dropping timestamp in 'parsers.pack_dirstate'
|
|
613
|
# emulate dropping timestamp in 'parsers.pack_dirstate'
|
|
614
|
now = _getfsnow(self._opener)
|
|
614
|
now = _getfsnow(self._opener)
|
|
615
|
self._map.clearambiguoustimes(self._updatedfiles, now)
|
|
615
|
self._map.clearambiguoustimes(self._updatedfiles, now)
|
|
616
|
|
|
616
|
|
|
617
|
# emulate that all 'dirstate.normal' results are written out
|
|
617
|
# emulate that all 'dirstate.normal' results are written out
|
|
618
|
self._lastnormaltime = 0
|
|
618
|
self._lastnormaltime = 0
|
|
619
|
self._updatedfiles.clear()
|
|
619
|
self._updatedfiles.clear()
|
|
620
|
|
|
620
|
|
|
621
|
# delay writing in-memory changes out
|
|
621
|
# delay writing in-memory changes out
|
|
622
|
tr.addfilegenerator('dirstate', (self._filename,),
|
|
622
|
tr.addfilegenerator('dirstate', (self._filename,),
|
|
623
|
self._writedirstate, location='plain')
|
|
623
|
self._writedirstate, location='plain')
|
|
624
|
return
|
|
624
|
return
|
|
625
|
|
|
625
|
|
|
626
|
st = self._opener(filename, "w", atomictemp=True, checkambig=True)
|
|
626
|
st = self._opener(filename, "w", atomictemp=True, checkambig=True)
|
|
627
|
self._writedirstate(st)
|
|
627
|
self._writedirstate(st)
|
|
628
|
|
|
628
|
|
|
629
|
def addparentchangecallback(self, category, callback):
|
|
629
|
def addparentchangecallback(self, category, callback):
|
|
630
|
"""add a callback to be called when the wd parents are changed
|
|
630
|
"""add a callback to be called when the wd parents are changed
|
|
631
|
|
|
631
|
|
|
632
|
Callback will be called with the following arguments:
|
|
632
|
Callback will be called with the following arguments:
|
|
633
|
dirstate, (oldp1, oldp2), (newp1, newp2)
|
|
633
|
dirstate, (oldp1, oldp2), (newp1, newp2)
|
|
634
|
|
|
634
|
|
|
635
|
Category is a unique identifier to allow overwriting an old callback
|
|
635
|
Category is a unique identifier to allow overwriting an old callback
|
|
636
|
with a newer callback.
|
|
636
|
with a newer callback.
|
|
637
|
"""
|
|
637
|
"""
|
|
638
|
self._plchangecallbacks[category] = callback
|
|
638
|
self._plchangecallbacks[category] = callback
|
|
639
|
|
|
639
|
|
|
640
|
def _writedirstate(self, st):
|
|
640
|
def _writedirstate(self, st):
|
|
641
|
# notify callbacks about parents change
|
|
641
|
# notify callbacks about parents change
|
|
642
|
if self._origpl is not None and self._origpl != self._pl:
|
|
642
|
if self._origpl is not None and self._origpl != self._pl:
|
|
643
|
for c, callback in sorted(self._plchangecallbacks.iteritems()):
|
|
643
|
for c, callback in sorted(self._plchangecallbacks.iteritems()):
|
|
644
|
callback(self, self._origpl, self._pl)
|
|
644
|
callback(self, self._origpl, self._pl)
|
|
645
|
self._origpl = None
|
|
645
|
self._origpl = None
|
|
646
|
# use the modification time of the newly created temporary file as the
|
|
646
|
# use the modification time of the newly created temporary file as the
|
|
647
|
# filesystem's notion of 'now'
|
|
647
|
# filesystem's notion of 'now'
|
|
648
|
now = util.fstat(st)[stat.ST_MTIME] & _rangemask
|
|
648
|
now = util.fstat(st)[stat.ST_MTIME] & _rangemask
|
|
649
|
|
|
649
|
|
|
650
|
# enough 'delaywrite' prevents 'pack_dirstate' from dropping
|
|
650
|
# enough 'delaywrite' prevents 'pack_dirstate' from dropping
|
|
651
|
# timestamp of each entries in dirstate, because of 'now > mtime'
|
|
651
|
# timestamp of each entries in dirstate, because of 'now > mtime'
|
|
652
|
delaywrite = self._ui.configint('debug', 'dirstate.delaywrite')
|
|
652
|
delaywrite = self._ui.configint('debug', 'dirstate.delaywrite')
|
|
653
|
if delaywrite > 0:
|
|
653
|
if delaywrite > 0:
|
|
654
|
# do we have any files to delay for?
|
|
654
|
# do we have any files to delay for?
|
|
655
|
for f, e in self._map.iteritems():
|
|
655
|
for f, e in self._map.iteritems():
|
|
656
|
if e[0] == 'n' and e[3] == now:
|
|
656
|
if e[0] == 'n' and e[3] == now:
|
|
657
|
import time # to avoid useless import
|
|
657
|
import time # to avoid useless import
|
|
658
|
# rather than sleep n seconds, sleep until the next
|
|
658
|
# rather than sleep n seconds, sleep until the next
|
|
659
|
# multiple of n seconds
|
|
659
|
# multiple of n seconds
|
|
660
|
clock = time.time()
|
|
660
|
clock = time.time()
|
|
661
|
start = int(clock) - (int(clock) % delaywrite)
|
|
661
|
start = int(clock) - (int(clock) % delaywrite)
|
|
662
|
end = start + delaywrite
|
|
662
|
end = start + delaywrite
|
|
663
|
time.sleep(end - clock)
|
|
663
|
time.sleep(end - clock)
|
|
664
|
now = end # trust our estimate that the end is near now
|
|
664
|
now = end # trust our estimate that the end is near now
|
|
665
|
break
|
|
665
|
break
|
|
666
|
|
|
666
|
|
|
667
|
self._map.write(st, now)
|
|
667
|
self._map.write(st, now)
|
|
668
|
self._lastnormaltime = 0
|
|
668
|
self._lastnormaltime = 0
|
|
669
|
self._dirty = False
|
|
669
|
self._dirty = False
|
|
670
|
|
|
670
|
|
|
671
|
def _dirignore(self, f):
|
|
671
|
def _dirignore(self, f):
|
|
672
|
if self._ignore(f):
|
|
672
|
if self._ignore(f):
|
|
673
|
return True
|
|
673
|
return True
|
|
674
|
for p in util.finddirs(f):
|
|
674
|
for p in util.finddirs(f):
|
|
675
|
if self._ignore(p):
|
|
675
|
if self._ignore(p):
|
|
676
|
return True
|
|
676
|
return True
|
|
677
|
return False
|
|
677
|
return False
|
|
678
|
|
|
678
|
|
|
679
|
def _ignorefiles(self):
|
|
679
|
def _ignorefiles(self):
|
|
680
|
files = []
|
|
680
|
files = []
|
|
681
|
if os.path.exists(self._join('.hgignore')):
|
|
681
|
if os.path.exists(self._join('.hgignore')):
|
|
682
|
files.append(self._join('.hgignore'))
|
|
682
|
files.append(self._join('.hgignore'))
|
|
683
|
for name, path in self._ui.configitems("ui"):
|
|
683
|
for name, path in self._ui.configitems("ui"):
|
|
684
|
if name == 'ignore' or name.startswith('ignore.'):
|
|
684
|
if name == 'ignore' or name.startswith('ignore.'):
|
|
685
|
# we need to use os.path.join here rather than self._join
|
|
685
|
# we need to use os.path.join here rather than self._join
|
|
686
|
# because path is arbitrary and user-specified
|
|
686
|
# because path is arbitrary and user-specified
|
|
687
|
files.append(os.path.join(self._rootdir, util.expandpath(path)))
|
|
687
|
files.append(os.path.join(self._rootdir, util.expandpath(path)))
|
|
688
|
return files
|
|
688
|
return files
|
|
689
|
|
|
689
|
|
|
690
|
def _ignorefileandline(self, f):
|
|
690
|
def _ignorefileandline(self, f):
|
|
691
|
files = collections.deque(self._ignorefiles())
|
|
691
|
files = collections.deque(self._ignorefiles())
|
|
692
|
visited = set()
|
|
692
|
visited = set()
|
|
693
|
while files:
|
|
693
|
while files:
|
|
694
|
i = files.popleft()
|
|
694
|
i = files.popleft()
|
|
695
|
patterns = matchmod.readpatternfile(i, self._ui.warn,
|
|
695
|
patterns = matchmod.readpatternfile(i, self._ui.warn,
|
|
696
|
sourceinfo=True)
|
|
696
|
sourceinfo=True)
|
|
697
|
for pattern, lineno, line in patterns:
|
|
697
|
for pattern, lineno, line in patterns:
|
|
698
|
kind, p = matchmod._patsplit(pattern, 'glob')
|
|
698
|
kind, p = matchmod._patsplit(pattern, 'glob')
|
|
699
|
if kind == "subinclude":
|
|
699
|
if kind == "subinclude":
|
|
700
|
if p not in visited:
|
|
700
|
if p not in visited:
|
|
701
|
files.append(p)
|
|
701
|
files.append(p)
|
|
702
|
continue
|
|
702
|
continue
|
|
703
|
m = matchmod.match(self._root, '', [], [pattern],
|
|
703
|
m = matchmod.match(self._root, '', [], [pattern],
|
|
704
|
warn=self._ui.warn)
|
|
704
|
warn=self._ui.warn)
|
|
705
|
if m(f):
|
|
705
|
if m(f):
|
|
706
|
return (i, lineno, line)
|
|
706
|
return (i, lineno, line)
|
|
707
|
visited.add(i)
|
|
707
|
visited.add(i)
|
|
708
|
return (None, -1, "")
|
|
708
|
return (None, -1, "")
|
|
709
|
|
|
709
|
|
|
710
|
def _walkexplicit(self, match, subrepos):
|
|
710
|
def _walkexplicit(self, match, subrepos):
|
|
711
|
'''Get stat data about the files explicitly specified by match.
|
|
711
|
'''Get stat data about the files explicitly specified by match.
|
|
712
|
|
|
712
|
|
|
713
|
Return a triple (results, dirsfound, dirsnotfound).
|
|
713
|
Return a triple (results, dirsfound, dirsnotfound).
|
|
714
|
- results is a mapping from filename to stat result. It also contains
|
|
714
|
- results is a mapping from filename to stat result. It also contains
|
|
715
|
listings mapping subrepos and .hg to None.
|
|
715
|
listings mapping subrepos and .hg to None.
|
|
716
|
- dirsfound is a list of files found to be directories.
|
|
716
|
- dirsfound is a list of files found to be directories.
|
|
717
|
- dirsnotfound is a list of files that the dirstate thinks are
|
|
717
|
- dirsnotfound is a list of files that the dirstate thinks are
|
|
718
|
directories and that were not found.'''
|
|
718
|
directories and that were not found.'''
|
|
719
|
|
|
719
|
|
|
720
|
def badtype(mode):
|
|
720
|
def badtype(mode):
|
|
721
|
kind = _('unknown')
|
|
721
|
kind = _('unknown')
|
|
722
|
if stat.S_ISCHR(mode):
|
|
722
|
if stat.S_ISCHR(mode):
|
|
723
|
kind = _('character device')
|
|
723
|
kind = _('character device')
|
|
724
|
elif stat.S_ISBLK(mode):
|
|
724
|
elif stat.S_ISBLK(mode):
|
|
725
|
kind = _('block device')
|
|
725
|
kind = _('block device')
|
|
726
|
elif stat.S_ISFIFO(mode):
|
|
726
|
elif stat.S_ISFIFO(mode):
|
|
727
|
kind = _('fifo')
|
|
727
|
kind = _('fifo')
|
|
728
|
elif stat.S_ISSOCK(mode):
|
|
728
|
elif stat.S_ISSOCK(mode):
|
|
729
|
kind = _('socket')
|
|
729
|
kind = _('socket')
|
|
730
|
elif stat.S_ISDIR(mode):
|
|
730
|
elif stat.S_ISDIR(mode):
|
|
731
|
kind = _('directory')
|
|
731
|
kind = _('directory')
|
|
732
|
return _('unsupported file type (type is %s)') % kind
|
|
732
|
return _('unsupported file type (type is %s)') % kind
|
|
733
|
|
|
733
|
|
|
734
|
matchedir = match.explicitdir
|
|
734
|
matchedir = match.explicitdir
|
|
735
|
badfn = match.bad
|
|
735
|
badfn = match.bad
|
|
736
|
dmap = self._map
|
|
736
|
dmap = self._map
|
|
737
|
lstat = os.lstat
|
|
737
|
lstat = os.lstat
|
|
738
|
getkind = stat.S_IFMT
|
|
738
|
getkind = stat.S_IFMT
|
|
739
|
dirkind = stat.S_IFDIR
|
|
739
|
dirkind = stat.S_IFDIR
|
|
740
|
regkind = stat.S_IFREG
|
|
740
|
regkind = stat.S_IFREG
|
|
741
|
lnkkind = stat.S_IFLNK
|
|
741
|
lnkkind = stat.S_IFLNK
|
|
742
|
join = self._join
|
|
742
|
join = self._join
|
|
743
|
dirsfound = []
|
|
743
|
dirsfound = []
|
|
744
|
foundadd = dirsfound.append
|
|
744
|
foundadd = dirsfound.append
|
|
745
|
dirsnotfound = []
|
|
745
|
dirsnotfound = []
|
|
746
|
notfoundadd = dirsnotfound.append
|
|
746
|
notfoundadd = dirsnotfound.append
|
|
747
|
|
|
747
|
|
|
748
|
if not match.isexact() and self._checkcase:
|
|
748
|
if not match.isexact() and self._checkcase:
|
|
749
|
normalize = self._normalize
|
|
749
|
normalize = self._normalize
|
|
750
|
else:
|
|
750
|
else:
|
|
751
|
normalize = None
|
|
751
|
normalize = None
|
|
752
|
|
|
752
|
|
|
753
|
files = sorted(match.files())
|
|
753
|
files = sorted(match.files())
|
|
754
|
subrepos.sort()
|
|
754
|
subrepos.sort()
|
|
755
|
i, j = 0, 0
|
|
755
|
i, j = 0, 0
|
|
756
|
while i < len(files) and j < len(subrepos):
|
|
756
|
while i < len(files) and j < len(subrepos):
|
|
757
|
subpath = subrepos[j] + "/"
|
|
757
|
subpath = subrepos[j] + "/"
|
|
758
|
if files[i] < subpath:
|
|
758
|
if files[i] < subpath:
|
|
759
|
i += 1
|
|
759
|
i += 1
|
|
760
|
continue
|
|
760
|
continue
|
|
761
|
while i < len(files) and files[i].startswith(subpath):
|
|
761
|
while i < len(files) and files[i].startswith(subpath):
|
|
762
|
del files[i]
|
|
762
|
del files[i]
|
|
763
|
j += 1
|
|
763
|
j += 1
|
|
764
|
|
|
764
|
|
|
765
|
if not files or '' in files:
|
|
765
|
if not files or '' in files:
|
|
766
|
files = ['']
|
|
766
|
files = ['']
|
|
767
|
# constructing the foldmap is expensive, so don't do it for the
|
|
767
|
# constructing the foldmap is expensive, so don't do it for the
|
|
768
|
# common case where files is ['']
|
|
768
|
# common case where files is ['']
|
|
769
|
normalize = None
|
|
769
|
normalize = None
|
|
770
|
results = dict.fromkeys(subrepos)
|
|
770
|
results = dict.fromkeys(subrepos)
|
|
771
|
results['.hg'] = None
|
|
771
|
results['.hg'] = None
|
|
772
|
|
|
772
|
|
|
773
|
for ff in files:
|
|
773
|
for ff in files:
|
|
774
|
if normalize:
|
|
774
|
if normalize:
|
|
775
|
nf = normalize(ff, False, True)
|
|
775
|
nf = normalize(ff, False, True)
|
|
776
|
else:
|
|
776
|
else:
|
|
777
|
nf = ff
|
|
777
|
nf = ff
|
|
778
|
if nf in results:
|
|
778
|
if nf in results:
|
|
779
|
continue
|
|
779
|
continue
|
|
780
|
|
|
780
|
|
|
781
|
try:
|
|
781
|
try:
|
|
782
|
st = lstat(join(nf))
|
|
782
|
st = lstat(join(nf))
|
|
783
|
kind = getkind(st.st_mode)
|
|
783
|
kind = getkind(st.st_mode)
|
|
784
|
if kind == dirkind:
|
|
784
|
if kind == dirkind:
|
|
785
|
if nf in dmap:
|
|
785
|
if nf in dmap:
|
|
786
|
# file replaced by dir on disk but still in dirstate
|
|
786
|
# file replaced by dir on disk but still in dirstate
|
|
787
|
results[nf] = None
|
|
787
|
results[nf] = None
|
|
788
|
if matchedir:
|
|
788
|
if matchedir:
|
|
789
|
matchedir(nf)
|
|
789
|
matchedir(nf)
|
|
790
|
foundadd((nf, ff))
|
|
790
|
foundadd((nf, ff))
|
|
791
|
elif kind == regkind or kind == lnkkind:
|
|
791
|
elif kind == regkind or kind == lnkkind:
|
|
792
|
results[nf] = st
|
|
792
|
results[nf] = st
|
|
793
|
else:
|
|
793
|
else:
|
|
794
|
badfn(ff, badtype(kind))
|
|
794
|
badfn(ff, badtype(kind))
|
|
795
|
if nf in dmap:
|
|
795
|
if nf in dmap:
|
|
796
|
results[nf] = None
|
|
796
|
results[nf] = None
|
|
797
|
except OSError as inst: # nf not found on disk - it is dirstate only
|
|
797
|
except OSError as inst: # nf not found on disk - it is dirstate only
|
|
798
|
if nf in dmap: # does it exactly match a missing file?
|
|
798
|
if nf in dmap: # does it exactly match a missing file?
|
|
799
|
results[nf] = None
|
|
799
|
results[nf] = None
|
|
800
|
else: # does it match a missing directory?
|
|
800
|
else: # does it match a missing directory?
|
|
801
|
if self._map.hasdir(nf):
|
|
801
|
if self._map.hasdir(nf):
|
|
802
|
if matchedir:
|
|
802
|
if matchedir:
|
|
803
|
matchedir(nf)
|
|
803
|
matchedir(nf)
|
|
804
|
notfoundadd(nf)
|
|
804
|
notfoundadd(nf)
|
|
805
|
else:
|
|
805
|
else:
|
|
806
|
badfn(ff, encoding.strtolocal(inst.strerror))
|
|
806
|
badfn(ff, encoding.strtolocal(inst.strerror))
|
|
807
|
|
|
807
|
|
|
808
|
# match.files() may contain explicitly-specified paths that shouldn't
|
|
808
|
# match.files() may contain explicitly-specified paths that shouldn't
|
|
809
|
# be taken; drop them from the list of files found. dirsfound/notfound
|
|
809
|
# be taken; drop them from the list of files found. dirsfound/notfound
|
|
810
|
# aren't filtered here because they will be tested later.
|
|
810
|
# aren't filtered here because they will be tested later.
|
|
811
|
if match.anypats():
|
|
811
|
if match.anypats():
|
|
812
|
for f in list(results):
|
|
812
|
for f in list(results):
|
|
813
|
if f == '.hg' or f in subrepos:
|
|
813
|
if f == '.hg' or f in subrepos:
|
|
814
|
# keep sentinel to disable further out-of-repo walks
|
|
814
|
# keep sentinel to disable further out-of-repo walks
|
|
815
|
continue
|
|
815
|
continue
|
|
816
|
if not match(f):
|
|
816
|
if not match(f):
|
|
817
|
del results[f]
|
|
817
|
del results[f]
|
|
818
|
|
|
818
|
|
|
819
|
# Case insensitive filesystems cannot rely on lstat() failing to detect
|
|
819
|
# Case insensitive filesystems cannot rely on lstat() failing to detect
|
|
820
|
# a case-only rename. Prune the stat object for any file that does not
|
|
820
|
# a case-only rename. Prune the stat object for any file that does not
|
|
821
|
# match the case in the filesystem, if there are multiple files that
|
|
821
|
# match the case in the filesystem, if there are multiple files that
|
|
822
|
# normalize to the same path.
|
|
822
|
# normalize to the same path.
|
|
823
|
if match.isexact() and self._checkcase:
|
|
823
|
if match.isexact() and self._checkcase:
|
|
824
|
normed = {}
|
|
824
|
normed = {}
|
|
825
|
|
|
825
|
|
|
826
|
for f, st in results.iteritems():
|
|
826
|
for f, st in results.iteritems():
|
|
827
|
if st is None:
|
|
827
|
if st is None:
|
|
828
|
continue
|
|
828
|
continue
|
|
829
|
|
|
829
|
|
|
830
|
nc = util.normcase(f)
|
|
830
|
nc = util.normcase(f)
|
|
831
|
paths = normed.get(nc)
|
|
831
|
paths = normed.get(nc)
|
|
832
|
|
|
832
|
|
|
833
|
if paths is None:
|
|
833
|
if paths is None:
|
|
834
|
paths = set()
|
|
834
|
paths = set()
|
|
835
|
normed[nc] = paths
|
|
835
|
normed[nc] = paths
|
|
836
|
|
|
836
|
|
|
837
|
paths.add(f)
|
|
837
|
paths.add(f)
|
|
838
|
|
|
838
|
|
|
839
|
for norm, paths in normed.iteritems():
|
|
839
|
for norm, paths in normed.iteritems():
|
|
840
|
if len(paths) > 1:
|
|
840
|
if len(paths) > 1:
|
|
841
|
for path in paths:
|
|
841
|
for path in paths:
|
|
842
|
folded = self._discoverpath(path, norm, True, None,
|
|
842
|
folded = self._discoverpath(path, norm, True, None,
|
|
843
|
self._map.dirfoldmap)
|
|
843
|
self._map.dirfoldmap)
|
|
844
|
if path != folded:
|
|
844
|
if path != folded:
|
|
845
|
results[path] = None
|
|
845
|
results[path] = None
|
|
846
|
|
|
846
|
|
|
847
|
return results, dirsfound, dirsnotfound
|
|
847
|
return results, dirsfound, dirsnotfound
|
|
848
|
|
|
848
|
|
|
849
|
def walk(self, match, subrepos, unknown, ignored, full=True):
|
|
849
|
def walk(self, match, subrepos, unknown, ignored, full=True):
|
|
850
|
'''
|
|
850
|
'''
|
|
851
|
Walk recursively through the directory tree, finding all files
|
|
851
|
Walk recursively through the directory tree, finding all files
|
|
852
|
matched by match.
|
|
852
|
matched by match.
|
|
853
|
|
|
853
|
|
|
854
|
If full is False, maybe skip some known-clean files.
|
|
854
|
If full is False, maybe skip some known-clean files.
|
|
855
|
|
|
855
|
|
|
856
|
Return a dict mapping filename to stat-like object (either
|
|
856
|
Return a dict mapping filename to stat-like object (either
|
|
857
|
mercurial.osutil.stat instance or return value of os.stat()).
|
|
857
|
mercurial.osutil.stat instance or return value of os.stat()).
|
|
858
|
|
|
858
|
|
|
859
|
'''
|
|
859
|
'''
|
|
860
|
# full is a flag that extensions that hook into walk can use -- this
|
|
860
|
# full is a flag that extensions that hook into walk can use -- this
|
|
861
|
# implementation doesn't use it at all. This satisfies the contract
|
|
861
|
# implementation doesn't use it at all. This satisfies the contract
|
|
862
|
# because we only guarantee a "maybe".
|
|
862
|
# because we only guarantee a "maybe".
|
|
863
|
|
|
863
|
|
|
864
|
if ignored:
|
|
864
|
if ignored:
|
|
865
|
ignore = util.never
|
|
865
|
ignore = util.never
|
|
866
|
dirignore = util.never
|
|
866
|
dirignore = util.never
|
|
867
|
elif unknown:
|
|
867
|
elif unknown:
|
|
868
|
ignore = self._ignore
|
|
868
|
ignore = self._ignore
|
|
869
|
dirignore = self._dirignore
|
|
869
|
dirignore = self._dirignore
|
|
870
|
else:
|
|
870
|
else:
|
|
871
|
# if not unknown and not ignored, drop dir recursion and step 2
|
|
871
|
# if not unknown and not ignored, drop dir recursion and step 2
|
|
872
|
ignore = util.always
|
|
872
|
ignore = util.always
|
|
873
|
dirignore = util.always
|
|
873
|
dirignore = util.always
|
|
874
|
|
|
874
|
|
|
875
|
matchfn = match.matchfn
|
|
875
|
matchfn = match.matchfn
|
|
876
|
matchalways = match.always()
|
|
876
|
matchalways = match.always()
|
|
877
|
matchtdir = match.traversedir
|
|
877
|
matchtdir = match.traversedir
|
|
878
|
dmap = self._map
|
|
878
|
dmap = self._map
|
|
879
|
listdir = util.listdir
|
|
879
|
listdir = util.listdir
|
|
880
|
lstat = os.lstat
|
|
880
|
lstat = os.lstat
|
|
881
|
dirkind = stat.S_IFDIR
|
|
881
|
dirkind = stat.S_IFDIR
|
|
882
|
regkind = stat.S_IFREG
|
|
882
|
regkind = stat.S_IFREG
|
|
883
|
lnkkind = stat.S_IFLNK
|
|
883
|
lnkkind = stat.S_IFLNK
|
|
884
|
join = self._join
|
|
884
|
join = self._join
|
|
885
|
|
|
885
|
|
|
886
|
exact = skipstep3 = False
|
|
886
|
exact = skipstep3 = False
|
|
887
|
if match.isexact(): # match.exact
|
|
887
|
if match.isexact(): # match.exact
|
|
888
|
exact = True
|
|
888
|
exact = True
|
|
889
|
dirignore = util.always # skip step 2
|
|
889
|
dirignore = util.always # skip step 2
|
|
890
|
elif match.prefix(): # match.match, no patterns
|
|
890
|
elif match.prefix(): # match.match, no patterns
|
|
891
|
skipstep3 = True
|
|
891
|
skipstep3 = True
|
|
892
|
|
|
892
|
|
|
893
|
if not exact and self._checkcase:
|
|
893
|
if not exact and self._checkcase:
|
|
894
|
normalize = self._normalize
|
|
894
|
normalize = self._normalize
|
|
895
|
normalizefile = self._normalizefile
|
|
895
|
normalizefile = self._normalizefile
|
|
896
|
skipstep3 = False
|
|
896
|
skipstep3 = False
|
|
897
|
else:
|
|
897
|
else:
|
|
898
|
normalize = self._normalize
|
|
898
|
normalize = self._normalize
|
|
899
|
normalizefile = None
|
|
899
|
normalizefile = None
|
|
900
|
|
|
900
|
|
|
901
|
# step 1: find all explicit files
|
|
901
|
# step 1: find all explicit files
|
|
902
|
results, work, dirsnotfound = self._walkexplicit(match, subrepos)
|
|
902
|
results, work, dirsnotfound = self._walkexplicit(match, subrepos)
|
|
903
|
|
|
903
|
|
|
904
|
skipstep3 = skipstep3 and not (work or dirsnotfound)
|
|
904
|
skipstep3 = skipstep3 and not (work or dirsnotfound)
|
|
905
|
work = [d for d in work if not dirignore(d[0])]
|
|
905
|
work = [d for d in work if not dirignore(d[0])]
|
|
906
|
|
|
906
|
|
|
907
|
# step 2: visit subdirectories
|
|
907
|
# step 2: visit subdirectories
|
|
908
|
def traverse(work, alreadynormed):
|
|
908
|
def traverse(work, alreadynormed):
|
|
909
|
wadd = work.append
|
|
909
|
wadd = work.append
|
|
910
|
while work:
|
|
910
|
while work:
|
|
911
|
nd = work.pop()
|
|
911
|
nd = work.pop()
|
|
912
|
visitentries = match.visitchildrenset(nd)
|
|
912
|
visitentries = match.visitchildrenset(nd)
|
|
913
|
if not visitentries:
|
|
913
|
if not visitentries:
|
|
914
|
continue
|
|
914
|
continue
|
|
915
|
if visitentries == 'this' or visitentries == 'all':
|
|
915
|
if visitentries == 'this' or visitentries == 'all':
|
|
916
|
visitentries = None
|
|
916
|
visitentries = None
|
|
917
|
skip = None
|
|
917
|
skip = None
|
|
918
|
if nd != '':
|
|
918
|
if nd != '':
|
|
919
|
skip = '.hg'
|
|
919
|
skip = '.hg'
|
|
920
|
try:
|
|
920
|
try:
|
|
921
|
entries = listdir(join(nd), stat=True, skip=skip)
|
|
921
|
entries = listdir(join(nd), stat=True, skip=skip)
|
|
922
|
except OSError as inst:
|
|
922
|
except OSError as inst:
|
|
923
|
if inst.errno in (errno.EACCES, errno.ENOENT):
|
|
923
|
if inst.errno in (errno.EACCES, errno.ENOENT):
|
|
924
|
match.bad(self.pathto(nd),
|
|
924
|
match.bad(self.pathto(nd),
|
|
925
|
encoding.strtolocal(inst.strerror))
|
|
925
|
encoding.strtolocal(inst.strerror))
|
|
926
|
continue
|
|
926
|
continue
|
|
927
|
raise
|
|
927
|
raise
|
|
928
|
for f, kind, st in entries:
|
|
928
|
for f, kind, st in entries:
|
|
929
|
# Some matchers may return files in the visitentries set,
|
|
929
|
# Some matchers may return files in the visitentries set,
|
|
930
|
# instead of 'this', if the matcher explicitly mentions them
|
|
930
|
# instead of 'this', if the matcher explicitly mentions them
|
|
931
|
# and is not an exactmatcher. This is acceptable; we do not
|
|
931
|
# and is not an exactmatcher. This is acceptable; we do not
|
|
932
|
# make any hard assumptions about file-or-directory below
|
|
932
|
# make any hard assumptions about file-or-directory below
|
|
933
|
# based on the presence of `f` in visitentries. If
|
|
933
|
# based on the presence of `f` in visitentries. If
|
|
934
|
# visitchildrenset returned a set, we can always skip the
|
|
934
|
# visitchildrenset returned a set, we can always skip the
|
|
935
|
# entries *not* in the set it provided regardless of whether
|
|
935
|
# entries *not* in the set it provided regardless of whether
|
|
936
|
# they're actually a file or a directory.
|
|
936
|
# they're actually a file or a directory.
|
|
937
|
if visitentries and f not in visitentries:
|
|
937
|
if visitentries and f not in visitentries:
|
|
938
|
continue
|
|
938
|
continue
|
|
939
|
if normalizefile:
|
|
939
|
if normalizefile:
|
|
940
|
# even though f might be a directory, we're only
|
|
940
|
# even though f might be a directory, we're only
|
|
941
|
# interested in comparing it to files currently in the
|
|
941
|
# interested in comparing it to files currently in the
|
|
942
|
# dmap -- therefore normalizefile is enough
|
|
942
|
# dmap -- therefore normalizefile is enough
|
|
943
|
nf = normalizefile(nd and (nd + "/" + f) or f, True,
|
|
943
|
nf = normalizefile(nd and (nd + "/" + f) or f, True,
|
|
944
|
True)
|
|
944
|
True)
|
|
945
|
else:
|
|
945
|
else:
|
|
946
|
nf = nd and (nd + "/" + f) or f
|
|
946
|
nf = nd and (nd + "/" + f) or f
|
|
947
|
if nf not in results:
|
|
947
|
if nf not in results:
|
|
948
|
if kind == dirkind:
|
|
948
|
if kind == dirkind:
|
|
949
|
if not ignore(nf):
|
|
949
|
if not ignore(nf):
|
|
950
|
if matchtdir:
|
|
950
|
if matchtdir:
|
|
951
|
matchtdir(nf)
|
|
951
|
matchtdir(nf)
|
|
952
|
wadd(nf)
|
|
952
|
wadd(nf)
|
|
953
|
if nf in dmap and (matchalways or matchfn(nf)):
|
|
953
|
if nf in dmap and (matchalways or matchfn(nf)):
|
|
954
|
results[nf] = None
|
|
954
|
results[nf] = None
|
|
955
|
elif kind == regkind or kind == lnkkind:
|
|
955
|
elif kind == regkind or kind == lnkkind:
|
|
956
|
if nf in dmap:
|
|
956
|
if nf in dmap:
|
|
957
|
if matchalways or matchfn(nf):
|
|
957
|
if matchalways or matchfn(nf):
|
|
958
|
results[nf] = st
|
|
958
|
results[nf] = st
|
|
959
|
elif ((matchalways or matchfn(nf))
|
|
959
|
elif ((matchalways or matchfn(nf))
|
|
960
|
and not ignore(nf)):
|
|
960
|
and not ignore(nf)):
|
|
961
|
# unknown file -- normalize if necessary
|
|
961
|
# unknown file -- normalize if necessary
|
|
962
|
if not alreadynormed:
|
|
962
|
if not alreadynormed:
|
|
963
|
nf = normalize(nf, False, True)
|
|
963
|
nf = normalize(nf, False, True)
|
|
964
|
results[nf] = st
|
|
964
|
results[nf] = st
|
|
965
|
elif nf in dmap and (matchalways or matchfn(nf)):
|
|
965
|
elif nf in dmap and (matchalways or matchfn(nf)):
|
|
966
|
results[nf] = None
|
|
966
|
results[nf] = None
|
|
967
|
|
|
967
|
|
|
968
|
for nd, d in work:
|
|
968
|
for nd, d in work:
|
|
969
|
# alreadynormed means that processwork doesn't have to do any
|
|
969
|
# alreadynormed means that processwork doesn't have to do any
|
|
970
|
# expensive directory normalization
|
|
970
|
# expensive directory normalization
|
|
971
|
alreadynormed = not normalize or nd == d
|
|
971
|
alreadynormed = not normalize or nd == d
|
|
972
|
traverse([d], alreadynormed)
|
|
972
|
traverse([d], alreadynormed)
|
|
973
|
|
|
973
|
|
|
974
|
for s in subrepos:
|
|
974
|
for s in subrepos:
|
|
975
|
del results[s]
|
|
975
|
del results[s]
|
|
976
|
del results['.hg']
|
|
976
|
del results['.hg']
|
|
977
|
|
|
977
|
|
|
978
|
# step 3: visit remaining files from dmap
|
|
978
|
# step 3: visit remaining files from dmap
|
|
979
|
if not skipstep3 and not exact:
|
|
979
|
if not skipstep3 and not exact:
|
|
980
|
# If a dmap file is not in results yet, it was either
|
|
980
|
# If a dmap file is not in results yet, it was either
|
|
981
|
# a) not matching matchfn b) ignored, c) missing, or d) under a
|
|
981
|
# a) not matching matchfn b) ignored, c) missing, or d) under a
|
|
982
|
# symlink directory.
|
|
982
|
# symlink directory.
|
|
983
|
if not results and matchalways:
|
|
983
|
if not results and matchalways:
|
|
984
|
visit = [f for f in dmap]
|
|
984
|
visit = [f for f in dmap]
|
|
985
|
else:
|
|
985
|
else:
|
|
986
|
visit = [f for f in dmap if f not in results and matchfn(f)]
|
|
986
|
visit = [f for f in dmap if f not in results and matchfn(f)]
|
|
987
|
visit.sort()
|
|
987
|
visit.sort()
|
|
988
|
|
|
988
|
|
|
989
|
if unknown:
|
|
989
|
if unknown:
|
|
990
|
# unknown == True means we walked all dirs under the roots
|
|
990
|
# unknown == True means we walked all dirs under the roots
|
|
991
|
# that wasn't ignored, and everything that matched was stat'ed
|
|
991
|
# that wasn't ignored, and everything that matched was stat'ed
|
|
992
|
# and is already in results.
|
|
992
|
# and is already in results.
|
|
993
|
# The rest must thus be ignored or under a symlink.
|
|
993
|
# The rest must thus be ignored or under a symlink.
|
|
994
|
audit_path = pathutil.pathauditor(self._root, cached=True)
|
|
994
|
audit_path = pathutil.pathauditor(self._root, cached=True)
|
|
995
|
|
|
995
|
|
|
996
|
for nf in iter(visit):
|
|
996
|
for nf in iter(visit):
|
|
997
|
# If a stat for the same file was already added with a
|
|
997
|
# If a stat for the same file was already added with a
|
|
998
|
# different case, don't add one for this, since that would
|
|
998
|
# different case, don't add one for this, since that would
|
|
999
|
# make it appear as if the file exists under both names
|
|
999
|
# make it appear as if the file exists under both names
|
|
1000
|
# on disk.
|
|
1000
|
# on disk.
|
|
1001
|
if (normalizefile and
|
|
1001
|
if (normalizefile and
|
|
1002
|
normalizefile(nf, True, True) in results):
|
|
1002
|
normalizefile(nf, True, True) in results):
|
|
1003
|
results[nf] = None
|
|
1003
|
results[nf] = None
|
|
1004
|
# Report ignored items in the dmap as long as they are not
|
|
1004
|
# Report ignored items in the dmap as long as they are not
|
|
1005
|
# under a symlink directory.
|
|
1005
|
# under a symlink directory.
|
|
1006
|
elif audit_path.check(nf):
|
|
1006
|
elif audit_path.check(nf):
|
|
1007
|
try:
|
|
1007
|
try:
|
|
1008
|
results[nf] = lstat(join(nf))
|
|
1008
|
results[nf] = lstat(join(nf))
|
|
1009
|
# file was just ignored, no links, and exists
|
|
1009
|
# file was just ignored, no links, and exists
|
|
1010
|
except OSError:
|
|
1010
|
except OSError:
|
|
1011
|
# file doesn't exist
|
|
1011
|
# file doesn't exist
|
|
1012
|
results[nf] = None
|
|
1012
|
results[nf] = None
|
|
1013
|
else:
|
|
1013
|
else:
|
|
1014
|
# It's either missing or under a symlink directory
|
|
1014
|
# It's either missing or under a symlink directory
|
|
1015
|
# which we in this case report as missing
|
|
1015
|
# which we in this case report as missing
|
|
1016
|
results[nf] = None
|
|
1016
|
results[nf] = None
|
|
1017
|
else:
|
|
1017
|
else:
|
|
1018
|
# We may not have walked the full directory tree above,
|
|
1018
|
# We may not have walked the full directory tree above,
|
|
1019
|
# so stat and check everything we missed.
|
|
1019
|
# so stat and check everything we missed.
|
|
1020
|
iv = iter(visit)
|
|
1020
|
iv = iter(visit)
|
|
1021
|
for st in util.statfiles([join(i) for i in visit]):
|
|
1021
|
for st in util.statfiles([join(i) for i in visit]):
|
|
1022
|
results[next(iv)] = st
|
|
1022
|
results[next(iv)] = st
|
|
1023
|
return results
|
|
1023
|
return results
|
|
1024
|
|
|
1024
|
|
|
1025
|
def status(self, match, subrepos, ignored, clean, unknown):
|
|
1025
|
def status(self, match, subrepos, ignored, clean, unknown):
|
|
1026
|
'''Determine the status of the working copy relative to the
|
|
1026
|
'''Determine the status of the working copy relative to the
|
|
1027
|
dirstate and return a pair of (unsure, status), where status is of type
|
|
1027
|
dirstate and return a pair of (unsure, status), where status is of type
|
|
1028
|
scmutil.status and:
|
|
1028
|
scmutil.status and:
|
|
1029
|
|
|
1029
|
|
|
1030
|
unsure:
|
|
1030
|
unsure:
|
|
1031
|
files that might have been modified since the dirstate was
|
|
1031
|
files that might have been modified since the dirstate was
|
|
1032
|
written, but need to be read to be sure (size is the same
|
|
1032
|
written, but need to be read to be sure (size is the same
|
|
1033
|
but mtime differs)
|
|
1033
|
but mtime differs)
|
|
1034
|
status.modified:
|
|
1034
|
status.modified:
|
|
1035
|
files that have definitely been modified since the dirstate
|
|
1035
|
files that have definitely been modified since the dirstate
|
|
1036
|
was written (different size or mode)
|
|
1036
|
was written (different size or mode)
|
|
1037
|
status.clean:
|
|
1037
|
status.clean:
|
|
1038
|
files that have definitely not been modified since the
|
|
1038
|
files that have definitely not been modified since the
|
|
1039
|
dirstate was written
|
|
1039
|
dirstate was written
|
|
1040
|
'''
|
|
1040
|
'''
|
|
1041
|
listignored, listclean, listunknown = ignored, clean, unknown
|
|
1041
|
listignored, listclean, listunknown = ignored, clean, unknown
|
|
1042
|
lookup, modified, added, unknown, ignored = [], [], [], [], []
|
|
1042
|
lookup, modified, added, unknown, ignored = [], [], [], [], []
|
|
1043
|
removed, deleted, clean = [], [], []
|
|
1043
|
removed, deleted, clean = [], [], []
|
|
1044
|
|
|
1044
|
|
|
1045
|
dmap = self._map
|
|
1045
|
dmap = self._map
|
|
1046
|
dmap.preload()
|
|
1046
|
dmap.preload()
|
|
1047
|
dcontains = dmap.__contains__
|
|
1047
|
dcontains = dmap.__contains__
|
|
1048
|
dget = dmap.__getitem__
|
|
1048
|
dget = dmap.__getitem__
|
|
1049
|
ladd = lookup.append # aka "unsure"
|
|
1049
|
ladd = lookup.append # aka "unsure"
|
|
1050
|
madd = modified.append
|
|
1050
|
madd = modified.append
|
|
1051
|
aadd = added.append
|
|
1051
|
aadd = added.append
|
|
1052
|
uadd = unknown.append
|
|
1052
|
uadd = unknown.append
|
|
1053
|
iadd = ignored.append
|
|
1053
|
iadd = ignored.append
|
|
1054
|
radd = removed.append
|
|
1054
|
radd = removed.append
|
|
1055
|
dadd = deleted.append
|
|
1055
|
dadd = deleted.append
|
|
1056
|
cadd = clean.append
|
|
1056
|
cadd = clean.append
|
|
1057
|
mexact = match.exact
|
|
1057
|
mexact = match.exact
|
|
1058
|
dirignore = self._dirignore
|
|
1058
|
dirignore = self._dirignore
|
|
1059
|
checkexec = self._checkexec
|
|
1059
|
checkexec = self._checkexec
|
|
1060
|
copymap = self._map.copymap
|
|
1060
|
copymap = self._map.copymap
|
|
1061
|
lastnormaltime = self._lastnormaltime
|
|
1061
|
lastnormaltime = self._lastnormaltime
|
|
1062
|
|
|
1062
|
|
|
1063
|
# We need to do full walks when either
|
|
1063
|
# We need to do full walks when either
|
|
1064
|
# - we're listing all clean files, or
|
|
1064
|
# - we're listing all clean files, or
|
|
1065
|
# - match.traversedir does something, because match.traversedir should
|
|
1065
|
# - match.traversedir does something, because match.traversedir should
|
|
1066
|
# be called for every dir in the working dir
|
|
1066
|
# be called for every dir in the working dir
|
|
1067
|
full = listclean or match.traversedir is not None
|
|
1067
|
full = listclean or match.traversedir is not None
|
|
1068
|
for fn, st in self.walk(match, subrepos, listunknown, listignored,
|
|
1068
|
for fn, st in self.walk(match, subrepos, listunknown, listignored,
|
|
1069
|
full=full).iteritems():
|
|
1069
|
full=full).iteritems():
|
|
1070
|
if not dcontains(fn):
|
|
1070
|
if not dcontains(fn):
|
|
1071
|
if (listignored or mexact(fn)) and dirignore(fn):
|
|
1071
|
if (listignored or mexact(fn)) and dirignore(fn):
|
|
1072
|
if listignored:
|
|
1072
|
if listignored:
|
|
1073
|
iadd(fn)
|
|
1073
|
iadd(fn)
|
|
1074
|
else:
|
|
1074
|
else:
|
|
1075
|
uadd(fn)
|
|
1075
|
uadd(fn)
|
|
1076
|
continue
|
|
1076
|
continue
|
|
1077
|
|
|
1077
|
|
|
1078
|
# This is equivalent to 'state, mode, size, time = dmap[fn]' but not
|
|
1078
|
# This is equivalent to 'state, mode, size, time = dmap[fn]' but not
|
|
1079
|
# written like that for performance reasons. dmap[fn] is not a
|
|
1079
|
# written like that for performance reasons. dmap[fn] is not a
|
|
1080
|
# Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
|
|
1080
|
# Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
|
|
1081
|
# opcode has fast paths when the value to be unpacked is a tuple or
|
|
1081
|
# opcode has fast paths when the value to be unpacked is a tuple or
|
|
1082
|
# a list, but falls back to creating a full-fledged iterator in
|
|
1082
|
# a list, but falls back to creating a full-fledged iterator in
|
|
1083
|
# general. That is much slower than simply accessing and storing the
|
|
1083
|
# general. That is much slower than simply accessing and storing the
|
|
1084
|
# tuple members one by one.
|
|
1084
|
# tuple members one by one.
|
|
1085
|
t = dget(fn)
|
|
1085
|
t = dget(fn)
|
|
1086
|
state = t[0]
|
|
1086
|
state = t[0]
|
|
1087
|
mode = t[1]
|
|
1087
|
mode = t[1]
|
|
1088
|
size = t[2]
|
|
1088
|
size = t[2]
|
|
1089
|
time = t[3]
|
|
1089
|
time = t[3]
|
|
1090
|
|
|
1090
|
|
|
1091
|
if not st and state in "nma":
|
|
1091
|
if not st and state in "nma":
|
|
1092
|
dadd(fn)
|
|
1092
|
dadd(fn)
|
|
1093
|
elif state == 'n':
|
|
1093
|
elif state == 'n':
|
|
1094
|
if (size >= 0 and
|
|
1094
|
if (size >= 0 and
|
|
1095
|
((size != st.st_size and size != st.st_size & _rangemask)
|
|
1095
|
((size != st.st_size and size != st.st_size & _rangemask)
|
|
1096
|
or ((mode ^ st.st_mode) & 0o100 and checkexec))
|
|
1096
|
or ((mode ^ st.st_mode) & 0o100 and checkexec))
|
|
1097
|
or size == -2 # other parent
|
|
1097
|
or size == -2 # other parent
|
|
1098
|
or fn in copymap):
|
|
1098
|
or fn in copymap):
|
|
1099
|
madd(fn)
|
|
1099
|
madd(fn)
|
|
1100
|
elif (time != st[stat.ST_MTIME]
|
|
1100
|
elif (time != st[stat.ST_MTIME]
|
|
1101
|
and time != st[stat.ST_MTIME] & _rangemask):
|
|
1101
|
and time != st[stat.ST_MTIME] & _rangemask):
|
|
1102
|
ladd(fn)
|
|
1102
|
ladd(fn)
|
|
1103
|
elif st[stat.ST_MTIME] == lastnormaltime:
|
|
1103
|
elif st[stat.ST_MTIME] == lastnormaltime:
|
|
1104
|
# fn may have just been marked as normal and it may have
|
|
1104
|
# fn may have just been marked as normal and it may have
|
|
1105
|
# changed in the same second without changing its size.
|
|
1105
|
# changed in the same second without changing its size.
|
|
1106
|
# This can happen if we quickly do multiple commits.
|
|
1106
|
# This can happen if we quickly do multiple commits.
|
|
1107
|
# Force lookup, so we don't miss such a racy file change.
|
|
1107
|
# Force lookup, so we don't miss such a racy file change.
|
|
1108
|
ladd(fn)
|
|
1108
|
ladd(fn)
|
|
1109
|
elif listclean:
|
|
1109
|
elif listclean:
|
|
1110
|
cadd(fn)
|
|
1110
|
cadd(fn)
|
|
1111
|
elif state == 'm':
|
|
1111
|
elif state == 'm':
|
|
1112
|
madd(fn)
|
|
1112
|
madd(fn)
|
|
1113
|
elif state == 'a':
|
|
1113
|
elif state == 'a':
|
|
1114
|
aadd(fn)
|
|
1114
|
aadd(fn)
|
|
1115
|
elif state == 'r':
|
|
1115
|
elif state == 'r':
|
|
1116
|
radd(fn)
|
|
1116
|
radd(fn)
|
|
1117
|
|
|
1117
|
|
|
1118
|
return (lookup, scmutil.status(modified, added, removed, deleted,
|
|
1118
|
return (lookup, scmutil.status(modified, added, removed, deleted,
|
|
1119
|
unknown, ignored, clean))
|
|
1119
|
unknown, ignored, clean))
|
|
1120
|
|
|
1120
|
|
|
1121
|
def matches(self, match):
|
|
1121
|
def matches(self, match):
|
|
1122
|
'''
|
|
1122
|
'''
|
|
1123
|
return files in the dirstate (in whatever state) filtered by match
|
|
1123
|
return files in the dirstate (in whatever state) filtered by match
|
|
1124
|
'''
|
|
1124
|
'''
|
|
1125
|
dmap = self._map
|
|
1125
|
dmap = self._map
|
|
1126
|
if match.always():
|
|
1126
|
if match.always():
|
|
1127
|
return dmap.keys()
|
|
1127
|
return dmap.keys()
|
|
1128
|
files = match.files()
|
|
1128
|
files = match.files()
|
|
1129
|
if match.isexact():
|
|
1129
|
if match.isexact():
|
|
1130
|
# fast path -- filter the other way around, since typically files is
|
|
1130
|
# fast path -- filter the other way around, since typically files is
|
|
1131
|
# much smaller than dmap
|
|
1131
|
# much smaller than dmap
|
|
1132
|
return [f for f in files if f in dmap]
|
|
1132
|
return [f for f in files if f in dmap]
|
|
1133
|
if match.prefix() and all(fn in dmap for fn in files):
|
|
1133
|
if match.prefix() and all(fn in dmap for fn in files):
|
|
1134
|
# fast path -- all the values are known to be files, so just return
|
|
1134
|
# fast path -- all the values are known to be files, so just return
|
|
1135
|
# that
|
|
1135
|
# that
|
|
1136
|
return list(files)
|
|
1136
|
return list(files)
|
|
1137
|
return [f for f in dmap if match(f)]
|
|
1137
|
return [f for f in dmap if match(f)]
|
|
1138
|
|
|
1138
|
|
|
1139
|
def _actualfilename(self, tr):
|
|
1139
|
def _actualfilename(self, tr):
|
|
1140
|
if tr:
|
|
1140
|
if tr:
|
|
1141
|
return self._pendingfilename
|
|
1141
|
return self._pendingfilename
|
|
1142
|
else:
|
|
1142
|
else:
|
|
1143
|
return self._filename
|
|
1143
|
return self._filename
|
|
1144
|
|
|
1144
|
|
|
1145
|
def savebackup(self, tr, backupname):
|
|
1145
|
def savebackup(self, tr, backupname):
|
|
1146
|
'''Save current dirstate into backup file'''
|
|
1146
|
'''Save current dirstate into backup file'''
|
|
1147
|
filename = self._actualfilename(tr)
|
|
1147
|
filename = self._actualfilename(tr)
|
|
1148
|
assert backupname != filename
|
|
1148
|
assert backupname != filename
|
|
1149
|
|
|
1149
|
|
|
1150
|
# use '_writedirstate' instead of 'write' to write changes certainly,
|
|
1150
|
# use '_writedirstate' instead of 'write' to write changes certainly,
|
|
1151
|
# because the latter omits writing out if transaction is running.
|
|
1151
|
# because the latter omits writing out if transaction is running.
|
|
1152
|
# output file will be used to create backup of dirstate at this point.
|
|
1152
|
# output file will be used to create backup of dirstate at this point.
|
|
1153
|
if self._dirty or not self._opener.exists(filename):
|
|
1153
|
if self._dirty or not self._opener.exists(filename):
|
|
1154
|
self._writedirstate(self._opener(filename, "w", atomictemp=True,
|
|
1154
|
self._writedirstate(self._opener(filename, "w", atomictemp=True,
|
|
1155
|
checkambig=True))
|
|
1155
|
checkambig=True))
|
|
1156
|
|
|
1156
|
|
|
1157
|
if tr:
|
|
1157
|
if tr:
|
|
1158
|
# ensure that subsequent tr.writepending returns True for
|
|
1158
|
# ensure that subsequent tr.writepending returns True for
|
|
1159
|
# changes written out above, even if dirstate is never
|
|
1159
|
# changes written out above, even if dirstate is never
|
|
1160
|
# changed after this
|
|
1160
|
# changed after this
|
|
1161
|
tr.addfilegenerator('dirstate', (self._filename,),
|
|
1161
|
tr.addfilegenerator('dirstate', (self._filename,),
|
|
1162
|
self._writedirstate, location='plain')
|
|
1162
|
self._writedirstate, location='plain')
|
|
1163
|
|
|
1163
|
|
|
1164
|
# ensure that pending file written above is unlinked at
|
|
1164
|
# ensure that pending file written above is unlinked at
|
|
1165
|
# failure, even if tr.writepending isn't invoked until the
|
|
1165
|
# failure, even if tr.writepending isn't invoked until the
|
|
1166
|
# end of this transaction
|
|
1166
|
# end of this transaction
|
|
1167
|
tr.registertmp(filename, location='plain')
|
|
1167
|
tr.registertmp(filename, location='plain')
|
|
1168
|
|
|
1168
|
|
|
1169
|
self._opener.tryunlink(backupname)
|
|
1169
|
self._opener.tryunlink(backupname)
|
|
1170
|
# hardlink backup is okay because _writedirstate is always called
|
|
1170
|
# hardlink backup is okay because _writedirstate is always called
|
|
1171
|
# with an "atomictemp=True" file.
|
|
1171
|
# with an "atomictemp=True" file.
|
|
1172
|
util.copyfile(self._opener.join(filename),
|
|
1172
|
util.copyfile(self._opener.join(filename),
|
|
1173
|
self._opener.join(backupname), hardlink=True)
|
|
1173
|
self._opener.join(backupname), hardlink=True)
|
|
1174
|
|
|
1174
|
|
|
1175
|
def restorebackup(self, tr, backupname):
|
|
1175
|
def restorebackup(self, tr, backupname):
|
|
1176
|
'''Restore dirstate by backup file'''
|
|
1176
|
'''Restore dirstate by backup file'''
|
|
1177
|
# this "invalidate()" prevents "wlock.release()" from writing
|
|
1177
|
# this "invalidate()" prevents "wlock.release()" from writing
|
|
1178
|
# changes of dirstate out after restoring from backup file
|
|
1178
|
# changes of dirstate out after restoring from backup file
|
|
1179
|
self.invalidate()
|
|
1179
|
self.invalidate()
|
|
1180
|
filename = self._actualfilename(tr)
|
|
1180
|
filename = self._actualfilename(tr)
|
|
1181
|
o = self._opener
|
|
1181
|
o = self._opener
|
|
1182
|
if util.samefile(o.join(backupname), o.join(filename)):
|
|
1182
|
if util.samefile(o.join(backupname), o.join(filename)):
|
|
1183
|
o.unlink(backupname)
|
|
1183
|
o.unlink(backupname)
|
|
1184
|
else:
|
|
1184
|
else:
|
|
1185
|
o.rename(backupname, filename, checkambig=True)
|
|
1185
|
o.rename(backupname, filename, checkambig=True)
|
|
1186
|
|
|
1186
|
|
|
1187
|
def clearbackup(self, tr, backupname):
|
|
1187
|
def clearbackup(self, tr, backupname):
|
|
1188
|
'''Clear backup file'''
|
|
1188
|
'''Clear backup file'''
|
|
1189
|
self._opener.unlink(backupname)
|
|
1189
|
self._opener.unlink(backupname)
|
|
1190
|
|
|
1190
|
|
|
1191
|
class dirstatemap(object):
|
|
1191
|
class dirstatemap(object):
|
|
1192
|
"""Map encapsulating the dirstate's contents.
|
|
1192
|
"""Map encapsulating the dirstate's contents.
|
|
1193
|
|
|
1193
|
|
|
1194
|
The dirstate contains the following state:
|
|
1194
|
The dirstate contains the following state:
|
|
1195
|
|
|
1195
|
|
|
1196
|
- `identity` is the identity of the dirstate file, which can be used to
|
|
1196
|
- `identity` is the identity of the dirstate file, which can be used to
|
|
1197
|
detect when changes have occurred to the dirstate file.
|
|
1197
|
detect when changes have occurred to the dirstate file.
|
|
1198
|
|
|
1198
|
|
|
1199
|
- `parents` is a pair containing the parents of the working copy. The
|
|
1199
|
- `parents` is a pair containing the parents of the working copy. The
|
|
1200
|
parents are updated by calling `setparents`.
|
|
1200
|
parents are updated by calling `setparents`.
|
|
1201
|
|
|
1201
|
|
|
1202
|
- the state map maps filenames to tuples of (state, mode, size, mtime),
|
|
1202
|
- the state map maps filenames to tuples of (state, mode, size, mtime),
|
|
1203
|
where state is a single character representing 'normal', 'added',
|
|
1203
|
where state is a single character representing 'normal', 'added',
|
|
1204
|
'removed', or 'merged'. It is read by treating the dirstate as a
|
|
1204
|
'removed', or 'merged'. It is read by treating the dirstate as a
|
|
1205
|
dict. File state is updated by calling the `addfile`, `removefile` and
|
|
1205
|
dict. File state is updated by calling the `addfile`, `removefile` and
|
|
1206
|
`dropfile` methods.
|
|
1206
|
`dropfile` methods.
|
|
1207
|
|
|
1207
|
|
|
1208
|
- `copymap` maps destination filenames to their source filename.
|
|
1208
|
- `copymap` maps destination filenames to their source filename.
|
|
1209
|
|
|
1209
|
|
|
1210
|
The dirstate also provides the following views onto the state:
|
|
1210
|
The dirstate also provides the following views onto the state:
|
|
1211
|
|
|
1211
|
|
|
1212
|
- `nonnormalset` is a set of the filenames that have state other
|
|
1212
|
- `nonnormalset` is a set of the filenames that have state other
|
|
1213
|
than 'normal', or are normal but have an mtime of -1 ('normallookup').
|
|
1213
|
than 'normal', or are normal but have an mtime of -1 ('normallookup').
|
|
1214
|
|
|
1214
|
|
|
1215
|
- `otherparentset` is a set of the filenames that are marked as coming
|
|
1215
|
- `otherparentset` is a set of the filenames that are marked as coming
|
|
1216
|
from the second parent when the dirstate is currently being merged.
|
|
1216
|
from the second parent when the dirstate is currently being merged.
|
|
1217
|
|
|
1217
|
|
|
1218
|
- `filefoldmap` is a dict mapping normalized filenames to the denormalized
|
|
1218
|
- `filefoldmap` is a dict mapping normalized filenames to the denormalized
|
|
1219
|
form that they appear as in the dirstate.
|
|
1219
|
form that they appear as in the dirstate.
|
|
1220
|
|
|
1220
|
|
|
1221
|
- `dirfoldmap` is a dict mapping normalized directory names to the
|
|
1221
|
- `dirfoldmap` is a dict mapping normalized directory names to the
|
|
1222
|
denormalized form that they appear as in the dirstate.
|
|
1222
|
denormalized form that they appear as in the dirstate.
|
|
1223
|
"""
|
|
1223
|
"""
|
|
1224
|
|
|
1224
|
|
|
1225
|
def __init__(self, ui, opener, root):
|
|
1225
|
def __init__(self, ui, opener, root):
|
|
1226
|
self._ui = ui
|
|
1226
|
self._ui = ui
|
|
1227
|
self._opener = opener
|
|
1227
|
self._opener = opener
|
|
1228
|
self._root = root
|
|
1228
|
self._root = root
|
|
1229
|
self._filename = 'dirstate'
|
|
1229
|
self._filename = 'dirstate'
|
|
1230
|
|
|
1230
|
|
|
1231
|
self._parents = None
|
|
1231
|
self._parents = None
|
|
1232
|
self._dirtyparents = False
|
|
1232
|
self._dirtyparents = False
|
|
1233
|
|
|
1233
|
|
|
1234
|
# for consistent view between _pl() and _read() invocations
|
|
1234
|
# for consistent view between _pl() and _read() invocations
|
|
1235
|
self._pendingmode = None
|
|
1235
|
self._pendingmode = None
|
|
1236
|
|
|
1236
|
|
|
1237
|
@propertycache
|
|
1237
|
@propertycache
|
|
1238
|
def _map(self):
|
|
1238
|
def _map(self):
|
|
1239
|
self._map = {}
|
|
1239
|
self._map = {}
|
|
1240
|
self.read()
|
|
1240
|
self.read()
|
|
1241
|
return self._map
|
|
1241
|
return self._map
|
|
1242
|
|
|
1242
|
|
|
1243
|
@propertycache
|
|
1243
|
@propertycache
|
|
1244
|
def copymap(self):
|
|
1244
|
def copymap(self):
|
|
1245
|
self.copymap = {}
|
|
1245
|
self.copymap = {}
|
|
1246
|
self._map
|
|
1246
|
self._map
|
|
1247
|
return self.copymap
|
|
1247
|
return self.copymap
|
|
1248
|
|
|
1248
|
|
|
1249
|
def clear(self):
|
|
1249
|
def clear(self):
|
|
1250
|
self._map.clear()
|
|
1250
|
self._map.clear()
|
|
1251
|
self.copymap.clear()
|
|
1251
|
self.copymap.clear()
|
|
1252
|
self.setparents(nullid, nullid)
|
|
1252
|
self.setparents(nullid, nullid)
|
|
1253
|
util.clearcachedproperty(self, "_dirs")
|
|
1253
|
util.clearcachedproperty(self, "_dirs")
|
|
1254
|
util.clearcachedproperty(self, "_alldirs")
|
|
1254
|
util.clearcachedproperty(self, "_alldirs")
|
|
1255
|
util.clearcachedproperty(self, "filefoldmap")
|
|
1255
|
util.clearcachedproperty(self, "filefoldmap")
|
|
1256
|
util.clearcachedproperty(self, "dirfoldmap")
|
|
1256
|
util.clearcachedproperty(self, "dirfoldmap")
|
|
1257
|
util.clearcachedproperty(self, "nonnormalset")
|
|
1257
|
util.clearcachedproperty(self, "nonnormalset")
|
|
1258
|
util.clearcachedproperty(self, "otherparentset")
|
|
1258
|
util.clearcachedproperty(self, "otherparentset")
|
|
1259
|
|
|
1259
|
|
|
1260
|
def items(self):
|
|
1260
|
def items(self):
|
|
1261
|
return self._map.iteritems()
|
|
1261
|
return self._map.iteritems()
|
|
1262
|
|
|
1262
|
|
|
1263
|
# forward for python2,3 compat
|
|
1263
|
# forward for python2,3 compat
|
|
1264
|
iteritems = items
|
|
1264
|
iteritems = items
|
|
1265
|
|
|
1265
|
|
|
1266
|
def __len__(self):
|
|
1266
|
def __len__(self):
|
|
1267
|
return len(self._map)
|
|
1267
|
return len(self._map)
|
|
1268
|
|
|
1268
|
|
|
1269
|
def __iter__(self):
|
|
1269
|
def __iter__(self):
|
|
1270
|
return iter(self._map)
|
|
1270
|
return iter(self._map)
|
|
1271
|
|
|
1271
|
|
|
1272
|
def get(self, key, default=None):
|
|
1272
|
def get(self, key, default=None):
|
|
1273
|
return self._map.get(key, default)
|
|
1273
|
return self._map.get(key, default)
|
|
1274
|
|
|
1274
|
|
|
1275
|
def __contains__(self, key):
|
|
1275
|
def __contains__(self, key):
|
|
1276
|
return key in self._map
|
|
1276
|
return key in self._map
|
|
1277
|
|
|
1277
|
|
|
1278
|
def __getitem__(self, key):
|
|
1278
|
def __getitem__(self, key):
|
|
1279
|
return self._map[key]
|
|
1279
|
return self._map[key]
|
|
1280
|
|
|
1280
|
|
|
1281
|
def keys(self):
|
|
1281
|
def keys(self):
|
|
1282
|
return self._map.keys()
|
|
1282
|
return self._map.keys()
|
|
1283
|
|
|
1283
|
|
|
1284
|
def preload(self):
|
|
1284
|
def preload(self):
|
|
1285
|
"""Loads the underlying data, if it's not already loaded"""
|
|
1285
|
"""Loads the underlying data, if it's not already loaded"""
|
|
1286
|
self._map
|
|
1286
|
self._map
|
|
1287
|
|
|
1287
|
|
|
1288
|
def addfile(self, f, oldstate, state, mode, size, mtime):
|
|
1288
|
def addfile(self, f, oldstate, state, mode, size, mtime):
|
|
1289
|
"""Add a tracked file to the dirstate."""
|
|
1289
|
"""Add a tracked file to the dirstate."""
|
|
1290
|
if oldstate in "?r" and r"_dirs" in self.__dict__:
|
|
1290
|
if oldstate in "?r" and r"_dirs" in self.__dict__:
|
|
1291
|
self._dirs.addpath(f)
|
|
1291
|
self._dirs.addpath(f)
|
|
1292
|
if oldstate == "?" and r"_alldirs" in self.__dict__:
|
|
1292
|
if oldstate == "?" and r"_alldirs" in self.__dict__:
|
|
1293
|
self._alldirs.addpath(f)
|
|
1293
|
self._alldirs.addpath(f)
|
|
1294
|
self._map[f] = dirstatetuple(state, mode, size, mtime)
|
|
1294
|
self._map[f] = dirstatetuple(state, mode, size, mtime)
|
|
1295
|
if state != 'n' or mtime == -1:
|
|
1295
|
if state != 'n' or mtime == -1:
|
|
1296
|
self.nonnormalset.add(f)
|
|
1296
|
self.nonnormalset.add(f)
|
|
1297
|
if size == -2:
|
|
1297
|
if size == -2:
|
|
1298
|
self.otherparentset.add(f)
|
|
1298
|
self.otherparentset.add(f)
|
|
1299
|
|
|
1299
|
|
|
def removefile(self, f, oldstate, size):
    """
    Mark a file as removed in the dirstate.

    The `size` parameter is used to store sentinel values that indicate
    the file's previous state. In the future, we should refactor this
    to be more explicit about what that state is.
    """
    # A file that was tracked no longer counts as a tracked-dir member...
    if oldstate not in "?r" and r"_dirs" in self.__dict__:
        self._dirs.delpath(f)
    # ...but a previously-unknown file now appears in the dirstate at all,
    # so it must be added to _alldirs.
    if oldstate == "?" and r"_alldirs" in self.__dict__:
        self._alldirs.addpath(f)
    # Invalidate any cached case-folded alias for this path.
    if r"filefoldmap" in self.__dict__:
        normed = util.normcase(f)
        self.filefoldmap.pop(normed, None)
    self._map[f] = dirstatetuple('r', 0, size, 0)
    # Removed entries are by definition non-normal.
    self.nonnormalset.add(f)
|
def dropfile(self, f, oldstate):
    """
    Remove a file from the dirstate.  Returns True if the file was
    previously recorded.
    """
    # pop() answers "was it there?" and deletes it in a single step.
    was_tracked = self._map.pop(f, None) is not None
    if was_tracked:
        # Keep the lazily built directory counters consistent.
        if oldstate != "r" and r"_dirs" in self.__dict__:
            self._dirs.delpath(f)
        if r"_alldirs" in self.__dict__:
            self._alldirs.delpath(f)
    # Drop any cached case-folded alias for this path.
    if r"filefoldmap" in self.__dict__:
        self.filefoldmap.pop(util.normcase(f), None)
    self.nonnormalset.discard(f)
    return was_tracked
|
def clearambiguoustimes(self, files, now):
    # An entry recorded with mtime == `now` cannot be trusted (the file
    # could still change within the same timestamp granularity), so
    # downgrade its mtime to the "unknown" sentinel -1 and flag it for a
    # full comparison on the next status run.
    for fname in files:
        entry = self.get(fname)
        if entry is None:
            continue
        if entry[0] == 'n' and entry[3] == now:
            self._map[fname] = dirstatetuple(entry[0], entry[1],
                                             entry[2], -1)
            self.nonnormalset.add(fname)
|
def nonnormalentries(self):
    '''Compute the nonnormal dirstate entries from the dmap'''
    try:
        # Fast path: the C parsers module computes both sets in one pass.
        return parsers.nonnormalotherparententries(self._map)
    except AttributeError:
        # Pure-Python fallback when the C helper is unavailable.
        nonnorm = set()
        otherparent = set()
        for fname, e in self._map.iteritems():
            # e is (state, mode, size, mtime) per addfile: anything not
            # clean ('n'), or clean with unknown mtime, is non-normal.
            if e[0] != 'n' or e[3] == -1:
                nonnorm.add(fname)
            # size == -2 marks entries from the other merge parent
            # (matches the sentinel stored by addfile).
            if e[0] == 'n' and e[2] == -2:
                otherparent.add(fname)
        return nonnorm, otherparent
|
@propertycache
def filefoldmap(self):
    """Returns a dictionary mapping normalized case paths to their
    non-normalized versions.
    """
    try:
        # Probe for the C implementation; absence raises AttributeError.
        makefilefoldmap = parsers.make_file_foldmap
    except AttributeError:
        pass
    else:
        return makefilefoldmap(self._map, util.normcasespec,
                               util.normcasefallback)

    # Pure-Python fallback: skip removed ('r') entries, matching the
    # behavior expected of the C version.
    f = {}
    normcase = util.normcase
    for name, s in self._map.iteritems():
        if s[0] != 'r':
            f[normcase(name)] = name
    f['.'] = '.' # prevents useless util.fspath() invocation
    return f
|
def hastrackeddir(self, d):
    """True when directory `d` holds at least one tracked (i.e. not
    removed) file according to this dirstate.
    """
    return d in self._dirs
|
def hasdir(self, d):
    """True when directory `d` holds any file known to this dirstate,
    whether tracked or removed.
    """
    return d in self._alldirs
|
@propertycache
def _dirs(self):
    # Multiset of directories containing at least one tracked file;
    # the 'r' argument tells util.dirs to skip removed entries.
    return util.dirs(self._map, 'r')
|
@propertycache
def _alldirs(self):
    # Multiset of directories for every entry, removed ones included.
    return util.dirs(self._map)
|
def _opendirstatefile(self):
    # trypending prefers the transaction's pending dirstate file when
    # HG_PENDING applies; `mode` records which variant was opened.
    fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
    # Refuse to silently switch between the pending and committed file
    # within one map's lifetime — that would mix two states.
    if self._pendingmode is not None and self._pendingmode != mode:
        fp.close()
        raise error.Abort(_('working directory state may be '
                            'changed parallelly'))
    self._pendingmode = mode
    return fp
|
def parents(self):
    """Return the cached (p1, p2) working-copy parents, reading the
    40-byte dirstate header on first use."""
    # Guard clause: already computed.
    if self._parents:
        return self._parents

    try:
        fp = self._opendirstatefile()
        st = fp.read(40)
        fp.close()
    except IOError as err:
        if err.errno != errno.ENOENT:
            raise
        # A missing dirstate file simply means an empty working copy.
        st = ''

    header_len = len(st)
    if header_len == 40:
        # Two back-to-back 20-byte binary node ids.
        self._parents = (st[:20], st[20:40])
    elif header_len == 0:
        self._parents = (nullid, nullid)
    else:
        raise error.Abort(_('working directory state appears '
                            'damaged!'))

    return self._parents
|
def setparents(self, p1, p2):
    """Record new working-copy parents and mark them as needing to be
    written back to disk."""
    self._dirtyparents = True
    self._parents = (p1, p2)
|
def read(self):
    """Parse the on-disk dirstate file into self._map and self.copymap."""
    # ignore HG_PENDING because identity is used only for writing
    self.identity = util.filestat.frompath(
        self._opener.join(self._filename))

    try:
        fp = self._opendirstatefile()
        try:
            st = fp.read()
        finally:
            fp.close()
    except IOError as err:
        if err.errno != errno.ENOENT:
            raise
        # No dirstate file: leave the map empty.
        return
    if not st:
        return

    if util.safehasattr(parsers, 'dict_new_presized'):
        # Make an estimate of the number of files in the dirstate based on
        # its size. From a linear regression on a set of real-world repos,
        # all over 10,000 files, the size of a dirstate entry is 85
        # bytes. The cost of resizing is significantly higher than the cost
        # of filling in a larger presized dict, so subtract 20% from the
        # size.
        #
        # This heuristic is imperfect in many ways, so in a future dirstate
        # format update it makes sense to just record the number of entries
        # on write.
        self._map = parsers.dict_new_presized(len(st) // 71)

    # Python's garbage collector triggers a GC each time a certain number
    # of container objects (the number being defined by
    # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
    # for each file in the dirstate. The C version then immediately marks
    # them as not to be tracked by the collector. However, this has no
    # effect on when GCs are triggered, only on what objects the GC looks
    # into. This means that O(number of files) GCs are unavoidable.
    # Depending on when in the process's lifetime the dirstate is parsed,
    # this can get very expensive. As a workaround, disable GC while
    # parsing the dirstate.
    #
    # (we cannot decorate the function directly since it is in a C module)
    parse_dirstate = util.nogc(parsers.parse_dirstate)
    p = parse_dirstate(self._map, self.copymap, st)
    if not self._dirtyparents:
        self.setparents(*p)

    # Avoid excess attribute lookups by fast pathing certain checks
    self.__contains__ = self._map.__contains__
    self.__getitem__ = self._map.__getitem__
    self.get = self._map.get
|
def write(self, st, now):
    """Serialize the map to the open file-like object `st`, stamping
    entries with `now`, and refresh the derived sets."""
    st.write(parsers.pack_dirstate(self._map, self.copymap,
                                   self.parents(), now))
    st.close()
    self._dirtyparents = False
    # pack_dirstate may have mutated entries (mtime clamping), so the
    # cached derived sets must be recomputed.
    self.nonnormalset, self.otherparentset = self.nonnormalentries()
|
@propertycache
def nonnormalset(self):
    # Both sets come from one pass over the map, so cache the sibling
    # set on the instance while we are at it.
    nonnorm, otherparents = self.nonnormalentries()
    self.otherparentset = otherparents
    return nonnorm
|
@propertycache
def otherparentset(self):
    # Mirror of nonnormalset: one computation fills both caches.
    nonnorm, otherparents = self.nonnormalentries()
    self.nonnormalset = nonnorm
    return otherparents
|
@propertycache
def identity(self):
    # Forcing self._map runs read(), which assigns self.identity
    # directly on the instance; that instance attribute is what the
    # second line returns (it does not recurse into this property).
    self._map
    return self.identity
|
@propertycache
def dirfoldmap(self):
    """Map each tracked directory's case-normalized form back to its
    on-disk spelling."""
    normcase = util.normcase
    return {normcase(d): d for d in self._dirs}