##// END OF EJS Templates
scmutil: use new dirs class in dirstate and context...
Bryan O'Sullivan -
r18899:d8ff607e default
parent child Browse files
Show More
@@ -1,1380 +1,1371 b''
1 # context.py - changeset and file context objects for mercurial
1 # context.py - changeset and file context objects for mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import nullid, nullrev, short, hex, bin
8 from node import nullid, nullrev, short, hex, bin
9 from i18n import _
9 from i18n import _
10 import ancestor, mdiff, error, util, scmutil, subrepo, patch, encoding, phases
10 import ancestor, mdiff, error, util, scmutil, subrepo, patch, encoding, phases
11 import copies
11 import copies
12 import match as matchmod
12 import match as matchmod
13 import os, errno, stat
13 import os, errno, stat
14 import obsolete as obsmod
14 import obsolete as obsmod
15 import repoview
15 import repoview
16
16
17 propertycache = util.propertycache
17 propertycache = util.propertycache
18
18
19 class changectx(object):
19 class changectx(object):
20 """A changecontext object makes access to data related to a particular
20 """A changecontext object makes access to data related to a particular
21 changeset convenient."""
21 changeset convenient."""
22 def __init__(self, repo, changeid=''):
22 def __init__(self, repo, changeid=''):
23 """changeid is a revision number, node, or tag"""
23 """changeid is a revision number, node, or tag"""
24 if changeid == '':
24 if changeid == '':
25 changeid = '.'
25 changeid = '.'
26 self._repo = repo
26 self._repo = repo
27
27
28 if isinstance(changeid, int):
28 if isinstance(changeid, int):
29 try:
29 try:
30 self._node = repo.changelog.node(changeid)
30 self._node = repo.changelog.node(changeid)
31 except IndexError:
31 except IndexError:
32 raise error.RepoLookupError(
32 raise error.RepoLookupError(
33 _("unknown revision '%s'") % changeid)
33 _("unknown revision '%s'") % changeid)
34 self._rev = changeid
34 self._rev = changeid
35 return
35 return
36 if isinstance(changeid, long):
36 if isinstance(changeid, long):
37 changeid = str(changeid)
37 changeid = str(changeid)
38 if changeid == '.':
38 if changeid == '.':
39 self._node = repo.dirstate.p1()
39 self._node = repo.dirstate.p1()
40 self._rev = repo.changelog.rev(self._node)
40 self._rev = repo.changelog.rev(self._node)
41 return
41 return
42 if changeid == 'null':
42 if changeid == 'null':
43 self._node = nullid
43 self._node = nullid
44 self._rev = nullrev
44 self._rev = nullrev
45 return
45 return
46 if changeid == 'tip':
46 if changeid == 'tip':
47 self._node = repo.changelog.tip()
47 self._node = repo.changelog.tip()
48 self._rev = repo.changelog.rev(self._node)
48 self._rev = repo.changelog.rev(self._node)
49 return
49 return
50 if len(changeid) == 20:
50 if len(changeid) == 20:
51 try:
51 try:
52 self._node = changeid
52 self._node = changeid
53 self._rev = repo.changelog.rev(changeid)
53 self._rev = repo.changelog.rev(changeid)
54 return
54 return
55 except LookupError:
55 except LookupError:
56 pass
56 pass
57
57
58 try:
58 try:
59 r = int(changeid)
59 r = int(changeid)
60 if str(r) != changeid:
60 if str(r) != changeid:
61 raise ValueError
61 raise ValueError
62 l = len(repo.changelog)
62 l = len(repo.changelog)
63 if r < 0:
63 if r < 0:
64 r += l
64 r += l
65 if r < 0 or r >= l:
65 if r < 0 or r >= l:
66 raise ValueError
66 raise ValueError
67 self._rev = r
67 self._rev = r
68 self._node = repo.changelog.node(r)
68 self._node = repo.changelog.node(r)
69 return
69 return
70 except (ValueError, OverflowError, IndexError):
70 except (ValueError, OverflowError, IndexError):
71 pass
71 pass
72
72
73 if len(changeid) == 40:
73 if len(changeid) == 40:
74 try:
74 try:
75 self._node = bin(changeid)
75 self._node = bin(changeid)
76 self._rev = repo.changelog.rev(self._node)
76 self._rev = repo.changelog.rev(self._node)
77 return
77 return
78 except (TypeError, LookupError):
78 except (TypeError, LookupError):
79 pass
79 pass
80
80
81 if changeid in repo._bookmarks:
81 if changeid in repo._bookmarks:
82 self._node = repo._bookmarks[changeid]
82 self._node = repo._bookmarks[changeid]
83 self._rev = repo.changelog.rev(self._node)
83 self._rev = repo.changelog.rev(self._node)
84 return
84 return
85 if changeid in repo._tagscache.tags:
85 if changeid in repo._tagscache.tags:
86 self._node = repo._tagscache.tags[changeid]
86 self._node = repo._tagscache.tags[changeid]
87 self._rev = repo.changelog.rev(self._node)
87 self._rev = repo.changelog.rev(self._node)
88 return
88 return
89 try:
89 try:
90 self._node = repo.branchtip(changeid)
90 self._node = repo.branchtip(changeid)
91 self._rev = repo.changelog.rev(self._node)
91 self._rev = repo.changelog.rev(self._node)
92 return
92 return
93 except error.RepoLookupError:
93 except error.RepoLookupError:
94 pass
94 pass
95
95
96 self._node = repo.changelog._partialmatch(changeid)
96 self._node = repo.changelog._partialmatch(changeid)
97 if self._node is not None:
97 if self._node is not None:
98 self._rev = repo.changelog.rev(self._node)
98 self._rev = repo.changelog.rev(self._node)
99 return
99 return
100
100
101 # lookup failed
101 # lookup failed
102 # check if it might have come from damaged dirstate
102 # check if it might have come from damaged dirstate
103 #
103 #
104 # XXX we could avoid the unfiltered if we had a recognizable exception
104 # XXX we could avoid the unfiltered if we had a recognizable exception
105 # for filtered changeset access
105 # for filtered changeset access
106 if changeid in repo.unfiltered().dirstate.parents():
106 if changeid in repo.unfiltered().dirstate.parents():
107 raise error.Abort(_("working directory has unknown parent '%s'!")
107 raise error.Abort(_("working directory has unknown parent '%s'!")
108 % short(changeid))
108 % short(changeid))
109 try:
109 try:
110 if len(changeid) == 20:
110 if len(changeid) == 20:
111 changeid = hex(changeid)
111 changeid = hex(changeid)
112 except TypeError:
112 except TypeError:
113 pass
113 pass
114 raise error.RepoLookupError(
114 raise error.RepoLookupError(
115 _("unknown revision '%s'") % changeid)
115 _("unknown revision '%s'") % changeid)
116
116
117 def __str__(self):
117 def __str__(self):
118 return short(self.node())
118 return short(self.node())
119
119
120 def __int__(self):
120 def __int__(self):
121 return self.rev()
121 return self.rev()
122
122
123 def __repr__(self):
123 def __repr__(self):
124 return "<changectx %s>" % str(self)
124 return "<changectx %s>" % str(self)
125
125
126 def __hash__(self):
126 def __hash__(self):
127 try:
127 try:
128 return hash(self._rev)
128 return hash(self._rev)
129 except AttributeError:
129 except AttributeError:
130 return id(self)
130 return id(self)
131
131
132 def __eq__(self, other):
132 def __eq__(self, other):
133 try:
133 try:
134 return self._rev == other._rev
134 return self._rev == other._rev
135 except AttributeError:
135 except AttributeError:
136 return False
136 return False
137
137
138 def __ne__(self, other):
138 def __ne__(self, other):
139 return not (self == other)
139 return not (self == other)
140
140
141 def __nonzero__(self):
141 def __nonzero__(self):
142 return self._rev != nullrev
142 return self._rev != nullrev
143
143
144 @propertycache
144 @propertycache
145 def _changeset(self):
145 def _changeset(self):
146 return self._repo.changelog.read(self.rev())
146 return self._repo.changelog.read(self.rev())
147
147
148 @propertycache
148 @propertycache
149 def _manifest(self):
149 def _manifest(self):
150 return self._repo.manifest.read(self._changeset[0])
150 return self._repo.manifest.read(self._changeset[0])
151
151
152 @propertycache
152 @propertycache
153 def _manifestdelta(self):
153 def _manifestdelta(self):
154 return self._repo.manifest.readdelta(self._changeset[0])
154 return self._repo.manifest.readdelta(self._changeset[0])
155
155
156 @propertycache
156 @propertycache
157 def _parents(self):
157 def _parents(self):
158 p = self._repo.changelog.parentrevs(self._rev)
158 p = self._repo.changelog.parentrevs(self._rev)
159 if p[1] == nullrev:
159 if p[1] == nullrev:
160 p = p[:-1]
160 p = p[:-1]
161 return [changectx(self._repo, x) for x in p]
161 return [changectx(self._repo, x) for x in p]
162
162
163 @propertycache
163 @propertycache
164 def substate(self):
164 def substate(self):
165 return subrepo.state(self, self._repo.ui)
165 return subrepo.state(self, self._repo.ui)
166
166
167 def __contains__(self, key):
167 def __contains__(self, key):
168 return key in self._manifest
168 return key in self._manifest
169
169
170 def __getitem__(self, key):
170 def __getitem__(self, key):
171 return self.filectx(key)
171 return self.filectx(key)
172
172
173 def __iter__(self):
173 def __iter__(self):
174 for f in sorted(self._manifest):
174 for f in sorted(self._manifest):
175 yield f
175 yield f
176
176
177 def changeset(self):
177 def changeset(self):
178 return self._changeset
178 return self._changeset
179 def manifest(self):
179 def manifest(self):
180 return self._manifest
180 return self._manifest
181 def manifestnode(self):
181 def manifestnode(self):
182 return self._changeset[0]
182 return self._changeset[0]
183
183
184 def rev(self):
184 def rev(self):
185 return self._rev
185 return self._rev
186 def node(self):
186 def node(self):
187 return self._node
187 return self._node
188 def hex(self):
188 def hex(self):
189 return hex(self._node)
189 return hex(self._node)
190 def user(self):
190 def user(self):
191 return self._changeset[1]
191 return self._changeset[1]
192 def date(self):
192 def date(self):
193 return self._changeset[2]
193 return self._changeset[2]
194 def files(self):
194 def files(self):
195 return self._changeset[3]
195 return self._changeset[3]
196 def description(self):
196 def description(self):
197 return self._changeset[4]
197 return self._changeset[4]
198 def branch(self):
198 def branch(self):
199 return encoding.tolocal(self._changeset[5].get("branch"))
199 return encoding.tolocal(self._changeset[5].get("branch"))
200 def closesbranch(self):
200 def closesbranch(self):
201 return 'close' in self._changeset[5]
201 return 'close' in self._changeset[5]
202 def extra(self):
202 def extra(self):
203 return self._changeset[5]
203 return self._changeset[5]
204 def tags(self):
204 def tags(self):
205 return self._repo.nodetags(self._node)
205 return self._repo.nodetags(self._node)
206 def bookmarks(self):
206 def bookmarks(self):
207 return self._repo.nodebookmarks(self._node)
207 return self._repo.nodebookmarks(self._node)
208 def phase(self):
208 def phase(self):
209 return self._repo._phasecache.phase(self._repo, self._rev)
209 return self._repo._phasecache.phase(self._repo, self._rev)
210 def phasestr(self):
210 def phasestr(self):
211 return phases.phasenames[self.phase()]
211 return phases.phasenames[self.phase()]
212 def mutable(self):
212 def mutable(self):
213 return self.phase() > phases.public
213 return self.phase() > phases.public
214 def hidden(self):
214 def hidden(self):
215 return self._rev in repoview.filterrevs(self._repo, 'visible')
215 return self._rev in repoview.filterrevs(self._repo, 'visible')
216
216
217 def parents(self):
217 def parents(self):
218 """return contexts for each parent changeset"""
218 """return contexts for each parent changeset"""
219 return self._parents
219 return self._parents
220
220
221 def p1(self):
221 def p1(self):
222 return self._parents[0]
222 return self._parents[0]
223
223
224 def p2(self):
224 def p2(self):
225 if len(self._parents) == 2:
225 if len(self._parents) == 2:
226 return self._parents[1]
226 return self._parents[1]
227 return changectx(self._repo, -1)
227 return changectx(self._repo, -1)
228
228
229 def children(self):
229 def children(self):
230 """return contexts for each child changeset"""
230 """return contexts for each child changeset"""
231 c = self._repo.changelog.children(self._node)
231 c = self._repo.changelog.children(self._node)
232 return [changectx(self._repo, x) for x in c]
232 return [changectx(self._repo, x) for x in c]
233
233
234 def ancestors(self):
234 def ancestors(self):
235 for a in self._repo.changelog.ancestors([self._rev]):
235 for a in self._repo.changelog.ancestors([self._rev]):
236 yield changectx(self._repo, a)
236 yield changectx(self._repo, a)
237
237
238 def descendants(self):
238 def descendants(self):
239 for d in self._repo.changelog.descendants([self._rev]):
239 for d in self._repo.changelog.descendants([self._rev]):
240 yield changectx(self._repo, d)
240 yield changectx(self._repo, d)
241
241
242 def obsolete(self):
242 def obsolete(self):
243 """True if the changeset is obsolete"""
243 """True if the changeset is obsolete"""
244 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
244 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
245
245
246 def extinct(self):
246 def extinct(self):
247 """True if the changeset is extinct"""
247 """True if the changeset is extinct"""
248 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
248 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
249
249
250 def unstable(self):
250 def unstable(self):
251 """True if the changeset is not obsolete but it's ancestor are"""
251 """True if the changeset is not obsolete but it's ancestor are"""
252 return self.rev() in obsmod.getrevs(self._repo, 'unstable')
252 return self.rev() in obsmod.getrevs(self._repo, 'unstable')
253
253
254 def bumped(self):
254 def bumped(self):
255 """True if the changeset try to be a successor of a public changeset
255 """True if the changeset try to be a successor of a public changeset
256
256
257 Only non-public and non-obsolete changesets may be bumped.
257 Only non-public and non-obsolete changesets may be bumped.
258 """
258 """
259 return self.rev() in obsmod.getrevs(self._repo, 'bumped')
259 return self.rev() in obsmod.getrevs(self._repo, 'bumped')
260
260
261 def divergent(self):
261 def divergent(self):
262 """Is a successors of a changeset with multiple possible successors set
262 """Is a successors of a changeset with multiple possible successors set
263
263
264 Only non-public and non-obsolete changesets may be divergent.
264 Only non-public and non-obsolete changesets may be divergent.
265 """
265 """
266 return self.rev() in obsmod.getrevs(self._repo, 'divergent')
266 return self.rev() in obsmod.getrevs(self._repo, 'divergent')
267
267
268 def troubled(self):
268 def troubled(self):
269 """True if the changeset is either unstable, bumped or divergent"""
269 """True if the changeset is either unstable, bumped or divergent"""
270 return self.unstable() or self.bumped() or self.divergent()
270 return self.unstable() or self.bumped() or self.divergent()
271
271
272 def troubles(self):
272 def troubles(self):
273 """return the list of troubles affecting this changesets.
273 """return the list of troubles affecting this changesets.
274
274
275 Troubles are returned as strings. possible values are:
275 Troubles are returned as strings. possible values are:
276 - unstable,
276 - unstable,
277 - bumped,
277 - bumped,
278 - divergent.
278 - divergent.
279 """
279 """
280 troubles = []
280 troubles = []
281 if self.unstable():
281 if self.unstable():
282 troubles.append('unstable')
282 troubles.append('unstable')
283 if self.bumped():
283 if self.bumped():
284 troubles.append('bumped')
284 troubles.append('bumped')
285 if self.divergent():
285 if self.divergent():
286 troubles.append('divergent')
286 troubles.append('divergent')
287 return troubles
287 return troubles
288
288
289 def _fileinfo(self, path):
289 def _fileinfo(self, path):
290 if '_manifest' in self.__dict__:
290 if '_manifest' in self.__dict__:
291 try:
291 try:
292 return self._manifest[path], self._manifest.flags(path)
292 return self._manifest[path], self._manifest.flags(path)
293 except KeyError:
293 except KeyError:
294 raise error.ManifestLookupError(self._node, path,
294 raise error.ManifestLookupError(self._node, path,
295 _('not found in manifest'))
295 _('not found in manifest'))
296 if '_manifestdelta' in self.__dict__ or path in self.files():
296 if '_manifestdelta' in self.__dict__ or path in self.files():
297 if path in self._manifestdelta:
297 if path in self._manifestdelta:
298 return (self._manifestdelta[path],
298 return (self._manifestdelta[path],
299 self._manifestdelta.flags(path))
299 self._manifestdelta.flags(path))
300 node, flag = self._repo.manifest.find(self._changeset[0], path)
300 node, flag = self._repo.manifest.find(self._changeset[0], path)
301 if not node:
301 if not node:
302 raise error.ManifestLookupError(self._node, path,
302 raise error.ManifestLookupError(self._node, path,
303 _('not found in manifest'))
303 _('not found in manifest'))
304
304
305 return node, flag
305 return node, flag
306
306
307 def filenode(self, path):
307 def filenode(self, path):
308 return self._fileinfo(path)[0]
308 return self._fileinfo(path)[0]
309
309
310 def flags(self, path):
310 def flags(self, path):
311 try:
311 try:
312 return self._fileinfo(path)[1]
312 return self._fileinfo(path)[1]
313 except error.LookupError:
313 except error.LookupError:
314 return ''
314 return ''
315
315
316 def filectx(self, path, fileid=None, filelog=None):
316 def filectx(self, path, fileid=None, filelog=None):
317 """get a file context from this changeset"""
317 """get a file context from this changeset"""
318 if fileid is None:
318 if fileid is None:
319 fileid = self.filenode(path)
319 fileid = self.filenode(path)
320 return filectx(self._repo, path, fileid=fileid,
320 return filectx(self._repo, path, fileid=fileid,
321 changectx=self, filelog=filelog)
321 changectx=self, filelog=filelog)
322
322
323 def ancestor(self, c2):
323 def ancestor(self, c2):
324 """
324 """
325 return the ancestor context of self and c2
325 return the ancestor context of self and c2
326 """
326 """
327 # deal with workingctxs
327 # deal with workingctxs
328 n2 = c2._node
328 n2 = c2._node
329 if n2 is None:
329 if n2 is None:
330 n2 = c2._parents[0]._node
330 n2 = c2._parents[0]._node
331 n = self._repo.changelog.ancestor(self._node, n2)
331 n = self._repo.changelog.ancestor(self._node, n2)
332 return changectx(self._repo, n)
332 return changectx(self._repo, n)
333
333
334 def descendant(self, other):
334 def descendant(self, other):
335 """True if other is descendant of this changeset"""
335 """True if other is descendant of this changeset"""
336 return self._repo.changelog.descendant(self._rev, other._rev)
336 return self._repo.changelog.descendant(self._rev, other._rev)
337
337
338 def walk(self, match):
338 def walk(self, match):
339 fset = set(match.files())
339 fset = set(match.files())
340 # for dirstate.walk, files=['.'] means "walk the whole tree".
340 # for dirstate.walk, files=['.'] means "walk the whole tree".
341 # follow that here, too
341 # follow that here, too
342 fset.discard('.')
342 fset.discard('.')
343 for fn in self:
343 for fn in self:
344 if fn in fset:
344 if fn in fset:
345 # specified pattern is the exact name
345 # specified pattern is the exact name
346 fset.remove(fn)
346 fset.remove(fn)
347 if match(fn):
347 if match(fn):
348 yield fn
348 yield fn
349 for fn in sorted(fset):
349 for fn in sorted(fset):
350 if fn in self._dirs:
350 if fn in self._dirs:
351 # specified pattern is a directory
351 # specified pattern is a directory
352 continue
352 continue
353 if match.bad(fn, _('no such file in rev %s') % self) and match(fn):
353 if match.bad(fn, _('no such file in rev %s') % self) and match(fn):
354 yield fn
354 yield fn
355
355
356 def sub(self, path):
356 def sub(self, path):
357 return subrepo.subrepo(self, path)
357 return subrepo.subrepo(self, path)
358
358
359 def match(self, pats=[], include=None, exclude=None, default='glob'):
359 def match(self, pats=[], include=None, exclude=None, default='glob'):
360 r = self._repo
360 r = self._repo
361 return matchmod.match(r.root, r.getcwd(), pats,
361 return matchmod.match(r.root, r.getcwd(), pats,
362 include, exclude, default,
362 include, exclude, default,
363 auditor=r.auditor, ctx=self)
363 auditor=r.auditor, ctx=self)
364
364
365 def diff(self, ctx2=None, match=None, **opts):
365 def diff(self, ctx2=None, match=None, **opts):
366 """Returns a diff generator for the given contexts and matcher"""
366 """Returns a diff generator for the given contexts and matcher"""
367 if ctx2 is None:
367 if ctx2 is None:
368 ctx2 = self.p1()
368 ctx2 = self.p1()
369 if ctx2 is not None and not isinstance(ctx2, changectx):
369 if ctx2 is not None and not isinstance(ctx2, changectx):
370 ctx2 = self._repo[ctx2]
370 ctx2 = self._repo[ctx2]
371 diffopts = patch.diffopts(self._repo.ui, opts)
371 diffopts = patch.diffopts(self._repo.ui, opts)
372 return patch.diff(self._repo, ctx2.node(), self.node(),
372 return patch.diff(self._repo, ctx2.node(), self.node(),
373 match=match, opts=diffopts)
373 match=match, opts=diffopts)
374
374
375 @propertycache
375 @propertycache
376 def _dirs(self):
376 def _dirs(self):
377 dirs = set()
377 return scmutil.dirs(self._manifest)
378 for f in self._manifest:
379 pos = f.rfind('/')
380 while pos != -1:
381 f = f[:pos]
382 if f in dirs:
383 break # dirs already contains this and above
384 dirs.add(f)
385 pos = f.rfind('/')
386 return dirs
387
378
388 def dirs(self):
379 def dirs(self):
389 return self._dirs
380 return self._dirs
390
381
391 def dirty(self):
382 def dirty(self):
392 return False
383 return False
393
384
394 class filectx(object):
385 class filectx(object):
395 """A filecontext object makes access to data related to a particular
386 """A filecontext object makes access to data related to a particular
396 filerevision convenient."""
387 filerevision convenient."""
397 def __init__(self, repo, path, changeid=None, fileid=None,
388 def __init__(self, repo, path, changeid=None, fileid=None,
398 filelog=None, changectx=None):
389 filelog=None, changectx=None):
399 """changeid can be a changeset revision, node, or tag.
390 """changeid can be a changeset revision, node, or tag.
400 fileid can be a file revision or node."""
391 fileid can be a file revision or node."""
401 self._repo = repo
392 self._repo = repo
402 self._path = path
393 self._path = path
403
394
404 assert (changeid is not None
395 assert (changeid is not None
405 or fileid is not None
396 or fileid is not None
406 or changectx is not None), \
397 or changectx is not None), \
407 ("bad args: changeid=%r, fileid=%r, changectx=%r"
398 ("bad args: changeid=%r, fileid=%r, changectx=%r"
408 % (changeid, fileid, changectx))
399 % (changeid, fileid, changectx))
409
400
410 if filelog:
401 if filelog:
411 self._filelog = filelog
402 self._filelog = filelog
412
403
413 if changeid is not None:
404 if changeid is not None:
414 self._changeid = changeid
405 self._changeid = changeid
415 if changectx is not None:
406 if changectx is not None:
416 self._changectx = changectx
407 self._changectx = changectx
417 if fileid is not None:
408 if fileid is not None:
418 self._fileid = fileid
409 self._fileid = fileid
419
410
420 @propertycache
411 @propertycache
421 def _changectx(self):
412 def _changectx(self):
422 try:
413 try:
423 return changectx(self._repo, self._changeid)
414 return changectx(self._repo, self._changeid)
424 except error.RepoLookupError:
415 except error.RepoLookupError:
425 # Linkrev may point to any revision in the repository. When the
416 # Linkrev may point to any revision in the repository. When the
426 # repository is filtered this may lead to `filectx` trying to build
417 # repository is filtered this may lead to `filectx` trying to build
427 # `changectx` for filtered revision. In such case we fallback to
418 # `changectx` for filtered revision. In such case we fallback to
428 # creating `changectx` on the unfiltered version of the reposition.
419 # creating `changectx` on the unfiltered version of the reposition.
429 # This fallback should not be an issue because `changectx` from
420 # This fallback should not be an issue because `changectx` from
430 # `filectx` are not used in complex operations that care about
421 # `filectx` are not used in complex operations that care about
431 # filtering.
422 # filtering.
432 #
423 #
433 # This fallback is a cheap and dirty fix that prevent several
424 # This fallback is a cheap and dirty fix that prevent several
434 # crashes. It does not ensure the behavior is correct. However the
425 # crashes. It does not ensure the behavior is correct. However the
435 # behavior was not correct before filtering either and "incorrect
426 # behavior was not correct before filtering either and "incorrect
436 # behavior" is seen as better as "crash"
427 # behavior" is seen as better as "crash"
437 #
428 #
438 # Linkrevs have several serious troubles with filtering that are
429 # Linkrevs have several serious troubles with filtering that are
439 # complicated to solve. Proper handling of the issue here should be
430 # complicated to solve. Proper handling of the issue here should be
440 # considered when solving linkrev issue are on the table.
431 # considered when solving linkrev issue are on the table.
441 return changectx(self._repo.unfiltered(), self._changeid)
432 return changectx(self._repo.unfiltered(), self._changeid)
442
433
443 @propertycache
434 @propertycache
444 def _filelog(self):
435 def _filelog(self):
445 return self._repo.file(self._path)
436 return self._repo.file(self._path)
446
437
447 @propertycache
438 @propertycache
448 def _changeid(self):
439 def _changeid(self):
449 if '_changectx' in self.__dict__:
440 if '_changectx' in self.__dict__:
450 return self._changectx.rev()
441 return self._changectx.rev()
451 else:
442 else:
452 return self._filelog.linkrev(self._filerev)
443 return self._filelog.linkrev(self._filerev)
453
444
454 @propertycache
445 @propertycache
455 def _filenode(self):
446 def _filenode(self):
456 if '_fileid' in self.__dict__:
447 if '_fileid' in self.__dict__:
457 return self._filelog.lookup(self._fileid)
448 return self._filelog.lookup(self._fileid)
458 else:
449 else:
459 return self._changectx.filenode(self._path)
450 return self._changectx.filenode(self._path)
460
451
461 @propertycache
452 @propertycache
462 def _filerev(self):
453 def _filerev(self):
463 return self._filelog.rev(self._filenode)
454 return self._filelog.rev(self._filenode)
464
455
465 @propertycache
456 @propertycache
466 def _repopath(self):
457 def _repopath(self):
467 return self._path
458 return self._path
468
459
469 def __nonzero__(self):
460 def __nonzero__(self):
470 try:
461 try:
471 self._filenode
462 self._filenode
472 return True
463 return True
473 except error.LookupError:
464 except error.LookupError:
474 # file is missing
465 # file is missing
475 return False
466 return False
476
467
477 def __str__(self):
468 def __str__(self):
478 return "%s@%s" % (self.path(), short(self.node()))
469 return "%s@%s" % (self.path(), short(self.node()))
479
470
480 def __repr__(self):
471 def __repr__(self):
481 return "<filectx %s>" % str(self)
472 return "<filectx %s>" % str(self)
482
473
483 def __hash__(self):
474 def __hash__(self):
484 try:
475 try:
485 return hash((self._path, self._filenode))
476 return hash((self._path, self._filenode))
486 except AttributeError:
477 except AttributeError:
487 return id(self)
478 return id(self)
488
479
489 def __eq__(self, other):
480 def __eq__(self, other):
490 try:
481 try:
491 return (self._path == other._path
482 return (self._path == other._path
492 and self._filenode == other._filenode)
483 and self._filenode == other._filenode)
493 except AttributeError:
484 except AttributeError:
494 return False
485 return False
495
486
496 def __ne__(self, other):
487 def __ne__(self, other):
497 return not (self == other)
488 return not (self == other)
498
489
499 def filectx(self, fileid):
490 def filectx(self, fileid):
500 '''opens an arbitrary revision of the file without
491 '''opens an arbitrary revision of the file without
501 opening a new filelog'''
492 opening a new filelog'''
502 return filectx(self._repo, self._path, fileid=fileid,
493 return filectx(self._repo, self._path, fileid=fileid,
503 filelog=self._filelog)
494 filelog=self._filelog)
504
495
505 def filerev(self):
496 def filerev(self):
506 return self._filerev
497 return self._filerev
507 def filenode(self):
498 def filenode(self):
508 return self._filenode
499 return self._filenode
509 def flags(self):
500 def flags(self):
510 return self._changectx.flags(self._path)
501 return self._changectx.flags(self._path)
511 def filelog(self):
502 def filelog(self):
512 return self._filelog
503 return self._filelog
513
504
514 def rev(self):
505 def rev(self):
515 if '_changectx' in self.__dict__:
506 if '_changectx' in self.__dict__:
516 return self._changectx.rev()
507 return self._changectx.rev()
517 if '_changeid' in self.__dict__:
508 if '_changeid' in self.__dict__:
518 return self._changectx.rev()
509 return self._changectx.rev()
519 return self._filelog.linkrev(self._filerev)
510 return self._filelog.linkrev(self._filerev)
520
511
521 def linkrev(self):
512 def linkrev(self):
522 return self._filelog.linkrev(self._filerev)
513 return self._filelog.linkrev(self._filerev)
523 def node(self):
514 def node(self):
524 return self._changectx.node()
515 return self._changectx.node()
525 def hex(self):
516 def hex(self):
526 return hex(self.node())
517 return hex(self.node())
527 def user(self):
518 def user(self):
528 return self._changectx.user()
519 return self._changectx.user()
529 def date(self):
520 def date(self):
530 return self._changectx.date()
521 return self._changectx.date()
531 def files(self):
522 def files(self):
532 return self._changectx.files()
523 return self._changectx.files()
533 def description(self):
524 def description(self):
534 return self._changectx.description()
525 return self._changectx.description()
535 def branch(self):
526 def branch(self):
536 return self._changectx.branch()
527 return self._changectx.branch()
537 def extra(self):
528 def extra(self):
538 return self._changectx.extra()
529 return self._changectx.extra()
539 def phase(self):
530 def phase(self):
540 return self._changectx.phase()
531 return self._changectx.phase()
541 def phasestr(self):
532 def phasestr(self):
542 return self._changectx.phasestr()
533 return self._changectx.phasestr()
543 def manifest(self):
534 def manifest(self):
544 return self._changectx.manifest()
535 return self._changectx.manifest()
545 def changectx(self):
536 def changectx(self):
546 return self._changectx
537 return self._changectx
547
538
548 def data(self):
539 def data(self):
549 return self._filelog.read(self._filenode)
540 return self._filelog.read(self._filenode)
550 def path(self):
541 def path(self):
551 return self._path
542 return self._path
552 def size(self):
543 def size(self):
553 return self._filelog.size(self._filerev)
544 return self._filelog.size(self._filerev)
554
545
555 def isbinary(self):
546 def isbinary(self):
556 try:
547 try:
557 return util.binary(self.data())
548 return util.binary(self.data())
558 except IOError:
549 except IOError:
559 return False
550 return False
560
551
def cmp(self, fctx):
    """Compare with another file context.

    Returns True if this file revision differs from fctx.
    """
    if fctx._filerev is None:
        # fctx is a working-directory file.  If file data starts with
        # '\1\n', an empty metadata block is prepended, which adds 4
        # bytes to filelog.size() — so a 4-byte size difference (or any
        # active encode filters) still requires a content comparison.
        if self._repo._encodefilterpats or self.size() - 4 == fctx.size():
            return self._filelog.cmp(self._filenode, fctx.data())
    if self.size() == fctx.size():
        return self._filelog.cmp(self._filenode, fctx.data())
    return True
575
566
def renamed(self):
    """check if file was actually renamed in this changeset revision

    If rename logged in file revision, we report copy for changeset only
    if file revisions linkrev points back to the changeset in question
    or both changeset parents contain different file revisions.
    """
    copysource = self._filelog.renamed(self._filenode)
    if not copysource:
        return copysource
    if self.rev() == self.linkrev():
        return copysource
    # rename was recorded in an earlier changeset: report it only if
    # neither parent already carries this exact file revision
    fname = self.path()
    fnode = self._filenode
    for parent in self._changectx.parents():
        try:
            if fnode == parent.filenode(fname):
                return None
        except error.LookupError:
            pass
    return copysource
600
591
def parents(self):
    """Return filectx objects for the parents of this file revision."""
    path = self._path
    flog = self._filelog
    pl = [(path, node, flog) for node in flog.parents(self._filenode)]
    # a recorded rename replaces the first parent with the copy source
    rename = flog.renamed(self._filenode)
    if rename:
        pl[0] = (rename[0], rename[1], None)
    return [filectx(self._repo, ppath, fileid=pnode, filelog=plog)
            for ppath, pnode, plog in pl if pnode != nullid]
612
603
def p1(self):
    """First parent of this file context."""
    return self.parents()[0]
615
606
def p2(self):
    """Second parent, or a null filectx when there is only one parent."""
    parentlist = self.parents()
    if len(parentlist) == 2:
        return parentlist[1]
    return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
621
612
def children(self):
    """Return filectx objects for each child revision of this file."""
    # hard for renames
    kids = self._filelog.children(self._filenode)
    return [filectx(self._repo, self._path, fileid=kid,
                    filelog=self._filelog)
            for kid in kids]
627
618
def annotate(self, follow=False, linenumber=None, diffopts=None):
    '''returns a list of tuples of (ctx, line) for each line
    in the file, where ctx is the filectx of the node where
    that line was last changed.
    This returns tuples of ((ctx, linenumber), line) for each line,
    if "linenumber" parameter is NOT "None".
    In such tuples, linenumber means one at the first appearance
    in the managed file.
    To reduce annotation cost,
    this returns fixed value(False is used) as linenumber,
    if "linenumber" parameter is "False".'''

    # Three decoration strategies, chosen below from the linenumber
    # flag: pair each line with just the rev, with (rev, False), or
    # with (rev, 1-based line number).
    def decorate_compat(text, rev):
        return ([rev] * len(text.splitlines()), text)

    def without_linenumber(text, rev):
        return ([(rev, False)] * len(text.splitlines()), text)

    def with_linenumber(text, rev):
        size = len(text.splitlines())
        return ([(rev, i) for i in xrange(1, size + 1)], text)

    decorate = (((linenumber is None) and decorate_compat) or
                (linenumber and with_linenumber) or
                without_linenumber)

    def pair(parent, child):
        # Copy annotations from the parent into the child for every
        # unchanged ('=') block; other blocks keep the child's rev.
        blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts,
                                 refine=True)
        for (a1, a2, b1, b2), t in blocks:
            # Changed blocks ('!') or blocks made only of blank lines ('~')
            # belong to the child.
            if t == '=':
                child[0][b1:b2] = parent[0][a1:a2]
        return child

    getlog = util.lrucachefunc(lambda x: self._repo.file(x))
    def getctx(path, fileid):
        log = path == self._path and self._filelog or getlog(path)
        return filectx(self._repo, path, fileid=fileid, filelog=log)
    getctx = util.lrucachefunc(getctx)

    def parents(f):
        # we want to reuse filectx objects as much as possible
        p = f._path
        if f._filerev is None: # working dir
            pl = [(n.path(), n.filerev()) for n in f.parents()]
        else:
            pl = [(p, n) for n in f._filelog.parentrevs(f._filerev)]

        if follow:
            # follow copies/renames across the first parent
            r = f.renamed()
            if r:
                pl[0] = (r[0], getlog(r[0]).rev(r[1]))

        return [getctx(p, n) for p, n in pl if n != nullrev]

    # use linkrev to find the first changeset where self appeared
    if self.rev() != self.linkrev():
        base = self.filectx(self.filerev())
    else:
        base = self

    # This algorithm would prefer to be recursive, but Python is a
    # bit recursion-hostile. Instead we do an iterative
    # depth-first search.

    visit = [base]
    hist = {}        # fully-annotated contexts still needed by children
    pcache = {}      # memoized parent lists (emptied once consumed)
    needed = {base: 1}  # reference counts so hist entries can be freed
    while visit:
        f = visit[-1]
        if f not in pcache:
            pcache[f] = parents(f)

        ready = True
        pl = pcache[f]
        for p in pl:
            if p not in hist:
                ready = False
                visit.append(p)
            needed[p] = needed.get(p, 0) + 1
        if ready:
            visit.pop()
            curr = decorate(f.data(), f)
            for p in pl:
                curr = pair(hist[p], curr)
                if needed[p] == 1:
                    # last consumer: free the parent's annotation
                    del hist[p]
                else:
                    needed[p] -= 1

            hist[f] = curr
            pcache[f] = []

    return zip(hist[base][0], hist[base][1].splitlines(True))
725
716
def ancestor(self, fc2, actx):
    """
    find the common ancestor file context, if any, of self, and fc2

    actx must be the changectx of the common ancestor
    of self's and fc2's respective changesets.
    """

    # the easy case: no (relevant) renames
    if fc2.path() == self.path() and self.path() in actx:
        return actx[self.path()]

    # the next easiest cases: unambiguous predecessor (name trumps
    # history)
    if self.path() in actx and fc2.path() not in actx:
        return actx[self.path()]
    if fc2.path() in actx and self.path() not in actx:
        return actx[fc2.path()]

    # prime the ancestor cache for the working directory
    # (working-dir file contexts have no _filerev; seed their parents
    # explicitly so the traversal below never asks a filelog for them)
    acache = {}
    for c in (self, fc2):
        if c._filerev is None:
            pl = [(n.path(), n.filenode()) for n in c.parents()]
            acache[(c._path, None)] = pl

    flcache = {self._repopath:self._filelog, fc2._repopath:fc2._filelog}
    def parents(vertex):
        # vertex is a (path, filenode) pair; results are memoized
        if vertex in acache:
            return acache[vertex]
        f, n = vertex
        if f not in flcache:
            flcache[f] = self._repo.file(f)
        fl = flcache[f]
        pl = [(f, p) for p in fl.parents(n) if p != nullid]
        # a recorded rename contributes the copy source as a parent
        re = fl.renamed(n)
        if re:
            pl.append(re)
        acache[vertex] = pl
        return pl

    a, b = (self._path, self._filenode), (fc2._path, fc2._filenode)
    v = ancestor.ancestor(a, b, parents)
    if v:
        f, n = v
        return filectx(self._repo, f, fileid=n, filelog=flcache[f])

    return None
774
765
def ancestors(self, followfirst=False):
    """Yield ancestor file contexts, highest (rev, node) first.

    With followfirst=True, only first parents are followed.
    """
    pending = {}
    ctx = self
    limit = 1 if followfirst else None
    while True:
        for parent in ctx.parents()[:limit]:
            pending[(parent.rev(), parent.node())] = parent
        if not pending:
            break
        ctx = pending.pop(max(pending))
        yield ctx
786
777
def copies(self, c2):
    """Return (and cache per str(c2)) the path-copy map against c2."""
    if not util.safehasattr(self, "_copycache"):
        self._copycache = {}
    key = str(c2)
    if key not in self._copycache:
        self._copycache[key] = copies.pathcopies(c2)
    return self._copycache[key]
794
785
class workingctx(changectx):
    """A workingctx object makes access to data related to
    the current working directory convenient.
    date - any valid date string or (unixtime, offset), or None.
    user - username string, or None.
    extra - a dictionary of extra values, or None.
    changes - a list of file lists as returned by localrepo.status()
    or None to use the repository status.
    """
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        self._repo = repo
        # a working context has no revision number or node of its own
        self._rev = None
        self._node = None
        self._text = text
        if date:
            self._date = util.parsedate(date)
        if user:
            self._user = user
        if changes:
            # changes is a status tuple: (modified, added, removed,
            # deleted, unknown, ignored, clean)
            self._status = list(changes[:4])
            self._unknown = changes[4]
            self._ignored = changes[5]
            self._clean = changes[6]
        else:
            # left as None: the corresponding accessors assert that
            # status() was called first
            self._unknown = None
            self._ignored = None
            self._clean = None

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if 'branch' not in self._extra:
            try:
                branch = encoding.fromlocal(self._repo.dirstate.branch())
            except UnicodeDecodeError:
                raise util.Abort(_('branch name not in UTF-8!'))
            self._extra['branch'] = branch
        if self._extra['branch'] == '':
            self._extra['branch'] = 'default'
835
826
def __str__(self):
    """First parent's string form with a '+' suffix."""
    return str(self._parents[0]) + "+"

def __repr__(self):
    """Debug representation."""
    return "<workingctx %s>" % str(self)

def __nonzero__(self):
    """A working context is always truthy."""
    return True

def __contains__(self, key):
    """True when key is tracked (dirstate not unknown '?'/removed 'r')."""
    return self._repo.dirstate[key] not in "?r"
847
838
def _buildflagfunc(self):
    # Create a fallback function for getting file flags when the
    # filesystem doesn't support them

    copiesget = self._repo.dirstate.copies().get

    if len(self._parents) < 2:
        # when we have one parent, it's easy: copy from parent
        man = self._parents[0].manifest()
        def func(f):
            # resolve through any recorded copy so the flag comes
            # from the copy source
            f = copiesget(f, f)
            return man.flags(f)
    else:
        # merges are tricky: we try to reconstruct the unstored
        # result from the merge (issue1802)
        p1, p2 = self._parents
        pa = p1.ancestor(p2)
        m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

        def func(f):
            f = copiesget(f, f) # may be wrong for merges with copies
            fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
            # standard 3-way merge of the flag value: agreeing parents
            # win; otherwise the side that changed relative to the
            # ancestor wins
            if fl1 == fl2:
                return fl1
            if fl1 == fla:
                return fl2
            if fl2 == fla:
                return fl1
            return '' # punt for conflicts

    return func
879
870
@propertycache
def _flagfunc(self):
    # flag lookup backed by the filesystem, falling back to
    # _buildflagfunc when the filesystem cannot report flags
    return self._repo.dirstate.flagfunc(self._buildflagfunc)

@propertycache
def _manifest(self):
    """generate a manifest corresponding to the working directory"""

    man = self._parents[0].manifest().copy()
    if len(self._parents) > 1:
        man2 = self.p2().manifest()
        def getman(f):
            # prefer the first parent's manifest entry when present
            if f in man:
                return man
            return man2
    else:
        getman = lambda f: man

    copied = self._repo.dirstate.copies()
    ff = self._flagfunc
    modified, added, removed, deleted = self._status
    for i, l in (("a", added), ("m", modified)):
        for f in l:
            # node id inherited from the copy source (or the file
            # itself), suffixed with 'a'/'m' to mark it dirty
            orig = copied.get(f, f)
            man[f] = getman(orig).get(orig, nullid) + i
            try:
                man.set(f, ff(f))
            except OSError:
                pass

    # deleted and removed files disappear from the manifest
    for f in deleted + removed:
        if f in man:
            del man[f]

    return man
915
906
def __iter__(self):
    """Iterate over tracked files (everything not marked removed)."""
    ds = self._repo.dirstate
    for path in ds:
        if ds[path] != 'r':
            yield path
921
912
@propertycache
def _status(self):
    # default status: (modified, added, removed, deleted) with the
    # repository's default status() arguments
    return self._repo.status()[:4]

@propertycache
def _user(self):
    # default user from the ui configuration
    return self._repo.ui.username()

@propertycache
def _date(self):
    # default date: "now"
    return util.makedate()

@propertycache
def _parents(self):
    # dirstate parents as changectx objects; a null second parent is
    # dropped so the list has one entry outside of merges
    p = self._repo.dirstate.parents()
    if p[1] == nullid:
        p = p[:-1]
    return [changectx(self._repo, x) for x in p]
940
931
def status(self, ignored=False, clean=False, unknown=False):
    """Explicit status query
    Unless this method is used to query the working copy status, the
    _status property will implicitly read the status using its default
    arguments."""
    st = self._repo.status(ignored=ignored, clean=clean, unknown=unknown)
    # reset the optional lists; only the ones requested are populated
    self._unknown = self._ignored = self._clean = None
    if unknown:
        self._unknown = st[4]
    if ignored:
        self._ignored = st[5]
    if clean:
        self._clean = st[6]
    self._status = st[:4]
    return st
956
947
def manifest(self):
    """Working-directory manifest (lazily built)."""
    return self._manifest
def user(self):
    """Commit user, defaulting to the configured username."""
    return self._user or self._repo.ui.username()
def date(self):
    """Commit date tuple."""
    return self._date
def description(self):
    """Commit message text."""
    return self._text
def files(self):
    """Sorted union of modified, added and removed files."""
    return sorted(self._status[0] + self._status[1] + self._status[2])
967
958
def modified(self):
    """Files modified in the working directory."""
    return self._status[0]
def added(self):
    """Files added in the working directory."""
    return self._status[1]
def removed(self):
    """Files removed in the working directory."""
    return self._status[2]
def deleted(self):
    """Files deleted (missing) from the working directory."""
    return self._status[3]
def unknown(self):
    """Unknown files; valid only after status(unknown=True)."""
    assert self._unknown is not None # must call status first
    return self._unknown
def ignored(self):
    """Ignored files; valid only after status(ignored=True)."""
    assert self._ignored is not None # must call status first
    return self._ignored
def clean(self):
    """Clean files; valid only after status(clean=True)."""
    assert self._clean is not None # must call status first
    return self._clean
def branch(self):
    """Branch name, converted to the local encoding."""
    return encoding.tolocal(self._extra['branch'])
def closesbranch(self):
    """True when this commit would close its branch."""
    return 'close' in self._extra
def extra(self):
    """Extra metadata dictionary."""
    return self._extra
991
982
def tags(self):
    """Concatenation of the parents' tags."""
    result = []
    for parent in self.parents():
        result.extend(parent.tags())
    return result

def bookmarks(self):
    """Concatenation of the parents' bookmarks."""
    marks = []
    for parent in self.parents():
        marks.extend(parent.bookmarks())
    return marks
1003
994
def phase(self):
    """Highest phase among the parents, at least draft."""
    result = phases.draft # default phase to draft
    for parent in self.parents():
        result = max(result, parent.phase())
    return result

def hidden(self):
    """A working context is never hidden."""
    return False

def children(self):
    """A working context has no children."""
    return []
1015
1006
def flags(self, path):
    """Flags ('l', 'x' or '') for path in the working directory."""
    # prefer the cached manifest when it has already been built
    if '_manifest' in self.__dict__:
        try:
            return self._manifest.flags(path)
        except KeyError:
            return ''

    try:
        return self._flagfunc(path)
    except OSError:
        return ''
1027
1018
def filectx(self, path, filelog=None):
    """get a file context from the working directory"""
    return workingfilectx(self._repo, path, workingctx=self,
                          filelog=filelog)
1032
1023
def ancestor(self, c2):
    """return the ancestor context of self and c2"""
    # punt on two parents for now
    return self._parents[0].ancestor(c2)
1036
1027
def walk(self, match):
    """Sorted list of working-directory files matching `match`."""
    names = self._repo.dirstate.walk(match, sorted(self.substate),
                                     True, False)
    return sorted(names)
1040
1031
def dirty(self, missing=False, merge=True, branch=True):
    "check whether a working directory is modified"
    # check subrepos first
    for subpath in sorted(self.substate):
        if self.sub(subpath).dirty():
            return True
    # then the working dir itself; note that the truthy operand itself
    # is returned (e.g. the second-parent context), not a plain bool
    return ((merge and self.p2()) or
            (branch and self.branch() != self.p1().branch()) or
            self.modified() or self.added() or self.removed() or
            (missing and self.deleted()))
1052
1043
def add(self, list, prefix=""):
    """Schedule the given files for addition to the dirstate.

    Each name in `list` is repository-relative; `prefix` is only used
    to make warning messages match what the user typed. Returns the
    list of rejected names (nonexistent paths or unsupported file
    types).
    """
    join = lambda f: os.path.join(prefix, f)
    wlock = self._repo.wlock()
    ui, ds = self._repo.ui, self._repo.dirstate
    try:
        rejected = []
        for f in list:
            # reject names that are non-portable across platforms
            scmutil.checkportable(ui, join(f))
            p = self._repo.wjoin(f)
            try:
                st = os.lstat(p)
            except OSError:
                ui.warn(_("%s does not exist!\n") % join(f))
                rejected.append(f)
                continue
            if st.st_size > 10000000:
                ui.warn(_("%s: up to %d MB of RAM may be required "
                          "to manage this file\n"
                          "(use 'hg revert %s' to cancel the "
                          "pending addition)\n")
                        % (f, 3 * st.st_size // 1000000, join(f)))
            if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                ui.warn(_("%s not added: only files and symlinks "
                          "supported currently\n") % join(f))
                # append the relative name, not the absolute path p,
                # so the rejected list is consistent with the
                # missing-file case above
                rejected.append(f)
            elif ds[f] in 'amn':
                ui.warn(_("%s already tracked!\n") % join(f))
            elif ds[f] == 'r':
                # previously removed: resurrect instead of re-adding
                ds.normallookup(f)
            else:
                ds.add(f)
        return rejected
    finally:
        wlock.release()
1087
1078
def forget(self, files, prefix=""):
    """Stop tracking the given files.

    Returns the list of names that were not tracked to begin with.
    """
    join = lambda f: os.path.join(prefix, f)
    wlock = self._repo.wlock()
    try:
        rejected = []
        ds = self._repo.dirstate
        for f in files:
            if f not in ds:
                self._repo.ui.warn(_("%s not tracked!\n") % join(f))
                rejected.append(f)
            elif ds[f] != 'a':
                # tracked file: mark as removed
                ds.remove(f)
            else:
                # pending add: just drop the entry
                ds.drop(f)
        return rejected
    finally:
        wlock.release()
1104
1095
def ancestors(self):
    """Yield changectx objects for every ancestor of the parents."""
    parentrevs = [p.rev() for p in self._parents]
    for rev in self._repo.changelog.ancestors(parentrevs):
        yield changectx(self._repo, rev)
1109
1100
def undelete(self, list):
    """Restore files marked removed ('r') from a parent's contents."""
    pctxs = self.parents()
    wlock = self._repo.wlock()
    try:
        for f in list:
            if self._repo.dirstate[f] != 'r':
                self._repo.ui.warn(_("%s not removed!\n") % f)
            else:
                # take the version from p1 when it exists there,
                # otherwise from p2
                fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
                data = fctx.data()
                self._repo.wwrite(f, data, fctx.flags())
                self._repo.dirstate.normal(f)
    finally:
        wlock.release()
1124
1115
def copy(self, source, dest):
    """Record in the dirstate that dest is a copy of source."""
    p = self._repo.wjoin(dest)
    if not os.path.lexists(p):
        self._repo.ui.warn(_("%s does not exist!\n") % dest)
    elif not (os.path.isfile(p) or os.path.islink(p)):
        self._repo.ui.warn(_("copy failed: %s is not a file or a "
                             "symbolic link\n") % dest)
    else:
        wlock = self._repo.wlock()
        try:
            ds = self._repo.dirstate
            if ds[dest] in '?r':
                ds.add(dest)
            ds.copy(source, dest)
        finally:
            wlock.release()
1140
1131
1141 def markcommitted(self, node):
1132 def markcommitted(self, node):
1142 """Perform post-commit cleanup necessary after commiting this workingctx
1133 """Perform post-commit cleanup necessary after commiting this workingctx
1143
1134
1144 Specifically, this updates backing stores this working context
1135 Specifically, this updates backing stores this working context
1145 wraps to reflect the fact that the changes reflected by this
1136 wraps to reflect the fact that the changes reflected by this
1146 workingctx have been committed. For example, it marks
1137 workingctx have been committed. For example, it marks
1147 modified and added files as normal in the dirstate.
1138 modified and added files as normal in the dirstate.
1148
1139
1149 """
1140 """
1150
1141
1151 for f in self.modified() + self.added():
1142 for f in self.modified() + self.added():
1152 self._repo.dirstate.normal(f)
1143 self._repo.dirstate.normal(f)
1153 for f in self.removed():
1144 for f in self.removed():
1154 self._repo.dirstate.drop(f)
1145 self._repo.dirstate.drop(f)
1155 self._repo.dirstate.setparents(node)
1146 self._repo.dirstate.setparents(node)
1156
1147
1157 def dirs(self):
1148 def dirs(self):
1158 return set(self._repo.dirstate.dirs())
1149 return self._repo.dirstate.dirs()
1159
1150
1160 class workingfilectx(filectx):
1151 class workingfilectx(filectx):
1161 """A workingfilectx object makes access to data related to a particular
1152 """A workingfilectx object makes access to data related to a particular
1162 file in the working directory convenient."""
1153 file in the working directory convenient."""
1163 def __init__(self, repo, path, filelog=None, workingctx=None):
1154 def __init__(self, repo, path, filelog=None, workingctx=None):
1164 """changeid can be a changeset revision, node, or tag.
1155 """changeid can be a changeset revision, node, or tag.
1165 fileid can be a file revision or node."""
1156 fileid can be a file revision or node."""
1166 self._repo = repo
1157 self._repo = repo
1167 self._path = path
1158 self._path = path
1168 self._changeid = None
1159 self._changeid = None
1169 self._filerev = self._filenode = None
1160 self._filerev = self._filenode = None
1170
1161
1171 if filelog:
1162 if filelog:
1172 self._filelog = filelog
1163 self._filelog = filelog
1173 if workingctx:
1164 if workingctx:
1174 self._changectx = workingctx
1165 self._changectx = workingctx
1175
1166
1176 @propertycache
1167 @propertycache
1177 def _changectx(self):
1168 def _changectx(self):
1178 return workingctx(self._repo)
1169 return workingctx(self._repo)
1179
1170
1180 def __nonzero__(self):
1171 def __nonzero__(self):
1181 return True
1172 return True
1182
1173
1183 def __str__(self):
1174 def __str__(self):
1184 return "%s@%s" % (self.path(), self._changectx)
1175 return "%s@%s" % (self.path(), self._changectx)
1185
1176
1186 def __repr__(self):
1177 def __repr__(self):
1187 return "<workingfilectx %s>" % str(self)
1178 return "<workingfilectx %s>" % str(self)
1188
1179
1189 def data(self):
1180 def data(self):
1190 return self._repo.wread(self._path)
1181 return self._repo.wread(self._path)
1191 def renamed(self):
1182 def renamed(self):
1192 rp = self._repo.dirstate.copied(self._path)
1183 rp = self._repo.dirstate.copied(self._path)
1193 if not rp:
1184 if not rp:
1194 return None
1185 return None
1195 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1186 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1196
1187
1197 def parents(self):
1188 def parents(self):
1198 '''return parent filectxs, following copies if necessary'''
1189 '''return parent filectxs, following copies if necessary'''
1199 def filenode(ctx, path):
1190 def filenode(ctx, path):
1200 return ctx._manifest.get(path, nullid)
1191 return ctx._manifest.get(path, nullid)
1201
1192
1202 path = self._path
1193 path = self._path
1203 fl = self._filelog
1194 fl = self._filelog
1204 pcl = self._changectx._parents
1195 pcl = self._changectx._parents
1205 renamed = self.renamed()
1196 renamed = self.renamed()
1206
1197
1207 if renamed:
1198 if renamed:
1208 pl = [renamed + (None,)]
1199 pl = [renamed + (None,)]
1209 else:
1200 else:
1210 pl = [(path, filenode(pcl[0], path), fl)]
1201 pl = [(path, filenode(pcl[0], path), fl)]
1211
1202
1212 for pc in pcl[1:]:
1203 for pc in pcl[1:]:
1213 pl.append((path, filenode(pc, path), fl))
1204 pl.append((path, filenode(pc, path), fl))
1214
1205
1215 return [filectx(self._repo, p, fileid=n, filelog=l)
1206 return [filectx(self._repo, p, fileid=n, filelog=l)
1216 for p, n, l in pl if n != nullid]
1207 for p, n, l in pl if n != nullid]
1217
1208
1218 def children(self):
1209 def children(self):
1219 return []
1210 return []
1220
1211
1221 def size(self):
1212 def size(self):
1222 return os.lstat(self._repo.wjoin(self._path)).st_size
1213 return os.lstat(self._repo.wjoin(self._path)).st_size
1223 def date(self):
1214 def date(self):
1224 t, tz = self._changectx.date()
1215 t, tz = self._changectx.date()
1225 try:
1216 try:
1226 return (int(os.lstat(self._repo.wjoin(self._path)).st_mtime), tz)
1217 return (int(os.lstat(self._repo.wjoin(self._path)).st_mtime), tz)
1227 except OSError, err:
1218 except OSError, err:
1228 if err.errno != errno.ENOENT:
1219 if err.errno != errno.ENOENT:
1229 raise
1220 raise
1230 return (t, tz)
1221 return (t, tz)
1231
1222
1232 def cmp(self, fctx):
1223 def cmp(self, fctx):
1233 """compare with other file context
1224 """compare with other file context
1234
1225
1235 returns True if different than fctx.
1226 returns True if different than fctx.
1236 """
1227 """
1237 # fctx should be a filectx (not a workingfilectx)
1228 # fctx should be a filectx (not a workingfilectx)
1238 # invert comparison to reuse the same code path
1229 # invert comparison to reuse the same code path
1239 return fctx.cmp(self)
1230 return fctx.cmp(self)
1240
1231
class memctx(object):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time while
    related files data and is made available through a callback
    mechanism.  'repo' is the current localrepo, 'parents' is a
    sequence of two parent revisions identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists
    names of files touched by the revision (normalized and relative to
    repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    requested file, relative to repository root. It is fired by the
    commit function for every file in 'files', but calls order is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn raises an
    IOError. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to current
    repository username, date is the commit date in any format
    supported by util.parsedate() and defaults to current date, extra
    is a dictionary of metadata or is left empty.
    """
    def __init__(self, repo, parents, text, files, filectxfn, user=None,
                 date=None, extra=None):
        self._repo = repo
        self._rev = None
        self._node = None
        self._text = text
        self._date = date and util.parsedate(date) or util.makedate()
        self._user = user
        parents = [(p or nullid) for p in parents]
        p1, p2 = parents
        self._parents = [changectx(self._repo, p) for p in (p1, p2)]
        files = sorted(set(files))
        # status slots: modified, added, removed, deleted, unknown,
        # ignored, clean.  The original list had only five entries, so
        # ignored() and clean() raised IndexError; pad it to seven.
        self._status = [files, [], [], [], [], [], []]
        self._filectxfn = filectxfn

        self._extra = extra and extra.copy() or {}
        if self._extra.get('branch', '') == '':
            self._extra['branch'] = 'default'

    def __str__(self):
        return str(self._parents[0]) + "+"

    def __int__(self):
        return self._rev

    def __nonzero__(self):
        return True

    def __getitem__(self, key):
        return self.filectx(key)

    def p1(self):
        return self._parents[0]
    def p2(self):
        return self._parents[1]

    def user(self):
        return self._user or self._repo.ui.username()
    def date(self):
        return self._date
    def description(self):
        return self._text
    def files(self):
        # a memctx only distinguishes "touched" files; report them as modified
        return self.modified()
    def modified(self):
        return self._status[0]
    def added(self):
        return self._status[1]
    def removed(self):
        return self._status[2]
    def deleted(self):
        return self._status[3]
    def unknown(self):
        return self._status[4]
    def ignored(self):
        return self._status[5]
    def clean(self):
        return self._status[6]
    def branch(self):
        return encoding.tolocal(self._extra['branch'])
    def extra(self):
        return self._extra
    def flags(self, f):
        return self[f].flags()

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def filectx(self, path, filelog=None):
        """get a file context from the working directory"""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)
1344
1335
class memfilectx(object):
    """memfilectx represents an in-memory file to commit.

    See memctx for more details.
    """
    def __init__(self, path, data, islink=False, isexec=False, copied=None):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if current file was copied in the
        revision being committed, or None."""
        self._path = path
        self._data = data
        self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
        self._copied = None
        if copied:
            self._copied = (copied, nullid)

    def __nonzero__(self):
        return True
    def __str__(self):
        # bug fix: memfilectx never assigns _changectx, so the original
        # "%s@%s" % (self.path(), self._changectx) always raised
        # AttributeError; fall back to a placeholder when it is unset.
        return "%s@%s" % (self.path(), getattr(self, '_changectx', 'memctx'))
    def path(self):
        return self._path
    def data(self):
        return self._data
    def flags(self):
        # 'l' for symlink, 'x' for executable, possibly both
        return self._flags
    def isexec(self):
        return 'x' in self._flags
    def islink(self):
        return 'l' in self._flags
    def renamed(self):
        # (source, nullid) tuple when this file is a copy, else None
        return self._copied
@@ -1,388 +1,388 b''
1 # copies.py - copy detection for Mercurial
1 # copies.py - copy detection for Mercurial
2 #
2 #
3 # Copyright 2008 Matt Mackall <mpm@selenic.com>
3 # Copyright 2008 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 import util
8 import util
9 import heapq
9 import heapq
10
10
11 def _nonoverlap(d1, d2, d3):
11 def _nonoverlap(d1, d2, d3):
12 "Return list of elements in d1 not in d2 or d3"
12 "Return list of elements in d1 not in d2 or d3"
13 return sorted([d for d in d1 if d not in d3 and d not in d2])
13 return sorted([d for d in d1 if d not in d3 and d not in d2])
14
14
15 def _dirname(f):
15 def _dirname(f):
16 s = f.rfind("/")
16 s = f.rfind("/")
17 if s == -1:
17 if s == -1:
18 return ""
18 return ""
19 return f[:s]
19 return f[:s]
20
20
def _findlimit(repo, a, b):
    """Find the earliest revision that's an ancestor of a or b but not both,
    None if no such revision exists.

    a and b are revision numbers; None stands for the working directory
    (represented internally by the pseudo-revision len(changelog)).
    """
    # basic idea:
    # - mark a and b with different sides
    # - if a parent's children are all on the same side, the parent is
    #   on that side, otherwise it is on no side
    # - walk the graph in topological order with the help of a heap;
    #   - add unseen parents to side map
    #   - clear side of any parent that has children on different sides
    #   - track number of interesting revs that might still be on a side
    #   - track the lowest interesting rev seen
    #   - quit when interesting revs is zero

    cl = repo.changelog
    working = len(cl) # pseudo rev for the working directory
    if a is None:
        a = working
    if b is None:
        b = working

    # side: rev -> -1 (ancestor of a), 1 (ancestor of b), 0 (both)
    side = {a: -1, b: 1}
    # heapq is a min-heap; store negated revs to pop highest rev first,
    # which visits the graph in reverse topological order
    visit = [-a, -b]
    heapq.heapify(visit)
    interesting = len(visit)
    hascommonancestor = False
    limit = working

    while interesting:
        r = -heapq.heappop(visit)
        if r == working:
            # the working directory's parents come from the dirstate
            parents = [cl.rev(p) for p in repo.dirstate.parents()]
        else:
            parents = cl.parentrevs(r)
        for p in parents:
            if p < 0:
                continue
            if p not in side:
                # first time we see p; add it to visit
                side[p] = side[r]
                if side[p]:
                    interesting += 1
                heapq.heappush(visit, -p)
            elif side[p] and side[p] != side[r]:
                # p was interesting but now we know better
                side[p] = 0
                interesting -= 1
                hascommonancestor = True
        if side[r]:
            limit = r # lowest rev visited
            interesting -= 1

    if not hascommonancestor:
        return None
    return limit
77
77
78 def _chain(src, dst, a, b):
78 def _chain(src, dst, a, b):
79 '''chain two sets of copies a->b'''
79 '''chain two sets of copies a->b'''
80 t = a.copy()
80 t = a.copy()
81 for k, v in b.iteritems():
81 for k, v in b.iteritems():
82 if v in t:
82 if v in t:
83 # found a chain
83 # found a chain
84 if t[v] != k:
84 if t[v] != k:
85 # file wasn't renamed back to itself
85 # file wasn't renamed back to itself
86 t[k] = t[v]
86 t[k] = t[v]
87 if v not in dst:
87 if v not in dst:
88 # chain was a rename, not a copy
88 # chain was a rename, not a copy
89 del t[v]
89 del t[v]
90 if v in src:
90 if v in src:
91 # file is a copy of an existing file
91 # file is a copy of an existing file
92 t[k] = v
92 t[k] = v
93
93
94 # remove criss-crossed copies
94 # remove criss-crossed copies
95 for k, v in t.items():
95 for k, v in t.items():
96 if k in src and v in dst:
96 if k in src and v in dst:
97 del t[k]
97 del t[k]
98
98
99 return t
99 return t
100
100
def _tracefile(fctx, actx):
    '''return file context that is the ancestor of fctx present in actx'''
    stoprev = actx.rev()
    manifest = actx.manifest()

    for ancestor in fctx.ancestors():
        # matching path+node in actx's manifest means we found it there
        if manifest.get(ancestor.path(), None) == ancestor.filenode():
            return ancestor
        if ancestor.rev() < stoprev:
            # walked past actx without a match; no such ancestor exists
            return None
111
111
112 def _dirstatecopies(d):
112 def _dirstatecopies(d):
113 ds = d._repo.dirstate
113 ds = d._repo.dirstate
114 c = ds.copies().copy()
114 c = ds.copies().copy()
115 for k in c.keys():
115 for k in c.keys():
116 if ds[k] not in 'anm':
116 if ds[k] not in 'anm':
117 del c[k]
117 del c[k]
118 return c
118 return c
119
119
def _forwardcopies(a, b):
    '''find {dst@b: src@a} copy mapping where a is an ancestor of b'''

    # check for working copy: a rev of None marks a workingctx; compare
    # against its first parent and fold the dirstate copies back in below
    w = None
    if b.rev() is None:
        w = b
        b = w.p1()
        if a == b:
            # short-circuit to avoid issues with merge states
            return _dirstatecopies(w)

    # find where new files came from
    # we currently don't try to find where old files went, too expensive
    # this means we can miss a case like 'hg rm b; hg cp a b'
    cm = {}
    # files present in b but not in a are the copy-destination candidates
    missing = set(b.manifest().iterkeys())
    missing.difference_update(a.manifest().iterkeys())

    for f in missing:
        ofctx = _tracefile(b[f], a)
        if ofctx:
            cm[f] = ofctx.path()

    # combine copies from dirstate if necessary
    if w is not None:
        cm = _chain(a, w, cm, _dirstatecopies(w))

    return cm
149
149
def _backwardrenames(a, b):
    """Find {src@b: dst@a} renames by inverting the forward copy map.

    Even though we're not taking copies into account, 1:n rename
    situations can still exist (e.g. hg cp a b; hg mv a c). In those
    cases we arbitrarily pick one of the renames; iterating in sorted
    order keeps the pick deterministic.
    """
    forward = _forwardcopies(b, a)
    inverted = {}
    for dst, src in sorted(forward.items()):
        if src in a:
            # the source still exists in a: a copy, not a rename
            continue
        inverted[src] = dst
    return inverted
162
162
def pathcopies(x, y):
    '''find {dst@y: src@x} copy mapping for directed compare'''
    # degenerate comparisons carry no copy information
    if x == y or not x or not y:
        return {}
    base = y.ancestor(x)
    if base == x:
        # y descends from x: only forward copies are possible
        return _forwardcopies(x, y)
    if base == y:
        # x descends from y: only backward renames are possible
        return _backwardrenames(x, y)
    # diverged: walk back from x to the common base, then forward to y
    backward = _backwardrenames(x, base)
    forward = _forwardcopies(base, y)
    return _chain(x, y, backward, forward)
173
173
174 def mergecopies(repo, c1, c2, ca):
174 def mergecopies(repo, c1, c2, ca):
175 """
175 """
176 Find moves and copies between context c1 and c2 that are relevant
176 Find moves and copies between context c1 and c2 that are relevant
177 for merging.
177 for merging.
178
178
179 Returns four dicts: "copy", "movewithdir", "diverge", and
179 Returns four dicts: "copy", "movewithdir", "diverge", and
180 "renamedelete".
180 "renamedelete".
181
181
182 "copy" is a mapping from destination name -> source name,
182 "copy" is a mapping from destination name -> source name,
183 where source is in c1 and destination is in c2 or vice-versa.
183 where source is in c1 and destination is in c2 or vice-versa.
184
184
185 "movewithdir" is a mapping from source name -> destination name,
185 "movewithdir" is a mapping from source name -> destination name,
186 where the file at source present in one context but not the other
186 where the file at source present in one context but not the other
187 needs to be moved to destination by the merge process, because the
187 needs to be moved to destination by the merge process, because the
188 other context moved the directory it is in.
188 other context moved the directory it is in.
189
189
190 "diverge" is a mapping of source name -> list of destination names
190 "diverge" is a mapping of source name -> list of destination names
191 for divergent renames.
191 for divergent renames.
192
192
193 "renamedelete" is a mapping of source name -> list of destination
193 "renamedelete" is a mapping of source name -> list of destination
194 names for files deleted in c1 that were renamed in c2 or vice-versa.
194 names for files deleted in c1 that were renamed in c2 or vice-versa.
195 """
195 """
196 # avoid silly behavior for update from empty dir
196 # avoid silly behavior for update from empty dir
197 if not c1 or not c2 or c1 == c2:
197 if not c1 or not c2 or c1 == c2:
198 return {}, {}, {}, {}
198 return {}, {}, {}, {}
199
199
200 # avoid silly behavior for parent -> working dir
200 # avoid silly behavior for parent -> working dir
201 if c2.node() is None and c1.node() == repo.dirstate.p1():
201 if c2.node() is None and c1.node() == repo.dirstate.p1():
202 return repo.dirstate.copies(), {}, {}, {}
202 return repo.dirstate.copies(), {}, {}, {}
203
203
204 limit = _findlimit(repo, c1.rev(), c2.rev())
204 limit = _findlimit(repo, c1.rev(), c2.rev())
205 if limit is None:
205 if limit is None:
206 # no common ancestor, no copies
206 # no common ancestor, no copies
207 return {}, {}, {}, {}
207 return {}, {}, {}, {}
208 m1 = c1.manifest()
208 m1 = c1.manifest()
209 m2 = c2.manifest()
209 m2 = c2.manifest()
210 ma = ca.manifest()
210 ma = ca.manifest()
211
211
212 def makectx(f, n):
212 def makectx(f, n):
213 if len(n) != 20: # in a working context?
213 if len(n) != 20: # in a working context?
214 if c1.rev() is None:
214 if c1.rev() is None:
215 return c1.filectx(f)
215 return c1.filectx(f)
216 return c2.filectx(f)
216 return c2.filectx(f)
217 return repo.filectx(f, fileid=n)
217 return repo.filectx(f, fileid=n)
218
218
219 ctx = util.lrucachefunc(makectx)
219 ctx = util.lrucachefunc(makectx)
220 copy = {}
220 copy = {}
221 movewithdir = {}
221 movewithdir = {}
222 fullcopy = {}
222 fullcopy = {}
223 diverge = {}
223 diverge = {}
224
224
225 def related(f1, f2, limit):
225 def related(f1, f2, limit):
226 # Walk back to common ancestor to see if the two files originate
226 # Walk back to common ancestor to see if the two files originate
227 # from the same file. Since workingfilectx's rev() is None it messes
227 # from the same file. Since workingfilectx's rev() is None it messes
228 # up the integer comparison logic, hence the pre-step check for
228 # up the integer comparison logic, hence the pre-step check for
229 # None (f1 and f2 can only be workingfilectx's initially).
229 # None (f1 and f2 can only be workingfilectx's initially).
230
230
231 if f1 == f2:
231 if f1 == f2:
232 return f1 # a match
232 return f1 # a match
233
233
234 g1, g2 = f1.ancestors(), f2.ancestors()
234 g1, g2 = f1.ancestors(), f2.ancestors()
235 try:
235 try:
236 f1r, f2r = f1.rev(), f2.rev()
236 f1r, f2r = f1.rev(), f2.rev()
237
237
238 if f1r is None:
238 if f1r is None:
239 f1 = g1.next()
239 f1 = g1.next()
240 if f2r is None:
240 if f2r is None:
241 f2 = g2.next()
241 f2 = g2.next()
242
242
243 while True:
243 while True:
244 f1r, f2r = f1.rev(), f2.rev()
244 f1r, f2r = f1.rev(), f2.rev()
245 if f1r > f2r:
245 if f1r > f2r:
246 f1 = g1.next()
246 f1 = g1.next()
247 elif f2r > f1r:
247 elif f2r > f1r:
248 f2 = g2.next()
248 f2 = g2.next()
249 elif f1 == f2:
249 elif f1 == f2:
250 return f1 # a match
250 return f1 # a match
251 elif f1r == f2r or f1r < limit or f2r < limit:
251 elif f1r == f2r or f1r < limit or f2r < limit:
252 return False # copy no longer relevant
252 return False # copy no longer relevant
253 except StopIteration:
253 except StopIteration:
254 return False
254 return False
255
255
256 def checkcopies(f, m1, m2):
256 def checkcopies(f, m1, m2):
257 '''check possible copies of f from m1 to m2'''
257 '''check possible copies of f from m1 to m2'''
258 of = None
258 of = None
259 seen = set([f])
259 seen = set([f])
260 for oc in ctx(f, m1[f]).ancestors():
260 for oc in ctx(f, m1[f]).ancestors():
261 ocr = oc.rev()
261 ocr = oc.rev()
262 of = oc.path()
262 of = oc.path()
263 if of in seen:
263 if of in seen:
264 # check limit late - grab last rename before
264 # check limit late - grab last rename before
265 if ocr < limit:
265 if ocr < limit:
266 break
266 break
267 continue
267 continue
268 seen.add(of)
268 seen.add(of)
269
269
270 fullcopy[f] = of # remember for dir rename detection
270 fullcopy[f] = of # remember for dir rename detection
271 if of not in m2:
271 if of not in m2:
272 continue # no match, keep looking
272 continue # no match, keep looking
273 if m2[of] == ma.get(of):
273 if m2[of] == ma.get(of):
274 break # no merge needed, quit early
274 break # no merge needed, quit early
275 c2 = ctx(of, m2[of])
275 c2 = ctx(of, m2[of])
276 cr = related(oc, c2, ca.rev())
276 cr = related(oc, c2, ca.rev())
277 if cr and (of == f or of == c2.path()): # non-divergent
277 if cr and (of == f or of == c2.path()): # non-divergent
278 copy[f] = of
278 copy[f] = of
279 of = None
279 of = None
280 break
280 break
281
281
282 if of in ma:
282 if of in ma:
283 diverge.setdefault(of, []).append(f)
283 diverge.setdefault(of, []).append(f)
284
284
285 repo.ui.debug(" searching for copies back to rev %d\n" % limit)
285 repo.ui.debug(" searching for copies back to rev %d\n" % limit)
286
286
287 u1 = _nonoverlap(m1, m2, ma)
287 u1 = _nonoverlap(m1, m2, ma)
288 u2 = _nonoverlap(m2, m1, ma)
288 u2 = _nonoverlap(m2, m1, ma)
289
289
290 if u1:
290 if u1:
291 repo.ui.debug(" unmatched files in local:\n %s\n"
291 repo.ui.debug(" unmatched files in local:\n %s\n"
292 % "\n ".join(u1))
292 % "\n ".join(u1))
293 if u2:
293 if u2:
294 repo.ui.debug(" unmatched files in other:\n %s\n"
294 repo.ui.debug(" unmatched files in other:\n %s\n"
295 % "\n ".join(u2))
295 % "\n ".join(u2))
296
296
297 for f in u1:
297 for f in u1:
298 checkcopies(f, m1, m2)
298 checkcopies(f, m1, m2)
299 for f in u2:
299 for f in u2:
300 checkcopies(f, m2, m1)
300 checkcopies(f, m2, m1)
301
301
302 renamedelete = {}
302 renamedelete = {}
303 renamedelete2 = set()
303 renamedelete2 = set()
304 diverge2 = set()
304 diverge2 = set()
305 for of, fl in diverge.items():
305 for of, fl in diverge.items():
306 if len(fl) == 1 or of in c1 or of in c2:
306 if len(fl) == 1 or of in c1 or of in c2:
307 del diverge[of] # not actually divergent, or not a rename
307 del diverge[of] # not actually divergent, or not a rename
308 if of not in c1 and of not in c2:
308 if of not in c1 and of not in c2:
309 # renamed on one side, deleted on the other side, but filter
309 # renamed on one side, deleted on the other side, but filter
310 # out files that have been renamed and then deleted
310 # out files that have been renamed and then deleted
311 renamedelete[of] = [f for f in fl if f in c1 or f in c2]
311 renamedelete[of] = [f for f in fl if f in c1 or f in c2]
312 renamedelete2.update(fl) # reverse map for below
312 renamedelete2.update(fl) # reverse map for below
313 else:
313 else:
314 diverge2.update(fl) # reverse map for below
314 diverge2.update(fl) # reverse map for below
315
315
316 if fullcopy:
316 if fullcopy:
317 repo.ui.debug(" all copies found (* = to merge, ! = divergent, "
317 repo.ui.debug(" all copies found (* = to merge, ! = divergent, "
318 "% = renamed and deleted):\n")
318 "% = renamed and deleted):\n")
319 for f in sorted(fullcopy):
319 for f in sorted(fullcopy):
320 note = ""
320 note = ""
321 if f in copy:
321 if f in copy:
322 note += "*"
322 note += "*"
323 if f in diverge2:
323 if f in diverge2:
324 note += "!"
324 note += "!"
325 if f in renamedelete2:
325 if f in renamedelete2:
326 note += "%"
326 note += "%"
327 repo.ui.debug(" src: '%s' -> dst: '%s' %s\n" % (fullcopy[f], f,
327 repo.ui.debug(" src: '%s' -> dst: '%s' %s\n" % (fullcopy[f], f,
328 note))
328 note))
329 del diverge2
329 del diverge2
330
330
331 if not fullcopy:
331 if not fullcopy:
332 return copy, movewithdir, diverge, renamedelete
332 return copy, movewithdir, diverge, renamedelete
333
333
334 repo.ui.debug(" checking for directory renames\n")
334 repo.ui.debug(" checking for directory renames\n")
335
335
336 # generate a directory move map
336 # generate a directory move map
337 d1, d2 = c1.dirs(), c2.dirs()
337 d1, d2 = c1.dirs(), c2.dirs()
338 d1.add('')
338 d1.addpath('/')
339 d2.add('')
339 d2.addpath('/')
340 invalid = set()
340 invalid = set()
341 dirmove = {}
341 dirmove = {}
342
342
343 # examine each file copy for a potential directory move, which is
343 # examine each file copy for a potential directory move, which is
344 # when all the files in a directory are moved to a new directory
344 # when all the files in a directory are moved to a new directory
345 for dst, src in fullcopy.iteritems():
345 for dst, src in fullcopy.iteritems():
346 dsrc, ddst = _dirname(src), _dirname(dst)
346 dsrc, ddst = _dirname(src), _dirname(dst)
347 if dsrc in invalid:
347 if dsrc in invalid:
348 # already seen to be uninteresting
348 # already seen to be uninteresting
349 continue
349 continue
350 elif dsrc in d1 and ddst in d1:
350 elif dsrc in d1 and ddst in d1:
351 # directory wasn't entirely moved locally
351 # directory wasn't entirely moved locally
352 invalid.add(dsrc)
352 invalid.add(dsrc)
353 elif dsrc in d2 and ddst in d2:
353 elif dsrc in d2 and ddst in d2:
354 # directory wasn't entirely moved remotely
354 # directory wasn't entirely moved remotely
355 invalid.add(dsrc)
355 invalid.add(dsrc)
356 elif dsrc in dirmove and dirmove[dsrc] != ddst:
356 elif dsrc in dirmove and dirmove[dsrc] != ddst:
357 # files from the same directory moved to two different places
357 # files from the same directory moved to two different places
358 invalid.add(dsrc)
358 invalid.add(dsrc)
359 else:
359 else:
360 # looks good so far
360 # looks good so far
361 dirmove[dsrc + "/"] = ddst + "/"
361 dirmove[dsrc + "/"] = ddst + "/"
362
362
363 for i in invalid:
363 for i in invalid:
364 if i in dirmove:
364 if i in dirmove:
365 del dirmove[i]
365 del dirmove[i]
366 del d1, d2, invalid
366 del d1, d2, invalid
367
367
368 if not dirmove:
368 if not dirmove:
369 return copy, movewithdir, diverge, renamedelete
369 return copy, movewithdir, diverge, renamedelete
370
370
371 for d in dirmove:
371 for d in dirmove:
372 repo.ui.debug(" discovered dir src: '%s' -> dst: '%s'\n" %
372 repo.ui.debug(" discovered dir src: '%s' -> dst: '%s'\n" %
373 (d, dirmove[d]))
373 (d, dirmove[d]))
374
374
375 # check unaccounted nonoverlapping files against directory moves
375 # check unaccounted nonoverlapping files against directory moves
376 for f in u1 + u2:
376 for f in u1 + u2:
377 if f not in fullcopy:
377 if f not in fullcopy:
378 for d in dirmove:
378 for d in dirmove:
379 if f.startswith(d):
379 if f.startswith(d):
380 # new file added in a directory that was moved, move it
380 # new file added in a directory that was moved, move it
381 df = dirmove[d] + f[len(d):]
381 df = dirmove[d] + f[len(d):]
382 if df not in copy:
382 if df not in copy:
383 movewithdir[f] = df
383 movewithdir[f] = df
384 repo.ui.debug((" pending file src: '%s' -> "
384 repo.ui.debug((" pending file src: '%s' -> "
385 "dst: '%s'\n") % (f, df))
385 "dst: '%s'\n") % (f, df))
386 break
386 break
387
387
388 return copy, movewithdir, diverge, renamedelete
388 return copy, movewithdir, diverge, renamedelete
@@ -1,832 +1,814 b''
1 # dirstate.py - working directory tracking for mercurial
1 # dirstate.py - working directory tracking for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 import errno
7 import errno
8
8
9 from node import nullid
9 from node import nullid
10 from i18n import _
10 from i18n import _
11 import scmutil, util, ignore, osutil, parsers, encoding
11 import scmutil, util, ignore, osutil, parsers, encoding
12 import os, stat, errno, gc
12 import os, stat, errno, gc
13
13
14 propertycache = util.propertycache
14 propertycache = util.propertycache
15 filecache = scmutil.filecache
15 filecache = scmutil.filecache
16 _rangemask = 0x7fffffff
16 _rangemask = 0x7fffffff
17
17
18 class repocache(filecache):
18 class repocache(filecache):
19 """filecache for files in .hg/"""
19 """filecache for files in .hg/"""
20 def join(self, obj, fname):
20 def join(self, obj, fname):
21 return obj._opener.join(fname)
21 return obj._opener.join(fname)
22
22
23 class rootcache(filecache):
23 class rootcache(filecache):
24 """filecache for files in the repository root"""
24 """filecache for files in the repository root"""
25 def join(self, obj, fname):
25 def join(self, obj, fname):
26 return obj._join(fname)
26 return obj._join(fname)
27
27
def _incdirs(dirs, path):
    # Add one reference for every ancestor directory of path to the
    # refcount map.  Once an ancestor is already present, all of its own
    # ancestors are necessarily present too, so we can stop early.
    for parent in scmutil.finddirs(path):
        if parent in dirs:
            dirs[parent] += 1
            return
        dirs[parent] = 1
def _decdirs(dirs, path):
    # Drop one reference for every ancestor directory of path; ancestors
    # whose count would hit zero are deleted outright.  As soon as one
    # ancestor retains a nonzero count, the counts further up are still
    # shared with other files and need no adjustment.
    for parent in scmutil.finddirs(path):
        count = dirs[parent]
        if count > 1:
            dirs[parent] = count - 1
            return
        del dirs[parent]
42 class dirstate(object):
28 class dirstate(object):
43
29
44 def __init__(self, opener, ui, root, validate):
30 def __init__(self, opener, ui, root, validate):
45 '''Create a new dirstate object.
31 '''Create a new dirstate object.
46
32
47 opener is an open()-like callable that can be used to open the
33 opener is an open()-like callable that can be used to open the
48 dirstate file; root is the root of the directory tracked by
34 dirstate file; root is the root of the directory tracked by
49 the dirstate.
35 the dirstate.
50 '''
36 '''
51 self._opener = opener
37 self._opener = opener
52 self._validate = validate
38 self._validate = validate
53 self._root = root
39 self._root = root
54 self._rootdir = os.path.join(root, '')
40 self._rootdir = os.path.join(root, '')
55 self._dirty = False
41 self._dirty = False
56 self._dirtypl = False
42 self._dirtypl = False
57 self._lastnormaltime = 0
43 self._lastnormaltime = 0
58 self._ui = ui
44 self._ui = ui
59 self._filecache = {}
45 self._filecache = {}
60
46
61 @propertycache
47 @propertycache
62 def _map(self):
48 def _map(self):
63 '''Return the dirstate contents as a map from filename to
49 '''Return the dirstate contents as a map from filename to
64 (state, mode, size, time).'''
50 (state, mode, size, time).'''
65 self._read()
51 self._read()
66 return self._map
52 return self._map
67
53
68 @propertycache
54 @propertycache
69 def _copymap(self):
55 def _copymap(self):
70 self._read()
56 self._read()
71 return self._copymap
57 return self._copymap
72
58
73 @propertycache
59 @propertycache
74 def _foldmap(self):
60 def _foldmap(self):
75 f = {}
61 f = {}
76 for name in self._map:
62 for name in self._map:
77 f[util.normcase(name)] = name
63 f[util.normcase(name)] = name
78 for name in self._dirs:
64 for name in self._dirs:
79 f[util.normcase(name)] = name
65 f[util.normcase(name)] = name
80 f['.'] = '.' # prevents useless util.fspath() invocation
66 f['.'] = '.' # prevents useless util.fspath() invocation
81 return f
67 return f
82
68
83 @repocache('branch')
69 @repocache('branch')
84 def _branch(self):
70 def _branch(self):
85 try:
71 try:
86 return self._opener.read("branch").strip() or "default"
72 return self._opener.read("branch").strip() or "default"
87 except IOError, inst:
73 except IOError, inst:
88 if inst.errno != errno.ENOENT:
74 if inst.errno != errno.ENOENT:
89 raise
75 raise
90 return "default"
76 return "default"
91
77
92 @propertycache
78 @propertycache
93 def _pl(self):
79 def _pl(self):
94 try:
80 try:
95 fp = self._opener("dirstate")
81 fp = self._opener("dirstate")
96 st = fp.read(40)
82 st = fp.read(40)
97 fp.close()
83 fp.close()
98 l = len(st)
84 l = len(st)
99 if l == 40:
85 if l == 40:
100 return st[:20], st[20:40]
86 return st[:20], st[20:40]
101 elif l > 0 and l < 40:
87 elif l > 0 and l < 40:
102 raise util.Abort(_('working directory state appears damaged!'))
88 raise util.Abort(_('working directory state appears damaged!'))
103 except IOError, err:
89 except IOError, err:
104 if err.errno != errno.ENOENT:
90 if err.errno != errno.ENOENT:
105 raise
91 raise
106 return [nullid, nullid]
92 return [nullid, nullid]
107
93
108 @propertycache
94 @propertycache
109 def _dirs(self):
95 def _dirs(self):
110 dirs = {}
96 return scmutil.dirs(self._map, 'r')
111 for f, s in self._map.iteritems():
112 if s[0] != 'r':
113 _incdirs(dirs, f)
114 return dirs
115
97
116 def dirs(self):
98 def dirs(self):
117 return self._dirs
99 return self._dirs
118
100
119 @rootcache('.hgignore')
101 @rootcache('.hgignore')
120 def _ignore(self):
102 def _ignore(self):
121 files = [self._join('.hgignore')]
103 files = [self._join('.hgignore')]
122 for name, path in self._ui.configitems("ui"):
104 for name, path in self._ui.configitems("ui"):
123 if name == 'ignore' or name.startswith('ignore.'):
105 if name == 'ignore' or name.startswith('ignore.'):
124 files.append(util.expandpath(path))
106 files.append(util.expandpath(path))
125 return ignore.ignore(self._root, files, self._ui.warn)
107 return ignore.ignore(self._root, files, self._ui.warn)
126
108
127 @propertycache
109 @propertycache
128 def _slash(self):
110 def _slash(self):
129 return self._ui.configbool('ui', 'slash') and os.sep != '/'
111 return self._ui.configbool('ui', 'slash') and os.sep != '/'
130
112
131 @propertycache
113 @propertycache
132 def _checklink(self):
114 def _checklink(self):
133 return util.checklink(self._root)
115 return util.checklink(self._root)
134
116
135 @propertycache
117 @propertycache
136 def _checkexec(self):
118 def _checkexec(self):
137 return util.checkexec(self._root)
119 return util.checkexec(self._root)
138
120
139 @propertycache
121 @propertycache
140 def _checkcase(self):
122 def _checkcase(self):
141 return not util.checkcase(self._join('.hg'))
123 return not util.checkcase(self._join('.hg'))
142
124
143 def _join(self, f):
125 def _join(self, f):
144 # much faster than os.path.join()
126 # much faster than os.path.join()
145 # it's safe because f is always a relative path
127 # it's safe because f is always a relative path
146 return self._rootdir + f
128 return self._rootdir + f
147
129
148 def flagfunc(self, buildfallback):
130 def flagfunc(self, buildfallback):
149 if self._checklink and self._checkexec:
131 if self._checklink and self._checkexec:
150 def f(x):
132 def f(x):
151 try:
133 try:
152 st = os.lstat(self._join(x))
134 st = os.lstat(self._join(x))
153 if util.statislink(st):
135 if util.statislink(st):
154 return 'l'
136 return 'l'
155 if util.statisexec(st):
137 if util.statisexec(st):
156 return 'x'
138 return 'x'
157 except OSError:
139 except OSError:
158 pass
140 pass
159 return ''
141 return ''
160 return f
142 return f
161
143
162 fallback = buildfallback()
144 fallback = buildfallback()
163 if self._checklink:
145 if self._checklink:
164 def f(x):
146 def f(x):
165 if os.path.islink(self._join(x)):
147 if os.path.islink(self._join(x)):
166 return 'l'
148 return 'l'
167 if 'x' in fallback(x):
149 if 'x' in fallback(x):
168 return 'x'
150 return 'x'
169 return ''
151 return ''
170 return f
152 return f
171 if self._checkexec:
153 if self._checkexec:
172 def f(x):
154 def f(x):
173 if 'l' in fallback(x):
155 if 'l' in fallback(x):
174 return 'l'
156 return 'l'
175 if util.isexec(self._join(x)):
157 if util.isexec(self._join(x)):
176 return 'x'
158 return 'x'
177 return ''
159 return ''
178 return f
160 return f
179 else:
161 else:
180 return fallback
162 return fallback
181
163
182 def getcwd(self):
164 def getcwd(self):
183 cwd = os.getcwd()
165 cwd = os.getcwd()
184 if cwd == self._root:
166 if cwd == self._root:
185 return ''
167 return ''
186 # self._root ends with a path separator if self._root is '/' or 'C:\'
168 # self._root ends with a path separator if self._root is '/' or 'C:\'
187 rootsep = self._root
169 rootsep = self._root
188 if not util.endswithsep(rootsep):
170 if not util.endswithsep(rootsep):
189 rootsep += os.sep
171 rootsep += os.sep
190 if cwd.startswith(rootsep):
172 if cwd.startswith(rootsep):
191 return cwd[len(rootsep):]
173 return cwd[len(rootsep):]
192 else:
174 else:
193 # we're outside the repo. return an absolute path.
175 # we're outside the repo. return an absolute path.
194 return cwd
176 return cwd
195
177
196 def pathto(self, f, cwd=None):
178 def pathto(self, f, cwd=None):
197 if cwd is None:
179 if cwd is None:
198 cwd = self.getcwd()
180 cwd = self.getcwd()
199 path = util.pathto(self._root, cwd, f)
181 path = util.pathto(self._root, cwd, f)
200 if self._slash:
182 if self._slash:
201 return util.normpath(path)
183 return util.normpath(path)
202 return path
184 return path
203
185
204 def __getitem__(self, key):
186 def __getitem__(self, key):
205 '''Return the current state of key (a filename) in the dirstate.
187 '''Return the current state of key (a filename) in the dirstate.
206
188
207 States are:
189 States are:
208 n normal
190 n normal
209 m needs merging
191 m needs merging
210 r marked for removal
192 r marked for removal
211 a marked for addition
193 a marked for addition
212 ? not tracked
194 ? not tracked
213 '''
195 '''
214 return self._map.get(key, ("?",))[0]
196 return self._map.get(key, ("?",))[0]
215
197
216 def __contains__(self, key):
198 def __contains__(self, key):
217 return key in self._map
199 return key in self._map
218
200
219 def __iter__(self):
201 def __iter__(self):
220 for x in sorted(self._map):
202 for x in sorted(self._map):
221 yield x
203 yield x
222
204
223 def iteritems(self):
205 def iteritems(self):
224 return self._map.iteritems()
206 return self._map.iteritems()
225
207
226 def parents(self):
208 def parents(self):
227 return [self._validate(p) for p in self._pl]
209 return [self._validate(p) for p in self._pl]
228
210
229 def p1(self):
211 def p1(self):
230 return self._validate(self._pl[0])
212 return self._validate(self._pl[0])
231
213
232 def p2(self):
214 def p2(self):
233 return self._validate(self._pl[1])
215 return self._validate(self._pl[1])
234
216
235 def branch(self):
217 def branch(self):
236 return encoding.tolocal(self._branch)
218 return encoding.tolocal(self._branch)
237
219
238 def setparents(self, p1, p2=nullid):
220 def setparents(self, p1, p2=nullid):
239 """Set dirstate parents to p1 and p2.
221 """Set dirstate parents to p1 and p2.
240
222
241 When moving from two parents to one, 'm' merged entries a
223 When moving from two parents to one, 'm' merged entries a
242 adjusted to normal and previous copy records discarded and
224 adjusted to normal and previous copy records discarded and
243 returned by the call.
225 returned by the call.
244
226
245 See localrepo.setparents()
227 See localrepo.setparents()
246 """
228 """
247 self._dirty = self._dirtypl = True
229 self._dirty = self._dirtypl = True
248 oldp2 = self._pl[1]
230 oldp2 = self._pl[1]
249 self._pl = p1, p2
231 self._pl = p1, p2
250 copies = {}
232 copies = {}
251 if oldp2 != nullid and p2 == nullid:
233 if oldp2 != nullid and p2 == nullid:
252 # Discard 'm' markers when moving away from a merge state
234 # Discard 'm' markers when moving away from a merge state
253 for f, s in self._map.iteritems():
235 for f, s in self._map.iteritems():
254 if s[0] == 'm':
236 if s[0] == 'm':
255 if f in self._copymap:
237 if f in self._copymap:
256 copies[f] = self._copymap[f]
238 copies[f] = self._copymap[f]
257 self.normallookup(f)
239 self.normallookup(f)
258 return copies
240 return copies
259
241
260 def setbranch(self, branch):
242 def setbranch(self, branch):
261 self._branch = encoding.fromlocal(branch)
243 self._branch = encoding.fromlocal(branch)
262 f = self._opener('branch', 'w', atomictemp=True)
244 f = self._opener('branch', 'w', atomictemp=True)
263 try:
245 try:
264 f.write(self._branch + '\n')
246 f.write(self._branch + '\n')
265 f.close()
247 f.close()
266
248
267 # make sure filecache has the correct stat info for _branch after
249 # make sure filecache has the correct stat info for _branch after
268 # replacing the underlying file
250 # replacing the underlying file
269 ce = self._filecache['_branch']
251 ce = self._filecache['_branch']
270 if ce:
252 if ce:
271 ce.refresh()
253 ce.refresh()
272 except: # re-raises
254 except: # re-raises
273 f.discard()
255 f.discard()
274 raise
256 raise
275
257
276 def _read(self):
258 def _read(self):
277 self._map = {}
259 self._map = {}
278 self._copymap = {}
260 self._copymap = {}
279 try:
261 try:
280 st = self._opener.read("dirstate")
262 st = self._opener.read("dirstate")
281 except IOError, err:
263 except IOError, err:
282 if err.errno != errno.ENOENT:
264 if err.errno != errno.ENOENT:
283 raise
265 raise
284 return
266 return
285 if not st:
267 if not st:
286 return
268 return
287
269
288 # Python's garbage collector triggers a GC each time a certain number
270 # Python's garbage collector triggers a GC each time a certain number
289 # of container objects (the number being defined by
271 # of container objects (the number being defined by
290 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
272 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
291 # for each file in the dirstate. The C version then immediately marks
273 # for each file in the dirstate. The C version then immediately marks
292 # them as not to be tracked by the collector. However, this has no
274 # them as not to be tracked by the collector. However, this has no
293 # effect on when GCs are triggered, only on what objects the GC looks
275 # effect on when GCs are triggered, only on what objects the GC looks
294 # into. This means that O(number of files) GCs are unavoidable.
276 # into. This means that O(number of files) GCs are unavoidable.
295 # Depending on when in the process's lifetime the dirstate is parsed,
277 # Depending on when in the process's lifetime the dirstate is parsed,
296 # this can get very expensive. As a workaround, disable GC while
278 # this can get very expensive. As a workaround, disable GC while
297 # parsing the dirstate.
279 # parsing the dirstate.
298 gcenabled = gc.isenabled()
280 gcenabled = gc.isenabled()
299 gc.disable()
281 gc.disable()
300 try:
282 try:
301 p = parsers.parse_dirstate(self._map, self._copymap, st)
283 p = parsers.parse_dirstate(self._map, self._copymap, st)
302 finally:
284 finally:
303 if gcenabled:
285 if gcenabled:
304 gc.enable()
286 gc.enable()
305 if not self._dirtypl:
287 if not self._dirtypl:
306 self._pl = p
288 self._pl = p
307
289
308 def invalidate(self):
290 def invalidate(self):
309 for a in ("_map", "_copymap", "_foldmap", "_branch", "_pl", "_dirs",
291 for a in ("_map", "_copymap", "_foldmap", "_branch", "_pl", "_dirs",
310 "_ignore"):
292 "_ignore"):
311 if a in self.__dict__:
293 if a in self.__dict__:
312 delattr(self, a)
294 delattr(self, a)
313 self._lastnormaltime = 0
295 self._lastnormaltime = 0
314 self._dirty = False
296 self._dirty = False
315
297
316 def copy(self, source, dest):
298 def copy(self, source, dest):
317 """Mark dest as a copy of source. Unmark dest if source is None."""
299 """Mark dest as a copy of source. Unmark dest if source is None."""
318 if source == dest:
300 if source == dest:
319 return
301 return
320 self._dirty = True
302 self._dirty = True
321 if source is not None:
303 if source is not None:
322 self._copymap[dest] = source
304 self._copymap[dest] = source
323 elif dest in self._copymap:
305 elif dest in self._copymap:
324 del self._copymap[dest]
306 del self._copymap[dest]
325
307
326 def copied(self, file):
308 def copied(self, file):
327 return self._copymap.get(file, None)
309 return self._copymap.get(file, None)
328
310
329 def copies(self):
311 def copies(self):
330 return self._copymap
312 return self._copymap
331
313
332 def _droppath(self, f):
314 def _droppath(self, f):
333 if self[f] not in "?r" and "_dirs" in self.__dict__:
315 if self[f] not in "?r" and "_dirs" in self.__dict__:
334 _decdirs(self._dirs, f)
316 self._dirs.delpath(f)
335
317
336 def _addpath(self, f, state, mode, size, mtime):
318 def _addpath(self, f, state, mode, size, mtime):
337 oldstate = self[f]
319 oldstate = self[f]
338 if state == 'a' or oldstate == 'r':
320 if state == 'a' or oldstate == 'r':
339 scmutil.checkfilename(f)
321 scmutil.checkfilename(f)
340 if f in self._dirs:
322 if f in self._dirs:
341 raise util.Abort(_('directory %r already in dirstate') % f)
323 raise util.Abort(_('directory %r already in dirstate') % f)
342 # shadows
324 # shadows
343 for d in scmutil.finddirs(f):
325 for d in scmutil.finddirs(f):
344 if d in self._dirs:
326 if d in self._dirs:
345 break
327 break
346 if d in self._map and self[d] != 'r':
328 if d in self._map and self[d] != 'r':
347 raise util.Abort(
329 raise util.Abort(
348 _('file %r in dirstate clashes with %r') % (d, f))
330 _('file %r in dirstate clashes with %r') % (d, f))
349 if oldstate in "?r" and "_dirs" in self.__dict__:
331 if oldstate in "?r" and "_dirs" in self.__dict__:
350 _incdirs(self._dirs, f)
332 self._dirs.addpath(f)
351 self._dirty = True
333 self._dirty = True
352 self._map[f] = (state, mode, size, mtime)
334 self._map[f] = (state, mode, size, mtime)
353
335
354 def normal(self, f):
336 def normal(self, f):
355 '''Mark a file normal and clean.'''
337 '''Mark a file normal and clean.'''
356 s = os.lstat(self._join(f))
338 s = os.lstat(self._join(f))
357 mtime = int(s.st_mtime)
339 mtime = int(s.st_mtime)
358 self._addpath(f, 'n', s.st_mode,
340 self._addpath(f, 'n', s.st_mode,
359 s.st_size & _rangemask, mtime & _rangemask)
341 s.st_size & _rangemask, mtime & _rangemask)
360 if f in self._copymap:
342 if f in self._copymap:
361 del self._copymap[f]
343 del self._copymap[f]
362 if mtime > self._lastnormaltime:
344 if mtime > self._lastnormaltime:
363 # Remember the most recent modification timeslot for status(),
345 # Remember the most recent modification timeslot for status(),
364 # to make sure we won't miss future size-preserving file content
346 # to make sure we won't miss future size-preserving file content
365 # modifications that happen within the same timeslot.
347 # modifications that happen within the same timeslot.
366 self._lastnormaltime = mtime
348 self._lastnormaltime = mtime
367
349
368 def normallookup(self, f):
350 def normallookup(self, f):
369 '''Mark a file normal, but possibly dirty.'''
351 '''Mark a file normal, but possibly dirty.'''
370 if self._pl[1] != nullid and f in self._map:
352 if self._pl[1] != nullid and f in self._map:
371 # if there is a merge going on and the file was either
353 # if there is a merge going on and the file was either
372 # in state 'm' (-1) or coming from other parent (-2) before
354 # in state 'm' (-1) or coming from other parent (-2) before
373 # being removed, restore that state.
355 # being removed, restore that state.
374 entry = self._map[f]
356 entry = self._map[f]
375 if entry[0] == 'r' and entry[2] in (-1, -2):
357 if entry[0] == 'r' and entry[2] in (-1, -2):
376 source = self._copymap.get(f)
358 source = self._copymap.get(f)
377 if entry[2] == -1:
359 if entry[2] == -1:
378 self.merge(f)
360 self.merge(f)
379 elif entry[2] == -2:
361 elif entry[2] == -2:
380 self.otherparent(f)
362 self.otherparent(f)
381 if source:
363 if source:
382 self.copy(source, f)
364 self.copy(source, f)
383 return
365 return
384 if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
366 if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
385 return
367 return
386 self._addpath(f, 'n', 0, -1, -1)
368 self._addpath(f, 'n', 0, -1, -1)
387 if f in self._copymap:
369 if f in self._copymap:
388 del self._copymap[f]
370 del self._copymap[f]
389
371
390 def otherparent(self, f):
372 def otherparent(self, f):
391 '''Mark as coming from the other parent, always dirty.'''
373 '''Mark as coming from the other parent, always dirty.'''
392 if self._pl[1] == nullid:
374 if self._pl[1] == nullid:
393 raise util.Abort(_("setting %r to other parent "
375 raise util.Abort(_("setting %r to other parent "
394 "only allowed in merges") % f)
376 "only allowed in merges") % f)
395 self._addpath(f, 'n', 0, -2, -1)
377 self._addpath(f, 'n', 0, -2, -1)
396 if f in self._copymap:
378 if f in self._copymap:
397 del self._copymap[f]
379 del self._copymap[f]
398
380
399 def add(self, f):
381 def add(self, f):
400 '''Mark a file added.'''
382 '''Mark a file added.'''
401 self._addpath(f, 'a', 0, -1, -1)
383 self._addpath(f, 'a', 0, -1, -1)
402 if f in self._copymap:
384 if f in self._copymap:
403 del self._copymap[f]
385 del self._copymap[f]
404
386
405 def remove(self, f):
387 def remove(self, f):
406 '''Mark a file removed.'''
388 '''Mark a file removed.'''
407 self._dirty = True
389 self._dirty = True
408 self._droppath(f)
390 self._droppath(f)
409 size = 0
391 size = 0
410 if self._pl[1] != nullid and f in self._map:
392 if self._pl[1] != nullid and f in self._map:
411 # backup the previous state
393 # backup the previous state
412 entry = self._map[f]
394 entry = self._map[f]
413 if entry[0] == 'm': # merge
395 if entry[0] == 'm': # merge
414 size = -1
396 size = -1
415 elif entry[0] == 'n' and entry[2] == -2: # other parent
397 elif entry[0] == 'n' and entry[2] == -2: # other parent
416 size = -2
398 size = -2
417 self._map[f] = ('r', 0, size, 0)
399 self._map[f] = ('r', 0, size, 0)
418 if size == 0 and f in self._copymap:
400 if size == 0 and f in self._copymap:
419 del self._copymap[f]
401 del self._copymap[f]
420
402
421 def merge(self, f):
403 def merge(self, f):
422 '''Mark a file merged.'''
404 '''Mark a file merged.'''
423 if self._pl[1] == nullid:
405 if self._pl[1] == nullid:
424 return self.normallookup(f)
406 return self.normallookup(f)
425 s = os.lstat(self._join(f))
407 s = os.lstat(self._join(f))
426 self._addpath(f, 'm', s.st_mode,
408 self._addpath(f, 'm', s.st_mode,
427 s.st_size & _rangemask, int(s.st_mtime) & _rangemask)
409 s.st_size & _rangemask, int(s.st_mtime) & _rangemask)
428 if f in self._copymap:
410 if f in self._copymap:
429 del self._copymap[f]
411 del self._copymap[f]
430
412
431 def drop(self, f):
413 def drop(self, f):
432 '''Drop a file from the dirstate'''
414 '''Drop a file from the dirstate'''
433 if f in self._map:
415 if f in self._map:
434 self._dirty = True
416 self._dirty = True
435 self._droppath(f)
417 self._droppath(f)
436 del self._map[f]
418 del self._map[f]
437
419
438 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
420 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
439 normed = util.normcase(path)
421 normed = util.normcase(path)
440 folded = self._foldmap.get(normed, None)
422 folded = self._foldmap.get(normed, None)
441 if folded is None:
423 if folded is None:
442 if isknown:
424 if isknown:
443 folded = path
425 folded = path
444 else:
426 else:
445 if exists is None:
427 if exists is None:
446 exists = os.path.lexists(os.path.join(self._root, path))
428 exists = os.path.lexists(os.path.join(self._root, path))
447 if not exists:
429 if not exists:
448 # Maybe a path component exists
430 # Maybe a path component exists
449 if not ignoremissing and '/' in path:
431 if not ignoremissing and '/' in path:
450 d, f = path.rsplit('/', 1)
432 d, f = path.rsplit('/', 1)
451 d = self._normalize(d, isknown, ignoremissing, None)
433 d = self._normalize(d, isknown, ignoremissing, None)
452 folded = d + "/" + f
434 folded = d + "/" + f
453 else:
435 else:
454 # No path components, preserve original case
436 # No path components, preserve original case
455 folded = path
437 folded = path
456 else:
438 else:
457 # recursively normalize leading directory components
439 # recursively normalize leading directory components
458 # against dirstate
440 # against dirstate
459 if '/' in normed:
441 if '/' in normed:
460 d, f = normed.rsplit('/', 1)
442 d, f = normed.rsplit('/', 1)
461 d = self._normalize(d, isknown, ignoremissing, True)
443 d = self._normalize(d, isknown, ignoremissing, True)
462 r = self._root + "/" + d
444 r = self._root + "/" + d
463 folded = d + "/" + util.fspath(f, r)
445 folded = d + "/" + util.fspath(f, r)
464 else:
446 else:
465 folded = util.fspath(normed, self._root)
447 folded = util.fspath(normed, self._root)
466 self._foldmap[normed] = folded
448 self._foldmap[normed] = folded
467
449
468 return folded
450 return folded
469
451
470 def normalize(self, path, isknown=False, ignoremissing=False):
452 def normalize(self, path, isknown=False, ignoremissing=False):
471 '''
453 '''
472 normalize the case of a pathname when on a casefolding filesystem
454 normalize the case of a pathname when on a casefolding filesystem
473
455
474 isknown specifies whether the filename came from walking the
456 isknown specifies whether the filename came from walking the
475 disk, to avoid extra filesystem access.
457 disk, to avoid extra filesystem access.
476
458
477 If ignoremissing is True, missing path are returned
459 If ignoremissing is True, missing path are returned
478 unchanged. Otherwise, we try harder to normalize possibly
460 unchanged. Otherwise, we try harder to normalize possibly
479 existing path components.
461 existing path components.
480
462
481 The normalized case is determined based on the following precedence:
463 The normalized case is determined based on the following precedence:
482
464
483 - version of name already stored in the dirstate
465 - version of name already stored in the dirstate
484 - version of name stored on disk
466 - version of name stored on disk
485 - version provided via command arguments
467 - version provided via command arguments
486 '''
468 '''
487
469
488 if self._checkcase:
470 if self._checkcase:
489 return self._normalize(path, isknown, ignoremissing)
471 return self._normalize(path, isknown, ignoremissing)
490 return path
472 return path
491
473
492 def clear(self):
474 def clear(self):
493 self._map = {}
475 self._map = {}
494 if "_dirs" in self.__dict__:
476 if "_dirs" in self.__dict__:
495 delattr(self, "_dirs")
477 delattr(self, "_dirs")
496 self._copymap = {}
478 self._copymap = {}
497 self._pl = [nullid, nullid]
479 self._pl = [nullid, nullid]
498 self._lastnormaltime = 0
480 self._lastnormaltime = 0
499 self._dirty = True
481 self._dirty = True
500
482
501 def rebuild(self, parent, allfiles, changedfiles=None):
483 def rebuild(self, parent, allfiles, changedfiles=None):
502 changedfiles = changedfiles or allfiles
484 changedfiles = changedfiles or allfiles
503 oldmap = self._map
485 oldmap = self._map
504 self.clear()
486 self.clear()
505 for f in allfiles:
487 for f in allfiles:
506 if f not in changedfiles:
488 if f not in changedfiles:
507 self._map[f] = oldmap[f]
489 self._map[f] = oldmap[f]
508 else:
490 else:
509 if 'x' in allfiles.flags(f):
491 if 'x' in allfiles.flags(f):
510 self._map[f] = ('n', 0777, -1, 0)
492 self._map[f] = ('n', 0777, -1, 0)
511 else:
493 else:
512 self._map[f] = ('n', 0666, -1, 0)
494 self._map[f] = ('n', 0666, -1, 0)
513 self._pl = (parent, nullid)
495 self._pl = (parent, nullid)
514 self._dirty = True
496 self._dirty = True
515
497
516 def write(self):
498 def write(self):
517 if not self._dirty:
499 if not self._dirty:
518 return
500 return
519 st = self._opener("dirstate", "w", atomictemp=True)
501 st = self._opener("dirstate", "w", atomictemp=True)
520
502
521 def finish(s):
503 def finish(s):
522 st.write(s)
504 st.write(s)
523 st.close()
505 st.close()
524 self._lastnormaltime = 0
506 self._lastnormaltime = 0
525 self._dirty = self._dirtypl = False
507 self._dirty = self._dirtypl = False
526
508
527 # use the modification time of the newly created temporary file as the
509 # use the modification time of the newly created temporary file as the
528 # filesystem's notion of 'now'
510 # filesystem's notion of 'now'
529 now = util.fstat(st).st_mtime
511 now = util.fstat(st).st_mtime
530 finish(parsers.pack_dirstate(self._map, self._copymap, self._pl, now))
512 finish(parsers.pack_dirstate(self._map, self._copymap, self._pl, now))
531
513
532 def _dirignore(self, f):
514 def _dirignore(self, f):
533 if f == '.':
515 if f == '.':
534 return False
516 return False
535 if self._ignore(f):
517 if self._ignore(f):
536 return True
518 return True
537 for p in scmutil.finddirs(f):
519 for p in scmutil.finddirs(f):
538 if self._ignore(p):
520 if self._ignore(p):
539 return True
521 return True
540 return False
522 return False
541
523
542 def walk(self, match, subrepos, unknown, ignored):
524 def walk(self, match, subrepos, unknown, ignored):
543 '''
525 '''
544 Walk recursively through the directory tree, finding all files
526 Walk recursively through the directory tree, finding all files
545 matched by match.
527 matched by match.
546
528
547 Return a dict mapping filename to stat-like object (either
529 Return a dict mapping filename to stat-like object (either
548 mercurial.osutil.stat instance or return value of os.stat()).
530 mercurial.osutil.stat instance or return value of os.stat()).
549 '''
531 '''
550
532
551 def fwarn(f, msg):
533 def fwarn(f, msg):
552 self._ui.warn('%s: %s\n' % (self.pathto(f), msg))
534 self._ui.warn('%s: %s\n' % (self.pathto(f), msg))
553 return False
535 return False
554
536
555 def badtype(mode):
537 def badtype(mode):
556 kind = _('unknown')
538 kind = _('unknown')
557 if stat.S_ISCHR(mode):
539 if stat.S_ISCHR(mode):
558 kind = _('character device')
540 kind = _('character device')
559 elif stat.S_ISBLK(mode):
541 elif stat.S_ISBLK(mode):
560 kind = _('block device')
542 kind = _('block device')
561 elif stat.S_ISFIFO(mode):
543 elif stat.S_ISFIFO(mode):
562 kind = _('fifo')
544 kind = _('fifo')
563 elif stat.S_ISSOCK(mode):
545 elif stat.S_ISSOCK(mode):
564 kind = _('socket')
546 kind = _('socket')
565 elif stat.S_ISDIR(mode):
547 elif stat.S_ISDIR(mode):
566 kind = _('directory')
548 kind = _('directory')
567 return _('unsupported file type (type is %s)') % kind
549 return _('unsupported file type (type is %s)') % kind
568
550
569 ignore = self._ignore
551 ignore = self._ignore
570 dirignore = self._dirignore
552 dirignore = self._dirignore
571 if ignored:
553 if ignored:
572 ignore = util.never
554 ignore = util.never
573 dirignore = util.never
555 dirignore = util.never
574 elif not unknown:
556 elif not unknown:
575 # if unknown and ignored are False, skip step 2
557 # if unknown and ignored are False, skip step 2
576 ignore = util.always
558 ignore = util.always
577 dirignore = util.always
559 dirignore = util.always
578
560
579 matchfn = match.matchfn
561 matchfn = match.matchfn
580 matchalways = match.always()
562 matchalways = match.always()
581 badfn = match.bad
563 badfn = match.bad
582 dmap = self._map
564 dmap = self._map
583 normpath = util.normpath
565 normpath = util.normpath
584 listdir = osutil.listdir
566 listdir = osutil.listdir
585 lstat = os.lstat
567 lstat = os.lstat
586 getkind = stat.S_IFMT
568 getkind = stat.S_IFMT
587 dirkind = stat.S_IFDIR
569 dirkind = stat.S_IFDIR
588 regkind = stat.S_IFREG
570 regkind = stat.S_IFREG
589 lnkkind = stat.S_IFLNK
571 lnkkind = stat.S_IFLNK
590 join = self._join
572 join = self._join
591 work = []
573 work = []
592 wadd = work.append
574 wadd = work.append
593
575
594 exact = skipstep3 = False
576 exact = skipstep3 = False
595 if matchfn == match.exact: # match.exact
577 if matchfn == match.exact: # match.exact
596 exact = True
578 exact = True
597 dirignore = util.always # skip step 2
579 dirignore = util.always # skip step 2
598 elif match.files() and not match.anypats(): # match.match, no patterns
580 elif match.files() and not match.anypats(): # match.match, no patterns
599 skipstep3 = True
581 skipstep3 = True
600
582
601 if not exact and self._checkcase:
583 if not exact and self._checkcase:
602 normalize = self._normalize
584 normalize = self._normalize
603 skipstep3 = False
585 skipstep3 = False
604 else:
586 else:
605 normalize = None
587 normalize = None
606
588
607 files = sorted(match.files())
589 files = sorted(match.files())
608 subrepos.sort()
590 subrepos.sort()
609 i, j = 0, 0
591 i, j = 0, 0
610 while i < len(files) and j < len(subrepos):
592 while i < len(files) and j < len(subrepos):
611 subpath = subrepos[j] + "/"
593 subpath = subrepos[j] + "/"
612 if files[i] < subpath:
594 if files[i] < subpath:
613 i += 1
595 i += 1
614 continue
596 continue
615 while i < len(files) and files[i].startswith(subpath):
597 while i < len(files) and files[i].startswith(subpath):
616 del files[i]
598 del files[i]
617 j += 1
599 j += 1
618
600
619 if not files or '.' in files:
601 if not files or '.' in files:
620 files = ['']
602 files = ['']
621 results = dict.fromkeys(subrepos)
603 results = dict.fromkeys(subrepos)
622 results['.hg'] = None
604 results['.hg'] = None
623
605
624 # step 1: find all explicit files
606 # step 1: find all explicit files
625 for ff in files:
607 for ff in files:
626 if normalize:
608 if normalize:
627 nf = normalize(normpath(ff), False, True)
609 nf = normalize(normpath(ff), False, True)
628 else:
610 else:
629 nf = normpath(ff)
611 nf = normpath(ff)
630 if nf in results:
612 if nf in results:
631 continue
613 continue
632
614
633 try:
615 try:
634 st = lstat(join(nf))
616 st = lstat(join(nf))
635 kind = getkind(st.st_mode)
617 kind = getkind(st.st_mode)
636 if kind == dirkind:
618 if kind == dirkind:
637 skipstep3 = False
619 skipstep3 = False
638 if nf in dmap:
620 if nf in dmap:
639 #file deleted on disk but still in dirstate
621 #file deleted on disk but still in dirstate
640 results[nf] = None
622 results[nf] = None
641 match.dir(nf)
623 match.dir(nf)
642 if not dirignore(nf):
624 if not dirignore(nf):
643 wadd(nf)
625 wadd(nf)
644 elif kind == regkind or kind == lnkkind:
626 elif kind == regkind or kind == lnkkind:
645 results[nf] = st
627 results[nf] = st
646 else:
628 else:
647 badfn(ff, badtype(kind))
629 badfn(ff, badtype(kind))
648 if nf in dmap:
630 if nf in dmap:
649 results[nf] = None
631 results[nf] = None
650 except OSError, inst:
632 except OSError, inst:
651 if nf in dmap: # does it exactly match a file?
633 if nf in dmap: # does it exactly match a file?
652 results[nf] = None
634 results[nf] = None
653 else: # does it match a directory?
635 else: # does it match a directory?
654 prefix = nf + "/"
636 prefix = nf + "/"
655 for fn in dmap:
637 for fn in dmap:
656 if fn.startswith(prefix):
638 if fn.startswith(prefix):
657 match.dir(nf)
639 match.dir(nf)
658 skipstep3 = False
640 skipstep3 = False
659 break
641 break
660 else:
642 else:
661 badfn(ff, inst.strerror)
643 badfn(ff, inst.strerror)
662
644
663 # step 2: visit subdirectories
645 # step 2: visit subdirectories
664 while work:
646 while work:
665 nd = work.pop()
647 nd = work.pop()
666 skip = None
648 skip = None
667 if nd == '.':
649 if nd == '.':
668 nd = ''
650 nd = ''
669 else:
651 else:
670 skip = '.hg'
652 skip = '.hg'
671 try:
653 try:
672 entries = listdir(join(nd), stat=True, skip=skip)
654 entries = listdir(join(nd), stat=True, skip=skip)
673 except OSError, inst:
655 except OSError, inst:
674 if inst.errno in (errno.EACCES, errno.ENOENT):
656 if inst.errno in (errno.EACCES, errno.ENOENT):
675 fwarn(nd, inst.strerror)
657 fwarn(nd, inst.strerror)
676 continue
658 continue
677 raise
659 raise
678 for f, kind, st in entries:
660 for f, kind, st in entries:
679 if normalize:
661 if normalize:
680 nf = normalize(nd and (nd + "/" + f) or f, True, True)
662 nf = normalize(nd and (nd + "/" + f) or f, True, True)
681 else:
663 else:
682 nf = nd and (nd + "/" + f) or f
664 nf = nd and (nd + "/" + f) or f
683 if nf not in results:
665 if nf not in results:
684 if kind == dirkind:
666 if kind == dirkind:
685 if not ignore(nf):
667 if not ignore(nf):
686 match.dir(nf)
668 match.dir(nf)
687 wadd(nf)
669 wadd(nf)
688 if nf in dmap and (matchalways or matchfn(nf)):
670 if nf in dmap and (matchalways or matchfn(nf)):
689 results[nf] = None
671 results[nf] = None
690 elif kind == regkind or kind == lnkkind:
672 elif kind == regkind or kind == lnkkind:
691 if nf in dmap:
673 if nf in dmap:
692 if matchalways or matchfn(nf):
674 if matchalways or matchfn(nf):
693 results[nf] = st
675 results[nf] = st
694 elif (matchalways or matchfn(nf)) and not ignore(nf):
676 elif (matchalways or matchfn(nf)) and not ignore(nf):
695 results[nf] = st
677 results[nf] = st
696 elif nf in dmap and (matchalways or matchfn(nf)):
678 elif nf in dmap and (matchalways or matchfn(nf)):
697 results[nf] = None
679 results[nf] = None
698
680
699 for s in subrepos:
681 for s in subrepos:
700 del results[s]
682 del results[s]
701 del results['.hg']
683 del results['.hg']
702
684
703 # step 3: report unseen items in the dmap hash
685 # step 3: report unseen items in the dmap hash
704 if not skipstep3 and not exact:
686 if not skipstep3 and not exact:
705 if not results and matchalways:
687 if not results and matchalways:
706 visit = dmap.keys()
688 visit = dmap.keys()
707 else:
689 else:
708 visit = [f for f in dmap if f not in results and matchfn(f)]
690 visit = [f for f in dmap if f not in results and matchfn(f)]
709 visit.sort()
691 visit.sort()
710
692
711 if unknown:
693 if unknown:
712 # unknown == True means we walked the full directory tree above.
694 # unknown == True means we walked the full directory tree above.
713 # So if a file is not seen it was either a) not matching matchfn
695 # So if a file is not seen it was either a) not matching matchfn
714 # b) ignored, c) missing, or d) under a symlink directory.
696 # b) ignored, c) missing, or d) under a symlink directory.
715 audit_path = scmutil.pathauditor(self._root)
697 audit_path = scmutil.pathauditor(self._root)
716
698
717 for nf in iter(visit):
699 for nf in iter(visit):
718 # Report ignored items in the dmap as long as they are not
700 # Report ignored items in the dmap as long as they are not
719 # under a symlink directory.
701 # under a symlink directory.
720 if ignore(nf) and audit_path.check(nf):
702 if ignore(nf) and audit_path.check(nf):
721 try:
703 try:
722 results[nf] = lstat(join(nf))
704 results[nf] = lstat(join(nf))
723 except OSError:
705 except OSError:
724 # file doesn't exist
706 # file doesn't exist
725 results[nf] = None
707 results[nf] = None
726 else:
708 else:
727 # It's either missing or under a symlink directory
709 # It's either missing or under a symlink directory
728 results[nf] = None
710 results[nf] = None
729 else:
711 else:
730 # We may not have walked the full directory tree above,
712 # We may not have walked the full directory tree above,
731 # so stat everything we missed.
713 # so stat everything we missed.
732 nf = iter(visit).next
714 nf = iter(visit).next
733 for st in util.statfiles([join(i) for i in visit]):
715 for st in util.statfiles([join(i) for i in visit]):
734 results[nf()] = st
716 results[nf()] = st
735 return results
717 return results
736
718
737 def status(self, match, subrepos, ignored, clean, unknown):
719 def status(self, match, subrepos, ignored, clean, unknown):
738 '''Determine the status of the working copy relative to the
720 '''Determine the status of the working copy relative to the
739 dirstate and return a tuple of lists (unsure, modified, added,
721 dirstate and return a tuple of lists (unsure, modified, added,
740 removed, deleted, unknown, ignored, clean), where:
722 removed, deleted, unknown, ignored, clean), where:
741
723
742 unsure:
724 unsure:
743 files that might have been modified since the dirstate was
725 files that might have been modified since the dirstate was
744 written, but need to be read to be sure (size is the same
726 written, but need to be read to be sure (size is the same
745 but mtime differs)
727 but mtime differs)
746 modified:
728 modified:
747 files that have definitely been modified since the dirstate
729 files that have definitely been modified since the dirstate
748 was written (different size or mode)
730 was written (different size or mode)
749 added:
731 added:
750 files that have been explicitly added with hg add
732 files that have been explicitly added with hg add
751 removed:
733 removed:
752 files that have been explicitly removed with hg remove
734 files that have been explicitly removed with hg remove
753 deleted:
735 deleted:
754 files that have been deleted through other means ("missing")
736 files that have been deleted through other means ("missing")
755 unknown:
737 unknown:
756 files not in the dirstate that are not ignored
738 files not in the dirstate that are not ignored
757 ignored:
739 ignored:
758 files not in the dirstate that are ignored
740 files not in the dirstate that are ignored
759 (by _dirignore())
741 (by _dirignore())
760 clean:
742 clean:
761 files that have definitely not been modified since the
743 files that have definitely not been modified since the
762 dirstate was written
744 dirstate was written
763 '''
745 '''
764 listignored, listclean, listunknown = ignored, clean, unknown
746 listignored, listclean, listunknown = ignored, clean, unknown
765 lookup, modified, added, unknown, ignored = [], [], [], [], []
747 lookup, modified, added, unknown, ignored = [], [], [], [], []
766 removed, deleted, clean = [], [], []
748 removed, deleted, clean = [], [], []
767
749
768 dmap = self._map
750 dmap = self._map
769 ladd = lookup.append # aka "unsure"
751 ladd = lookup.append # aka "unsure"
770 madd = modified.append
752 madd = modified.append
771 aadd = added.append
753 aadd = added.append
772 uadd = unknown.append
754 uadd = unknown.append
773 iadd = ignored.append
755 iadd = ignored.append
774 radd = removed.append
756 radd = removed.append
775 dadd = deleted.append
757 dadd = deleted.append
776 cadd = clean.append
758 cadd = clean.append
777 mexact = match.exact
759 mexact = match.exact
778 dirignore = self._dirignore
760 dirignore = self._dirignore
779 checkexec = self._checkexec
761 checkexec = self._checkexec
780 checklink = self._checklink
762 checklink = self._checklink
781 copymap = self._copymap
763 copymap = self._copymap
782 lastnormaltime = self._lastnormaltime
764 lastnormaltime = self._lastnormaltime
783
765
784 lnkkind = stat.S_IFLNK
766 lnkkind = stat.S_IFLNK
785
767
786 for fn, st in self.walk(match, subrepos, listunknown,
768 for fn, st in self.walk(match, subrepos, listunknown,
787 listignored).iteritems():
769 listignored).iteritems():
788 if fn not in dmap:
770 if fn not in dmap:
789 if (listignored or mexact(fn)) and dirignore(fn):
771 if (listignored or mexact(fn)) and dirignore(fn):
790 if listignored:
772 if listignored:
791 iadd(fn)
773 iadd(fn)
792 elif listunknown:
774 elif listunknown:
793 uadd(fn)
775 uadd(fn)
794 continue
776 continue
795
777
796 state, mode, size, time = dmap[fn]
778 state, mode, size, time = dmap[fn]
797
779
798 if not st and state in "nma":
780 if not st and state in "nma":
799 dadd(fn)
781 dadd(fn)
800 elif state == 'n':
782 elif state == 'n':
801 # The "mode & lnkkind != lnkkind or self._checklink"
783 # The "mode & lnkkind != lnkkind or self._checklink"
802 # lines are an expansion of "islink => checklink"
784 # lines are an expansion of "islink => checklink"
803 # where islink means "is this a link?" and checklink
785 # where islink means "is this a link?" and checklink
804 # means "can we check links?".
786 # means "can we check links?".
805 mtime = int(st.st_mtime)
787 mtime = int(st.st_mtime)
806 if (size >= 0 and
788 if (size >= 0 and
807 ((size != st.st_size and size != st.st_size & _rangemask)
789 ((size != st.st_size and size != st.st_size & _rangemask)
808 or ((mode ^ st.st_mode) & 0100 and checkexec))
790 or ((mode ^ st.st_mode) & 0100 and checkexec))
809 and (mode & lnkkind != lnkkind or checklink)
791 and (mode & lnkkind != lnkkind or checklink)
810 or size == -2 # other parent
792 or size == -2 # other parent
811 or fn in copymap):
793 or fn in copymap):
812 madd(fn)
794 madd(fn)
813 elif ((time != mtime and time != mtime & _rangemask)
795 elif ((time != mtime and time != mtime & _rangemask)
814 and (mode & lnkkind != lnkkind or checklink)):
796 and (mode & lnkkind != lnkkind or checklink)):
815 ladd(fn)
797 ladd(fn)
816 elif mtime == lastnormaltime:
798 elif mtime == lastnormaltime:
817 # fn may have been changed in the same timeslot without
799 # fn may have been changed in the same timeslot without
818 # changing its size. This can happen if we quickly do
800 # changing its size. This can happen if we quickly do
819 # multiple commits in a single transaction.
801 # multiple commits in a single transaction.
820 # Force lookup, so we don't miss such a racy file change.
802 # Force lookup, so we don't miss such a racy file change.
821 ladd(fn)
803 ladd(fn)
822 elif listclean:
804 elif listclean:
823 cadd(fn)
805 cadd(fn)
824 elif state == 'm':
806 elif state == 'm':
825 madd(fn)
807 madd(fn)
826 elif state == 'a':
808 elif state == 'a':
827 aadd(fn)
809 aadd(fn)
828 elif state == 'r':
810 elif state == 'r':
829 radd(fn)
811 radd(fn)
830
812
831 return (lookup, modified, added, removed, deleted, unknown, ignored,
813 return (lookup, modified, added, removed, deleted, unknown, ignored,
832 clean)
814 clean)
General Comments 0
You need to be logged in to leave comments. Login now