# Provenance: Mercurial changeset r49218:aa8a649a (author: marmoute, branch:
# default, draft) — "status: move the boundary comparison logic within the
# timestamp module".
# What follows is the diff view of mercurial/context.py (@@ -1,3158 +1,3141)
# as rendered by the review tool; each source line appears once per diff column.
# context.py - changeset and file context objects for mercurial
#
# Copyright 2006, 2007 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import filecmp
import os
import stat

from .i18n import _
from .node import (
    hex,
    nullrev,
    short,
)
from .pycompat import (
    getattr,
    open,
)
from . import (
    dagop,
    encoding,
    error,
    fileset,
    match as matchmod,
    mergestate as mergestatemod,
    metadata,
    obsolete as obsmod,
    patch,
    pathutil,
    phases,
    pycompat,
    repoview,
    scmutil,
    sparse,
    subrepo,
    subrepoutil,
    util,
)
from .utils import (
    dateutil,
    stringutil,
)
from .dirstateutils import (
    timestamp,
)

# Short alias used throughout this module for lazily-computed, cached
# attributes (computed once on first access, then stored on the instance).
propertycache = util.propertycache

class basectx(object):
    """A basectx object represents the common logic for its children:
    changectx: read-only context that is already present in the repo,
    workingctx: a context that represents the working directory and can
    be committed,
    memctx: a context that represents changes in-memory and can also
    be committed."""

    def __init__(self, repo):
        self._repo = repo

    def __bytes__(self):
        return short(self.node())

    __str__ = encoding.strmethod(__bytes__)

    def __repr__(self):
        return "<%s %s>" % (type(self).__name__, str(self))

    def __eq__(self, other):
        # Contexts compare equal when they are the same kind of context
        # pointing at the same revision; objects without a _rev never match.
        try:
            return type(self) == type(other) and self._rev == other._rev
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __contains__(self, key):
        return key in self._manifest

    def __getitem__(self, key):
        return self.filectx(key)

    def __iter__(self):
        return iter(self._manifest)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results, if this is
        a working copy context. For non-working copy contexts, it just returns
        the normal manifest."""
        return self.manifest()

    def _matchstatus(self, other, match):
        """This internal method provides a way for child objects to override the
        match operator.
        """
        return match

    def _buildstatus(
        self, other, s, match, listignored, listclean, listunknown
    ):
        """build a status with respect to another context"""
        # Load earliest manifest first for caching reasons. More specifically,
        # if you have revisions 1000 and 1001, 1001 is probably stored as a
        # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
        # 1000 and cache it so that when you read 1001, we just need to apply a
        # delta to what's in the cache. So that's one full reconstruction + one
        # delta application.
        mf2 = None
        if self.rev() is not None and self.rev() < other.rev():
            mf2 = self._buildstatusmanifest(s)
        mf1 = other._buildstatusmanifest(s)
        if mf2 is None:
            mf2 = self._buildstatusmanifest(s)

        modified, added = [], []
        removed = []
        clean = []
        deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
        deletedset = set(deleted)
        d = mf1.diff(mf2, match=match, clean=listclean)
        for fn, value in pycompat.iteritems(d):
            if fn in deletedset:
                continue
            if value is None:
                clean.append(fn)
                continue
            (node1, flag1), (node2, flag2) = value
            if node1 is None:
                added.append(fn)
            elif node2 is None:
                removed.append(fn)
            elif flag1 != flag2:
                modified.append(fn)
            elif node2 not in self._repo.nodeconstants.wdirfilenodeids:
                # When comparing files between two commits, we save time by
                # not comparing the file contents when the nodeids differ.
                # Note that this means we incorrectly report a reverted change
                # to a file as a modification.
                modified.append(fn)
            elif self[fn].cmp(other[fn]):
                modified.append(fn)
            else:
                clean.append(fn)

        if removed:
            # need to filter files if they are already reported as removed
            unknown = [
                fn
                for fn in unknown
                if fn not in mf1 and (not match or match(fn))
            ]
            ignored = [
                fn
                for fn in ignored
                if fn not in mf1 and (not match or match(fn))
            ]
            # if they're deleted, don't report them as removed
            removed = [fn for fn in removed if fn not in deletedset]

        return scmutil.status(
            modified, added, removed, deleted, unknown, ignored, clean
        )

    @propertycache
    def substate(self):
        # Mapping describing this context's subrepositories (lazily computed).
        return subrepoutil.state(self, self._repo.ui)

    def subrev(self, subpath):
        return self.substate[subpath][1]

    def rev(self):
        """return the numeric revision backing this context"""
        return self._rev

    def node(self):
        """return the binary node id backing this context"""
        return self._node

    def hex(self):
        """return the hex-encoded node id"""
        return hex(self.node())

    def manifest(self):
        return self._manifest

    def manifestctx(self):
        return self._manifestctx

    def repo(self):
        return self._repo

    def phasestr(self):
        """return the phase of this context as a human-readable name"""
        return phases.phasenames[self.phase()]

    def mutable(self):
        """True when this context's phase is anything but public"""
        return self.phase() > phases.public

    def matchfileset(self, cwd, expr, badfn=None):
        return fileset.match(self, cwd, expr, badfn=badfn)

    def obsolete(self):
        """True if the changeset is obsolete"""
        return self.rev() in obsmod.getrevs(self._repo, b'obsolete')

    def extinct(self):
        """True if the changeset is extinct"""
        return self.rev() in obsmod.getrevs(self._repo, b'extinct')

    def orphan(self):
        """True if the changeset is not obsolete, but its ancestor is"""
        return self.rev() in obsmod.getrevs(self._repo, b'orphan')

    def phasedivergent(self):
        """True if the changeset tries to be a successor of a public changeset

        Only non-public and non-obsolete changesets may be phase-divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, b'phasedivergent')

    def contentdivergent(self):
        """Is a successor of a changeset with multiple possible successor sets

        Only non-public and non-obsolete changesets may be content-divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, b'contentdivergent')

    def isunstable(self):
        """True if the changeset is either orphan, phase-divergent or
        content-divergent"""
        return self.orphan() or self.phasedivergent() or self.contentdivergent()

    def instabilities(self):
        """return the list of instabilities affecting this changeset.

        Instabilities are returned as strings. possible values are:
        - orphan,
        - phase-divergent,
        - content-divergent.
        """
        instabilities = []
        if self.orphan():
            instabilities.append(b'orphan')
        if self.phasedivergent():
            instabilities.append(b'phase-divergent')
        if self.contentdivergent():
            instabilities.append(b'content-divergent')
        return instabilities

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def p1(self):
        """return the first parent context"""
        return self._parents[0]

    def p2(self):
        """return the second parent context, or the null context if there is
        only one parent"""
        parents = self._parents
        if len(parents) == 2:
            return parents[1]
        return self._repo[nullrev]

    def _fileinfo(self, path):
        # Prefer whichever manifest data is already cached on the instance,
        # falling back to a direct manifestlog lookup.
        if '_manifest' in self.__dict__:
            try:
                return self._manifest.find(path)
            except KeyError:
                raise error.ManifestLookupError(
                    self._node or b'None', path, _(b'not found in manifest')
                )
        if '_manifestdelta' in self.__dict__ or path in self.files():
            if path in self._manifestdelta:
                return (
                    self._manifestdelta[path],
                    self._manifestdelta.flags(path),
                )
        mfl = self._repo.manifestlog
        try:
            node, flag = mfl[self._changeset.manifest].find(path)
        except KeyError:
            raise error.ManifestLookupError(
                self._node or b'None', path, _(b'not found in manifest')
            )

        return node, flag

    def filenode(self, path):
        return self._fileinfo(path)[0]

    def flags(self, path):
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            return b''

    @propertycache
    def _copies(self):
        return metadata.computechangesetcopies(self)

    def p1copies(self):
        return self._copies[0]

    def p2copies(self):
        return self._copies[1]

    def sub(self, path, allowcreate=True):
        '''return a subrepo for the stored revision of path, never wdir()'''
        return subrepo.subrepo(self, path, allowcreate=allowcreate)

    def nullsub(self, path, pctx):
        return subrepo.nullsubrepo(self, path, pctx)

    def workingsub(self, path):
        """return a subrepo for the stored revision, or wdir if this is a wdir
        context.
        """
        return subrepo.subrepo(self, path, allowwdir=True)

    def match(
        self,
        pats=None,
        include=None,
        exclude=None,
        default=b'glob',
        listsubrepos=False,
        badfn=None,
        cwd=None,
    ):
        """build a matcher for the given patterns, rooted at this context"""
        r = self._repo
        if not cwd:
            cwd = r.getcwd()
        return matchmod.match(
            r.root,
            cwd,
            pats,
            include,
            exclude,
            default,
            auditor=r.nofsauditor,
            ctx=self,
            listsubrepos=listsubrepos,
            badfn=badfn,
        )

    def diff(
        self,
        ctx2=None,
        match=None,
        changes=None,
        opts=None,
        losedatafn=None,
        pathfn=None,
        copy=None,
        copysourcematch=None,
        hunksfilterfn=None,
    ):
        """Returns a diff generator for the given contexts and matcher"""
        if ctx2 is None:
            ctx2 = self.p1()
        if ctx2 is not None:
            ctx2 = self._repo[ctx2]
        return patch.diff(
            self._repo,
            ctx2,
            self,
            match=match,
            changes=changes,
            opts=opts,
            losedatafn=losedatafn,
            pathfn=pathfn,
            copy=copy,
            copysourcematch=copysourcematch,
            hunksfilterfn=hunksfilterfn,
        )

    def dirs(self):
        return self._manifest.dirs()

    def hasdir(self, dir):
        return self._manifest.hasdir(dir)

    def status(
        self,
        other=None,
        match=None,
        listignored=False,
        listclean=False,
        listunknown=False,
        listsubrepos=False,
    ):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with working directory.

        ctx1.status(ctx2) returns the status of change from ctx1 to ctx2

        Returns a mercurial.scmutils.status object.

        Data can be accessed using either tuple notation:

        (modified, added, removed, deleted, unknown, ignored, clean)

        or direct attribute access:

        s.modified, s.added, ...
        """

        ctx1 = self
        ctx2 = self._repo[other]

        # This next code block is, admittedly, fragile logic that tests for
        # reversing the contexts and wouldn't need to exist if it weren't for
        # the fast (and common) code path of comparing the working directory
        # with its first parent.
        #
        # What we're aiming for here is the ability to call:
        #
        # workingctx.status(parentctx)
        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        reversed = False
        if not isinstance(ctx1, changectx) and isinstance(ctx2, changectx):
            reversed = True
            ctx1, ctx2 = ctx2, ctx1

        match = self._repo.narrowmatch(match)
        match = ctx2._matchstatus(ctx1, match)
        r = scmutil.status([], [], [], [], [], [], [])
        r = ctx2._buildstatus(
            ctx1, r, match, listignored, listclean, listunknown
        )

        if reversed:
            # Reverse added and removed. Clear deleted, unknown and ignored as
            # these make no sense to reverse.
            r = scmutil.status(
                r.modified, r.removed, r.added, [], [], [], r.clean
            )

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                try:
                    rev2 = ctx2.subrev(subpath)
                except KeyError:
                    # A subrepo that existed in node1 was deleted between
                    # node1 and node2 (inclusive). Thus, ctx2's substate
                    # won't contain that subpath. The best we can do ignore it.
                    rev2 = None
                submatch = matchmod.subdirmatcher(subpath, match)
                s = sub.status(
                    rev2,
                    match=submatch,
                    ignored=listignored,
                    clean=listclean,
                    unknown=listunknown,
                    listsubrepos=True,
                )
                for k in (
                    'modified',
                    'added',
                    'removed',
                    'deleted',
                    'unknown',
                    'ignored',
                    'clean',
                ):
                    rfiles, sfiles = getattr(r, k), getattr(s, k)
                    rfiles.extend(b"%s/%s" % (subpath, f) for f in sfiles)

        r.modified.sort()
        r.added.sort()
        r.removed.sort()
        r.deleted.sort()
        r.unknown.sort()
        r.ignored.sort()
        r.clean.sort()

        return r

    def mergestate(self, clean=False):
        """Get a mergestate object for this context."""
        raise NotImplementedError(
            '%s does not implement mergestate()' % self.__class__
        )

    def isempty(self):
        """True when committing this context would create an empty changeset:
        a single parent, same branch, not closing a branch, and no files
        touched."""
        return not (
            len(self.parents()) > 1
            or self.branch() != self.p1().branch()
            or self.closesbranch()
            or self.files()
        )
499
499
500
500
501 class changectx(basectx):
501 class changectx(basectx):
502 """A changecontext object makes access to data related to a particular
502 """A changecontext object makes access to data related to a particular
503 changeset convenient. It represents a read-only context already present in
503 changeset convenient. It represents a read-only context already present in
504 the repo."""
504 the repo."""
505
505
506 def __init__(self, repo, rev, node, maybe_filtered=True):
506 def __init__(self, repo, rev, node, maybe_filtered=True):
507 super(changectx, self).__init__(repo)
507 super(changectx, self).__init__(repo)
508 self._rev = rev
508 self._rev = rev
509 self._node = node
509 self._node = node
510 # When maybe_filtered is True, the revision might be affected by
510 # When maybe_filtered is True, the revision might be affected by
511 # changelog filtering and operation through the filtered changelog must be used.
511 # changelog filtering and operation through the filtered changelog must be used.
512 #
512 #
513 # When maybe_filtered is False, the revision has already been checked
513 # When maybe_filtered is False, the revision has already been checked
514 # against filtering and is not filtered. Operation through the
514 # against filtering and is not filtered. Operation through the
515 # unfiltered changelog might be used in some case.
515 # unfiltered changelog might be used in some case.
516 self._maybe_filtered = maybe_filtered
516 self._maybe_filtered = maybe_filtered
517
517
518 def __hash__(self):
518 def __hash__(self):
519 try:
519 try:
520 return hash(self._rev)
520 return hash(self._rev)
521 except AttributeError:
521 except AttributeError:
522 return id(self)
522 return id(self)
523
523
524 def __nonzero__(self):
524 def __nonzero__(self):
525 return self._rev != nullrev
525 return self._rev != nullrev
526
526
527 __bool__ = __nonzero__
527 __bool__ = __nonzero__
528
528
529 @propertycache
529 @propertycache
530 def _changeset(self):
530 def _changeset(self):
531 if self._maybe_filtered:
531 if self._maybe_filtered:
532 repo = self._repo
532 repo = self._repo
533 else:
533 else:
534 repo = self._repo.unfiltered()
534 repo = self._repo.unfiltered()
535 return repo.changelog.changelogrevision(self.rev())
535 return repo.changelog.changelogrevision(self.rev())
536
536
537 @propertycache
537 @propertycache
538 def _manifest(self):
538 def _manifest(self):
539 return self._manifestctx.read()
539 return self._manifestctx.read()
540
540
541 @property
541 @property
542 def _manifestctx(self):
542 def _manifestctx(self):
543 return self._repo.manifestlog[self._changeset.manifest]
543 return self._repo.manifestlog[self._changeset.manifest]
544
544
545 @propertycache
545 @propertycache
546 def _manifestdelta(self):
546 def _manifestdelta(self):
547 return self._manifestctx.readdelta()
547 return self._manifestctx.readdelta()
548
548
549 @propertycache
549 @propertycache
550 def _parents(self):
550 def _parents(self):
551 repo = self._repo
551 repo = self._repo
552 if self._maybe_filtered:
552 if self._maybe_filtered:
553 cl = repo.changelog
553 cl = repo.changelog
554 else:
554 else:
555 cl = repo.unfiltered().changelog
555 cl = repo.unfiltered().changelog
556
556
557 p1, p2 = cl.parentrevs(self._rev)
557 p1, p2 = cl.parentrevs(self._rev)
558 if p2 == nullrev:
558 if p2 == nullrev:
559 return [changectx(repo, p1, cl.node(p1), maybe_filtered=False)]
559 return [changectx(repo, p1, cl.node(p1), maybe_filtered=False)]
560 return [
560 return [
561 changectx(repo, p1, cl.node(p1), maybe_filtered=False),
561 changectx(repo, p1, cl.node(p1), maybe_filtered=False),
562 changectx(repo, p2, cl.node(p2), maybe_filtered=False),
562 changectx(repo, p2, cl.node(p2), maybe_filtered=False),
563 ]
563 ]
564
564
565 def changeset(self):
565 def changeset(self):
566 c = self._changeset
566 c = self._changeset
567 return (
567 return (
568 c.manifest,
568 c.manifest,
569 c.user,
569 c.user,
570 c.date,
570 c.date,
571 c.files,
571 c.files,
572 c.description,
572 c.description,
573 c.extra,
573 c.extra,
574 )
574 )
575
575
576 def manifestnode(self):
576 def manifestnode(self):
577 return self._changeset.manifest
577 return self._changeset.manifest
578
578
579 def user(self):
579 def user(self):
580 return self._changeset.user
580 return self._changeset.user
581
581
582 def date(self):
582 def date(self):
583 return self._changeset.date
583 return self._changeset.date
584
584
585 def files(self):
585 def files(self):
586 return self._changeset.files
586 return self._changeset.files
587
587
588 def filesmodified(self):
588 def filesmodified(self):
589 modified = set(self.files())
589 modified = set(self.files())
590 modified.difference_update(self.filesadded())
590 modified.difference_update(self.filesadded())
591 modified.difference_update(self.filesremoved())
591 modified.difference_update(self.filesremoved())
592 return sorted(modified)
592 return sorted(modified)
593
593
    def filesadded(self):
        """Return the list of files added by this changeset.

        The value stored in the changeset (possibly None) is reconciled
        with the repository's copy-metadata configuration:

        - 'changeset-sidedata' mode: the stored value is authoritative,
          a missing value means no files were added,
        - 'changeset-only' config: a missing value means no files added,
        - 'compatibility' config: a missing value is recomputed,
        - any other (filelog) config: the stored value is ignored and the
          list is always recomputed.
        """
        filesadded = self._changeset.filesadded
        # whether a missing (None) value should be recomputed rather than
        # treated as an empty list
        compute_on_none = True
        if self._repo.filecopiesmode == b'changeset-sidedata':
            compute_on_none = False
        else:
            source = self._repo.ui.config(b'experimental', b'copies.read-from')
            if source == b'changeset-only':
                compute_on_none = False
            elif source != b'compatibility':
                # filelog mode, ignore any changelog content
                filesadded = None
        if filesadded is None:
            if compute_on_none:
                filesadded = metadata.computechangesetfilesadded(self)
            else:
                filesadded = []
        return filesadded
612
612
    def filesremoved(self):
        """Return the list of files removed by this changeset.

        The value stored in the changeset (possibly None) is reconciled
        with the repository's copy-metadata configuration: in
        'changeset-sidedata' mode or with the 'changeset-only' config a
        missing value defaults to [], with the 'compatibility' config it is
        recomputed, and in filelog mode the stored value is ignored and the
        list is always recomputed.
        """
        filesremoved = self._changeset.filesremoved
        # whether a missing (None) value should be recomputed rather than
        # treated as an empty list
        compute_on_none = True
        if self._repo.filecopiesmode == b'changeset-sidedata':
            compute_on_none = False
        else:
            source = self._repo.ui.config(b'experimental', b'copies.read-from')
            if source == b'changeset-only':
                compute_on_none = False
            elif source != b'compatibility':
                # filelog mode, ignore any changelog content
                filesremoved = None
        if filesremoved is None:
            if compute_on_none:
                filesremoved = metadata.computechangesetfilesremoved(self)
            else:
                filesremoved = []
        return filesremoved
631
631
    @propertycache
    def _copies(self):
        """Tuple of (p1copies, p2copies) dicts mapping destination to source.

        Copy metadata is read from the changeset, the filelogs, or both,
        depending on the repository's copy-tracing configuration.
        """
        p1copies = self._changeset.p1copies
        p2copies = self._changeset.p2copies
        compute_on_none = True
        if self._repo.filecopiesmode == b'changeset-sidedata':
            compute_on_none = False
        else:
            source = self._repo.ui.config(b'experimental', b'copies.read-from')
            # If config says to get copy metadata only from changeset, then
            # return that, defaulting to {} if there was no copy metadata. In
            # compatibility mode, we return copy data from the changeset if it
            # was recorded there, and otherwise we fall back to getting it from
            # the filelogs (below).
            #
            # If we are in compatibility mode and there is no data in the
            # changeset, we get the copy metadata from the filelogs.
            #
            # Otherwise, when config said to read only from filelog, we get the
            # copy metadata from the filelogs.
            if source == b'changeset-only':
                compute_on_none = False
            elif source != b'compatibility':
                # filelog mode, ignore any changelog content
                p1copies = p2copies = None
        if p1copies is None:
            if compute_on_none:
                # fall back to the (slower) filelog-based computation
                p1copies, p2copies = super(changectx, self)._copies
            else:
                if p1copies is None:
                    p1copies = {}
                if p2copies is None:
                    p2copies = {}
        return p1copies, p2copies
666
666
    def description(self):
        """Return the changeset's commit message."""
        return self._changeset.description

    def branch(self):
        """Return the branch name, converted to the local encoding."""
        return encoding.tolocal(self._changeset.extra.get(b"branch"))

    def closesbranch(self):
        """True if this changeset closes its branch."""
        return b'close' in self._changeset.extra

    def extra(self):
        """Return a dict of extra information."""
        return self._changeset.extra
679
679
    def tags(self):
        """Return a list of byte tag names"""
        return self._repo.nodetags(self._node)

    def bookmarks(self):
        """Return a list of byte bookmark names."""
        return self._repo.nodebookmarks(self._node)

    def phase(self):
        """Return the phase of this revision (from the repo's phase cache)."""
        return self._repo._phasecache.phase(self._repo, self._rev)

    def hidden(self):
        """True if this revision is filtered out of the 'visible' view."""
        return self._rev in repoview.filterrevs(self._repo, b'visible')

    def isinmemory(self):
        # a stored changeset is never an in-memory overlay
        return False
696
696
    def children(self):
        """return list of changectx contexts for each child changeset.

        This returns only the immediate child changesets. Use descendants() to
        recursively walk children.
        """
        c = self._repo.changelog.children(self._node)
        return [self._repo[x] for x in c]

    def ancestors(self):
        """Yield a context for every ancestor changeset of this revision."""
        for a in self._repo.changelog.ancestors([self._rev]):
            yield self._repo[a]

    def descendants(self):
        """Recursively yield all children of the changeset.

        For just the immediate children, use children()
        """
        for d in self._repo.changelog.descendants([self._rev]):
            yield self._repo[d]
717
717
    def filectx(self, path, fileid=None, filelog=None):
        """get a file context from this changeset

        When ``fileid`` is omitted it is resolved from this changeset's
        manifest via ``filenode()``.
        """
        if fileid is None:
            fileid = self.filenode(path)
        return filectx(
            self._repo, path, fileid=fileid, changectx=self, filelog=filelog
        )
725
725
    def ancestor(self, c2, warn=False):
        """return the "best" ancestor context of self and c2

        If there are multiple candidates, it will show a message and check
        merge.preferancestor configuration before falling back to the
        revlog ancestor."""
        # deal with workingctxs
        n2 = c2._node
        if n2 is None:
            n2 = c2._parents[0]._node
        cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
        if not cahs:
            anc = self._repo.nodeconstants.nullid
        elif len(cahs) == 1:
            anc = cahs[0]
        else:
            # several common-ancestor heads: honor the user's preference when
            # it is one of them, otherwise let the changelog pick one
            # experimental config: merge.preferancestor
            for r in self._repo.ui.configlist(b'merge', b'preferancestor'):
                try:
                    ctx = scmutil.revsymbol(self._repo, r)
                except error.RepoLookupError:
                    continue
                anc = ctx.node()
                if anc in cahs:
                    break
            else:
                # no configured preference matched; use the default ancestor
                anc = self._repo.changelog.ancestor(self._node, n2)
            if warn:
                self._repo.ui.status(
                    (
                        _(b"note: using %s as ancestor of %s and %s\n")
                        % (short(anc), short(self._node), short(n2))
                    )
                    + b''.join(
                        _(
                            b" alternatively, use --config "
                            b"merge.preferancestor=%s\n"
                        )
                        % short(n)
                        for n in sorted(cahs)
                        if n != anc
                    )
                )
        return self._repo[anc]
770
770
771 def isancestorof(self, other):
771 def isancestorof(self, other):
772 """True if this changeset is an ancestor of other"""
772 """True if this changeset is an ancestor of other"""
773 return self._repo.changelog.isancestorrev(self._rev, other._rev)
773 return self._repo.changelog.isancestorrev(self._rev, other._rev)
774
774
    def walk(self, match):
        '''Generates matching file names.'''

        # Wrap match.bad method to have message with nodeid
        def bad(fn, msg):
            # The manifest doesn't know about subrepos, so don't complain about
            # paths into valid subrepos.
            if any(fn == s or fn.startswith(s + b'/') for s in self.substate):
                return
            match.bad(fn, _(b'no such file in rev %s') % self)

        m = matchmod.badmatch(self._repo.narrowmatch(match), bad)
        return self._manifest.walk(m)

    def matches(self, match):
        """Generate the file names in this revision matched by ``match``."""
        return self.walk(match)
791
791
792
792
793 class basefilectx(object):
793 class basefilectx(object):
794 """A filecontext object represents the common logic for its children:
794 """A filecontext object represents the common logic for its children:
795 filectx: read-only access to a filerevision that is already present
795 filectx: read-only access to a filerevision that is already present
796 in the repo,
796 in the repo,
797 workingfilectx: a filecontext that represents files from the working
797 workingfilectx: a filecontext that represents files from the working
798 directory,
798 directory,
799 memfilectx: a filecontext that represents files in-memory,
799 memfilectx: a filecontext that represents files in-memory,
800 """
800 """
801
801
    @propertycache
    def _filelog(self):
        """The filelog (per-file revision storage) for this file's path."""
        return self._repo.file(self._path)

    @propertycache
    def _changeid(self):
        """The changelog revision this file context is associated with."""
        if '_changectx' in self.__dict__:
            return self._changectx.rev()
        elif '_descendantrev' in self.__dict__:
            # this file context was created from a revision with a known
            # descendant, we can (lazily) correct for linkrev aliases
            return self._adjustlinkrev(self._descendantrev)
        else:
            return self._filelog.linkrev(self._filerev)

    @propertycache
    def _filenode(self):
        """The file revision node, resolved from an explicit file id when
        one was given, otherwise from the associated changeset's manifest."""
        if '_fileid' in self.__dict__:
            return self._filelog.lookup(self._fileid)
        else:
            return self._changectx.filenode(self._path)

    @propertycache
    def _filerev(self):
        """The file revision number matching ``_filenode``."""
        return self._filelog.rev(self._filenode)

    @propertycache
    def _repopath(self):
        """The repository-relative path of this file."""
        return self._path
831
831
    def __nonzero__(self):
        # a file context is truthy iff its file revision can be resolved
        try:
            self._filenode
            return True
        except error.LookupError:
            # file is missing
            return False

    __bool__ = __nonzero__

    def __bytes__(self):
        try:
            return b"%s@%s" % (self.path(), self._changectx)
        except error.LookupError:
            # the associated changeset could not be resolved
            return b"%s@???" % self.path()

    __str__ = encoding.strmethod(__bytes__)

    def __repr__(self):
        return "<%s %s>" % (type(self).__name__, str(self))

    def __hash__(self):
        try:
            return hash((self._path, self._filenode))
        except AttributeError:
            # not fully initialized; fall back to identity hashing
            return id(self)

    def __eq__(self, other):
        # equal iff same concrete type, same path and same file revision
        try:
            return (
                type(self) == type(other)
                and self._path == other._path
                and self._filenode == other._filenode
            )
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)
871
871
    def filerev(self):
        """Return the file revision number."""
        return self._filerev

    def filenode(self):
        """Return the file revision node."""
        return self._filenode

    @propertycache
    def _flags(self):
        # flags are stored in the associated changeset's manifest
        return self._changectx.flags(self._path)

    def flags(self):
        """Return the file's flag string."""
        return self._flags
884
884
    def filelog(self):
        """Return the filelog backing this file revision."""
        return self._filelog

    def rev(self):
        """Return the changelog revision associated with this file context."""
        return self._changeid

    def linkrev(self):
        """Return the changelog revision recorded in the filelog.

        Unlike rev(), this value may be shadowed by another changeset
        reusing the same file revision (see _adjustlinkrev/introrev).
        """
        return self._filelog.linkrev(self._filerev)

    def node(self):
        """Return the node of the associated changeset."""
        return self._changectx.node()

    def hex(self):
        """Return the hex node of the associated changeset."""
        return self._changectx.hex()

    def user(self):
        """Return the committer of the associated changeset."""
        return self._changectx.user()

    def date(self):
        """Return the date of the associated changeset."""
        return self._changectx.date()

    def files(self):
        """Return the files touched by the associated changeset."""
        return self._changectx.files()
908
908
    def description(self):
        """Return the commit message of the associated changeset."""
        return self._changectx.description()

    def branch(self):
        """Return the branch of the associated changeset."""
        return self._changectx.branch()

    def extra(self):
        """Return the extra dict of the associated changeset."""
        return self._changectx.extra()

    def phase(self):
        """Return the phase of the associated changeset."""
        return self._changectx.phase()

    def phasestr(self):
        """Return the phase name of the associated changeset."""
        return self._changectx.phasestr()

    def obsolete(self):
        """True if the associated changeset is obsolete."""
        return self._changectx.obsolete()

    def instabilities(self):
        """Return the instabilities of the associated changeset."""
        return self._changectx.instabilities()

    def manifest(self):
        """Return the manifest of the associated changeset."""
        return self._changectx.manifest()

    def changectx(self):
        """Return the change context this file context belongs to."""
        return self._changectx
935
935
    def renamed(self):
        """Return the copy/rename information (``_copied``); falsy when the
        file was not copied."""
        return self._copied

    def copysource(self):
        """Return the path this file was copied from, or a falsy value."""
        return self._copied and self._copied[0]

    def repo(self):
        """Return the repository this context belongs to."""
        return self._repo
944
944
945 def size(self):
945 def size(self):
946 return len(self.data())
946 return len(self.data())
947
947
948 def path(self):
948 def path(self):
949 return self._path
949 return self._path
950
950
    def isbinary(self):
        """True if the file content looks binary; False when the data
        cannot be read."""
        try:
            return stringutil.binary(self.data())
        except IOError:
            return False

    def isexec(self):
        """True if the file carries the executable ('x') flag."""
        return b'x' in self.flags()

    def islink(self):
        """True if the file is a symbolic link ('l' flag)."""
        return b'l' in self.flags()

    def isabsent(self):
        """whether this filectx represents a file not in self._changectx

        This is mainly for merge code to detect change/delete conflicts. This is
        expected to be True for all subclasses of basectx."""
        return False

    # when true on the *other* operand, cmp() delegates the comparison to it
    _customcmp = False
971
971
    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        if fctx._customcmp:
            # the other side implements its own comparison logic; defer to it
            return fctx.cmp(self)

        if self._filenode is None:
            raise error.ProgrammingError(
                b'filectx.cmp() must be reimplemented if not backed by revlog'
            )

        if fctx._filenode is None:
            # the other side is not stored in a filelog (e.g. working copy)
            if self._repo._encodefilterpats:
                # can't rely on size() because wdir content may be decoded
                return self._filelog.cmp(self._filenode, fctx.data())
            if self.size() - 4 == fctx.size():
                # size() can match:
                # if file data starts with '\1\n', empty metadata block is
                # prepended, which adds 4 bytes to filelog.size().
                return self._filelog.cmp(self._filenode, fctx.data())
            if self.size() == fctx.size() or self.flags() == b'l':
                # size() matches: need to compare content
                # issue6456: Always compare symlinks because size can represent
                # encrypted string for EXT-4 encryption(fscrypt).
                return self._filelog.cmp(self._filenode, fctx.data())

        # size() differs
        return True
1002
1002
    def _adjustlinkrev(self, srcrev, inclusive=False, stoprev=None):
        """return the first ancestor of <srcrev> introducing <fnode>

        If the linkrev of the file revision does not point to an ancestor of
        srcrev, we'll walk down the ancestors until we find one introducing
        this file revision.

        :srcrev: the changeset revision we search ancestors from
        :inclusive: if true, the src revision will also be checked
        :stoprev: an optional revision to stop the walk at. If no introduction
                  of this file content could be found before this floor
                  revision, the function will returns "None" and stops its
                  iteration.
        """
        repo = self._repo
        cl = repo.unfiltered().changelog
        mfl = repo.manifestlog
        # fetch the linkrev
        lkr = self.linkrev()
        if srcrev == lkr:
            # the linkrev IS the source revision; nothing to adjust
            return lkr
        # hack to reuse ancestor computation when searching for renames
        memberanc = getattr(self, '_ancestrycontext', None)
        iteranc = None
        if srcrev is None:
            # wctx case, used by workingfilectx during mergecopy
            revs = [p.rev() for p in self._repo[None].parents()]
            inclusive = True  # we skipped the real (revless) source
        else:
            revs = [srcrev]
        if memberanc is None:
            memberanc = iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
        # check if this linkrev is an ancestor of srcrev
        if lkr not in memberanc:
            if iteranc is None:
                iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
            fnode = self._filenode
            path = self._path
            for a in iteranc:
                if stoprev is not None and a < stoprev:
                    # walked past the floor without finding an introduction
                    return None
                ac = cl.read(a)  # get changeset data (we avoid object creation)
                if path in ac[3]:  # checking the 'files' field.
                    # The file has been touched, check if the content is
                    # similar to the one we search for.
                    if fnode == mfl[ac[0]].readfast().get(path):
                        return a
            # In theory, we should never get out of that loop without a result.
            # But if manifest uses a buggy file revision (not children of the
            # one it replaces) we could. Such a buggy situation will likely
            # result in a crash somewhere else at some point.
        return lkr
1055
1055
1056 def isintroducedafter(self, changelogrev):
1056 def isintroducedafter(self, changelogrev):
1057 """True if a filectx has been introduced after a given floor revision"""
1057 """True if a filectx has been introduced after a given floor revision"""
1058 if self.linkrev() >= changelogrev:
1058 if self.linkrev() >= changelogrev:
1059 return True
1059 return True
1060 introrev = self._introrev(stoprev=changelogrev)
1060 introrev = self._introrev(stoprev=changelogrev)
1061 if introrev is None:
1061 if introrev is None:
1062 return False
1062 return False
1063 return introrev >= changelogrev
1063 return introrev >= changelogrev
1064
1064
1065 def introrev(self):
1065 def introrev(self):
1066 """return the rev of the changeset which introduced this file revision
1066 """return the rev of the changeset which introduced this file revision
1067
1067
1068 This method is different from linkrev because it take into account the
1068 This method is different from linkrev because it take into account the
1069 changeset the filectx was created from. It ensures the returned
1069 changeset the filectx was created from. It ensures the returned
1070 revision is one of its ancestors. This prevents bugs from
1070 revision is one of its ancestors. This prevents bugs from
1071 'linkrev-shadowing' when a file revision is used by multiple
1071 'linkrev-shadowing' when a file revision is used by multiple
1072 changesets.
1072 changesets.
1073 """
1073 """
1074 return self._introrev()
1074 return self._introrev()
1075
1075
    def _introrev(self, stoprev=None):
        """
        Same as `introrev` but, with an extra argument to limit changelog
        iteration range in some internal usecase.

        If `stoprev` is set, the `introrev` will not be searched past that
        `stoprev` revision and "None" might be returned. This is useful to
        limit the iteration range.
        """
        toprev = None
        attrs = vars(self)
        if '_changeid' in attrs:
            # We have a cached value already
            toprev = self._changeid
        elif '_changectx' in attrs:
            # We know which changelog entry we are coming from
            toprev = self._changectx.rev()

        if toprev is not None:
            return self._adjustlinkrev(toprev, inclusive=True, stoprev=stoprev)
        elif '_descendantrev' in attrs:
            introrev = self._adjustlinkrev(self._descendantrev, stoprev=stoprev)
            # be nice and cache the result of the computation
            if introrev is not None:
                self._changeid = introrev
            return introrev
        else:
            # no changeset association at all; the linkrev is the best answer
            return self.linkrev()
1104
1104
    def introfilectx(self):
        """Return filectx having identical contents, but pointing to the
        changeset revision where this filectx was introduced"""
        introrev = self.introrev()
        if self.rev() == introrev:
            # already associated with the introducing changeset
            return self
        return self.filectx(self.filenode(), changeid=introrev)
1112
1112
    def _parentfilectx(self, path, fileid, filelog):
        """create parent filectx keeping ancestry info for _adjustlinkrev()"""
        fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
        if '_changeid' in vars(self) or '_changectx' in vars(self):
            # If self is associated with a changeset (probably explicitly
            # fed), ensure the created filectx is associated with a
            # changeset that is an ancestor of self.changectx.
            # This lets us later use _adjustlinkrev to get a correct link.
            fctx._descendantrev = self.rev()
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        elif '_descendantrev' in vars(self):
            # Otherwise propagate _descendantrev if we have one associated.
            fctx._descendantrev = self._descendantrev
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        return fctx
1128
1128
def parents(self):
    """Return the list of parent filectxs.

    Null-id parents are filtered out; when the file revision records a
    rename, the rename source is substituted as the first parent.
    """
    _path = self._path
    fl = self._filelog
    parents = self._filelog.parents(self._filenode)
    pl = [
        (_path, node, fl)
        for node in parents
        if node != self._repo.nodeconstants.nullid
    ]

    r = fl.renamed(self._filenode)
    if r:
        # - In the simple rename case, both parent are nullid, pl is empty.
        # - In case of merge, only one of the parent is null id and should
        # be replaced with the rename information. This parent is -always-
        # the first one.
        #
        # As null id have always been filtered out in the previous list
        # comprehension, inserting to 0 will always result in "replacing
        # first nullid parent with rename information.
        pl.insert(0, (r[0], r[1], self._repo.file(r[0])))

    return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
1152
1152
def p1(self):
    """Return the first parent filectx."""
    parent_list = self.parents()
    return parent_list[0]
1155
1155
def p2(self):
    """Return the second parent filectx, or a null filectx if absent."""
    parent_list = self.parents()
    if len(parent_list) != 2:
        # No second parent recorded: hand back a null revision of the
        # same file so callers always get a filectx.
        return filectx(
            self._repo, self._path, fileid=-1, filelog=self._filelog
        )
    return parent_list[1]
1161
1161
def annotate(self, follow=False, skiprevs=None, diffopts=None):
    """Returns a list of annotateline objects for each line in the file

    - line.fctx is the filectx of the node where that line was last changed
    - line.lineno is the line number at the first appearance in the managed
    file
    - line.text is the data on that line (including newline character)

    follow: also follow across renames/copies
    skiprevs: revisions whose changes should be attributed to their parents
    diffopts: diff options forwarded to the annotation algorithm
    """
    # cache filelog opens: annotate may visit the same path many times
    getlog = util.lrucachefunc(lambda x: self._repo.file(x))

    def parents(f):
        # Cut _descendantrev here to mitigate the penalty of lazy linkrev
        # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
        # from the topmost introrev (= srcrev) down to p.linkrev() if it
        # isn't an ancestor of the srcrev.
        f._changeid
        pl = f.parents()

        # Don't return renamed parents if we aren't following.
        if not follow:
            pl = [p for p in pl if p.path() == f.path()]

        # renamed filectx won't have a filelog yet, so set it
        # from the cache to save time
        for p in pl:
            if not '_filelog' in p.__dict__:
                p._filelog = getlog(p.path())

        return pl

    # use linkrev to find the first changeset where self appeared
    base = self.introfilectx()
    if getattr(base, '_ancestrycontext', None) is None:
        # it is safe to use an unfiltered repository here because we are
        # walking ancestors only.
        cl = self._repo.unfiltered().changelog
        if base.rev() is None:
            # wctx is not inclusive, but works because _ancestrycontext
            # is used to test filelog revisions
            ac = cl.ancestors(
                [p.rev() for p in base.parents()], inclusive=True
            )
        else:
            ac = cl.ancestors([base.rev()], inclusive=True)
        base._ancestrycontext = ac

    return dagop.annotate(
        base, parents, skiprevs=skiprevs, diffopts=diffopts
    )
1211
1211
def ancestors(self, followfirst=False):
    """Generate ancestor filectxs of this file revision.

    With followfirst=True only the first parent of each visited revision
    is explored.  At each step the candidate with the highest
    (linkrev, filenode) key is yielded next, via max() over the visit
    dict's keys.
    """
    visit = {}
    c = self
    if followfirst:
        cut = 1  # only walk the first parent
    else:
        cut = None  # walk all parents

    while True:
        for parent in c.parents()[:cut]:
            visit[(parent.linkrev(), parent.filenode())] = parent
        if not visit:
            break
        c = visit.pop(max(visit))
        yield c
1227
1227
def decodeddata(self):
    """Returns `data()` after running repository decoding filters.

    This is often equivalent to how the data would be expressed on disk.
    """
    path = self.path()
    raw = self.data()
    return self._repo.wwritedata(path, raw)
1234
1234
1235
1235
class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""

    def __init__(
        self,
        repo,
        path,
        changeid=None,
        fileid=None,
        filelog=None,
        changectx=None,
    ):
        """changeid must be a revision number, if specified.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        # at least one way of identifying the revision must be provided
        assert (
            changeid is not None or fileid is not None or changectx is not None
        ), b"bad args: changeid=%r, fileid=%r, changectx=%r" % (
            changeid,
            fileid,
            changectx,
        )

        if filelog is not None:
            self._filelog = filelog

        # only pre-seed the propertycaches that were explicitly given
        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        # changectx for this file revision, resolved lazily from _changeid
        try:
            return self._repo[self._changeid]
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered this may lead to `filectx` trying to build
            # `changectx` for filtered revision. In such case we fallback to
            # creating `changectx` on the unfiltered version of the reposition.
            # This fallback should not be an issue because `changectx` from
            # `filectx` are not used in complex operations that care about
            # filtering.
            #
            # This fallback is a cheap and dirty fix that prevent several
            # crashes. It does not ensure the behavior is correct. However the
            # behavior was not correct before filtering either and "incorrect
            # behavior" is seen as better as "crash"
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should be
            # considered when solving linkrev issue are on the table.
            return self._repo.unfiltered()[self._changeid]

    def filectx(self, fileid, changeid=None):
        """opens an arbitrary revision of the file without
        opening a new filelog"""
        return filectx(
            self._repo,
            self._path,
            fileid=fileid,
            filelog=self._filelog,
            changeid=changeid,
        )

    def rawdata(self):
        # raw revlog data, bypassing flag processors
        return self._filelog.rawdata(self._filenode)

    def rawflags(self):
        """low-level revlog flags"""
        return self._filelog.flags(self._filerev)

    def data(self):
        """Return the file content, honoring the censor policy."""
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            # censored content may be silently replaced by empty data
            if self._repo.ui.config(b"censor", b"policy") == b"ignore":
                return b""
            raise error.Abort(
                _(b"censored node: %s") % short(self._filenode),
                hint=_(b"set censor.policy to ignore errors"),
            )

    def size(self):
        # size as recorded in the filelog
        return self._filelog.size(self._filerev)

    @propertycache
    def _copied(self):
        """check if file was actually renamed in this changeset revision

        If rename logged in file revision, we report copy for changeset only
        if file revisions linkrev points back to the changeset in question
        or both changeset parents contain different file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return None

        if self.rev() == self.linkrev():
            return renamed

        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                # same file node in a parent: not a rename in this changeset
                if fnode == p.filenode(name):
                    return None
            except error.LookupError:
                pass
        return renamed

    def children(self):
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [
            filectx(self._repo, self._path, fileid=x, filelog=self._filelog)
            for x in c
        ]
1360
1360
1361
1361
class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""

    def __init__(
        self,
        repo,
        text=b"",
        user=None,
        date=None,
        extra=None,
        changes=None,
        branch=None,
    ):
        super(committablectx, self).__init__(repo)
        self._rev = None
        self._node = None
        self._text = text
        # only override the lazy propertycaches when a value was supplied
        if date:
            self._date = dateutil.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if branch is not None:
            self._extra[b'branch'] = encoding.fromlocal(branch)
        if not self._extra.get(b'branch'):
            self._extra[b'branch'] = b'default'

    def __bytes__(self):
        return bytes(self._parents[0]) + b"+"

    def hex(self):
        # BUG FIX: this previously evaluated the attribute without
        # returning it, so hex() always returned None.  Return the
        # working-directory pseudo hash, matching workingctx.hex().
        return self._repo.nodeconstants.wdirhex

    __str__ = encoding.strmethod(__bytes__)

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    @propertycache
    def _status(self):
        # default status: compare against the working copy
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        ui = self._repo.ui
        # honor the devel.default-date override before falling back to "now"
        date = ui.configdate(b'devel', b'default-date')
        if date is None:
            date = dateutil.makedate()
        return date

    def subrev(self, subpath):
        return None

    def manifestnode(self):
        return None

    def user(self):
        return self._user or self._repo.ui.username()

    def date(self):
        return self._date

    def description(self):
        return self._text

    def files(self):
        # all files touched by this (pending) commit, sorted
        return sorted(
            self._status.modified + self._status.added + self._status.removed
        )

    def modified(self):
        return self._status.modified

    def added(self):
        return self._status.added

    def removed(self):
        return self._status.removed

    def deleted(self):
        return self._status.deleted

    filesmodified = modified
    filesadded = added
    filesremoved = removed

    def branch(self):
        return encoding.tolocal(self._extra[b'branch'])

    def closesbranch(self):
        return b'close' in self._extra

    def extra(self):
        return self._extra

    def isinmemory(self):
        return False

    def tags(self):
        return []

    def bookmarks(self):
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        # a pending commit can never have a lower phase than its parents
        phase = phases.newcommitphase(self._repo.ui)
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def flags(self, path):
        if '_manifest' in self.__dict__:
            try:
                return self._manifest.flags(path)
            except KeyError:
                return b''

        try:
            return self._flagfunc(path)
        except OSError:
            return b''

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2)  # punt on two parents for now

    def ancestors(self):
        # yield the parents first, then every changelog ancestor
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
            [p.rev() for p in self._parents]
        ):
            yield self._repo[a]

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """

    def dirty(self, missing=False, merge=True, branch=True):
        return False
1529
1529
1530
1530
1531 class workingctx(committablectx):
1531 class workingctx(committablectx):
1532 """A workingctx object makes access to data related to
1532 """A workingctx object makes access to data related to
1533 the current working directory convenient.
1533 the current working directory convenient.
1534 date - any valid date string or (unixtime, offset), or None.
1534 date - any valid date string or (unixtime, offset), or None.
1535 user - username string, or None.
1535 user - username string, or None.
1536 extra - a dictionary of extra values, or None.
1536 extra - a dictionary of extra values, or None.
1537 changes - a list of file lists as returned by localrepo.status()
1537 changes - a list of file lists as returned by localrepo.status()
1538 or None to use the repository status.
1538 or None to use the repository status.
1539 """
1539 """
1540
1540
    def __init__(
        self, repo, text=b"", user=None, date=None, extra=None, changes=None
    ):
        """Initialize a workingctx; the branch defaults to the dirstate's
        current branch unless one is supplied in ``extra``."""
        branch = None
        if not extra or b'branch' not in extra:
            try:
                branch = repo.dirstate.branch()
            except UnicodeDecodeError:
                raise error.Abort(_(b'branch name not in UTF-8!'))
        super(workingctx, self).__init__(
            repo, text, user, date, extra, changes, branch=branch
        )
1553
1553
1554 def __iter__(self):
1554 def __iter__(self):
1555 d = self._repo.dirstate
1555 d = self._repo.dirstate
1556 for f in d:
1556 for f in d:
1557 if d.get_entry(f).tracked:
1557 if d.get_entry(f).tracked:
1558 yield f
1558 yield f
1559
1559
1560 def __contains__(self, key):
1560 def __contains__(self, key):
1561 return self._repo.dirstate.get_entry(key).tracked
1561 return self._repo.dirstate.get_entry(key).tracked
1562
1562
1563 def hex(self):
1563 def hex(self):
1564 return self._repo.nodeconstants.wdirhex
1564 return self._repo.nodeconstants.wdirhex
1565
1565
    @propertycache
    def _parents(self):
        # Parent changectxs of the working directory, dropping a null p2.
        p = self._repo.dirstate.parents()
        if p[1] == self._repo.nodeconstants.nullid:
            p = p[:-1]
        # use unfiltered repo to delay/avoid loading obsmarkers
        unfi = self._repo.unfiltered()
        return [
            changectx(
                self._repo, unfi.changelog.rev(n), n, maybe_filtered=False
            )
            for n in p
        ]
1579
1579
    def setparents(self, p1node, p2node=None):
        """Set the working directory parents and fix up copy records.

        p2node defaults to the null id (single-parent state).
        """
        if p2node is None:
            p2node = self._repo.nodeconstants.nullid
        dirstate = self._repo.dirstate
        with dirstate.parentchange():
            copies = dirstate.setparents(p1node, p2node)
            pctx = self._repo[p1node]
            if copies:
                # Adjust copy records, the dirstate cannot do it, it
                # requires access to parents manifests. Preserve them
                # only for entries added to first parent.
                for f in copies:
                    if f not in pctx and copies[f] in pctx:
                        dirstate.copy(copies[f], f)
            if p2node == self._repo.nodeconstants.nullid:
                # dropping the second parent: discard copy records whose
                # source and destination are both unknown to p1
                for f, s in sorted(dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        dirstate.copy(None, f)
1598
1598
    def _fileinfo(self, path):
        """Look up *path*, materializing the manifest first.

        populate __dict__['_manifest'] as workingctx has no _manifestdelta
        """
        self._manifest
        return super(workingctx, self)._fileinfo(path)
1603
1603
    def _buildflagfunc(self):
        # Create a fallback function for getting file flags when the
        # filesystem doesn't support them

        copiesget = self._repo.dirstate.copies().get
        parents = self.parents()
        if len(parents) < 2:
            # when we have one parent, it's easy: copy from parent
            man = parents[0].manifest()

            def func(f):
                # map a copy destination back to its source before lookup
                f = copiesget(f, f)
                return man.flags(f)

        else:
            # merges are tricky: we try to reconstruct the unstored
            # result from the merge (issue1802)
            p1, p2 = parents
            pa = p1.ancestor(p2)
            m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

            def func(f):
                f = copiesget(f, f)  # may be wrong for merges with copies
                fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
                # both sides agree: use that flag
                if fl1 == fl2:
                    return fl1
                # one side unchanged from the ancestor: take the other side
                if fl1 == fla:
                    return fl2
                if fl2 == fla:
                    return fl1
                return b''  # punt for conflicts

        return func
1637
1637
    @propertycache
    def _flagfunc(self):
        # flag lookup backed by the filesystem, falling back to
        # _buildflagfunc when the filesystem cannot express flags
        return self._repo.dirstate.flagfunc(self._buildflagfunc)
1641
1641
1642 def flags(self, path):
1642 def flags(self, path):
1643 try:
1643 try:
1644 return self._flagfunc(path)
1644 return self._flagfunc(path)
1645 except OSError:
1645 except OSError:
1646 return b''
1646 return b''
1647
1647
    def filectx(self, path, filelog=None):
        """get a file context from the working directory"""
        return workingfilectx(
            self._repo, path, workingctx=self, filelog=filelog
        )
1653
1653
1654 def dirty(self, missing=False, merge=True, branch=True):
1654 def dirty(self, missing=False, merge=True, branch=True):
1655 """check whether a working directory is modified"""
1655 """check whether a working directory is modified"""
1656 # check subrepos first
1656 # check subrepos first
1657 for s in sorted(self.substate):
1657 for s in sorted(self.substate):
1658 if self.sub(s).dirty(missing=missing):
1658 if self.sub(s).dirty(missing=missing):
1659 return True
1659 return True
1660 # check current working dir
1660 # check current working dir
1661 return (
1661 return (
1662 (merge and self.p2())
1662 (merge and self.p2())
1663 or (branch and self.branch() != self.p1().branch())
1663 or (branch and self.branch() != self.p1().branch())
1664 or self.modified()
1664 or self.modified()
1665 or self.added()
1665 or self.added()
1666 or self.removed()
1666 or self.removed()
1667 or (missing and self.deleted())
1667 or (missing and self.deleted())
1668 )
1668 )
1669
1669
    def add(self, list, prefix=b""):
        """Start tracking the given files; return the list of rejected ones.

        Warns (without rejecting) about files exceeding ui.large-file-limit;
        rejects missing paths and non-file/non-symlink entries.
        """
        with self._repo.wlock():
            ui, ds = self._repo.ui, self._repo.dirstate
            uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
            rejected = []
            lstat = self._repo.wvfs.lstat
            for f in list:
                # ds.pathto() returns an absolute file when this is invoked from
                # the keyword extension.  That gets flagged as non-portable on
                # Windows, since it contains the drive letter and colon.
                scmutil.checkportable(ui, os.path.join(prefix, f))
                try:
                    st = lstat(f)
                except OSError:
                    ui.warn(_(b"%s does not exist!\n") % uipath(f))
                    rejected.append(f)
                    continue
                limit = ui.configbytes(b'ui', b'large-file-limit')
                if limit != 0 and st.st_size > limit:
                    # large file: warn only, still added below
                    ui.warn(
                        _(
                            b"%s: up to %d MB of RAM may be required "
                            b"to manage this file\n"
                            b"(use 'hg revert %s' to cancel the "
                            b"pending addition)\n"
                        )
                        % (f, 3 * st.st_size // 1000000, uipath(f))
                    )
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    ui.warn(
                        _(
                            b"%s not added: only files and symlinks "
                            b"supported currently\n"
                        )
                        % uipath(f)
                    )
                    rejected.append(f)
                elif not ds.set_tracked(f):
                    ui.warn(_(b"%s already tracked!\n") % uipath(f))
            return rejected
1710
1710
1711 def forget(self, files, prefix=b""):
1711 def forget(self, files, prefix=b""):
1712 with self._repo.wlock():
1712 with self._repo.wlock():
1713 ds = self._repo.dirstate
1713 ds = self._repo.dirstate
1714 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1714 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1715 rejected = []
1715 rejected = []
1716 for f in files:
1716 for f in files:
1717 if not ds.set_untracked(f):
1717 if not ds.set_untracked(f):
1718 self._repo.ui.warn(_(b"%s not tracked!\n") % uipath(f))
1718 self._repo.ui.warn(_(b"%s not tracked!\n") % uipath(f))
1719 rejected.append(f)
1719 rejected.append(f)
1720 return rejected
1720 return rejected
1721
1721
1722 def copy(self, source, dest):
1722 def copy(self, source, dest):
1723 try:
1723 try:
1724 st = self._repo.wvfs.lstat(dest)
1724 st = self._repo.wvfs.lstat(dest)
1725 except OSError as err:
1725 except OSError as err:
1726 if err.errno != errno.ENOENT:
1726 if err.errno != errno.ENOENT:
1727 raise
1727 raise
1728 self._repo.ui.warn(
1728 self._repo.ui.warn(
1729 _(b"%s does not exist!\n") % self._repo.dirstate.pathto(dest)
1729 _(b"%s does not exist!\n") % self._repo.dirstate.pathto(dest)
1730 )
1730 )
1731 return
1731 return
1732 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1732 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1733 self._repo.ui.warn(
1733 self._repo.ui.warn(
1734 _(b"copy failed: %s is not a file or a symbolic link\n")
1734 _(b"copy failed: %s is not a file or a symbolic link\n")
1735 % self._repo.dirstate.pathto(dest)
1735 % self._repo.dirstate.pathto(dest)
1736 )
1736 )
1737 else:
1737 else:
1738 with self._repo.wlock():
1738 with self._repo.wlock():
1739 ds = self._repo.dirstate
1739 ds = self._repo.dirstate
1740 ds.set_tracked(dest)
1740 ds.set_tracked(dest)
1741 ds.copy(source, dest)
1741 ds.copy(source, dest)
1742
1742
    def match(
        self,
        pats=None,
        include=None,
        exclude=None,
        default=b'glob',
        listsubrepos=False,
        badfn=None,
        cwd=None,
    ):
        """Build a matcher for the given patterns, rooted at the repo root.

        ``cwd`` defaults to the repository's current working directory.
        """
        r = self._repo
        if not cwd:
            cwd = r.getcwd()

        # Only a case insensitive filesystem needs magic to translate user input
        # to actual case in the filesystem.
        icasefs = not util.fscasesensitive(r.root)
        return matchmod.match(
            r.root,
            cwd,
            pats,
            include,
            exclude,
            default,
            auditor=r.auditor,
            ctx=self,
            listsubrepos=listsubrepos,
            badfn=badfn,
            icasefs=icasefs,
        )
1773
1773
1774 def _filtersuspectsymlink(self, files):
1774 def _filtersuspectsymlink(self, files):
1775 if not files or self._repo.dirstate._checklink:
1775 if not files or self._repo.dirstate._checklink:
1776 return files
1776 return files
1777
1777
1778 # Symlink placeholders may get non-symlink-like contents
1778 # Symlink placeholders may get non-symlink-like contents
1779 # via user error or dereferencing by NFS or Samba servers,
1779 # via user error or dereferencing by NFS or Samba servers,
1780 # so we filter out any placeholders that don't look like a
1780 # so we filter out any placeholders that don't look like a
1781 # symlink
1781 # symlink
1782 sane = []
1782 sane = []
1783 for f in files:
1783 for f in files:
1784 if self.flags(f) == b'l':
1784 if self.flags(f) == b'l':
1785 d = self[f].data()
1785 d = self[f].data()
1786 if (
1786 if (
1787 d == b''
1787 d == b''
1788 or len(d) >= 1024
1788 or len(d) >= 1024
1789 or b'\n' in d
1789 or b'\n' in d
1790 or stringutil.binary(d)
1790 or stringutil.binary(d)
1791 ):
1791 ):
1792 self._repo.ui.debug(
1792 self._repo.ui.debug(
1793 b'ignoring suspect symlink placeholder "%s"\n' % f
1793 b'ignoring suspect symlink placeholder "%s"\n' % f
1794 )
1794 )
1795 continue
1795 continue
1796 sane.append(f)
1796 sane.append(f)
1797 return sane
1797 return sane
1798
1798
    def _checklookup(self, files, mtime_boundary):
        """Resolve files whose dirstate entry was ambiguous.

        ``files`` are candidates that need a full content compare against
        the first parent.  Returns ``(modified, deleted, clean, fixup)``
        where ``fixup`` holds ``(filename, (mode, size, mtime))`` pairs
        whose cached dirstate info can safely be refreshed.
        """
        # check for any possibly clean files
        if not files:
            return [], [], [], []

        modified = []
        deleted = []
        clean = []
        fixup = []
        pctx = self._parents[0]
        # do a full compare of any files that might have changed
        for f in sorted(files):
            try:
                # This will return True for a file that got replaced by a
                # directory in the interim, but fixing that is pretty hard.
                if (
                    f not in pctx
                    or self.flags(f) != pctx.flags(f)
                    or pctx[f].cmp(self[f])
                ):
                    modified.append(f)
                elif mtime_boundary is None:
                    # no boundary available: the file is clean but we cannot
                    # record cache information for it
                    clean.append(f)
                else:
                    s = self[f].lstat()
                    mode = s.st_mode
                    size = s.st_size
                    # the boundary comparison (is this mtime safe to cache,
                    # or too close to the status walk / suspiciously far in
                    # the future?) now lives in the timestamp module
                    file_mtime = timestamp.reliable_mtime_of(s, mtime_boundary)
                    if file_mtime is not None:
                        cache_info = (mode, size, file_mtime)
                        fixup.append((f, cache_info))
                    else:
                        # mtime judged racy: clean, but not cacheable
                        clean.append(f)
            except (IOError, OSError):
                # A file become inaccessible in between? Mark it as deleted,
                # matching dirstate behavior (issue5584).
                # The dirstate has more complex behavior around whether a
                # missing file matches a directory, etc, but we don't need to
                # bother with that: if f has made it to this point, we're sure
                # it's in the dirstate.
                deleted.append(f)

        return modified, deleted, clean, fixup
1859
1842
    def _poststatusfixup(self, status, fixup):
        """update dirstate for files that are actually clean

        Also runs any registered post-dirstate-status hooks.  All of this is
        best-effort: if the wlock cannot be taken, or the dirstate changed
        under us (identity mismatch), the update is skipped.
        """
        poststatus = self._repo.postdsstatus()
        if fixup or poststatus or self._repo.dirstate._dirty:
            try:
                oldid = self._repo.dirstate.identity()

                # updating the dirstate is optional
                # so we don't wait on the lock
                # wlock can invalidate the dirstate, so cache normal _after_
                # taking the lock
                with self._repo.wlock(False):
                    dirstate = self._repo.dirstate
                    if dirstate.identity() == oldid:
                        if fixup:
                            if dirstate.pendingparentchange():
                                # mid parent-change: the cached stat data
                                # (second tuple item) is ignored on purpose
                                normal = lambda f, pfd: dirstate.update_file(
                                    f, p1_tracked=True, wc_tracked=True
                                )
                            else:
                                normal = dirstate.set_clean
                            for f, pdf in fixup:
                                normal(f, pdf)
                            # write changes out explicitly, because nesting
                            # wlock at runtime may prevent 'wlock.release()'
                            # after this block from doing so for subsequent
                            # changing files
                            tr = self._repo.currenttransaction()
                            self._repo.dirstate.write(tr)

                        if poststatus:
                            for ps in poststatus:
                                ps(self, status)
                    else:
                        # in this case, writing changes out breaks
                        # consistency, because .hg/dirstate was
                        # already changed simultaneously after last
                        # caching (see also issue5584 for detail)
                        self._repo.ui.debug(
                            b'skip updating dirstate: identity mismatch\n'
                        )
            except error.LockError:
                # the fixup is optional; proceed without it
                pass
            finally:
                # Even if the wlock couldn't be grabbed, clear out the list.
                self._repo.clearpostdsstatus()
1906
1889
    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        '''Gets the status from the dirstate -- internal use only.'''
        subrepos = []
        if b'.hgsub' in self:
            subrepos = sorted(self.substate)
        # `cmp` is the list of files needing a content compare;
        # `mtime_boundary` bounds which mtimes are safe to cache
        cmp, s, mtime_boundary = self._repo.dirstate.status(
            match, subrepos, ignored=ignored, clean=clean, unknown=unknown
        )

        # check for any possibly clean files
        fixup = []
        if cmp:
            modified2, deleted2, clean_set, fixup = self._checklookup(
                cmp, mtime_boundary
            )
            s.modified.extend(modified2)
            s.deleted.extend(deleted2)

            # files in `fixup` are clean too, they just also get their
            # dirstate cache refreshed below
            if clean_set and clean:
                s.clean.extend(clean_set)
            if fixup and clean:
                s.clean.extend((f for f, _ in fixup))

        self._poststatusfixup(s, fixup)

        if match.always():
            # cache for performance
            if s.unknown or s.ignored or s.clean:
                # "_status" is cached with list*=False in the normal route
                self._status = scmutil.status(
                    s.modified, s.added, s.removed, s.deleted, [], [], []
                )
            else:
                self._status = s

        return s
1943
1926
1944 @propertycache
1927 @propertycache
1945 def _copies(self):
1928 def _copies(self):
1946 p1copies = {}
1929 p1copies = {}
1947 p2copies = {}
1930 p2copies = {}
1948 parents = self._repo.dirstate.parents()
1931 parents = self._repo.dirstate.parents()
1949 p1manifest = self._repo[parents[0]].manifest()
1932 p1manifest = self._repo[parents[0]].manifest()
1950 p2manifest = self._repo[parents[1]].manifest()
1933 p2manifest = self._repo[parents[1]].manifest()
1951 changedset = set(self.added()) | set(self.modified())
1934 changedset = set(self.added()) | set(self.modified())
1952 narrowmatch = self._repo.narrowmatch()
1935 narrowmatch = self._repo.narrowmatch()
1953 for dst, src in self._repo.dirstate.copies().items():
1936 for dst, src in self._repo.dirstate.copies().items():
1954 if dst not in changedset or not narrowmatch(dst):
1937 if dst not in changedset or not narrowmatch(dst):
1955 continue
1938 continue
1956 if src in p1manifest:
1939 if src in p1manifest:
1957 p1copies[dst] = src
1940 p1copies[dst] = src
1958 elif src in p2manifest:
1941 elif src in p2manifest:
1959 p2copies[dst] = src
1942 p2copies[dst] = src
1960 return p1copies, p2copies
1943 return p1copies, p2copies
1961
1944
    @propertycache
    def _manifest(self):
        """generate a manifest corresponding to the values in self._status

        This reuse the file nodeid from parent, but we use special node
        identifiers for added and modified files. This is used by manifests
        merge to see that files are different and by update logic to avoid
        deleting newly added files.
        """
        return self._buildstatusmanifest(self._status)
1972
1955
1973 def _buildstatusmanifest(self, status):
1956 def _buildstatusmanifest(self, status):
1974 """Builds a manifest that includes the given status results."""
1957 """Builds a manifest that includes the given status results."""
1975 parents = self.parents()
1958 parents = self.parents()
1976
1959
1977 man = parents[0].manifest().copy()
1960 man = parents[0].manifest().copy()
1978
1961
1979 ff = self._flagfunc
1962 ff = self._flagfunc
1980 for i, l in (
1963 for i, l in (
1981 (self._repo.nodeconstants.addednodeid, status.added),
1964 (self._repo.nodeconstants.addednodeid, status.added),
1982 (self._repo.nodeconstants.modifiednodeid, status.modified),
1965 (self._repo.nodeconstants.modifiednodeid, status.modified),
1983 ):
1966 ):
1984 for f in l:
1967 for f in l:
1985 man[f] = i
1968 man[f] = i
1986 try:
1969 try:
1987 man.setflag(f, ff(f))
1970 man.setflag(f, ff(f))
1988 except OSError:
1971 except OSError:
1989 pass
1972 pass
1990
1973
1991 for f in status.deleted + status.removed:
1974 for f in status.deleted + status.removed:
1992 if f in man:
1975 if f in man:
1993 del man[f]
1976 del man[f]
1994
1977
1995 return man
1978 return man
1996
1979
    def _buildstatus(
        self, other, s, match, listignored, listclean, listunknown
    ):
        """build a status with respect to another context

        This includes logic for maintaining the fast path of status when
        comparing the working directory against its parent, which is to skip
        building a new manifest if self (working directory) is not comparing
        against its parent (repo['.']).
        """
        s = self._dirstatestatus(match, listignored, listclean, listunknown)
        # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
        # might have accidentally ended up with the entire contents of the file
        # they are supposed to be linking to.
        s.modified[:] = self._filtersuspectsymlink(s.modified)
        if other != self._repo[b'.']:
            # slow path: defer to the manifest-based comparison
            s = super(workingctx, self)._buildstatus(
                other, s, match, listignored, listclean, listunknown
            )
        return s
2017
2000
    def _matchstatus(self, other, match):
        """override the match method with a filter for directory patterns

        We use inheritance to customize the match.bad method only in cases of
        workingctx since it belongs only to the working directory when
        comparing against the parent changeset.

        If we aren't comparing against the working directory's parent, then we
        just use the default match object sent to us.
        """
        if other != self._repo[b'.']:

            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in other and not other.hasdir(f):
                    self._repo.ui.warn(
                        b'%s: %s\n' % (self._repo.dirstate.pathto(f), msg)
                    )

            # NOTE: mutates the passed-in matcher in place
            match.bad = bad
        return match
2040
2023
2041 def walk(self, match):
2024 def walk(self, match):
2042 '''Generates matching file names.'''
2025 '''Generates matching file names.'''
2043 return sorted(
2026 return sorted(
2044 self._repo.dirstate.walk(
2027 self._repo.dirstate.walk(
2045 self._repo.narrowmatch(match),
2028 self._repo.narrowmatch(match),
2046 subrepos=sorted(self.substate),
2029 subrepos=sorted(self.substate),
2047 unknown=True,
2030 unknown=True,
2048 ignored=False,
2031 ignored=False,
2049 )
2032 )
2050 )
2033 )
2051
2034
2052 def matches(self, match):
2035 def matches(self, match):
2053 match = self._repo.narrowmatch(match)
2036 match = self._repo.narrowmatch(match)
2054 ds = self._repo.dirstate
2037 ds = self._repo.dirstate
2055 return sorted(f for f in ds.matches(match) if ds.get_entry(f).tracked)
2038 return sorted(f for f in ds.matches(match) if ds.get_entry(f).tracked)
2056
2039
    def markcommitted(self, node):
        """Update the dirstate to reflect that this context was committed
        as ``node``."""
        with self._repo.dirstate.parentchange():
            for f in self.modified() + self.added():
                self._repo.dirstate.update_file(
                    f, p1_tracked=True, wc_tracked=True
                )
            for f in self.removed():
                self._repo.dirstate.update_file(
                    f, p1_tracked=False, wc_tracked=False
                )
            self._repo.dirstate.setparents(node)
            self._repo._quick_access_changeid_invalidate()

        sparse.aftercommit(self._repo, node)

        # write changes out explicitly, because nesting wlock at
        # runtime may prevent 'wlock.release()' in 'repo.commit()'
        # from immediately doing so for subsequent changing files
        self._repo.dirstate.write(self._repo.currenttransaction())
2076
2059
2077 def mergestate(self, clean=False):
2060 def mergestate(self, clean=False):
2078 if clean:
2061 if clean:
2079 return mergestatemod.mergestate.clean(self._repo)
2062 return mergestatemod.mergestate.clean(self._repo)
2080 return mergestatemod.mergestate.read(self._repo)
2063 return mergestatemod.mergestate.read(self._repo)
2081
2064
2082
2065
class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""

    def __init__(self, repo, path, filelog=None, ctx=None):
        self._repo = repo
        self._path = path
        self._changeid = None
        self._filerev = self._filenode = None

        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        # a committable file context always "exists"
        return True

    __bool__ = __nonzero__

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def renamed(self):
        """Return ``(source path, source filenode)`` if this file was
        copied/renamed, or ``None`` otherwise."""
        path = self.copysource()
        if not path:
            return None
        return (
            path,
            self._changectx._parents[0]._manifest.get(
                path, self._repo.nodeconstants.nullid
            ),
        )

    def parents(self):
        '''return parent filectxs, following copies if necessary'''

        def filenode(ctx, path):
            # nullid means "absent from this manifest"
            return ctx._manifest.get(path, self._repo.nodeconstants.nullid)

        path = self._path
        fl = self._filelog
        pcl = self._changectx._parents
        renamed = self.renamed()

        if renamed:
            # follow the copy source instead of the path itself
            pl = [renamed + (None,)]
        else:
            pl = [(path, filenode(pcl[0], path), fl)]

        for pc in pcl[1:]:
            pl.append((path, filenode(pc, path), fl))

        return [
            self._parentfilectx(p, fileid=n, filelog=l)
            for p, n, l in pl
            if n != self._repo.nodeconstants.nullid
        ]

    def children(self):
        # a not-yet-committed file revision has no children
        return []
2145
2128
2146
2129
class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""

    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        # lazily build the owning working-directory context
        return workingctx(self._repo)

    def data(self):
        """Return the file's content as read from the working directory."""
        return self._repo.wread(self._path)

    def copysource(self):
        """Return the dirstate copy source of this file, if any."""
        return self._repo.dirstate.copied(self._path)

    def size(self):
        """Return the on-disk size (without following symlinks)."""
        return self._repo.wvfs.lstat(self._path).st_size

    def lstat(self):
        """Return the lstat result for the file in the working directory."""
        return self._repo.wvfs.lstat(self._path)

    def date(self):
        """Return ``(mtime, tz)``, falling back to the changeset date when
        the file is missing on disk."""
        t, tz = self._changectx.date()
        try:
            return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
        except OSError as err:
            # errno check kept for py2 compat
            if err.errno != errno.ENOENT:
                raise
            return (t, tz)

    def exists(self):
        """True if the path exists (following symlinks)."""
        return self._repo.wvfs.exists(self._path)

    def lexists(self):
        """True if the path exists (without following symlinks)."""
        return self._repo.wvfs.lexists(self._path)

    def audit(self):
        # validate that the path is legal inside the working directory
        return self._repo.wvfs.audit(self._path)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        rmdir = self._repo.ui.configbool(b'experimental', b'removeemptydirs')
        self._repo.wvfs.unlinkpath(
            self._path, ignoremissing=ignoremissing, rmdir=rmdir
        )

    def write(self, data, flags, backgroundclose=False, **kwargs):
        """wraps repo.wwrite"""
        return self._repo.wwrite(
            self._path, data, flags, backgroundclose=backgroundclose, **kwargs
        )

    def markcopied(self, src):
        """marks this file a copy of `src`"""
        self._repo.dirstate.copy(src, self._path)

    def clearunknown(self):
        """Removes conflicting items in the working directory so that
        ``write()`` can be called successfully.
        """
        wvfs = self._repo.wvfs
        f = self._path
        wvfs.audit(f)
        if self._repo.ui.configbool(
            b'experimental', b'merge.checkpathconflicts'
        ):
            # remove files under the directory as they should already be
            # warned and backed up
            if wvfs.isdir(f) and not wvfs.islink(f):
                wvfs.rmtree(f, forcibly=True)
            for p in reversed(list(pathutil.finddirs(f))):
                if wvfs.isfileorlink(p):
                    wvfs.unlink(p)
                    break
        else:
            # don't remove files if path conflicts are not processed
            if wvfs.isdir(f) and not wvfs.islink(f):
                wvfs.removedirs(f)

    def setflags(self, l, x):
        # set the symlink (l) / executable (x) flags on the on-disk file
        self._repo.wvfs.setflags(self._path, l, x)
2239
2222
2240
2223
2241 class overlayworkingctx(committablectx):
2224 class overlayworkingctx(committablectx):
2242 """Wraps another mutable context with a write-back cache that can be
2225 """Wraps another mutable context with a write-back cache that can be
2243 converted into a commit context.
2226 converted into a commit context.
2244
2227
2245 self._cache[path] maps to a dict with keys: {
2228 self._cache[path] maps to a dict with keys: {
2246 'exists': bool?
2229 'exists': bool?
2247 'date': date?
2230 'date': date?
2248 'data': str?
2231 'data': str?
2249 'flags': str?
2232 'flags': str?
2250 'copied': str? (path or None)
2233 'copied': str? (path or None)
2251 }
2234 }
2252 If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
2235 If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
2253 is `False`, the file was deleted.
2236 is `False`, the file was deleted.
2254 """
2237 """
2255
2238
2256 def __init__(self, repo):
2239 def __init__(self, repo):
2257 super(overlayworkingctx, self).__init__(repo)
2240 super(overlayworkingctx, self).__init__(repo)
2258 self.clean()
2241 self.clean()
2259
2242
2260 def setbase(self, wrappedctx):
2243 def setbase(self, wrappedctx):
2261 self._wrappedctx = wrappedctx
2244 self._wrappedctx = wrappedctx
2262 self._parents = [wrappedctx]
2245 self._parents = [wrappedctx]
2263 # Drop old manifest cache as it is now out of date.
2246 # Drop old manifest cache as it is now out of date.
2264 # This is necessary when, e.g., rebasing several nodes with one
2247 # This is necessary when, e.g., rebasing several nodes with one
2265 # ``overlayworkingctx`` (e.g. with --collapse).
2248 # ``overlayworkingctx`` (e.g. with --collapse).
2266 util.clearcachedproperty(self, b'_manifest')
2249 util.clearcachedproperty(self, b'_manifest')
2267
2250
2268 def setparents(self, p1node, p2node=None):
2251 def setparents(self, p1node, p2node=None):
2269 if p2node is None:
2252 if p2node is None:
2270 p2node = self._repo.nodeconstants.nullid
2253 p2node = self._repo.nodeconstants.nullid
2271 assert p1node == self._wrappedctx.node()
2254 assert p1node == self._wrappedctx.node()
2272 self._parents = [self._wrappedctx, self._repo.unfiltered()[p2node]]
2255 self._parents = [self._wrappedctx, self._repo.unfiltered()[p2node]]
2273
2256
2274 def data(self, path):
2257 def data(self, path):
2275 if self.isdirty(path):
2258 if self.isdirty(path):
2276 if self._cache[path][b'exists']:
2259 if self._cache[path][b'exists']:
2277 if self._cache[path][b'data'] is not None:
2260 if self._cache[path][b'data'] is not None:
2278 return self._cache[path][b'data']
2261 return self._cache[path][b'data']
2279 else:
2262 else:
2280 # Must fallback here, too, because we only set flags.
2263 # Must fallback here, too, because we only set flags.
2281 return self._wrappedctx[path].data()
2264 return self._wrappedctx[path].data()
2282 else:
2265 else:
2283 raise error.ProgrammingError(
2266 raise error.ProgrammingError(
2284 b"No such file or directory: %s" % path
2267 b"No such file or directory: %s" % path
2285 )
2268 )
2286 else:
2269 else:
2287 return self._wrappedctx[path].data()
2270 return self._wrappedctx[path].data()
2288
2271
2289 @propertycache
2272 @propertycache
2290 def _manifest(self):
2273 def _manifest(self):
2291 parents = self.parents()
2274 parents = self.parents()
2292 man = parents[0].manifest().copy()
2275 man = parents[0].manifest().copy()
2293
2276
2294 flag = self._flagfunc
2277 flag = self._flagfunc
2295 for path in self.added():
2278 for path in self.added():
2296 man[path] = self._repo.nodeconstants.addednodeid
2279 man[path] = self._repo.nodeconstants.addednodeid
2297 man.setflag(path, flag(path))
2280 man.setflag(path, flag(path))
2298 for path in self.modified():
2281 for path in self.modified():
2299 man[path] = self._repo.nodeconstants.modifiednodeid
2282 man[path] = self._repo.nodeconstants.modifiednodeid
2300 man.setflag(path, flag(path))
2283 man.setflag(path, flag(path))
2301 for path in self.removed():
2284 for path in self.removed():
2302 del man[path]
2285 del man[path]
2303 return man
2286 return man
2304
2287
2305 @propertycache
2288 @propertycache
2306 def _flagfunc(self):
2289 def _flagfunc(self):
2307 def f(path):
2290 def f(path):
2308 return self._cache[path][b'flags']
2291 return self._cache[path][b'flags']
2309
2292
2310 return f
2293 return f
2311
2294
2312 def files(self):
2295 def files(self):
2313 return sorted(self.added() + self.modified() + self.removed())
2296 return sorted(self.added() + self.modified() + self.removed())
2314
2297
2315 def modified(self):
2298 def modified(self):
2316 return [
2299 return [
2317 f
2300 f
2318 for f in self._cache.keys()
2301 for f in self._cache.keys()
2319 if self._cache[f][b'exists'] and self._existsinparent(f)
2302 if self._cache[f][b'exists'] and self._existsinparent(f)
2320 ]
2303 ]
2321
2304
2322 def added(self):
2305 def added(self):
2323 return [
2306 return [
2324 f
2307 f
2325 for f in self._cache.keys()
2308 for f in self._cache.keys()
2326 if self._cache[f][b'exists'] and not self._existsinparent(f)
2309 if self._cache[f][b'exists'] and not self._existsinparent(f)
2327 ]
2310 ]
2328
2311
2329 def removed(self):
2312 def removed(self):
2330 return [
2313 return [
2331 f
2314 f
2332 for f in self._cache.keys()
2315 for f in self._cache.keys()
2333 if not self._cache[f][b'exists'] and self._existsinparent(f)
2316 if not self._cache[f][b'exists'] and self._existsinparent(f)
2334 ]
2317 ]
2335
2318
2336 def p1copies(self):
2319 def p1copies(self):
2337 copies = {}
2320 copies = {}
2338 narrowmatch = self._repo.narrowmatch()
2321 narrowmatch = self._repo.narrowmatch()
2339 for f in self._cache.keys():
2322 for f in self._cache.keys():
2340 if not narrowmatch(f):
2323 if not narrowmatch(f):
2341 continue
2324 continue
2342 copies.pop(f, None) # delete if it exists
2325 copies.pop(f, None) # delete if it exists
2343 source = self._cache[f][b'copied']
2326 source = self._cache[f][b'copied']
2344 if source:
2327 if source:
2345 copies[f] = source
2328 copies[f] = source
2346 return copies
2329 return copies
2347
2330
2348 def p2copies(self):
2331 def p2copies(self):
2349 copies = {}
2332 copies = {}
2350 narrowmatch = self._repo.narrowmatch()
2333 narrowmatch = self._repo.narrowmatch()
2351 for f in self._cache.keys():
2334 for f in self._cache.keys():
2352 if not narrowmatch(f):
2335 if not narrowmatch(f):
2353 continue
2336 continue
2354 copies.pop(f, None) # delete if it exists
2337 copies.pop(f, None) # delete if it exists
2355 source = self._cache[f][b'copied']
2338 source = self._cache[f][b'copied']
2356 if source:
2339 if source:
2357 copies[f] = source
2340 copies[f] = source
2358 return copies
2341 return copies
2359
2342
2360 def isinmemory(self):
2343 def isinmemory(self):
2361 return True
2344 return True
2362
2345
2363 def filedate(self, path):
2346 def filedate(self, path):
2364 if self.isdirty(path):
2347 if self.isdirty(path):
2365 return self._cache[path][b'date']
2348 return self._cache[path][b'date']
2366 else:
2349 else:
2367 return self._wrappedctx[path].date()
2350 return self._wrappedctx[path].date()
2368
2351
2369 def markcopied(self, path, origin):
2352 def markcopied(self, path, origin):
2370 self._markdirty(
2353 self._markdirty(
2371 path,
2354 path,
2372 exists=True,
2355 exists=True,
2373 date=self.filedate(path),
2356 date=self.filedate(path),
2374 flags=self.flags(path),
2357 flags=self.flags(path),
2375 copied=origin,
2358 copied=origin,
2376 )
2359 )
2377
2360
2378 def copydata(self, path):
2361 def copydata(self, path):
2379 if self.isdirty(path):
2362 if self.isdirty(path):
2380 return self._cache[path][b'copied']
2363 return self._cache[path][b'copied']
2381 else:
2364 else:
2382 return None
2365 return None
2383
2366
2384 def flags(self, path):
2367 def flags(self, path):
2385 if self.isdirty(path):
2368 if self.isdirty(path):
2386 if self._cache[path][b'exists']:
2369 if self._cache[path][b'exists']:
2387 return self._cache[path][b'flags']
2370 return self._cache[path][b'flags']
2388 else:
2371 else:
2389 raise error.ProgrammingError(
2372 raise error.ProgrammingError(
2390 b"No such file or directory: %s" % path
2373 b"No such file or directory: %s" % path
2391 )
2374 )
2392 else:
2375 else:
2393 return self._wrappedctx[path].flags()
2376 return self._wrappedctx[path].flags()
2394
2377
2395 def __contains__(self, key):
2378 def __contains__(self, key):
2396 if key in self._cache:
2379 if key in self._cache:
2397 return self._cache[key][b'exists']
2380 return self._cache[key][b'exists']
2398 return key in self.p1()
2381 return key in self.p1()
2399
2382
2400 def _existsinparent(self, path):
2383 def _existsinparent(self, path):
2401 try:
2384 try:
2402 # ``commitctx` raises a ``ManifestLookupError`` if a path does not
2385 # ``commitctx` raises a ``ManifestLookupError`` if a path does not
2403 # exist, unlike ``workingctx``, which returns a ``workingfilectx``
2386 # exist, unlike ``workingctx``, which returns a ``workingfilectx``
2404 # with an ``exists()`` function.
2387 # with an ``exists()`` function.
2405 self._wrappedctx[path]
2388 self._wrappedctx[path]
2406 return True
2389 return True
2407 except error.ManifestLookupError:
2390 except error.ManifestLookupError:
2408 return False
2391 return False
2409
2392
2410 def _auditconflicts(self, path):
2393 def _auditconflicts(self, path):
2411 """Replicates conflict checks done by wvfs.write().
2394 """Replicates conflict checks done by wvfs.write().
2412
2395
2413 Since we never write to the filesystem and never call `applyupdates` in
2396 Since we never write to the filesystem and never call `applyupdates` in
2414 IMM, we'll never check that a path is actually writable -- e.g., because
2397 IMM, we'll never check that a path is actually writable -- e.g., because
2415 it adds `a/foo`, but `a` is actually a file in the other commit.
2398 it adds `a/foo`, but `a` is actually a file in the other commit.
2416 """
2399 """
2417
2400
2418 def fail(path, component):
2401 def fail(path, component):
2419 # p1() is the base and we're receiving "writes" for p2()'s
2402 # p1() is the base and we're receiving "writes" for p2()'s
2420 # files.
2403 # files.
2421 if b'l' in self.p1()[component].flags():
2404 if b'l' in self.p1()[component].flags():
2422 raise error.Abort(
2405 raise error.Abort(
2423 b"error: %s conflicts with symlink %s "
2406 b"error: %s conflicts with symlink %s "
2424 b"in %d." % (path, component, self.p1().rev())
2407 b"in %d." % (path, component, self.p1().rev())
2425 )
2408 )
2426 else:
2409 else:
2427 raise error.Abort(
2410 raise error.Abort(
2428 b"error: '%s' conflicts with file '%s' in "
2411 b"error: '%s' conflicts with file '%s' in "
2429 b"%d." % (path, component, self.p1().rev())
2412 b"%d." % (path, component, self.p1().rev())
2430 )
2413 )
2431
2414
2432 # Test that each new directory to be created to write this path from p2
2415 # Test that each new directory to be created to write this path from p2
2433 # is not a file in p1.
2416 # is not a file in p1.
2434 components = path.split(b'/')
2417 components = path.split(b'/')
2435 for i in pycompat.xrange(len(components)):
2418 for i in pycompat.xrange(len(components)):
2436 component = b"/".join(components[0:i])
2419 component = b"/".join(components[0:i])
2437 if component in self:
2420 if component in self:
2438 fail(path, component)
2421 fail(path, component)
2439
2422
2440 # Test the other direction -- that this path from p2 isn't a directory
2423 # Test the other direction -- that this path from p2 isn't a directory
2441 # in p1 (test that p1 doesn't have any paths matching `path/*`).
2424 # in p1 (test that p1 doesn't have any paths matching `path/*`).
2442 match = self.match([path], default=b'path')
2425 match = self.match([path], default=b'path')
2443 mfiles = list(self.p1().manifest().walk(match))
2426 mfiles = list(self.p1().manifest().walk(match))
2444 if len(mfiles) > 0:
2427 if len(mfiles) > 0:
2445 if len(mfiles) == 1 and mfiles[0] == path:
2428 if len(mfiles) == 1 and mfiles[0] == path:
2446 return
2429 return
2447 # omit the files which are deleted in current IMM wctx
2430 # omit the files which are deleted in current IMM wctx
2448 mfiles = [m for m in mfiles if m in self]
2431 mfiles = [m for m in mfiles if m in self]
2449 if not mfiles:
2432 if not mfiles:
2450 return
2433 return
2451 raise error.Abort(
2434 raise error.Abort(
2452 b"error: file '%s' cannot be written because "
2435 b"error: file '%s' cannot be written because "
2453 b" '%s/' is a directory in %s (containing %d "
2436 b" '%s/' is a directory in %s (containing %d "
2454 b"entries: %s)"
2437 b"entries: %s)"
2455 % (path, path, self.p1(), len(mfiles), b', '.join(mfiles))
2438 % (path, path, self.p1(), len(mfiles), b', '.join(mfiles))
2456 )
2439 )
2457
2440
2458 def write(self, path, data, flags=b'', **kwargs):
2441 def write(self, path, data, flags=b'', **kwargs):
2459 if data is None:
2442 if data is None:
2460 raise error.ProgrammingError(b"data must be non-None")
2443 raise error.ProgrammingError(b"data must be non-None")
2461 self._auditconflicts(path)
2444 self._auditconflicts(path)
2462 self._markdirty(
2445 self._markdirty(
2463 path, exists=True, data=data, date=dateutil.makedate(), flags=flags
2446 path, exists=True, data=data, date=dateutil.makedate(), flags=flags
2464 )
2447 )
2465
2448
2466 def setflags(self, path, l, x):
2449 def setflags(self, path, l, x):
2467 flag = b''
2450 flag = b''
2468 if l:
2451 if l:
2469 flag = b'l'
2452 flag = b'l'
2470 elif x:
2453 elif x:
2471 flag = b'x'
2454 flag = b'x'
2472 self._markdirty(path, exists=True, date=dateutil.makedate(), flags=flag)
2455 self._markdirty(path, exists=True, date=dateutil.makedate(), flags=flag)
2473
2456
2474 def remove(self, path):
2457 def remove(self, path):
2475 self._markdirty(path, exists=False)
2458 self._markdirty(path, exists=False)
2476
2459
2477 def exists(self, path):
2460 def exists(self, path):
2478 """exists behaves like `lexists`, but needs to follow symlinks and
2461 """exists behaves like `lexists`, but needs to follow symlinks and
2479 return False if they are broken.
2462 return False if they are broken.
2480 """
2463 """
2481 if self.isdirty(path):
2464 if self.isdirty(path):
2482 # If this path exists and is a symlink, "follow" it by calling
2465 # If this path exists and is a symlink, "follow" it by calling
2483 # exists on the destination path.
2466 # exists on the destination path.
2484 if (
2467 if (
2485 self._cache[path][b'exists']
2468 self._cache[path][b'exists']
2486 and b'l' in self._cache[path][b'flags']
2469 and b'l' in self._cache[path][b'flags']
2487 ):
2470 ):
2488 return self.exists(self._cache[path][b'data'].strip())
2471 return self.exists(self._cache[path][b'data'].strip())
2489 else:
2472 else:
2490 return self._cache[path][b'exists']
2473 return self._cache[path][b'exists']
2491
2474
2492 return self._existsinparent(path)
2475 return self._existsinparent(path)
2493
2476
2494 def lexists(self, path):
2477 def lexists(self, path):
2495 """lexists returns True if the path exists"""
2478 """lexists returns True if the path exists"""
2496 if self.isdirty(path):
2479 if self.isdirty(path):
2497 return self._cache[path][b'exists']
2480 return self._cache[path][b'exists']
2498
2481
2499 return self._existsinparent(path)
2482 return self._existsinparent(path)
2500
2483
2501 def size(self, path):
2484 def size(self, path):
2502 if self.isdirty(path):
2485 if self.isdirty(path):
2503 if self._cache[path][b'exists']:
2486 if self._cache[path][b'exists']:
2504 return len(self._cache[path][b'data'])
2487 return len(self._cache[path][b'data'])
2505 else:
2488 else:
2506 raise error.ProgrammingError(
2489 raise error.ProgrammingError(
2507 b"No such file or directory: %s" % path
2490 b"No such file or directory: %s" % path
2508 )
2491 )
2509 return self._wrappedctx[path].size()
2492 return self._wrappedctx[path].size()
2510
2493
2511 def tomemctx(
2494 def tomemctx(
2512 self,
2495 self,
2513 text,
2496 text,
2514 branch=None,
2497 branch=None,
2515 extra=None,
2498 extra=None,
2516 date=None,
2499 date=None,
2517 parents=None,
2500 parents=None,
2518 user=None,
2501 user=None,
2519 editor=None,
2502 editor=None,
2520 ):
2503 ):
2521 """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
2504 """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
2522 committed.
2505 committed.
2523
2506
2524 ``text`` is the commit message.
2507 ``text`` is the commit message.
2525 ``parents`` (optional) are rev numbers.
2508 ``parents`` (optional) are rev numbers.
2526 """
2509 """
2527 # Default parents to the wrapped context if not passed.
2510 # Default parents to the wrapped context if not passed.
2528 if parents is None:
2511 if parents is None:
2529 parents = self.parents()
2512 parents = self.parents()
2530 if len(parents) == 1:
2513 if len(parents) == 1:
2531 parents = (parents[0], None)
2514 parents = (parents[0], None)
2532
2515
2533 # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
2516 # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
2534 if parents[1] is None:
2517 if parents[1] is None:
2535 parents = (self._repo[parents[0]], None)
2518 parents = (self._repo[parents[0]], None)
2536 else:
2519 else:
2537 parents = (self._repo[parents[0]], self._repo[parents[1]])
2520 parents = (self._repo[parents[0]], self._repo[parents[1]])
2538
2521
2539 files = self.files()
2522 files = self.files()
2540
2523
2541 def getfile(repo, memctx, path):
2524 def getfile(repo, memctx, path):
2542 if self._cache[path][b'exists']:
2525 if self._cache[path][b'exists']:
2543 return memfilectx(
2526 return memfilectx(
2544 repo,
2527 repo,
2545 memctx,
2528 memctx,
2546 path,
2529 path,
2547 self._cache[path][b'data'],
2530 self._cache[path][b'data'],
2548 b'l' in self._cache[path][b'flags'],
2531 b'l' in self._cache[path][b'flags'],
2549 b'x' in self._cache[path][b'flags'],
2532 b'x' in self._cache[path][b'flags'],
2550 self._cache[path][b'copied'],
2533 self._cache[path][b'copied'],
2551 )
2534 )
2552 else:
2535 else:
2553 # Returning None, but including the path in `files`, is
2536 # Returning None, but including the path in `files`, is
2554 # necessary for memctx to register a deletion.
2537 # necessary for memctx to register a deletion.
2555 return None
2538 return None
2556
2539
2557 if branch is None:
2540 if branch is None:
2558 branch = self._wrappedctx.branch()
2541 branch = self._wrappedctx.branch()
2559
2542
2560 return memctx(
2543 return memctx(
2561 self._repo,
2544 self._repo,
2562 parents,
2545 parents,
2563 text,
2546 text,
2564 files,
2547 files,
2565 getfile,
2548 getfile,
2566 date=date,
2549 date=date,
2567 extra=extra,
2550 extra=extra,
2568 user=user,
2551 user=user,
2569 branch=branch,
2552 branch=branch,
2570 editor=editor,
2553 editor=editor,
2571 )
2554 )
2572
2555
2573 def tomemctx_for_amend(self, precursor):
2556 def tomemctx_for_amend(self, precursor):
2574 extra = precursor.extra().copy()
2557 extra = precursor.extra().copy()
2575 extra[b'amend_source'] = precursor.hex()
2558 extra[b'amend_source'] = precursor.hex()
2576 return self.tomemctx(
2559 return self.tomemctx(
2577 text=precursor.description(),
2560 text=precursor.description(),
2578 branch=precursor.branch(),
2561 branch=precursor.branch(),
2579 extra=extra,
2562 extra=extra,
2580 date=precursor.date(),
2563 date=precursor.date(),
2581 user=precursor.user(),
2564 user=precursor.user(),
2582 )
2565 )
2583
2566
2584 def isdirty(self, path):
2567 def isdirty(self, path):
2585 return path in self._cache
2568 return path in self._cache
2586
2569
2587 def clean(self):
2570 def clean(self):
2588 self._mergestate = None
2571 self._mergestate = None
2589 self._cache = {}
2572 self._cache = {}
2590
2573
2591 def _compact(self):
2574 def _compact(self):
2592 """Removes keys from the cache that are actually clean, by comparing
2575 """Removes keys from the cache that are actually clean, by comparing
2593 them with the underlying context.
2576 them with the underlying context.
2594
2577
2595 This can occur during the merge process, e.g. by passing --tool :local
2578 This can occur during the merge process, e.g. by passing --tool :local
2596 to resolve a conflict.
2579 to resolve a conflict.
2597 """
2580 """
2598 keys = []
2581 keys = []
2599 # This won't be perfect, but can help performance significantly when
2582 # This won't be perfect, but can help performance significantly when
2600 # using things like remotefilelog.
2583 # using things like remotefilelog.
2601 scmutil.prefetchfiles(
2584 scmutil.prefetchfiles(
2602 self.repo(),
2585 self.repo(),
2603 [
2586 [
2604 (
2587 (
2605 self.p1().rev(),
2588 self.p1().rev(),
2606 scmutil.matchfiles(self.repo(), self._cache.keys()),
2589 scmutil.matchfiles(self.repo(), self._cache.keys()),
2607 )
2590 )
2608 ],
2591 ],
2609 )
2592 )
2610
2593
2611 for path in self._cache.keys():
2594 for path in self._cache.keys():
2612 cache = self._cache[path]
2595 cache = self._cache[path]
2613 try:
2596 try:
2614 underlying = self._wrappedctx[path]
2597 underlying = self._wrappedctx[path]
2615 if (
2598 if (
2616 underlying.data() == cache[b'data']
2599 underlying.data() == cache[b'data']
2617 and underlying.flags() == cache[b'flags']
2600 and underlying.flags() == cache[b'flags']
2618 ):
2601 ):
2619 keys.append(path)
2602 keys.append(path)
2620 except error.ManifestLookupError:
2603 except error.ManifestLookupError:
2621 # Path not in the underlying manifest (created).
2604 # Path not in the underlying manifest (created).
2622 continue
2605 continue
2623
2606
2624 for path in keys:
2607 for path in keys:
2625 del self._cache[path]
2608 del self._cache[path]
2626 return keys
2609 return keys
2627
2610
2628 def _markdirty(
2611 def _markdirty(
2629 self, path, exists, data=None, date=None, flags=b'', copied=None
2612 self, path, exists, data=None, date=None, flags=b'', copied=None
2630 ):
2613 ):
2631 # data not provided, let's see if we already have some; if not, let's
2614 # data not provided, let's see if we already have some; if not, let's
2632 # grab it from our underlying context, so that we always have data if
2615 # grab it from our underlying context, so that we always have data if
2633 # the file is marked as existing.
2616 # the file is marked as existing.
2634 if exists and data is None:
2617 if exists and data is None:
2635 oldentry = self._cache.get(path) or {}
2618 oldentry = self._cache.get(path) or {}
2636 data = oldentry.get(b'data')
2619 data = oldentry.get(b'data')
2637 if data is None:
2620 if data is None:
2638 data = self._wrappedctx[path].data()
2621 data = self._wrappedctx[path].data()
2639
2622
2640 self._cache[path] = {
2623 self._cache[path] = {
2641 b'exists': exists,
2624 b'exists': exists,
2642 b'data': data,
2625 b'data': data,
2643 b'date': date,
2626 b'date': date,
2644 b'flags': flags,
2627 b'flags': flags,
2645 b'copied': copied,
2628 b'copied': copied,
2646 }
2629 }
2647 util.clearcachedproperty(self, b'_manifest')
2630 util.clearcachedproperty(self, b'_manifest')
2648
2631
2649 def filectx(self, path, filelog=None):
2632 def filectx(self, path, filelog=None):
2650 return overlayworkingfilectx(
2633 return overlayworkingfilectx(
2651 self._repo, path, parent=self, filelog=filelog
2634 self._repo, path, parent=self, filelog=filelog
2652 )
2635 )
2653
2636
2654 def mergestate(self, clean=False):
2637 def mergestate(self, clean=False):
2655 if clean or self._mergestate is None:
2638 if clean or self._mergestate is None:
2656 self._mergestate = mergestatemod.memmergestate(self._repo)
2639 self._mergestate = mergestatemod.memmergestate(self._repo)
2657 return self._mergestate
2640 return self._mergestate
2658
2641
2659
2642
2660 class overlayworkingfilectx(committablefilectx):
2643 class overlayworkingfilectx(committablefilectx):
2661 """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
2644 """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
2662 cache, which can be flushed through later by calling ``flush()``."""
2645 cache, which can be flushed through later by calling ``flush()``."""
2663
2646
2664 def __init__(self, repo, path, filelog=None, parent=None):
2647 def __init__(self, repo, path, filelog=None, parent=None):
2665 super(overlayworkingfilectx, self).__init__(repo, path, filelog, parent)
2648 super(overlayworkingfilectx, self).__init__(repo, path, filelog, parent)
2666 self._repo = repo
2649 self._repo = repo
2667 self._parent = parent
2650 self._parent = parent
2668 self._path = path
2651 self._path = path
2669
2652
2670 def cmp(self, fctx):
2653 def cmp(self, fctx):
2671 return self.data() != fctx.data()
2654 return self.data() != fctx.data()
2672
2655
2673 def changectx(self):
2656 def changectx(self):
2674 return self._parent
2657 return self._parent
2675
2658
2676 def data(self):
2659 def data(self):
2677 return self._parent.data(self._path)
2660 return self._parent.data(self._path)
2678
2661
2679 def date(self):
2662 def date(self):
2680 return self._parent.filedate(self._path)
2663 return self._parent.filedate(self._path)
2681
2664
2682 def exists(self):
2665 def exists(self):
2683 return self.lexists()
2666 return self.lexists()
2684
2667
2685 def lexists(self):
2668 def lexists(self):
2686 return self._parent.exists(self._path)
2669 return self._parent.exists(self._path)
2687
2670
2688 def copysource(self):
2671 def copysource(self):
2689 return self._parent.copydata(self._path)
2672 return self._parent.copydata(self._path)
2690
2673
2691 def size(self):
2674 def size(self):
2692 return self._parent.size(self._path)
2675 return self._parent.size(self._path)
2693
2676
2694 def markcopied(self, origin):
2677 def markcopied(self, origin):
2695 self._parent.markcopied(self._path, origin)
2678 self._parent.markcopied(self._path, origin)
2696
2679
2697 def audit(self):
2680 def audit(self):
2698 pass
2681 pass
2699
2682
2700 def flags(self):
2683 def flags(self):
2701 return self._parent.flags(self._path)
2684 return self._parent.flags(self._path)
2702
2685
2703 def setflags(self, islink, isexec):
2686 def setflags(self, islink, isexec):
2704 return self._parent.setflags(self._path, islink, isexec)
2687 return self._parent.setflags(self._path, islink, isexec)
2705
2688
2706 def write(self, data, flags, backgroundclose=False, **kwargs):
2689 def write(self, data, flags, backgroundclose=False, **kwargs):
2707 return self._parent.write(self._path, data, flags, **kwargs)
2690 return self._parent.write(self._path, data, flags, **kwargs)
2708
2691
2709 def remove(self, ignoremissing=False):
2692 def remove(self, ignoremissing=False):
2710 return self._parent.remove(self._path)
2693 return self._parent.remove(self._path)
2711
2694
2712 def clearunknown(self):
2695 def clearunknown(self):
2713 pass
2696 pass
2714
2697
2715
2698
2716 class workingcommitctx(workingctx):
2699 class workingcommitctx(workingctx):
2717 """A workingcommitctx object makes access to data related to
2700 """A workingcommitctx object makes access to data related to
2718 the revision being committed convenient.
2701 the revision being committed convenient.
2719
2702
2720 This hides changes in the working directory, if they aren't
2703 This hides changes in the working directory, if they aren't
2721 committed in this context.
2704 committed in this context.
2722 """
2705 """
2723
2706
2724 def __init__(
2707 def __init__(
2725 self, repo, changes, text=b"", user=None, date=None, extra=None
2708 self, repo, changes, text=b"", user=None, date=None, extra=None
2726 ):
2709 ):
2727 super(workingcommitctx, self).__init__(
2710 super(workingcommitctx, self).__init__(
2728 repo, text, user, date, extra, changes
2711 repo, text, user, date, extra, changes
2729 )
2712 )
2730
2713
2731 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
2714 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
2732 """Return matched files only in ``self._status``
2715 """Return matched files only in ``self._status``
2733
2716
2734 Uncommitted files appear "clean" via this context, even if
2717 Uncommitted files appear "clean" via this context, even if
2735 they aren't actually so in the working directory.
2718 they aren't actually so in the working directory.
2736 """
2719 """
2737 if clean:
2720 if clean:
2738 clean = [f for f in self._manifest if f not in self._changedset]
2721 clean = [f for f in self._manifest if f not in self._changedset]
2739 else:
2722 else:
2740 clean = []
2723 clean = []
2741 return scmutil.status(
2724 return scmutil.status(
2742 [f for f in self._status.modified if match(f)],
2725 [f for f in self._status.modified if match(f)],
2743 [f for f in self._status.added if match(f)],
2726 [f for f in self._status.added if match(f)],
2744 [f for f in self._status.removed if match(f)],
2727 [f for f in self._status.removed if match(f)],
2745 [],
2728 [],
2746 [],
2729 [],
2747 [],
2730 [],
2748 clean,
2731 clean,
2749 )
2732 )
2750
2733
2751 @propertycache
2734 @propertycache
2752 def _changedset(self):
2735 def _changedset(self):
2753 """Return the set of files changed in this context"""
2736 """Return the set of files changed in this context"""
2754 changed = set(self._status.modified)
2737 changed = set(self._status.modified)
2755 changed.update(self._status.added)
2738 changed.update(self._status.added)
2756 changed.update(self._status.removed)
2739 changed.update(self._status.removed)
2757 return changed
2740 return changed
2758
2741
2759
2742
2760 def makecachingfilectxfn(func):
2743 def makecachingfilectxfn(func):
2761 """Create a filectxfn that caches based on the path.
2744 """Create a filectxfn that caches based on the path.
2762
2745
2763 We can't use util.cachefunc because it uses all arguments as the cache
2746 We can't use util.cachefunc because it uses all arguments as the cache
2764 key and this creates a cycle since the arguments include the repo and
2747 key and this creates a cycle since the arguments include the repo and
2765 memctx.
2748 memctx.
2766 """
2749 """
2767 cache = {}
2750 cache = {}
2768
2751
2769 def getfilectx(repo, memctx, path):
2752 def getfilectx(repo, memctx, path):
2770 if path not in cache:
2753 if path not in cache:
2771 cache[path] = func(repo, memctx, path)
2754 cache[path] = func(repo, memctx, path)
2772 return cache[path]
2755 return cache[path]
2773
2756
2774 return getfilectx
2757 return getfilectx
2775
2758
2776
2759
2777 def memfilefromctx(ctx):
2760 def memfilefromctx(ctx):
2778 """Given a context return a memfilectx for ctx[path]
2761 """Given a context return a memfilectx for ctx[path]
2779
2762
2780 This is a convenience method for building a memctx based on another
2763 This is a convenience method for building a memctx based on another
2781 context.
2764 context.
2782 """
2765 """
2783
2766
2784 def getfilectx(repo, memctx, path):
2767 def getfilectx(repo, memctx, path):
2785 fctx = ctx[path]
2768 fctx = ctx[path]
2786 copysource = fctx.copysource()
2769 copysource = fctx.copysource()
2787 return memfilectx(
2770 return memfilectx(
2788 repo,
2771 repo,
2789 memctx,
2772 memctx,
2790 path,
2773 path,
2791 fctx.data(),
2774 fctx.data(),
2792 islink=fctx.islink(),
2775 islink=fctx.islink(),
2793 isexec=fctx.isexec(),
2776 isexec=fctx.isexec(),
2794 copysource=copysource,
2777 copysource=copysource,
2795 )
2778 )
2796
2779
2797 return getfilectx
2780 return getfilectx
2798
2781
2799
2782
2800 def memfilefrompatch(patchstore):
2783 def memfilefrompatch(patchstore):
2801 """Given a patch (e.g. patchstore object) return a memfilectx
2784 """Given a patch (e.g. patchstore object) return a memfilectx
2802
2785
2803 This is a convenience method for building a memctx based on a patchstore.
2786 This is a convenience method for building a memctx based on a patchstore.
2804 """
2787 """
2805
2788
2806 def getfilectx(repo, memctx, path):
2789 def getfilectx(repo, memctx, path):
2807 data, mode, copysource = patchstore.getfile(path)
2790 data, mode, copysource = patchstore.getfile(path)
2808 if data is None:
2791 if data is None:
2809 return None
2792 return None
2810 islink, isexec = mode
2793 islink, isexec = mode
2811 return memfilectx(
2794 return memfilectx(
2812 repo,
2795 repo,
2813 memctx,
2796 memctx,
2814 path,
2797 path,
2815 data,
2798 data,
2816 islink=islink,
2799 islink=islink,
2817 isexec=isexec,
2800 isexec=isexec,
2818 copysource=copysource,
2801 copysource=copysource,
2819 )
2802 )
2820
2803
2821 return getfilectx
2804 return getfilectx
2822
2805
2823
2806
2824 class memctx(committablectx):
2807 class memctx(committablectx):
2825 """Use memctx to perform in-memory commits via localrepo.commitctx().
2808 """Use memctx to perform in-memory commits via localrepo.commitctx().
2826
2809
2827 Revision information is supplied at initialization time while
2810 Revision information is supplied at initialization time while
2828 related files data is made available through a callback
2811 related files data is made available through a callback
2829 mechanism. 'repo' is the current localrepo, 'parents' is a
2812 mechanism. 'repo' is the current localrepo, 'parents' is a
2830 sequence of two parent revisions identifiers (pass None for every
2813 sequence of two parent revisions identifiers (pass None for every
2831 missing parent), 'text' is the commit message and 'files' lists
2814 missing parent), 'text' is the commit message and 'files' lists
2832 names of files touched by the revision (normalized and relative to
2815 names of files touched by the revision (normalized and relative to
2833 repository root).
2816 repository root).
2834
2817
2835 filectxfn(repo, memctx, path) is a callable receiving the
2818 filectxfn(repo, memctx, path) is a callable receiving the
2836 repository, the current memctx object and the normalized path of
2819 repository, the current memctx object and the normalized path of
2837 requested file, relative to repository root. It is fired by the
2820 requested file, relative to repository root. It is fired by the
2838 commit function for every file in 'files', but calls order is
2821 commit function for every file in 'files', but calls order is
2839 undefined. If the file is available in the revision being
2822 undefined. If the file is available in the revision being
2840 committed (updated or added), filectxfn returns a memfilectx
2823 committed (updated or added), filectxfn returns a memfilectx
2841 object. If the file was removed, filectxfn returns None for recent
2824 object. If the file was removed, filectxfn returns None for recent
2842 Mercurial. Moved files are represented by marking the source file
2825 Mercurial. Moved files are represented by marking the source file
2843 removed and the new file added with copy information (see
2826 removed and the new file added with copy information (see
2844 memfilectx).
2827 memfilectx).
2845
2828
2846 user receives the committer name and defaults to current
2829 user receives the committer name and defaults to current
2847 repository username, date is the commit date in any format
2830 repository username, date is the commit date in any format
2848 supported by dateutil.parsedate() and defaults to current date, extra
2831 supported by dateutil.parsedate() and defaults to current date, extra
2849 is a dictionary of metadata or is left empty.
2832 is a dictionary of metadata or is left empty.
2850 """
2833 """
2851
2834
2852 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
2835 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
2853 # Extensions that need to retain compatibility across Mercurial 3.1 can use
2836 # Extensions that need to retain compatibility across Mercurial 3.1 can use
2854 # this field to determine what to do in filectxfn.
2837 # this field to determine what to do in filectxfn.
2855 _returnnoneformissingfiles = True
2838 _returnnoneformissingfiles = True
2856
2839
2857 def __init__(
2840 def __init__(
2858 self,
2841 self,
2859 repo,
2842 repo,
2860 parents,
2843 parents,
2861 text,
2844 text,
2862 files,
2845 files,
2863 filectxfn,
2846 filectxfn,
2864 user=None,
2847 user=None,
2865 date=None,
2848 date=None,
2866 extra=None,
2849 extra=None,
2867 branch=None,
2850 branch=None,
2868 editor=None,
2851 editor=None,
2869 ):
2852 ):
2870 super(memctx, self).__init__(
2853 super(memctx, self).__init__(
2871 repo, text, user, date, extra, branch=branch
2854 repo, text, user, date, extra, branch=branch
2872 )
2855 )
2873 self._rev = None
2856 self._rev = None
2874 self._node = None
2857 self._node = None
2875 parents = [(p or self._repo.nodeconstants.nullid) for p in parents]
2858 parents = [(p or self._repo.nodeconstants.nullid) for p in parents]
2876 p1, p2 = parents
2859 p1, p2 = parents
2877 self._parents = [self._repo[p] for p in (p1, p2)]
2860 self._parents = [self._repo[p] for p in (p1, p2)]
2878 files = sorted(set(files))
2861 files = sorted(set(files))
2879 self._files = files
2862 self._files = files
2880 self.substate = {}
2863 self.substate = {}
2881
2864
2882 if isinstance(filectxfn, patch.filestore):
2865 if isinstance(filectxfn, patch.filestore):
2883 filectxfn = memfilefrompatch(filectxfn)
2866 filectxfn = memfilefrompatch(filectxfn)
2884 elif not callable(filectxfn):
2867 elif not callable(filectxfn):
2885 # if store is not callable, wrap it in a function
2868 # if store is not callable, wrap it in a function
2886 filectxfn = memfilefromctx(filectxfn)
2869 filectxfn = memfilefromctx(filectxfn)
2887
2870
2888 # memoizing increases performance for e.g. vcs convert scenarios.
2871 # memoizing increases performance for e.g. vcs convert scenarios.
2889 self._filectxfn = makecachingfilectxfn(filectxfn)
2872 self._filectxfn = makecachingfilectxfn(filectxfn)
2890
2873
2891 if editor:
2874 if editor:
2892 self._text = editor(self._repo, self, [])
2875 self._text = editor(self._repo, self, [])
2893 self._repo.savecommitmessage(self._text)
2876 self._repo.savecommitmessage(self._text)
2894
2877
2895 def filectx(self, path, filelog=None):
2878 def filectx(self, path, filelog=None):
2896 """get a file context from the working directory
2879 """get a file context from the working directory
2897
2880
2898 Returns None if file doesn't exist and should be removed."""
2881 Returns None if file doesn't exist and should be removed."""
2899 return self._filectxfn(self._repo, self, path)
2882 return self._filectxfn(self._repo, self, path)
2900
2883
2901 def commit(self):
2884 def commit(self):
2902 """commit context to the repo"""
2885 """commit context to the repo"""
2903 return self._repo.commitctx(self)
2886 return self._repo.commitctx(self)
2904
2887
2905 @propertycache
2888 @propertycache
2906 def _manifest(self):
2889 def _manifest(self):
2907 """generate a manifest based on the return values of filectxfn"""
2890 """generate a manifest based on the return values of filectxfn"""
2908
2891
2909 # keep this simple for now; just worry about p1
2892 # keep this simple for now; just worry about p1
2910 pctx = self._parents[0]
2893 pctx = self._parents[0]
2911 man = pctx.manifest().copy()
2894 man = pctx.manifest().copy()
2912
2895
2913 for f in self._status.modified:
2896 for f in self._status.modified:
2914 man[f] = self._repo.nodeconstants.modifiednodeid
2897 man[f] = self._repo.nodeconstants.modifiednodeid
2915
2898
2916 for f in self._status.added:
2899 for f in self._status.added:
2917 man[f] = self._repo.nodeconstants.addednodeid
2900 man[f] = self._repo.nodeconstants.addednodeid
2918
2901
2919 for f in self._status.removed:
2902 for f in self._status.removed:
2920 if f in man:
2903 if f in man:
2921 del man[f]
2904 del man[f]
2922
2905
2923 return man
2906 return man
2924
2907
2925 @propertycache
2908 @propertycache
2926 def _status(self):
2909 def _status(self):
2927 """Calculate exact status from ``files`` specified at construction"""
2910 """Calculate exact status from ``files`` specified at construction"""
2928 man1 = self.p1().manifest()
2911 man1 = self.p1().manifest()
2929 p2 = self._parents[1]
2912 p2 = self._parents[1]
2930 # "1 < len(self._parents)" can't be used for checking
2913 # "1 < len(self._parents)" can't be used for checking
2931 # existence of the 2nd parent, because "memctx._parents" is
2914 # existence of the 2nd parent, because "memctx._parents" is
2932 # explicitly initialized by the list, of which length is 2.
2915 # explicitly initialized by the list, of which length is 2.
2933 if p2.rev() != nullrev:
2916 if p2.rev() != nullrev:
2934 man2 = p2.manifest()
2917 man2 = p2.manifest()
2935 managing = lambda f: f in man1 or f in man2
2918 managing = lambda f: f in man1 or f in man2
2936 else:
2919 else:
2937 managing = lambda f: f in man1
2920 managing = lambda f: f in man1
2938
2921
2939 modified, added, removed = [], [], []
2922 modified, added, removed = [], [], []
2940 for f in self._files:
2923 for f in self._files:
2941 if not managing(f):
2924 if not managing(f):
2942 added.append(f)
2925 added.append(f)
2943 elif self[f]:
2926 elif self[f]:
2944 modified.append(f)
2927 modified.append(f)
2945 else:
2928 else:
2946 removed.append(f)
2929 removed.append(f)
2947
2930
2948 return scmutil.status(modified, added, removed, [], [], [], [])
2931 return scmutil.status(modified, added, removed, [], [], [], [])
2949
2932
2950 def parents(self):
2933 def parents(self):
2951 if self._parents[1].rev() == nullrev:
2934 if self._parents[1].rev() == nullrev:
2952 return [self._parents[0]]
2935 return [self._parents[0]]
2953 return self._parents
2936 return self._parents
2954
2937
2955
2938
2956 class memfilectx(committablefilectx):
2939 class memfilectx(committablefilectx):
2957 """memfilectx represents an in-memory file to commit.
2940 """memfilectx represents an in-memory file to commit.
2958
2941
2959 See memctx and committablefilectx for more details.
2942 See memctx and committablefilectx for more details.
2960 """
2943 """
2961
2944
2962 def __init__(
2945 def __init__(
2963 self,
2946 self,
2964 repo,
2947 repo,
2965 changectx,
2948 changectx,
2966 path,
2949 path,
2967 data,
2950 data,
2968 islink=False,
2951 islink=False,
2969 isexec=False,
2952 isexec=False,
2970 copysource=None,
2953 copysource=None,
2971 ):
2954 ):
2972 """
2955 """
2973 path is the normalized file path relative to repository root.
2956 path is the normalized file path relative to repository root.
2974 data is the file content as a string.
2957 data is the file content as a string.
2975 islink is True if the file is a symbolic link.
2958 islink is True if the file is a symbolic link.
2976 isexec is True if the file is executable.
2959 isexec is True if the file is executable.
2977 copied is the source file path if current file was copied in the
2960 copied is the source file path if current file was copied in the
2978 revision being committed, or None."""
2961 revision being committed, or None."""
2979 super(memfilectx, self).__init__(repo, path, None, changectx)
2962 super(memfilectx, self).__init__(repo, path, None, changectx)
2980 self._data = data
2963 self._data = data
2981 if islink:
2964 if islink:
2982 self._flags = b'l'
2965 self._flags = b'l'
2983 elif isexec:
2966 elif isexec:
2984 self._flags = b'x'
2967 self._flags = b'x'
2985 else:
2968 else:
2986 self._flags = b''
2969 self._flags = b''
2987 self._copysource = copysource
2970 self._copysource = copysource
2988
2971
2989 def copysource(self):
2972 def copysource(self):
2990 return self._copysource
2973 return self._copysource
2991
2974
2992 def cmp(self, fctx):
2975 def cmp(self, fctx):
2993 return self.data() != fctx.data()
2976 return self.data() != fctx.data()
2994
2977
2995 def data(self):
2978 def data(self):
2996 return self._data
2979 return self._data
2997
2980
2998 def remove(self, ignoremissing=False):
2981 def remove(self, ignoremissing=False):
2999 """wraps unlink for a repo's working directory"""
2982 """wraps unlink for a repo's working directory"""
3000 # need to figure out what to do here
2983 # need to figure out what to do here
3001 del self._changectx[self._path]
2984 del self._changectx[self._path]
3002
2985
3003 def write(self, data, flags, **kwargs):
2986 def write(self, data, flags, **kwargs):
3004 """wraps repo.wwrite"""
2987 """wraps repo.wwrite"""
3005 self._data = data
2988 self._data = data
3006
2989
3007
2990
3008 class metadataonlyctx(committablectx):
2991 class metadataonlyctx(committablectx):
3009 """Like memctx but it's reusing the manifest of different commit.
2992 """Like memctx but it's reusing the manifest of different commit.
3010 Intended to be used by lightweight operations that are creating
2993 Intended to be used by lightweight operations that are creating
3011 metadata-only changes.
2994 metadata-only changes.
3012
2995
3013 Revision information is supplied at initialization time. 'repo' is the
2996 Revision information is supplied at initialization time. 'repo' is the
3014 current localrepo, 'ctx' is original revision whose manifest we're reusing
2997 current localrepo, 'ctx' is original revision whose manifest we're reusing
3015 'parents' is a sequence of two parent revisions identifiers (pass None for
2998 'parents' is a sequence of two parent revisions identifiers (pass None for
3016 every missing parent), 'text' is the commit.
2999 every missing parent), 'text' is the commit.
3017
3000
3018 user receives the committer name and defaults to current repository
3001 user receives the committer name and defaults to current repository
3019 username, date is the commit date in any format supported by
3002 username, date is the commit date in any format supported by
3020 dateutil.parsedate() and defaults to current date, extra is a dictionary of
3003 dateutil.parsedate() and defaults to current date, extra is a dictionary of
3021 metadata or is left empty.
3004 metadata or is left empty.
3022 """
3005 """
3023
3006
3024 def __init__(
3007 def __init__(
3025 self,
3008 self,
3026 repo,
3009 repo,
3027 originalctx,
3010 originalctx,
3028 parents=None,
3011 parents=None,
3029 text=None,
3012 text=None,
3030 user=None,
3013 user=None,
3031 date=None,
3014 date=None,
3032 extra=None,
3015 extra=None,
3033 editor=None,
3016 editor=None,
3034 ):
3017 ):
3035 if text is None:
3018 if text is None:
3036 text = originalctx.description()
3019 text = originalctx.description()
3037 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
3020 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
3038 self._rev = None
3021 self._rev = None
3039 self._node = None
3022 self._node = None
3040 self._originalctx = originalctx
3023 self._originalctx = originalctx
3041 self._manifestnode = originalctx.manifestnode()
3024 self._manifestnode = originalctx.manifestnode()
3042 if parents is None:
3025 if parents is None:
3043 parents = originalctx.parents()
3026 parents = originalctx.parents()
3044 else:
3027 else:
3045 parents = [repo[p] for p in parents if p is not None]
3028 parents = [repo[p] for p in parents if p is not None]
3046 parents = parents[:]
3029 parents = parents[:]
3047 while len(parents) < 2:
3030 while len(parents) < 2:
3048 parents.append(repo[nullrev])
3031 parents.append(repo[nullrev])
3049 p1, p2 = self._parents = parents
3032 p1, p2 = self._parents = parents
3050
3033
3051 # sanity check to ensure that the reused manifest parents are
3034 # sanity check to ensure that the reused manifest parents are
3052 # manifests of our commit parents
3035 # manifests of our commit parents
3053 mp1, mp2 = self.manifestctx().parents
3036 mp1, mp2 = self.manifestctx().parents
3054 if p1 != self._repo.nodeconstants.nullid and p1.manifestnode() != mp1:
3037 if p1 != self._repo.nodeconstants.nullid and p1.manifestnode() != mp1:
3055 raise RuntimeError(
3038 raise RuntimeError(
3056 r"can't reuse the manifest: its p1 "
3039 r"can't reuse the manifest: its p1 "
3057 r"doesn't match the new ctx p1"
3040 r"doesn't match the new ctx p1"
3058 )
3041 )
3059 if p2 != self._repo.nodeconstants.nullid and p2.manifestnode() != mp2:
3042 if p2 != self._repo.nodeconstants.nullid and p2.manifestnode() != mp2:
3060 raise RuntimeError(
3043 raise RuntimeError(
3061 r"can't reuse the manifest: "
3044 r"can't reuse the manifest: "
3062 r"its p2 doesn't match the new ctx p2"
3045 r"its p2 doesn't match the new ctx p2"
3063 )
3046 )
3064
3047
3065 self._files = originalctx.files()
3048 self._files = originalctx.files()
3066 self.substate = {}
3049 self.substate = {}
3067
3050
3068 if editor:
3051 if editor:
3069 self._text = editor(self._repo, self, [])
3052 self._text = editor(self._repo, self, [])
3070 self._repo.savecommitmessage(self._text)
3053 self._repo.savecommitmessage(self._text)
3071
3054
3072 def manifestnode(self):
3055 def manifestnode(self):
3073 return self._manifestnode
3056 return self._manifestnode
3074
3057
3075 @property
3058 @property
3076 def _manifestctx(self):
3059 def _manifestctx(self):
3077 return self._repo.manifestlog[self._manifestnode]
3060 return self._repo.manifestlog[self._manifestnode]
3078
3061
3079 def filectx(self, path, filelog=None):
3062 def filectx(self, path, filelog=None):
3080 return self._originalctx.filectx(path, filelog=filelog)
3063 return self._originalctx.filectx(path, filelog=filelog)
3081
3064
3082 def commit(self):
3065 def commit(self):
3083 """commit context to the repo"""
3066 """commit context to the repo"""
3084 return self._repo.commitctx(self)
3067 return self._repo.commitctx(self)
3085
3068
3086 @property
3069 @property
3087 def _manifest(self):
3070 def _manifest(self):
3088 return self._originalctx.manifest()
3071 return self._originalctx.manifest()
3089
3072
3090 @propertycache
3073 @propertycache
3091 def _status(self):
3074 def _status(self):
3092 """Calculate exact status from ``files`` specified in the ``origctx``
3075 """Calculate exact status from ``files`` specified in the ``origctx``
3093 and parents manifests.
3076 and parents manifests.
3094 """
3077 """
3095 man1 = self.p1().manifest()
3078 man1 = self.p1().manifest()
3096 p2 = self._parents[1]
3079 p2 = self._parents[1]
3097 # "1 < len(self._parents)" can't be used for checking
3080 # "1 < len(self._parents)" can't be used for checking
3098 # existence of the 2nd parent, because "metadataonlyctx._parents" is
3081 # existence of the 2nd parent, because "metadataonlyctx._parents" is
3099 # explicitly initialized by the list, of which length is 2.
3082 # explicitly initialized by the list, of which length is 2.
3100 if p2.rev() != nullrev:
3083 if p2.rev() != nullrev:
3101 man2 = p2.manifest()
3084 man2 = p2.manifest()
3102 managing = lambda f: f in man1 or f in man2
3085 managing = lambda f: f in man1 or f in man2
3103 else:
3086 else:
3104 managing = lambda f: f in man1
3087 managing = lambda f: f in man1
3105
3088
3106 modified, added, removed = [], [], []
3089 modified, added, removed = [], [], []
3107 for f in self._files:
3090 for f in self._files:
3108 if not managing(f):
3091 if not managing(f):
3109 added.append(f)
3092 added.append(f)
3110 elif f in self:
3093 elif f in self:
3111 modified.append(f)
3094 modified.append(f)
3112 else:
3095 else:
3113 removed.append(f)
3096 removed.append(f)
3114
3097
3115 return scmutil.status(modified, added, removed, [], [], [], [])
3098 return scmutil.status(modified, added, removed, [], [], [], [])
3116
3099
3117
3100
3118 class arbitraryfilectx(object):
3101 class arbitraryfilectx(object):
3119 """Allows you to use filectx-like functions on a file in an arbitrary
3102 """Allows you to use filectx-like functions on a file in an arbitrary
3120 location on disk, possibly not in the working directory.
3103 location on disk, possibly not in the working directory.
3121 """
3104 """
3122
3105
3123 def __init__(self, path, repo=None):
3106 def __init__(self, path, repo=None):
3124 # Repo is optional because contrib/simplemerge uses this class.
3107 # Repo is optional because contrib/simplemerge uses this class.
3125 self._repo = repo
3108 self._repo = repo
3126 self._path = path
3109 self._path = path
3127
3110
3128 def cmp(self, fctx):
3111 def cmp(self, fctx):
3129 # filecmp follows symlinks whereas `cmp` should not, so skip the fast
3112 # filecmp follows symlinks whereas `cmp` should not, so skip the fast
3130 # path if either side is a symlink.
3113 # path if either side is a symlink.
3131 symlinks = b'l' in self.flags() or b'l' in fctx.flags()
3114 symlinks = b'l' in self.flags() or b'l' in fctx.flags()
3132 if not symlinks and isinstance(fctx, workingfilectx) and self._repo:
3115 if not symlinks and isinstance(fctx, workingfilectx) and self._repo:
3133 # Add a fast-path for merge if both sides are disk-backed.
3116 # Add a fast-path for merge if both sides are disk-backed.
3134 # Note that filecmp uses the opposite return values (True if same)
3117 # Note that filecmp uses the opposite return values (True if same)
3135 # from our cmp functions (True if different).
3118 # from our cmp functions (True if different).
3136 return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
3119 return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
3137 return self.data() != fctx.data()
3120 return self.data() != fctx.data()
3138
3121
3139 def path(self):
3122 def path(self):
3140 return self._path
3123 return self._path
3141
3124
3142 def flags(self):
3125 def flags(self):
3143 return b''
3126 return b''
3144
3127
3145 def data(self):
3128 def data(self):
3146 return util.readfile(self._path)
3129 return util.readfile(self._path)
3147
3130
3148 def decodeddata(self):
3131 def decodeddata(self):
3149 with open(self._path, b"rb") as f:
3132 with open(self._path, b"rb") as f:
3150 return f.read()
3133 return f.read()
3151
3134
3152 def remove(self):
3135 def remove(self):
3153 util.unlink(self._path)
3136 util.unlink(self._path)
3154
3137
3155 def write(self, data, flags, **kwargs):
3138 def write(self, data, flags, **kwargs):
3156 assert not flags
3139 assert not flags
3157 with open(self._path, b"wb") as f:
3140 with open(self._path, b"wb") as f:
3158 f.write(data)
3141 f.write(data)
@@ -1,101 +1,126 b''
1 # Copyright Mercurial Contributors
1 # Copyright Mercurial Contributors
2 #
2 #
3 # This software may be used and distributed according to the terms of the
3 # This software may be used and distributed according to the terms of the
4 # GNU General Public License version 2 or any later version.
4 # GNU General Public License version 2 or any later version.
5
5
6 from __future__ import absolute_import
6 from __future__ import absolute_import
7
7
8 import functools
8 import functools
9 import os
9 import os
10 import stat
10 import stat
11
11
12
12
13 rangemask = 0x7FFFFFFF
13 rangemask = 0x7FFFFFFF
14
14
15
15
16 @functools.total_ordering
16 @functools.total_ordering
17 class timestamp(tuple):
17 class timestamp(tuple):
18 """
18 """
19 A Unix timestamp with optional nanoseconds precision,
19 A Unix timestamp with optional nanoseconds precision,
20 modulo 2**31 seconds.
20 modulo 2**31 seconds.
21
21
22 A 2-tuple containing:
22 A 2-tuple containing:
23
23
24 `truncated_seconds`: seconds since the Unix epoch,
24 `truncated_seconds`: seconds since the Unix epoch,
25 truncated to its lower 31 bits
25 truncated to its lower 31 bits
26
26
27 `subsecond_nanoseconds`: number of nanoseconds since `truncated_seconds`.
27 `subsecond_nanoseconds`: number of nanoseconds since `truncated_seconds`.
28 When this is zero, the sub-second precision is considered unknown.
28 When this is zero, the sub-second precision is considered unknown.
29 """
29 """
30
30
31 def __new__(cls, value):
31 def __new__(cls, value):
32 truncated_seconds, subsec_nanos = value
32 truncated_seconds, subsec_nanos = value
33 value = (truncated_seconds & rangemask, subsec_nanos)
33 value = (truncated_seconds & rangemask, subsec_nanos)
34 return super(timestamp, cls).__new__(cls, value)
34 return super(timestamp, cls).__new__(cls, value)
35
35
36 def __eq__(self, other):
36 def __eq__(self, other):
37 self_secs, self_subsec_nanos = self
37 self_secs, self_subsec_nanos = self
38 other_secs, other_subsec_nanos = other
38 other_secs, other_subsec_nanos = other
39 return self_secs == other_secs and (
39 return self_secs == other_secs and (
40 self_subsec_nanos == other_subsec_nanos
40 self_subsec_nanos == other_subsec_nanos
41 or self_subsec_nanos == 0
41 or self_subsec_nanos == 0
42 or other_subsec_nanos == 0
42 or other_subsec_nanos == 0
43 )
43 )
44
44
45 def __gt__(self, other):
45 def __gt__(self, other):
46 self_secs, self_subsec_nanos = self
46 self_secs, self_subsec_nanos = self
47 other_secs, other_subsec_nanos = other
47 other_secs, other_subsec_nanos = other
48 if self_secs > other_secs:
48 if self_secs > other_secs:
49 return True
49 return True
50 if self_secs < other_secs:
50 if self_secs < other_secs:
51 return False
51 return False
52 if self_subsec_nanos == 0 or other_subsec_nanos == 0:
52 if self_subsec_nanos == 0 or other_subsec_nanos == 0:
53 # they are considered equal, so not "greater than"
53 # they are considered equal, so not "greater than"
54 return False
54 return False
55 return self_subsec_nanos > other_subsec_nanos
55 return self_subsec_nanos > other_subsec_nanos
56
56
57
57
58 def get_fs_now(vfs):
58 def get_fs_now(vfs):
59 """return a timestamp for "now" in the current vfs
59 """return a timestamp for "now" in the current vfs
60
60
61 This will raise an exception if no temporary files could be created.
61 This will raise an exception if no temporary files could be created.
62 """
62 """
63 tmpfd, tmpname = vfs.mkstemp()
63 tmpfd, tmpname = vfs.mkstemp()
64 try:
64 try:
65 return mtime_of(os.fstat(tmpfd))
65 return mtime_of(os.fstat(tmpfd))
66 finally:
66 finally:
67 os.close(tmpfd)
67 os.close(tmpfd)
68 vfs.unlink(tmpname)
68 vfs.unlink(tmpname)
69
69
70
70
71 def zero():
71 def zero():
72 """
72 """
73 Returns the `timestamp` at the Unix epoch.
73 Returns the `timestamp` at the Unix epoch.
74 """
74 """
75 return tuple.__new__(timestamp, (0, 0))
75 return tuple.__new__(timestamp, (0, 0))
76
76
77
77
78 def mtime_of(stat_result):
78 def mtime_of(stat_result):
79 """
79 """
80 Takes an `os.stat_result`-like object and returns a `timestamp` object
80 Takes an `os.stat_result`-like object and returns a `timestamp` object
81 for its modification time.
81 for its modification time.
82 """
82 """
83 try:
83 try:
84 # TODO: add this attribute to `osutil.stat` objects,
84 # TODO: add this attribute to `osutil.stat` objects,
85 # see `mercurial/cext/osutil.c`.
85 # see `mercurial/cext/osutil.c`.
86 #
86 #
87 # This attribute is also not available on Python 2.
87 # This attribute is also not available on Python 2.
88 nanos = stat_result.st_mtime_ns
88 nanos = stat_result.st_mtime_ns
89 except AttributeError:
89 except AttributeError:
90 # https://docs.python.org/2/library/os.html#os.stat_float_times
90 # https://docs.python.org/2/library/os.html#os.stat_float_times
91 # "For compatibility with older Python versions,
91 # "For compatibility with older Python versions,
92 # accessing stat_result as a tuple always returns integers."
92 # accessing stat_result as a tuple always returns integers."
93 secs = stat_result[stat.ST_MTIME]
93 secs = stat_result[stat.ST_MTIME]
94
94
95 subsec_nanos = 0
95 subsec_nanos = 0
96 else:
96 else:
97 billion = int(1e9)
97 billion = int(1e9)
98 secs = nanos // billion
98 secs = nanos // billion
99 subsec_nanos = nanos % billion
99 subsec_nanos = nanos % billion
100
100
101 return timestamp((secs, subsec_nanos))
101 return timestamp((secs, subsec_nanos))
102
103
104 def reliable_mtime_of(stat_result, present_mtime):
105 """same as `mtime_of`, but return None if the date might be ambiguous
106
107 A modification time is reliable if it is older than `present_mtime` (or
108 sufficiently in the future).
109
110 Otherwise a concurrent modification might happen with the same mtime.
111 """
112 file_mtime = mtime_of(stat_result)
113 file_second = file_mtime[0]
114 boundary_second = present_mtime[0]
115 # If the mtime of the ambiguous file is younger than (or equal to) the
116 # starting point of the `status` walk, we cannot guarantee that another,
117 # racy, write will not happen right after with the same mtime and we cannot
118 # cache the information.
119 #
120 # However, if the mtime is far away in the future, this is likely some
121 # mismatch between the current clock and a previous file system operation.
122 # So mtimes more than one day in the future are considered fine.
123 if boundary_second <= file_second < (3600 * 24 + boundary_second):
124 return None
125 else:
126 return file_mtime
General Comments 0
You need to be logged in to leave comments. Login now