dirstate: update backup functions to take full backup filename...
Adam Simpkins
r33440:ec306bc6 default
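This changeset adapts shelve to the new dirstate backup API: dirstate.savebackup and dirstate.restorebackup now take the full backup filename instead of composing one from a suffix. A minimal before/after sketch of the one call site changed below (only what this diff itself shows; the filename 'dirstate.shelve' comes from the new code):

    # before: dirstate derived the backup filename from a suffix
    repo.dirstate.savebackup(tr, suffix='.shelve')
    repo.dirstate.restorebackup(None, suffix='.shelve')

    # after: the caller passes the complete backup filename
    backupname = 'dirstate.shelve'
    repo.dirstate.savebackup(tr, backupname)
    repo.dirstate.restorebackup(None, backupname)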
@@ -1,1042 +1,1043 @@
# shelve.py - save/restore working directory state
#
# Copyright 2013 Facebook, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""save and restore changes to the working directory

The "hg shelve" command saves changes made to the working directory
and reverts those changes, resetting the working directory to a clean
state.

Later on, the "hg unshelve" command restores the changes saved by "hg
shelve". Changes can be restored even after updating to a different
parent, in which case Mercurial's merge machinery will resolve any
conflicts if necessary.

You can have more than one shelved change outstanding at a time; each
shelved change has a distinct name. For details, see the help for "hg
shelve".
"""
from __future__ import absolute_import

import collections
import errno
import itertools

from mercurial.i18n import _
from mercurial import (
    bookmarks,
    bundle2,
    bundlerepo,
    changegroup,
    cmdutil,
    error,
    exchange,
    hg,
    lock as lockmod,
    mdiff,
    merge,
    node as nodemod,
    patch,
    phases,
    registrar,
    repair,
    scmutil,
    templatefilters,
    util,
    vfs as vfsmod,
)

from . import (
    rebase,
)

cmdtable = {}
command = registrar.command(cmdtable)
# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = 'ships-with-hg-core'

backupdir = 'shelve-backup'
shelvedir = 'shelved'
shelvefileextensions = ['hg', 'patch', 'oshelve']
# universal extension is present in all types of shelves
patchextension = 'patch'

# we never need the user, so we use a
# generic user for all shelve operations
shelveuser = 'shelve@localhost'

class shelvedfile(object):
    """Helper for the file storing a single shelve

    Handles common functions on shelve files (.hg/.patch) using
    the vfs layer"""
    def __init__(self, repo, name, filetype=None):
        self.repo = repo
        self.name = name
        self.vfs = vfsmod.vfs(repo.vfs.join(shelvedir))
        self.backupvfs = vfsmod.vfs(repo.vfs.join(backupdir))
        self.ui = self.repo.ui
        if filetype:
            self.fname = name + '.' + filetype
        else:
            self.fname = name

    def exists(self):
        return self.vfs.exists(self.fname)

    def filename(self):
        return self.vfs.join(self.fname)

    def backupfilename(self):
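        # Collision-free backup naming: try the plain name first, then
        # 'base-1.ext', 'base-2.ext', ... until an unused name is found.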
        def gennames(base):
            yield base
            base, ext = base.rsplit('.', 1)
            for i in itertools.count(1):
                yield '%s-%d.%s' % (base, i, ext)

        name = self.backupvfs.join(self.fname)
        for n in gennames(name):
            if not self.backupvfs.exists(n):
                return n

    def movetobackup(self):
        if not self.backupvfs.isdir():
            self.backupvfs.makedir()
        util.rename(self.filename(), self.backupfilename())

    def stat(self):
        return self.vfs.stat(self.fname)

    def opener(self, mode='rb'):
        try:
            return self.vfs(self.fname, mode)
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
            raise error.Abort(_("shelved change '%s' not found") % self.name)

    def applybundle(self):
        fp = self.opener()
        try:
            gen = exchange.readbundle(self.repo.ui, fp, self.fname, self.vfs)
            bundle2.applybundle(self.repo, gen, self.repo.currenttransaction(),
                                source='unshelve',
                                url='bundle:' + self.vfs.join(self.fname),
                                targetphase=phases.secret)
        finally:
            fp.close()

    def bundlerepo(self):
        return bundlerepo.bundlerepository(self.repo.baseui, self.repo.root,
                                           self.vfs.join(self.fname))
    def writebundle(self, bases, node):
        cgversion = changegroup.safeversion(self.repo)
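        # changegroup version '01' can only ship in the legacy HG10BZ
        # container; newer changegroup versions go into a HG20 bundle
        # with an explicit bzip2 compression parameter.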
        if cgversion == '01':
            btype = 'HG10BZ'
            compression = None
        else:
            btype = 'HG20'
            compression = 'BZ'

        cg = changegroup.changegroupsubset(self.repo, bases, [node], 'shelve',
                                           version=cgversion)
        bundle2.writebundle(self.ui, cg, self.fname, btype, self.vfs,
                            compression=compression)

    def writeobsshelveinfo(self, info):
        scmutil.simplekeyvaluefile(self.vfs, self.fname).write(info)

    def readobsshelveinfo(self):
        return scmutil.simplekeyvaluefile(self.vfs, self.fname).read()

class shelvedstate(object):
    """Handle persistence during unshelving operations.

    Handles saving and restoring a shelved state. Ensures that different
    versions of a shelved state are possible and handles them appropriately.
    """
    _version = 2
    _filename = 'shelvedstate'
    _keep = 'keep'
    _nokeep = 'nokeep'
    # colon is essential to differentiate from a real bookmark name
    _noactivebook = ':no-active-bookmark'

    @classmethod
    def _verifyandtransform(cls, d):
        """Some basic shelvestate syntactic verification and transformation"""
        try:
            d['originalwctx'] = nodemod.bin(d['originalwctx'])
            d['pendingctx'] = nodemod.bin(d['pendingctx'])
            d['parents'] = [nodemod.bin(h)
                            for h in d['parents'].split(' ')]
            d['nodestoremove'] = [nodemod.bin(h)
                                  for h in d['nodestoremove'].split(' ')]
        except (ValueError, TypeError, KeyError) as err:
            raise error.CorruptedState(str(err))

    @classmethod
    def _getversion(cls, repo):
        """Read version information from shelvestate file"""
        fp = repo.vfs(cls._filename)
        try:
            version = int(fp.readline().strip())
        except ValueError as err:
            raise error.CorruptedState(str(err))
        finally:
            fp.close()
        return version

    @classmethod
    def _readold(cls, repo):
        """Read the old position-based version of a shelvestate file"""
        # Order is important, because the old shelvestate file uses it
        # to determine values of fields (e.g. name is on the second line,
        # originalwctx is on the third and so forth). Please do not change.
        keys = ['version', 'name', 'originalwctx', 'pendingctx', 'parents',
                'nodestoremove', 'branchtorestore', 'keep', 'activebook']
        # this is executed only rarely, so it is not a big deal
        # that we open this file twice
        fp = repo.vfs(cls._filename)
        d = {}
        try:
            for key in keys:
                d[key] = fp.readline().strip()
        finally:
            fp.close()
        return d

    @classmethod
    def load(cls, repo):
        version = cls._getversion(repo)
        if version < cls._version:
            d = cls._readold(repo)
        elif version == cls._version:
            d = scmutil.simplekeyvaluefile(repo.vfs, cls._filename)\
                       .read(firstlinenonkeyval=True)
        else:
            raise error.Abort(_('this version of shelve is incompatible '
                                'with the version used in this repo'))

        cls._verifyandtransform(d)
        try:
            obj = cls()
            obj.name = d['name']
            obj.wctx = repo[d['originalwctx']]
            obj.pendingctx = repo[d['pendingctx']]
            obj.parents = d['parents']
            obj.nodestoremove = d['nodestoremove']
            obj.branchtorestore = d.get('branchtorestore', '')
            obj.keep = d.get('keep') == cls._keep
            obj.activebookmark = ''
            if d.get('activebook', '') != cls._noactivebook:
                obj.activebookmark = d.get('activebook', '')
        except (error.RepoLookupError, KeyError) as err:
            raise error.CorruptedState(str(err))

        return obj

    @classmethod
    def save(cls, repo, name, originalwctx, pendingctx, nodestoremove,
             branchtorestore, keep=False, activebook=''):
        info = {
            "name": name,
            "originalwctx": nodemod.hex(originalwctx.node()),
            "pendingctx": nodemod.hex(pendingctx.node()),
            "parents": ' '.join([nodemod.hex(p)
                                 for p in repo.dirstate.parents()]),
            "nodestoremove": ' '.join([nodemod.hex(n)
                                      for n in nodestoremove]),
            "branchtorestore": branchtorestore,
            "keep": cls._keep if keep else cls._nokeep,
            "activebook": activebook or cls._noactivebook
        }
        scmutil.simplekeyvaluefile(repo.vfs, cls._filename)\
               .write(info, firstline=str(cls._version))
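
    # For illustration only (this assumes simplekeyvaluefile's key=value
    # line format): a version-2 shelvedstate file written by save() would
    # look roughly like
    #
    #     2
    #     activebook=:no-active-bookmark
    #     branchtorestore=
    #     keep=nokeep
    #     name=default-01
    #     nodestoremove=<hex node> <hex node>
    #     originalwctx=<hex node>
    #     parents=<hex node> <hex node>
    #     pendingctx=<hex node>
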
    @classmethod
    def clear(cls, repo):
        repo.vfs.unlinkpath(cls._filename, ignoremissing=True)

def cleanupoldbackups(repo):
    vfs = vfsmod.vfs(repo.vfs.join(backupdir))
    maxbackups = repo.ui.configint('shelve', 'maxbackups', 10)
    hgfiles = [f for f in vfs.listdir()
               if f.endswith('.' + patchextension)]
    hgfiles = sorted([(vfs.stat(f).st_mtime, f) for f in hgfiles])
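    # hgfiles is now sorted oldest-first by mtime; everything except the
    # newest maxbackups entries gets pruned below, except that entries
    # sharing the boundary timestamp are spared because their relative
    # order is ambiguous.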
    if 0 < maxbackups and maxbackups < len(hgfiles):
        bordermtime = hgfiles[-maxbackups][0]
    else:
        bordermtime = None
    for mtime, f in hgfiles[:len(hgfiles) - maxbackups]:
        if mtime == bordermtime:
            # keep it, because timestamp can't decide exact order of backups
            continue
        base = f[:-(1 + len(patchextension))]
        for ext in shelvefileextensions:
            vfs.tryunlink(base + '.' + ext)

def _backupactivebookmark(repo):
    activebookmark = repo._activebookmark
    if activebookmark:
        bookmarks.deactivate(repo)
    return activebookmark

def _restoreactivebookmark(repo, mark):
    if mark:
        bookmarks.activate(repo, mark)

def _aborttransaction(repo):
    '''Abort current transaction for shelve/unshelve, but keep dirstate
    '''
    tr = repo.currenttransaction()
-    repo.dirstate.savebackup(tr, suffix='.shelve')
+    backupname = 'dirstate.shelve'
+    repo.dirstate.savebackup(tr, backupname)
    tr.abort()
-    repo.dirstate.restorebackup(None, suffix='.shelve')
+    repo.dirstate.restorebackup(None, backupname)

def createcmd(ui, repo, pats, opts):
    """subcommand that creates a new shelve"""
    with repo.wlock():
        cmdutil.checkunfinished(repo)
        return _docreatecmd(ui, repo, pats, opts)

def getshelvename(repo, parent, opts):
    """Decide on the name this shelve is going to have"""
    def gennames():
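        # candidate names: the bare label first, then 'label-01',
        # 'label-02', and so on until a free name turns up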
        yield label
        for i in itertools.count(1):
            yield '%s-%02d' % (label, i)
    name = opts.get('name')
    label = repo._activebookmark or parent.branch() or 'default'
    # slashes aren't allowed in filenames, therefore we replace them
    label = label.replace('/', '_')
    label = label.replace('\\', '_')
    # filenames must not start with '.', as that would make them hidden
    if label.startswith('.'):
        label = label.replace('.', '_', 1)

    if name:
        if shelvedfile(repo, name, patchextension).exists():
            e = _("a shelved change named '%s' already exists") % name
            raise error.Abort(e)

        # ensure we are not creating a subdirectory or a hidden file
        if '/' in name or '\\' in name:
            raise error.Abort(_('shelved change names can not contain slashes'))
        if name.startswith('.'):
            raise error.Abort(_("shelved change names can not start with '.'"))

    else:
        for n in gennames():
            if not shelvedfile(repo, n, patchextension).exists():
                name = n
                break

    return name

def mutableancestors(ctx):
    """return all mutable ancestors for ctx (included)

    Much faster than the revset ancestors(ctx) & draft()"""
    seen = {nodemod.nullrev}
    visit = collections.deque()
    visit.append(ctx)
    while visit:
        ctx = visit.popleft()
        yield ctx.node()
        for parent in ctx.parents():
            rev = parent.rev()
            if rev not in seen:
                seen.add(rev)
                if parent.mutable():
                    visit.append(parent)

def getcommitfunc(extra, interactive, editor=False):
    def commitfunc(ui, repo, message, match, opts):
        hasmq = util.safehasattr(repo, 'mq')
        if hasmq:
            saved, repo.mq.checkapplied = repo.mq.checkapplied, False
        overrides = {('phases', 'new-commit'): phases.secret}
        try:
            editor_ = False
            if editor:
                editor_ = cmdutil.getcommiteditor(editform='shelve.shelve',
                                                  **opts)
            with repo.ui.configoverride(overrides):
                return repo.commit(message, shelveuser, opts.get('date'),
                                   match, editor=editor_, extra=extra)
        finally:
            if hasmq:
                repo.mq.checkapplied = saved

    def interactivecommitfunc(ui, repo, *pats, **opts):
        match = scmutil.match(repo['.'], pats, {})
        message = opts['message']
        return commitfunc(ui, repo, message, match, opts)

    return interactivecommitfunc if interactive else commitfunc

def _nothingtoshelvemessaging(ui, repo, pats, opts):
    stat = repo.status(match=scmutil.match(repo[None], pats, opts))
    if stat.deleted:
        ui.status(_("nothing changed (%d missing files, see "
                    "'hg status')\n") % len(stat.deleted))
    else:
        ui.status(_("nothing changed\n"))

def _shelvecreatedcommit(repo, node, name):
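    # A shelve is stored as two files: '<name>.hg', a bundle containing
    # the shelved commit plus its mutable ancestors, and '<name>.patch',
    # a git-style export of the commit used for display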
    bases = list(mutableancestors(repo[node]))
    shelvedfile(repo, name, 'hg').writebundle(bases, node)
    cmdutil.export(repo, [node],
                   fp=shelvedfile(repo, name, patchextension).opener('wb'),
                   opts=mdiff.diffopts(git=True))

def _includeunknownfiles(repo, pats, opts, extra):
    s = repo.status(match=scmutil.match(repo[None], pats, opts),
                    unknown=True)
    if s.unknown:
        extra['shelve_unknown'] = '\0'.join(s.unknown)
        repo[None].add(s.unknown)

def _finishshelve(repo):
    _aborttransaction(repo)

def _docreatecmd(ui, repo, pats, opts):
    wctx = repo[None]
    parents = wctx.parents()
    if len(parents) > 1:
        raise error.Abort(_('cannot shelve while merging'))
    parent = parents[0]
    origbranch = wctx.branch()

    if parent.node() != nodemod.nullid:
        desc = "changes to: %s" % parent.description().split('\n', 1)[0]
    else:
        desc = '(changes in empty repository)'

    if not opts.get('message'):
        opts['message'] = desc

    lock = tr = activebookmark = None
    try:
        lock = repo.lock()

        # use an uncommitted transaction to generate the bundle to avoid
        # pull races. ensure we don't print the abort message to stderr.
        tr = repo.transaction('commit', report=lambda x: None)

        interactive = opts.get('interactive', False)
        includeunknown = (opts.get('unknown', False) and
                          not opts.get('addremove', False))

        name = getshelvename(repo, parent, opts)
        activebookmark = _backupactivebookmark(repo)
        extra = {}
        if includeunknown:
            _includeunknownfiles(repo, pats, opts, extra)

        if _iswctxonnewbranch(repo) and not _isbareshelve(pats, opts):
            # In a non-bare shelve we don't store the newly created branch
            # in the bundled commit
            repo.dirstate.setbranch(repo['.'].branch())

        commitfunc = getcommitfunc(extra, interactive, editor=True)
        if not interactive:
            node = cmdutil.commit(ui, repo, commitfunc, pats, opts)
        else:
            node = cmdutil.dorecord(ui, repo, commitfunc, None,
                                    False, cmdutil.recordfilter, *pats,
                                    **opts)
        if not node:
            _nothingtoshelvemessaging(ui, repo, pats, opts)
            return 1

        _shelvecreatedcommit(repo, node, name)

        if ui.formatted():
            desc = util.ellipsis(desc, ui.termwidth())
        ui.status(_('shelved as %s\n') % name)
        hg.update(repo, parent.node())
        if origbranch != repo['.'].branch() and not _isbareshelve(pats, opts):
            repo.dirstate.setbranch(origbranch)

        _finishshelve(repo)
    finally:
        _restoreactivebookmark(repo, activebookmark)
        lockmod.release(tr, lock)

def _isbareshelve(pats, opts):
    return (not pats
            and not opts.get('interactive', False)
            and not opts.get('include', False)
            and not opts.get('exclude', False))

def _iswctxonnewbranch(repo):
    return repo[None].branch() != repo['.'].branch()

def cleanupcmd(ui, repo):
    """subcommand that deletes all shelves"""

    with repo.wlock():
        for (name, _type) in repo.vfs.readdir(shelvedir):
            suffix = name.rsplit('.', 1)[-1]
            if suffix in shelvefileextensions:
                shelvedfile(repo, name).movetobackup()
        cleanupoldbackups(repo)

def deletecmd(ui, repo, pats):
    """subcommand that deletes a specific shelve"""
    if not pats:
        raise error.Abort(_('no shelved changes specified!'))
    with repo.wlock():
        try:
            for name in pats:
                for suffix in shelvefileextensions:
                    shfile = shelvedfile(repo, name, suffix)
                    # patch file is necessary, as it should
                    # be present for any kind of shelve,
                    # but the .hg file is optional as in the future we
                    # will add an obsolete shelve which does not create
                    # a bundle
                    if shfile.exists() or suffix == patchextension:
                        shfile.movetobackup()
            cleanupoldbackups(repo)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            raise error.Abort(_("shelved change '%s' not found") % name)

def listshelves(repo):
    """return all shelves in repo as list of (time, filename)"""
    try:
        names = repo.vfs.readdir(shelvedir)
    except OSError as err:
        if err.errno != errno.ENOENT:
            raise
        return []
    info = []
    for (name, _type) in names:
        pfx, sfx = name.rsplit('.', 1)
        if not pfx or sfx != patchextension:
            continue
        st = shelvedfile(repo, name).stat()
        info.append((st.st_mtime, shelvedfile(repo, pfx).filename()))
    return sorted(info, reverse=True)

def listcmd(ui, repo, pats, opts):
    """subcommand that displays the list of shelves"""
    pats = set(pats)
    width = 80
    if not ui.plain():
        width = ui.termwidth()
    namelabel = 'shelve.newest'
    ui.pager('shelve')
    for mtime, name in listshelves(repo):
        sname = util.split(name)[1]
        if pats and sname not in pats:
            continue
        ui.write(sname, label=namelabel)
        namelabel = 'shelve.name'
        if ui.quiet:
            ui.write('\n')
            continue
        ui.write(' ' * (16 - len(sname)))
        used = 16
        age = '(%s)' % templatefilters.age(util.makedate(mtime), abbrev=True)
        ui.write(age, label='shelve.age')
        ui.write(' ' * (12 - len(age)))
        used += 12
        with open(name + '.' + patchextension, 'rb') as fp:
            while True:
                line = fp.readline()
                if not line:
                    break
                if not line.startswith('#'):
                    desc = line.rstrip()
                    if ui.formatted():
                        desc = util.ellipsis(desc, width - used)
                    ui.write(desc)
                    break
            ui.write('\n')
            if not (opts['patch'] or opts['stat']):
                continue
            difflines = fp.readlines()
            if opts['patch']:
                for chunk, label in patch.difflabel(iter, difflines):
                    ui.write(chunk, label=label)
            if opts['stat']:
                for chunk, label in patch.diffstatui(difflines, width=width):
                    ui.write(chunk, label=label)

def patchcmds(ui, repo, pats, opts, subcommand):
    """subcommand that displays shelves"""
    if len(pats) == 0:
        raise error.Abort(_("--%s expects at least one shelf") % subcommand)

    for shelfname in pats:
        if not shelvedfile(repo, shelfname, patchextension).exists():
            raise error.Abort(_("cannot find shelf %s") % shelfname)

    listcmd(ui, repo, pats, opts)

def checkparents(repo, state):
    """check parents while resuming an unshelve"""
    if state.parents != repo.dirstate.parents():
        raise error.Abort(_('working directory parents do not match unshelve '
                            'state'))

def pathtofiles(repo, files):
    cwd = repo.getcwd()
    return [repo.pathto(f, cwd) for f in files]

def unshelveabort(ui, repo, state, opts):
    """subcommand that aborts an in-progress unshelve"""
    with repo.lock():
        try:
            checkparents(repo, state)

            repo.vfs.rename('unshelverebasestate', 'rebasestate')
            try:
                rebase.rebase(ui, repo, **{
                    'abort' : True
                })
            except Exception:
                repo.vfs.rename('rebasestate', 'unshelverebasestate')
                raise

            mergefiles(ui, repo, state.wctx, state.pendingctx)
            repair.strip(ui, repo, state.nodestoremove, backup=False,
                         topic='shelve')
        finally:
            shelvedstate.clear(repo)
            ui.warn(_("unshelve of '%s' aborted\n") % state.name)

def mergefiles(ui, repo, wctx, shelvectx):
    """updates to wctx and merges the changes from shelvectx into the
    dirstate."""
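    # Strategy: quietly check out the original working-copy parent (wctx),
    # then revert the files touched by the shelve back to shelvectx so its
    # changes reappear as uncommitted modifications.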
    with ui.configoverride({('ui', 'quiet'): True}):
        hg.update(repo, wctx.node())
    files = []
    files.extend(shelvectx.files())
    files.extend(shelvectx.parents()[0].files())

    # revert will overwrite unknown files, so move them out of the way
    for file in repo.status(unknown=True).unknown:
        if file in files:
            util.rename(file, scmutil.origpath(ui, repo, file))
    ui.pushbuffer(True)
    cmdutil.revert(ui, repo, shelvectx, repo.dirstate.parents(),
                   *pathtofiles(repo, files),
                   **{'no_backup': True})
    ui.popbuffer()

def restorebranch(ui, repo, branchtorestore):
    if branchtorestore and branchtorestore != repo.dirstate.branch():
        repo.dirstate.setbranch(branchtorestore)
        ui.status(_('marked working directory as branch %s\n')
                  % branchtorestore)

def unshelvecleanup(ui, repo, name, opts):
    """remove related files after an unshelve"""
    if not opts.get('keep'):
        for filetype in shelvefileextensions:
            shfile = shelvedfile(repo, name, filetype)
            if shfile.exists():
                shfile.movetobackup()
        cleanupoldbackups(repo)

def unshelvecontinue(ui, repo, state, opts):
    """subcommand to continue an in-progress unshelve"""
    # We're finishing off a merge. First parent is our original
    # parent, second is the temporary "fake" commit we're unshelving.
    with repo.lock():
        checkparents(repo, state)
        ms = merge.mergestate.read(repo)
        if list(ms.unresolved()):
            raise error.Abort(
                _("unresolved conflicts, can't continue"),
                hint=_("see 'hg resolve', then 'hg unshelve --continue'"))

        repo.vfs.rename('unshelverebasestate', 'rebasestate')
        try:
            rebase.rebase(ui, repo, **{
                'continue' : True
            })
        except Exception:
            repo.vfs.rename('rebasestate', 'unshelverebasestate')
            raise

        shelvectx = repo['tip']
        if state.pendingctx not in shelvectx.parents():
            # rebase was a no-op, so it produced no child commit
            shelvectx = state.pendingctx
        else:
            # only strip the shelvectx if the rebase produced it
            state.nodestoremove.append(shelvectx.node())

        mergefiles(ui, repo, state.wctx, shelvectx)
        restorebranch(ui, repo, state.branchtorestore)

        repair.strip(ui, repo, state.nodestoremove, backup=False,
                     topic='shelve')
        _restoreactivebookmark(repo, state.activebookmark)
        shelvedstate.clear(repo)
        unshelvecleanup(ui, repo, state.name, opts)
        ui.status(_("unshelve of '%s' complete\n") % state.name)

def _commitworkingcopychanges(ui, repo, opts, tmpwctx):
    """Temporarily commit working copy changes before moving unshelve commit"""
    # Store pending changes in a commit and remember added in case a shelve
    # contains unknown files that are part of the pending change
    s = repo.status()
    addedbefore = frozenset(s.added)
    if not (s.modified or s.added or s.removed):
        return tmpwctx, addedbefore
    ui.status(_("temporarily committing pending changes "
                "(restore with 'hg unshelve --abort')\n"))
    commitfunc = getcommitfunc(extra=None, interactive=False,
                               editor=False)
    tempopts = {}
    tempopts['message'] = "pending changes temporary commit"
    tempopts['date'] = opts.get('date')
    with ui.configoverride({('ui', 'quiet'): True}):
        node = cmdutil.commit(ui, repo, commitfunc, [], tempopts)
    tmpwctx = repo[node]
    return tmpwctx, addedbefore

def _unshelverestorecommit(ui, repo, basename):
    """Recreate commit in the repository during the unshelve"""
    with ui.configoverride({('ui', 'quiet'): True}):
        shelvedfile(repo, basename, 'hg').applybundle()
    shelvectx = repo['tip']
    return repo, shelvectx

def _rebaserestoredcommit(ui, repo, opts, tr, oldtiprev, basename, pctx,
                          tmpwctx, shelvectx, branchtorestore,
                          activebookmark):
    """Rebase restored commit from its original location to a destination"""
    # If the shelve is not immediately on top of the commit
    # we'll be merging with, rebase it to be on top.
    if tmpwctx.node() == shelvectx.parents()[0].node():
        return shelvectx

    ui.status(_('rebasing shelved changes\n'))
    try:
        rebase.rebase(ui, repo, **{
            'rev': [shelvectx.rev()],
            'dest': str(tmpwctx.rev()),
            'keep': True,
            'tool': opts.get('tool', ''),
        })
    except error.InterventionRequired:
        tr.close()
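        # Conflicts: persist enough state for 'hg unshelve --continue'
        # or '--abort' to resume, stash the rebase state under a
        # shelve-specific name, and re-raise for the user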

        nodestoremove = [repo.changelog.node(rev)
                         for rev in xrange(oldtiprev, len(repo))]
        shelvedstate.save(repo, basename, pctx, tmpwctx, nodestoremove,
                          branchtorestore, opts.get('keep'), activebookmark)

        repo.vfs.rename('rebasestate', 'unshelverebasestate')
        raise error.InterventionRequired(
            _("unresolved conflicts (see 'hg resolve', then "
              "'hg unshelve --continue')"))

    # refresh ctx after rebase completes
    shelvectx = repo['tip']

    if tmpwctx not in shelvectx.parents():
        # rebase was a no-op, so it produced no child commit
        shelvectx = tmpwctx
    return shelvectx

def _forgetunknownfiles(repo, shelvectx, addedbefore):
    # Forget any files that were unknown before the shelve, unknown before
    # unshelve started, but are now added.
    shelveunknown = shelvectx.extra().get('shelve_unknown')
    if not shelveunknown:
        return
    shelveunknown = frozenset(shelveunknown.split('\0'))
    addedafter = frozenset(repo.status().added)
    toforget = (addedafter & shelveunknown) - addedbefore
    repo[None].forget(toforget)

def _finishunshelve(repo, oldtiprev, tr, activebookmark):
    _restoreactivebookmark(repo, activebookmark)
    # The transaction aborting will strip all the commits for us,
    # but it doesn't update the inmemory structures, so addchangegroup
    # hooks still fire and try to operate on the missing commits.
    # Clean up manually to prevent this.
    repo.unfiltered().changelog.strip(oldtiprev, tr)
    _aborttransaction(repo)

def _checkunshelveuntrackedproblems(ui, repo, shelvectx):
    """Check potential problems which may result from working
    copy having untracked changes."""
    wcdeleted = set(repo.status().deleted)
    shelvetouched = set(shelvectx.files())
    intersection = wcdeleted.intersection(shelvetouched)
    if intersection:
        m = _("shelved change touches missing files")
        hint = _("run hg status to see which files are missing")
        raise error.Abort(m, hint=hint)

@command('unshelve',
         [('a', 'abort', None,
           _('abort an incomplete unshelve operation')),
          ('c', 'continue', None,
           _('continue an incomplete unshelve operation')),
          ('k', 'keep', None,
           _('keep shelve after unshelving')),
          ('n', 'name', '',
           _('restore shelved change with given name'), _('NAME')),
          ('t', 'tool', '', _('specify merge tool')),
          ('', 'date', '',
           _('set date for temporary commits (DEPRECATED)'), _('DATE'))],
         _('hg unshelve [[-n] SHELVED]'))
def unshelve(ui, repo, *shelved, **opts):
    """restore a shelved change to the working directory

    This command accepts an optional name of a shelved change to
    restore. If none is given, the most recent shelved change is used.

    If a shelved change is applied successfully, the bundle that
    contains the shelved changes is moved to a backup location
    (.hg/shelve-backup).

    Since you can restore a shelved change on top of an arbitrary
    commit, it is possible that unshelving will result in a conflict
    between your changes and the commits you are unshelving onto. If
    this occurs, you must resolve the conflict, then use
    ``--continue`` to complete the unshelve operation. (The bundle
    will not be moved until you successfully complete the unshelve.)

    (Alternatively, you can use ``--abort`` to abandon an unshelve
    that causes a conflict. This reverts the unshelved changes, and
    leaves the bundle in place.)

    If a bare shelved change (when no files are specified, without the
    interactive, include, and exclude options) was done on a newly
    created branch, it restores that branch information to the working
    directory.

    After a successful unshelve, the shelved changes are stored in a
    backup directory. Only the N most recent backups are kept. N
    defaults to 10 but can be overridden using the ``shelve.maxbackups``
    configuration option.

    .. container:: verbose

       Timestamps in seconds are used to decide the order of backups.
       For safety, more than ``maxbackups`` backups may be kept if
       identical timestamps prevent their exact order from being
       decided.
    """
839 with repo.wlock():
840 with repo.wlock():
840 return _dounshelve(ui, repo, *shelved, **opts)
841 return _dounshelve(ui, repo, *shelved, **opts)
841
842
def _dounshelve(ui, repo, *shelved, **opts):
    abortf = opts.get('abort')
    continuef = opts.get('continue')
    if not abortf and not continuef:
        cmdutil.checkunfinished(repo)
    shelved = list(shelved)
    if opts.get("name"):
        shelved.append(opts["name"])

    if abortf or continuef:
        if abortf and continuef:
            raise error.Abort(_('cannot use both abort and continue'))
        if shelved:
            raise error.Abort(_('cannot combine abort/continue with '
                                'naming a shelved change'))
        if abortf and opts.get('tool', False):
            ui.warn(_('tool option will be ignored\n'))

        try:
            state = shelvedstate.load(repo)
            if opts.get('keep') is None:
                opts['keep'] = state.keep
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
            cmdutil.wrongtooltocontinue(repo, _('unshelve'))
        except error.CorruptedState as err:
            ui.debug(str(err) + '\n')
            if continuef:
                msg = _('corrupted shelved state file')
                hint = _('please run hg unshelve --abort to abort unshelve '
                         'operation')
                raise error.Abort(msg, hint=hint)
            elif abortf:
                msg = _('could not read shelved state file, your working copy '
                        'may be in an unexpected state\nplease update to some '
                        'commit\n')
                ui.warn(msg)
                shelvedstate.clear(repo)
            return

        if abortf:
            return unshelveabort(ui, repo, state, opts)
        elif continuef:
            return unshelvecontinue(ui, repo, state, opts)
    elif len(shelved) > 1:
        raise error.Abort(_('can only unshelve one change at a time'))
    elif not shelved:
        shelved = listshelves(repo)
        if not shelved:
            raise error.Abort(_('no shelved changes to apply!'))
        basename = util.split(shelved[0][1])[1]
        ui.status(_("unshelving change '%s'\n") % basename)
    else:
        basename = shelved[0]

    if not shelvedfile(repo, basename, patchextension).exists():
        raise error.Abort(_("shelved change '%s' not found") % basename)

    lock = tr = None
    try:
        lock = repo.lock()
        tr = repo.transaction('unshelve', report=lambda x: None)
        oldtiprev = len(repo)

        pctx = repo['.']
        tmpwctx = pctx
        # The goal is to have a commit structure like so:
        # ...-> pctx -> tmpwctx -> shelvectx
        # where tmpwctx is an optional commit with the user's pending changes
        # and shelvectx is the unshelved changes. Then we merge it all down
        # to the original pctx.

        activebookmark = _backupactivebookmark(repo)
        overrides = {('ui', 'forcemerge'): opts.get('tool', '')}
        with ui.configoverride(overrides, 'unshelve'):
            tmpwctx, addedbefore = _commitworkingcopychanges(ui, repo, opts,
                                                             tmpwctx)
            repo, shelvectx = _unshelverestorecommit(ui, repo, basename)
            _checkunshelveuntrackedproblems(ui, repo, shelvectx)
            branchtorestore = ''
            if shelvectx.branch() != shelvectx.p1().branch():
                branchtorestore = shelvectx.branch()

            shelvectx = _rebaserestoredcommit(ui, repo, opts, tr, oldtiprev,
                                              basename, pctx, tmpwctx,
                                              shelvectx, branchtorestore,
                                              activebookmark)
            mergefiles(ui, repo, pctx, shelvectx)
            restorebranch(ui, repo, branchtorestore)
            _forgetunknownfiles(repo, shelvectx, addedbefore)

            shelvedstate.clear(repo)
            _finishunshelve(repo, oldtiprev, tr, activebookmark)
            unshelvecleanup(ui, repo, basename, opts)
    finally:
        if tr:
            tr.release()
        lockmod.release(lock)

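# Configuration sketch (illustrative values): backup retention, and the
# merge tool consulted when no --tool is passed to the ui.forcemerge
# override above, can be set from an hgrc file:
#
#   [shelve]
#   maxbackups = 20
#
#   [ui]
#   merge = :merge3
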
@command('shelve',
         [('A', 'addremove', None,
           _('mark new/missing files as added/removed before shelving')),
          ('u', 'unknown', None,
           _('store unknown files in the shelve')),
          ('', 'cleanup', None,
           _('delete all shelved changes')),
          ('', 'date', '',
           _('shelve with the specified commit date'), _('DATE')),
          ('d', 'delete', None,
           _('delete the named shelved change(s)')),
          ('e', 'edit', False,
           _('invoke editor on commit messages')),
          ('l', 'list', None,
           _('list current shelves')),
          ('m', 'message', '',
           _('use text as shelve message'), _('TEXT')),
          ('n', 'name', '',
           _('use the given name for the shelved commit'), _('NAME')),
          ('p', 'patch', None,
           _('show patch')),
          ('i', 'interactive', None,
           _('interactive mode, only works while creating a shelve')),
          ('', 'stat', None,
           _('output diffstat-style summary of changes'))] + cmdutil.walkopts,
         _('hg shelve [OPTION]... [FILE]...'))
def shelvecmd(ui, repo, *pats, **opts):
    '''save and set aside changes from the working directory

    Shelving takes files that "hg status" reports as not clean, saves
    the modifications to a bundle (a shelved change), and reverts the
    files so that their state in the working directory becomes clean.

    To restore these changes to the working directory, use "hg
    unshelve"; this will work even if you switch to a different
    commit.

    When no files are specified, "hg shelve" saves all not-clean
    files. If specific files or directories are named, only changes to
    those files are shelved.

    In a bare shelve (when no files are specified, without interactive,
    include, or exclude options), shelving remembers whether the working
    directory was on a newly created branch, in other words whether the
    working directory was on a different branch than its first parent.
    In this situation, unshelving restores that branch information to
    the working directory.

    Each shelved change has a name that makes it easier to find later.
    The name of a shelved change defaults to being based on the active
    bookmark, or if there is no active bookmark, the current named
    branch. To specify a different name, use ``--name``.

    To see a list of existing shelved changes, use the ``--list``
    option. For each shelved change, this will print its name, age,
    and description; use ``--patch`` or ``--stat`` for more details.

    To delete specific shelved changes, use ``--delete``. To delete
    all shelved changes, use ``--cleanup``.
    '''
    allowables = [
        ('addremove', {'create'}), # 'create' is pseudo action
        ('unknown', {'create'}),
        ('cleanup', {'cleanup'}),
        # ('date', {'create'}), # ignored for passing '--date "0 0"' in tests
        ('delete', {'delete'}),
        ('edit', {'create'}),
        ('list', {'list'}),
        ('message', {'create'}),
        ('name', {'create'}),
        ('patch', {'patch', 'list'}),
        ('stat', {'stat', 'list'}),
    ]
    def checkopt(opt):
        if opts.get(opt):
            for i, allowable in allowables:
                if opts[i] and opt not in allowable:
                    raise error.Abort(_("options '--%s' and '--%s' may not be "
                                        "used together") % (opt, i))
            return True
    if checkopt('cleanup'):
        if pats:
            raise error.Abort(_("cannot specify names when using '--cleanup'"))
        return cleanupcmd(ui, repo)
    elif checkopt('delete'):
        return deletecmd(ui, repo, pats)
    elif checkopt('list'):
        return listcmd(ui, repo, pats, opts)
    elif checkopt('patch'):
        return patchcmds(ui, repo, pats, opts, subcommand='patch')
    elif checkopt('stat'):
        return patchcmds(ui, repo, pats, opts, subcommand='stat')
    else:
        return createcmd(ui, repo, pats, opts)

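# Illustrative shell session (hypothetical shelve name and files):
#
#   $ hg shelve --name wip-parser     # stash current modifications
#   shelved as wip-parser
#   $ hg shelve --list --stat         # inspect what was stored
#   $ hg shelve --delete wip-parser   # discard it without restoring
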
def extsetup(ui):
    cmdutil.unfinishedstates.append(
        [shelvedstate._filename, False, False,
         _('unshelve already in progress'),
         _("use 'hg unshelve --continue' or 'hg unshelve --abort'")])
    cmdutil.afterresolvedstates.append(
        [shelvedstate._filename, _('hg unshelve --continue')])
@@ -1,1350 +1,1343 b''
# dirstate.py - working directory tracking for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import collections
import contextlib
import errno
import os
import stat

from .i18n import _
from .node import nullid
from . import (
    encoding,
    error,
    match as matchmod,
    pathutil,
    policy,
    pycompat,
    scmutil,
    txnutil,
    util,
)

parsers = policy.importmod(r'parsers')

propertycache = util.propertycache
filecache = scmutil.filecache
_rangemask = 0x7fffffff

dirstatetuple = parsers.dirstatetuple

class repocache(filecache):
    """filecache for files in .hg/"""
    def join(self, obj, fname):
        return obj._opener.join(fname)

class rootcache(filecache):
    """filecache for files in the repository root"""
    def join(self, obj, fname):
        return obj._join(fname)

def _getfsnow(vfs):
    '''Get "now" timestamp on filesystem'''
    tmpfd, tmpname = vfs.mkstemp()
    try:
        return os.fstat(tmpfd).st_mtime
    finally:
        os.close(tmpfd)
        vfs.unlink(tmpname)

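# Aside (standard-library sketch of the same trick, independent of any
# vfs): asking the filesystem itself for "now" avoids mismatches between
# the system clock and filesystem timestamp granularity:
#
#   import os, tempfile
#   fd, name = tempfile.mkstemp()
#   try:
#       fs_now = os.fstat(fd).st_mtime
#   finally:
#       os.close(fd)
#       os.unlink(name)
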
def nonnormalentries(dmap):
    '''Compute the nonnormal dirstate entries from the dmap'''
    try:
        return parsers.nonnormalotherparententries(dmap)
    except AttributeError:
        nonnorm = set()
        otherparent = set()
        for fname, e in dmap.iteritems():
            if e[0] != 'n' or e[3] == -1:
                nonnorm.add(fname)
            if e[0] == 'n' and e[2] == -2:
                otherparent.add(fname)
        return nonnorm, otherparent

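# Worked example (hypothetical entries): given
#   dmap = {'clean':   ('n', 0o644, 12, 1500000000),
#           'pending': ('n', 0o644, 12, -1),
#           'added':   ('a', 0, -1, -1)}
# nonnormalentries(dmap) returns ({'pending', 'added'}, set()), since only
# 'clean' is in state 'n' with a recorded mtime, and no entry carries the
# other-parent size marker of -2.
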
class dirstate(object):

    def __init__(self, opener, ui, root, validate, sparsematchfn):
        '''Create a new dirstate object.

        opener is an open()-like callable that can be used to open the
        dirstate file; root is the root of the directory tracked by
        the dirstate.
        '''
        self._opener = opener
        self._validate = validate
        self._root = root
        self._sparsematchfn = sparsematchfn
        # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
        # UNC path pointing to root share (issue4557)
        self._rootdir = pathutil.normasprefix(root)
        self._dirty = False
        self._dirtypl = False
        self._lastnormaltime = 0
        self._ui = ui
        self._filecache = {}
        self._parentwriters = 0
        self._filename = 'dirstate'
        self._pendingfilename = '%s.pending' % self._filename
        self._plchangecallbacks = {}
        self._origpl = None
        self._updatedfiles = set()

        # for consistent view between _pl() and _read() invocations
        self._pendingmode = None

    @contextlib.contextmanager
    def parentchange(self):
        '''Context manager for handling dirstate parents.

        If an exception occurs in the scope of the context manager,
        the incoherent dirstate won't be written when wlock is
        released.
        '''
        self._parentwriters += 1
        yield
        # Typically we want the "undo" step of a context manager in a
        # finally block so it happens even when an exception
        # occurs. In this case, however, we only want to decrement
        # parentwriters if the code in the with statement exits
        # normally, so we don't have a try/finally here on purpose.
        self._parentwriters -= 1

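    # Usage sketch (illustrative; assumes an already-constructed repo):
    # parent updates are expected to happen under the wlock and inside
    # parentchange(), otherwise setparents() below raises ValueError:
    #
    #   with repo.wlock():
    #       with repo.dirstate.parentchange():
    #           repo.dirstate.setparents(newp1node)
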
    def beginparentchange(self):
        '''Marks the beginning of a set of changes that involve changing
        the dirstate parents. If there is an exception during this time,
        the dirstate will not be written when the wlock is released. This
        prevents writing an incoherent dirstate where the parent doesn't
        match the contents.
        '''
        self._ui.deprecwarn('beginparentchange is obsoleted by the '
                            'parentchange context manager.', '4.3')
        self._parentwriters += 1

    def endparentchange(self):
        '''Marks the end of a set of changes that involve changing the
        dirstate parents. Once all parent changes have been marked done,
        the wlock will be free to write the dirstate on release.
        '''
        self._ui.deprecwarn('endparentchange is obsoleted by the '
                            'parentchange context manager.', '4.3')
        if self._parentwriters > 0:
            self._parentwriters -= 1

    def pendingparentchange(self):
        '''Returns true if the dirstate is in the middle of a set of changes
        that modify the dirstate parent.
        '''
        return self._parentwriters > 0

    @propertycache
    def _map(self):
        '''Return the dirstate contents as a map from filename to
        (state, mode, size, time).'''
        self._read()
        return self._map

    @propertycache
    def _copymap(self):
        self._read()
        return self._copymap

    @propertycache
    def _identity(self):
        self._read()
        return self._identity

    @propertycache
    def _nonnormalset(self):
        nonnorm, otherparents = nonnormalentries(self._map)
        self._otherparentset = otherparents
        return nonnorm

    @propertycache
    def _otherparentset(self):
        nonnorm, otherparents = nonnormalentries(self._map)
        self._nonnormalset = nonnorm
        return otherparents

    @propertycache
    def _filefoldmap(self):
        try:
            makefilefoldmap = parsers.make_file_foldmap
        except AttributeError:
            pass
        else:
            return makefilefoldmap(self._map, util.normcasespec,
                                   util.normcasefallback)

        f = {}
        normcase = util.normcase
        for name, s in self._map.iteritems():
            if s[0] != 'r':
                f[normcase(name)] = name
        f['.'] = '.' # prevents useless util.fspath() invocation
        return f

    @propertycache
    def _dirfoldmap(self):
        f = {}
        normcase = util.normcase
        for name in self._dirs:
            f[normcase(name)] = name
        return f

    @property
    def _sparsematcher(self):
        """The matcher for the sparse checkout.

        The working directory may not include every file from a manifest. The
        matcher obtained by this property will match a path if it is to be
        included in the working directory.
        """
        # TODO there is potential to cache this property. For now, the matcher
        # is resolved on every access. (But the called function does use a
        # cache to keep the lookup fast.)
        return self._sparsematchfn()

    @repocache('branch')
    def _branch(self):
        try:
            return self._opener.read("branch").strip() or "default"
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
            return "default"

    @propertycache
    def _pl(self):
        try:
            fp = self._opendirstatefile()
            st = fp.read(40)
            fp.close()
            l = len(st)
            if l == 40:
                return st[:20], st[20:40]
            elif l > 0 and l < 40:
                raise error.Abort(_('working directory state appears damaged!'))
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
        return [nullid, nullid]

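    # Note on the fixed read(40) above: a dirstate file starts with two
    # 20-byte binary nodeids (p1, then p2); the serialized entry records
    # follow that header.
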
    @propertycache
    def _dirs(self):
        return util.dirs(self._map, 'r')

    def dirs(self):
        return self._dirs

    @rootcache('.hgignore')
    def _ignore(self):
        files = self._ignorefiles()
        if not files:
            return matchmod.never(self._root, '')

        pats = ['include:%s' % f for f in files]
        return matchmod.match(self._root, '', [], pats, warn=self._ui.warn)

    @propertycache
    def _slash(self):
        return self._ui.configbool('ui', 'slash') and pycompat.ossep != '/'

    @propertycache
    def _checklink(self):
        return util.checklink(self._root)

    @propertycache
    def _checkexec(self):
        return util.checkexec(self._root)

    @propertycache
    def _checkcase(self):
        return not util.fscasesensitive(self._join('.hg'))

    def _join(self, f):
        # much faster than os.path.join()
        # it's safe because f is always a relative path
        return self._rootdir + f

    def flagfunc(self, buildfallback):
        if self._checklink and self._checkexec:
            def f(x):
                try:
                    st = os.lstat(self._join(x))
                    if util.statislink(st):
                        return 'l'
                    if util.statisexec(st):
                        return 'x'
                except OSError:
                    pass
                return ''
            return f

        fallback = buildfallback()
        if self._checklink:
            def f(x):
                if os.path.islink(self._join(x)):
                    return 'l'
                if 'x' in fallback(x):
                    return 'x'
                return ''
            return f
        if self._checkexec:
            def f(x):
                if 'l' in fallback(x):
                    return 'l'
                if util.isexec(self._join(x)):
                    return 'x'
                return ''
            return f
        else:
            return fallback

    @propertycache
    def _cwd(self):
        # internal config: ui.forcecwd
        forcecwd = self._ui.config('ui', 'forcecwd')
        if forcecwd:
            return forcecwd
        return pycompat.getcwd()

    def getcwd(self):
        '''Return the path from which a canonical path is calculated.

        This path should be used to resolve file patterns or to convert
        canonical paths back to file paths for display. It shouldn't be
        used to get real file paths. Use vfs functions instead.
        '''
        cwd = self._cwd
        if cwd == self._root:
            return ''
        # self._root ends with a path separator if self._root is '/' or 'C:\'
        rootsep = self._root
        if not util.endswithsep(rootsep):
            rootsep += pycompat.ossep
        if cwd.startswith(rootsep):
            return cwd[len(rootsep):]
        else:
            # we're outside the repo. return an absolute path.
            return cwd

    def pathto(self, f, cwd=None):
        if cwd is None:
            cwd = self.getcwd()
        path = util.pathto(self._root, cwd, f)
        if self._slash:
            return util.pconvert(path)
        return path

    def __getitem__(self, key):
        '''Return the current state of key (a filename) in the dirstate.

        States are:
        n  normal
        m  needs merging
        r  marked for removal
        a  marked for addition
        ?  not tracked
        '''
        return self._map.get(key, ("?",))[0]

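    # Illustrative lookups (hypothetical filenames):
    #
    #   ds = repo.dirstate
    #   ds['tracked.py']    # -> 'n' for a clean, tracked file
    #   ds['justadded.py']  # -> 'a' right after 'hg add justadded.py'
    #   ds['scratch.tmp']   # -> '?' for a file hg knows nothing about
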
    def __contains__(self, key):
        return key in self._map

    def __iter__(self):
        for x in sorted(self._map):
            yield x

    def items(self):
        return self._map.iteritems()

    iteritems = items

    def parents(self):
        return [self._validate(p) for p in self._pl]

    def p1(self):
        return self._validate(self._pl[0])

    def p2(self):
        return self._validate(self._pl[1])

    def branch(self):
        return encoding.tolocal(self._branch)

    def setparents(self, p1, p2=nullid):
        """Set dirstate parents to p1 and p2.

        When moving from two parents to one, 'm' merged entries are
        adjusted to normal, and previous copy records are discarded and
        returned by the call.

        See localrepo.setparents()
        """
        if self._parentwriters == 0:
            raise ValueError("cannot set dirstate parent without "
                             "calling dirstate.beginparentchange")

        self._dirty = self._dirtypl = True
        oldp2 = self._pl[1]
        if self._origpl is None:
            self._origpl = self._pl
        self._pl = p1, p2
        copies = {}
        if oldp2 != nullid and p2 == nullid:
            candidatefiles = self._nonnormalset.union(self._otherparentset)
            for f in candidatefiles:
                s = self._map.get(f)
                if s is None:
                    continue

                # Discard 'm' markers when moving away from a merge state
                if s[0] == 'm':
                    if f in self._copymap:
                        copies[f] = self._copymap[f]
                    self.normallookup(f)
                # Also fix up otherparent markers
                elif s[0] == 'n' and s[2] == -2:
                    if f in self._copymap:
                        copies[f] = self._copymap[f]
                    self.add(f)
        return copies

    def setbranch(self, branch):
        self._branch = encoding.fromlocal(branch)
        f = self._opener('branch', 'w', atomictemp=True, checkambig=True)
        try:
            f.write(self._branch + '\n')
            f.close()

            # make sure filecache has the correct stat info for _branch after
            # replacing the underlying file
            ce = self._filecache['_branch']
            if ce:
                ce.refresh()
        except: # re-raises
            f.discard()
            raise

    def _opendirstatefile(self):
        fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
        if self._pendingmode is not None and self._pendingmode != mode:
            fp.close()
            raise error.Abort(_('working directory state may be '
                                'changed parallelly'))
        self._pendingmode = mode
        return fp

    def _read(self):
        self._map = {}
        self._copymap = {}
        # ignore HG_PENDING because identity is used only for writing
        self._identity = util.filestat.frompath(
            self._opener.join(self._filename))
        try:
            fp = self._opendirstatefile()
            try:
                st = fp.read()
            finally:
                fp.close()
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
            return
        if not st:
            return

        if util.safehasattr(parsers, 'dict_new_presized'):
            # Make an estimate of the number of files in the dirstate based on
            # its size. From a linear regression on a set of real-world repos,
            # all over 10,000 files, the size of a dirstate entry is 85
            # bytes. The cost of resizing is significantly higher than the cost
            # of filling in a larger presized dict, so subtract 20% from the
            # size.
            #
            # This heuristic is imperfect in many ways, so in a future dirstate
            # format update it makes sense to just record the number of entries
            # on write.
            self._map = parsers.dict_new_presized(len(st) / 71)

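        # Worked example of the estimate (illustrative): a 1 MiB dirstate
        # presizes for 1048576 / 71 ~= 14768 entries, comfortably above
        # the ~12300 files such a file would hold at ~85 bytes per entry.
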
        # Python's garbage collector triggers a GC each time a certain number
        # of container objects (the number being defined by
        # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
        # for each file in the dirstate. The C version then immediately marks
        # them as not to be tracked by the collector. However, this has no
        # effect on when GCs are triggered, only on what objects the GC looks
        # into. This means that O(number of files) GCs are unavoidable.
        # Depending on when in the process's lifetime the dirstate is parsed,
        # this can get very expensive. As a workaround, disable GC while
        # parsing the dirstate.
        #
        # (we cannot decorate the function directly since it is in a C module)
        parse_dirstate = util.nogc(parsers.parse_dirstate)
        p = parse_dirstate(self._map, self._copymap, st)
        if not self._dirtypl:
            self._pl = p

    def invalidate(self):
        '''Causes the next access to reread the dirstate.

        This is different from localrepo.invalidatedirstate() because it always
        rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
        check whether the dirstate has changed before rereading it.'''

        for a in ("_map", "_copymap", "_identity",
                  "_filefoldmap", "_dirfoldmap", "_branch",
                  "_pl", "_dirs", "_ignore", "_nonnormalset",
                  "_otherparentset"):
            if a in self.__dict__:
                delattr(self, a)
        self._lastnormaltime = 0
        self._dirty = False
        self._updatedfiles.clear()
        self._parentwriters = 0
        self._origpl = None

    def copy(self, source, dest):
        """Mark dest as a copy of source. Unmark dest if source is None."""
        if source == dest:
            return
        self._dirty = True
        if source is not None:
            self._copymap[dest] = source
            self._updatedfiles.add(source)
            self._updatedfiles.add(dest)
        elif dest in self._copymap:
            del self._copymap[dest]
            self._updatedfiles.add(dest)

    def copied(self, file):
        return self._copymap.get(file, None)

    def copies(self):
        return self._copymap

    def _droppath(self, f):
        if self[f] not in "?r" and "_dirs" in self.__dict__:
            self._dirs.delpath(f)

        if "_filefoldmap" in self.__dict__:
            normed = util.normcase(f)
            if normed in self._filefoldmap:
                del self._filefoldmap[normed]

        self._updatedfiles.add(f)

    def _addpath(self, f, state, mode, size, mtime):
        oldstate = self[f]
        if state == 'a' or oldstate == 'r':
            scmutil.checkfilename(f)
            if f in self._dirs:
                raise error.Abort(_('directory %r already in dirstate') % f)
            # shadows
            for d in util.finddirs(f):
                if d in self._dirs:
                    break
                if d in self._map and self[d] != 'r':
                    raise error.Abort(
                        _('file %r in dirstate clashes with %r') % (d, f))
        if oldstate in "?r" and "_dirs" in self.__dict__:
            self._dirs.addpath(f)
        self._dirty = True
        self._updatedfiles.add(f)
        self._map[f] = dirstatetuple(state, mode, size, mtime)
        if state != 'n' or mtime == -1:
            self._nonnormalset.add(f)
        if size == -2:
            self._otherparentset.add(f)

    def normal(self, f):
        '''Mark a file normal and clean.'''
        s = os.lstat(self._join(f))
        mtime = s.st_mtime
        self._addpath(f, 'n', s.st_mode,
                      s.st_size & _rangemask, mtime & _rangemask)
        if f in self._copymap:
            del self._copymap[f]
        if f in self._nonnormalset:
            self._nonnormalset.remove(f)
        if mtime > self._lastnormaltime:
            # Remember the most recent modification timeslot for status(),
            # to make sure we won't miss future size-preserving file content
            # modifications that happen within the same timeslot.
            self._lastnormaltime = mtime

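    # Why _lastnormaltime matters (illustrative scenario): a file written
    # and recorded as clean, then rewritten with the same size within the
    # same mtime timeslot, is indistinguishable by stat data alone; this
    # watermark lets a later status() re-check such entries by content.
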
582 def normallookup(self, f):
582 def normallookup(self, f):
583 '''Mark a file normal, but possibly dirty.'''
583 '''Mark a file normal, but possibly dirty.'''
584 if self._pl[1] != nullid and f in self._map:
584 if self._pl[1] != nullid and f in self._map:
585 # if there is a merge going on and the file was either
585 # if there is a merge going on and the file was either
586 # in state 'm' (-1) or coming from other parent (-2) before
586 # in state 'm' (-1) or coming from other parent (-2) before
587 # being removed, restore that state.
587 # being removed, restore that state.
588 entry = self._map[f]
588 entry = self._map[f]
589 if entry[0] == 'r' and entry[2] in (-1, -2):
589 if entry[0] == 'r' and entry[2] in (-1, -2):
590 source = self._copymap.get(f)
590 source = self._copymap.get(f)
591 if entry[2] == -1:
591 if entry[2] == -1:
592 self.merge(f)
592 self.merge(f)
593 elif entry[2] == -2:
593 elif entry[2] == -2:
594 self.otherparent(f)
594 self.otherparent(f)
595 if source:
595 if source:
596 self.copy(source, f)
596 self.copy(source, f)
597 return
597 return
598 if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
598 if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
599 return
599 return
600 self._addpath(f, 'n', 0, -1, -1)
600 self._addpath(f, 'n', 0, -1, -1)
601 if f in self._copymap:
601 if f in self._copymap:
602 del self._copymap[f]
602 del self._copymap[f]
603 if f in self._nonnormalset:
603 if f in self._nonnormalset:
604 self._nonnormalset.remove(f)
604 self._nonnormalset.remove(f)
605
605
606 def otherparent(self, f):
606 def otherparent(self, f):
607 '''Mark as coming from the other parent, always dirty.'''
607 '''Mark as coming from the other parent, always dirty.'''
608 if self._pl[1] == nullid:
608 if self._pl[1] == nullid:
609 raise error.Abort(_("setting %r to other parent "
609 raise error.Abort(_("setting %r to other parent "
610 "only allowed in merges") % f)
610 "only allowed in merges") % f)
611 if f in self and self[f] == 'n':
611 if f in self and self[f] == 'n':
612 # merge-like
612 # merge-like
613 self._addpath(f, 'm', 0, -2, -1)
613 self._addpath(f, 'm', 0, -2, -1)
614 else:
614 else:
615 # add-like
615 # add-like
616 self._addpath(f, 'n', 0, -2, -1)
616 self._addpath(f, 'n', 0, -2, -1)
617
617
618 if f in self._copymap:
618 if f in self._copymap:
619 del self._copymap[f]
619 del self._copymap[f]
620
620
621 def add(self, f):
621 def add(self, f):
622 '''Mark a file added.'''
622 '''Mark a file added.'''
623 self._addpath(f, 'a', 0, -1, -1)
623 self._addpath(f, 'a', 0, -1, -1)
624 if f in self._copymap:
624 if f in self._copymap:
625 del self._copymap[f]
625 del self._copymap[f]
626
626
627 def remove(self, f):
627 def remove(self, f):
628 '''Mark a file removed.'''
628 '''Mark a file removed.'''
629 self._dirty = True
629 self._dirty = True
630 self._droppath(f)
630 self._droppath(f)
631 size = 0
631 size = 0
632 if self._pl[1] != nullid and f in self._map:
632 if self._pl[1] != nullid and f in self._map:
633 # backup the previous state
633 # backup the previous state
634 entry = self._map[f]
634 entry = self._map[f]
635 if entry[0] == 'm': # merge
635 if entry[0] == 'm': # merge
636 size = -1
636 size = -1
637 elif entry[0] == 'n' and entry[2] == -2: # other parent
637 elif entry[0] == 'n' and entry[2] == -2: # other parent
638 size = -2
638 size = -2
639 self._otherparentset.add(f)
639 self._otherparentset.add(f)
640 self._map[f] = dirstatetuple('r', 0, size, 0)
640 self._map[f] = dirstatetuple('r', 0, size, 0)
641 self._nonnormalset.add(f)
641 self._nonnormalset.add(f)
642 if size == 0 and f in self._copymap:
642 if size == 0 and f in self._copymap:
643 del self._copymap[f]
643 del self._copymap[f]
644
644
645 def merge(self, f):
645 def merge(self, f):
646 '''Mark a file merged.'''
646 '''Mark a file merged.'''
647 if self._pl[1] == nullid:
647 if self._pl[1] == nullid:
648 return self.normallookup(f)
648 return self.normallookup(f)
649 return self.otherparent(f)
649 return self.otherparent(f)
650
650
651 def drop(self, f):
651 def drop(self, f):
652 '''Drop a file from the dirstate'''
652 '''Drop a file from the dirstate'''
653 if f in self._map:
653 if f in self._map:
654 self._dirty = True
654 self._dirty = True
655 self._droppath(f)
655 self._droppath(f)
656 del self._map[f]
656 del self._map[f]
657 if f in self._nonnormalset:
657 if f in self._nonnormalset:
658 self._nonnormalset.remove(f)
658 self._nonnormalset.remove(f)
659 if f in self._copymap:
659 if f in self._copymap:
660 del self._copymap[f]
660 del self._copymap[f]
661
661
662 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
662 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
663 if exists is None:
663 if exists is None:
664 exists = os.path.lexists(os.path.join(self._root, path))
664 exists = os.path.lexists(os.path.join(self._root, path))
665 if not exists:
665 if not exists:
666 # Maybe a path component exists
666 # Maybe a path component exists
667 if not ignoremissing and '/' in path:
667 if not ignoremissing and '/' in path:
668 d, f = path.rsplit('/', 1)
668 d, f = path.rsplit('/', 1)
669 d = self._normalize(d, False, ignoremissing, None)
669 d = self._normalize(d, False, ignoremissing, None)
670 folded = d + "/" + f
670 folded = d + "/" + f
671 else:
671 else:
672 # No path components, preserve original case
672 # No path components, preserve original case
673 folded = path
673 folded = path
674 else:
674 else:
675 # recursively normalize leading directory components
675 # recursively normalize leading directory components
676 # against dirstate
676 # against dirstate
677 if '/' in normed:
677 if '/' in normed:
678 d, f = normed.rsplit('/', 1)
678 d, f = normed.rsplit('/', 1)
679 d = self._normalize(d, False, ignoremissing, True)
679 d = self._normalize(d, False, ignoremissing, True)
680 r = self._root + "/" + d
680 r = self._root + "/" + d
681 folded = d + "/" + util.fspath(f, r)
681 folded = d + "/" + util.fspath(f, r)
682 else:
682 else:
683 folded = util.fspath(normed, self._root)
683 folded = util.fspath(normed, self._root)
684 storemap[normed] = folded
684 storemap[normed] = folded
685
685
686 return folded
686 return folded
687
687
688 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
688 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
689 normed = util.normcase(path)
689 normed = util.normcase(path)
690 folded = self._filefoldmap.get(normed, None)
690 folded = self._filefoldmap.get(normed, None)
691 if folded is None:
691 if folded is None:
692 if isknown:
692 if isknown:
693 folded = path
693 folded = path
694 else:
694 else:
695 folded = self._discoverpath(path, normed, ignoremissing, exists,
695 folded = self._discoverpath(path, normed, ignoremissing, exists,
696 self._filefoldmap)
696 self._filefoldmap)
697 return folded
697 return folded
698
698
699 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
699 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
700 normed = util.normcase(path)
700 normed = util.normcase(path)
701 folded = self._filefoldmap.get(normed, None)
701 folded = self._filefoldmap.get(normed, None)
702 if folded is None:
702 if folded is None:
703 folded = self._dirfoldmap.get(normed, None)
703 folded = self._dirfoldmap.get(normed, None)
704 if folded is None:
704 if folded is None:
705 if isknown:
705 if isknown:
706 folded = path
706 folded = path
707 else:
707 else:
708 # store discovered result in dirfoldmap so that future
708 # store discovered result in dirfoldmap so that future
709 # normalizefile calls don't start matching directories
709 # normalizefile calls don't start matching directories
710 folded = self._discoverpath(path, normed, ignoremissing, exists,
710 folded = self._discoverpath(path, normed, ignoremissing, exists,
711 self._dirfoldmap)
711 self._dirfoldmap)
712 return folded
712 return folded
713
713
714 def normalize(self, path, isknown=False, ignoremissing=False):
714 def normalize(self, path, isknown=False, ignoremissing=False):
715 '''
715 '''
716 normalize the case of a pathname when on a casefolding filesystem
716 normalize the case of a pathname when on a casefolding filesystem
717
717
718 isknown specifies whether the filename came from walking the
718 isknown specifies whether the filename came from walking the
719 disk, to avoid extra filesystem access.
719 disk, to avoid extra filesystem access.
720
720
721 If ignoremissing is True, missing paths are returned
721 If ignoremissing is True, missing paths are returned
722 unchanged. Otherwise, we try harder to normalize possibly
722 unchanged. Otherwise, we try harder to normalize possibly
723 existing path components.
723 existing path components.
724
724
725 The normalized case is determined based on the following precedence:
725 The normalized case is determined based on the following precedence:
726
726
727 - version of name already stored in the dirstate
727 - version of name already stored in the dirstate
728 - version of name stored on disk
728 - version of name stored on disk
729 - version provided via command arguments
729 - version provided via command arguments
730 '''
730 '''
731
731
732 if self._checkcase:
732 if self._checkcase:
733 return self._normalize(path, isknown, ignoremissing)
733 return self._normalize(path, isknown, ignoremissing)
734 return path
734 return path
735
735
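As an aside, the precedence above can be captured in a tiny standalone sketch. Here, plain dictionaries stand in for the dirstate's fold maps and the on-disk case, and lower() stands in for util.normcase, so this is illustrative only:

def normalize_sketch(path, foldmap, diskcase):
    # precedence: case stored in the dirstate, then case stored on
    # disk, then the spelling provided by the caller
    normed = path.lower()                    # stand-in for util.normcase
    if normed in foldmap:                    # already known to the dirstate
        return foldmap[normed]
    if normed in diskcase:                   # found on disk
        foldmap[normed] = diskcase[normed]   # memoize, like storemap above
        return foldmap[normed]
    return path                              # preserve the argument's case

# normalize_sketch('readme.TXT', {}, {'readme.txt': 'README.txt'})
# -> 'README.txt'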
736 def clear(self):
736 def clear(self):
737 self._map = {}
737 self._map = {}
738 self._nonnormalset = set()
738 self._nonnormalset = set()
739 self._otherparentset = set()
739 self._otherparentset = set()
740 if "_dirs" in self.__dict__:
740 if "_dirs" in self.__dict__:
741 delattr(self, "_dirs")
741 delattr(self, "_dirs")
742 self._copymap = {}
742 self._copymap = {}
743 self._pl = [nullid, nullid]
743 self._pl = [nullid, nullid]
744 self._lastnormaltime = 0
744 self._lastnormaltime = 0
745 self._updatedfiles.clear()
745 self._updatedfiles.clear()
746 self._dirty = True
746 self._dirty = True
747
747
748 def rebuild(self, parent, allfiles, changedfiles=None):
748 def rebuild(self, parent, allfiles, changedfiles=None):
749 if changedfiles is None:
749 if changedfiles is None:
750 # Rebuild entire dirstate
750 # Rebuild entire dirstate
751 changedfiles = allfiles
751 changedfiles = allfiles
752 lastnormaltime = self._lastnormaltime
752 lastnormaltime = self._lastnormaltime
753 self.clear()
753 self.clear()
754 self._lastnormaltime = lastnormaltime
754 self._lastnormaltime = lastnormaltime
755
755
756 if self._origpl is None:
756 if self._origpl is None:
757 self._origpl = self._pl
757 self._origpl = self._pl
758 self._pl = (parent, nullid)
758 self._pl = (parent, nullid)
759 for f in changedfiles:
759 for f in changedfiles:
760 if f in allfiles:
760 if f in allfiles:
761 self.normallookup(f)
761 self.normallookup(f)
762 else:
762 else:
763 self.drop(f)
763 self.drop(f)
764
764
765 self._dirty = True
765 self._dirty = True
766
766
767 def identity(self):
767 def identity(self):
768 '''Return identity of dirstate itself to detect changes in storage
768 '''Return identity of dirstate itself to detect changes in storage
769
769
770 If the identity of the previous dirstate equals this one, writing
770 If the identity of the previous dirstate equals this one, writing
771 changes out based on the former dirstate preserves consistency.
771 changes out based on the former dirstate preserves consistency.
772 '''
772 '''
773 return self._identity
773 return self._identity
774
774
775 def write(self, tr):
775 def write(self, tr):
776 if not self._dirty:
776 if not self._dirty:
777 return
777 return
778
778
779 filename = self._filename
779 filename = self._filename
780 if tr:
780 if tr:
781 # 'dirstate.write()' is not only for writing in-memory
781 # 'dirstate.write()' is not only for writing in-memory
782 # changes out, but also for dropping ambiguous timestamps.
782 # changes out, but also for dropping ambiguous timestamps.
783 # Delayed writing would re-raise the "ambiguous timestamp" issue.
783 # Delayed writing would re-raise the "ambiguous timestamp" issue.
784 # See also the wiki page below for detail:
784 # See also the wiki page below for detail:
785 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
785 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
786
786
787 # emulate dropping timestamp in 'parsers.pack_dirstate'
787 # emulate dropping timestamp in 'parsers.pack_dirstate'
788 now = _getfsnow(self._opener)
788 now = _getfsnow(self._opener)
789 dmap = self._map
789 dmap = self._map
790 for f in self._updatedfiles:
790 for f in self._updatedfiles:
791 e = dmap.get(f)
791 e = dmap.get(f)
792 if e is not None and e[0] == 'n' and e[3] == now:
792 if e is not None and e[0] == 'n' and e[3] == now:
793 dmap[f] = dirstatetuple(e[0], e[1], e[2], -1)
793 dmap[f] = dirstatetuple(e[0], e[1], e[2], -1)
794 self._nonnormalset.add(f)
794 self._nonnormalset.add(f)
795
795
796 # emulate that all 'dirstate.normal' results are written out
796 # emulate that all 'dirstate.normal' results are written out
797 self._lastnormaltime = 0
797 self._lastnormaltime = 0
798 self._updatedfiles.clear()
798 self._updatedfiles.clear()
799
799
800 # delay writing in-memory changes out
800 # delay writing in-memory changes out
801 tr.addfilegenerator('dirstate', (self._filename,),
801 tr.addfilegenerator('dirstate', (self._filename,),
802 self._writedirstate, location='plain')
802 self._writedirstate, location='plain')
803 return
803 return
804
804
805 st = self._opener(filename, "w", atomictemp=True, checkambig=True)
805 st = self._opener(filename, "w", atomictemp=True, checkambig=True)
806 self._writedirstate(st)
806 self._writedirstate(st)
807
807
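The timestamp-dropping step above can be isolated: any 'n' entry whose recorded mtime equals the filesystem's current second is rewritten with mtime -1, forcing a content check on the next status. A minimal sketch, with plain tuples standing in for dirstatetuple:

def drop_ambiguous(dmap, updatedfiles, now):
    # entries written within the current second cannot be trusted: the
    # file could change again in that same second without the mtime
    # moving, so store -1 to force a later lookup
    nonnormal = set()
    for f in updatedfiles:
        e = dmap.get(f)
        if e is not None and e[0] == 'n' and e[3] == now:
            dmap[f] = (e[0], e[1], e[2], -1)
            nonnormal.add(f)
    return nonnormal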
808 def addparentchangecallback(self, category, callback):
808 def addparentchangecallback(self, category, callback):
809 """add a callback to be called when the wd parents are changed
809 """add a callback to be called when the wd parents are changed
810
810
811 Callback will be called with the following arguments:
811 Callback will be called with the following arguments:
812 dirstate, (oldp1, oldp2), (newp1, newp2)
812 dirstate, (oldp1, oldp2), (newp1, newp2)
813
813
814 Category is a unique identifier to allow overwriting an old callback
814 Category is a unique identifier to allow overwriting an old callback
815 with a newer callback.
815 with a newer callback.
816 """
816 """
817 self._plchangecallbacks[category] = callback
817 self._plchangecallbacks[category] = callback
818
818
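For reference, a callback registered here receives the dirstate and both parent pairs; a hedged sketch (the 'myext' category and the logparents name are hypothetical):

def logparents(dirstate, oldparents, newparents):
    # oldparents and newparents are (p1, p2) node pairs
    pass

# registering under the same category again would replace this callback:
# dirstate.addparentchangecallback('myext', logparents)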
819 def _writedirstate(self, st):
819 def _writedirstate(self, st):
820 # notify callbacks about parents change
820 # notify callbacks about parents change
821 if self._origpl is not None and self._origpl != self._pl:
821 if self._origpl is not None and self._origpl != self._pl:
822 for c, callback in sorted(self._plchangecallbacks.iteritems()):
822 for c, callback in sorted(self._plchangecallbacks.iteritems()):
823 callback(self, self._origpl, self._pl)
823 callback(self, self._origpl, self._pl)
824 self._origpl = None
824 self._origpl = None
825 # use the modification time of the newly created temporary file as the
825 # use the modification time of the newly created temporary file as the
826 # filesystem's notion of 'now'
826 # filesystem's notion of 'now'
827 now = util.fstat(st).st_mtime & _rangemask
827 now = util.fstat(st).st_mtime & _rangemask
828
828
829 # a large enough 'delaywrite' prevents 'pack_dirstate' from dropping
829 # a large enough 'delaywrite' prevents 'pack_dirstate' from dropping
830 # the timestamps of entries in the dirstate, because of 'now > mtime'
830 # the timestamps of entries in the dirstate, because of 'now > mtime'
831 delaywrite = self._ui.configint('debug', 'dirstate.delaywrite', 0)
831 delaywrite = self._ui.configint('debug', 'dirstate.delaywrite', 0)
832 if delaywrite > 0:
832 if delaywrite > 0:
833 # do we have any files to delay for?
833 # do we have any files to delay for?
834 for f, e in self._map.iteritems():
834 for f, e in self._map.iteritems():
835 if e[0] == 'n' and e[3] == now:
835 if e[0] == 'n' and e[3] == now:
836 import time # to avoid useless import
836 import time # to avoid useless import
837 # rather than sleep n seconds, sleep until the next
837 # rather than sleep n seconds, sleep until the next
838 # multiple of n seconds
838 # multiple of n seconds
839 clock = time.time()
839 clock = time.time()
840 start = int(clock) - (int(clock) % delaywrite)
840 start = int(clock) - (int(clock) % delaywrite)
841 end = start + delaywrite
841 end = start + delaywrite
842 time.sleep(end - clock)
842 time.sleep(end - clock)
843 now = end # trust our estimate that the end is near now
843 now = end # trust our estimate that the end is near now
844 break
844 break
845
845
846 st.write(parsers.pack_dirstate(self._map, self._copymap, self._pl, now))
846 st.write(parsers.pack_dirstate(self._map, self._copymap, self._pl, now))
847 self._nonnormalset, self._otherparentset = nonnormalentries(self._map)
847 self._nonnormalset, self._otherparentset = nonnormalentries(self._map)
848 st.close()
848 st.close()
849 self._lastnormaltime = 0
849 self._lastnormaltime = 0
850 self._dirty = self._dirtypl = False
850 self._dirty = self._dirtypl = False
851
851
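The delaywrite sleep rounds up to the next multiple of the configured interval rather than sleeping a fixed amount; extracted as a helper it looks roughly like this:

import time

def sleep_to_next_multiple(n):
    # sleep until the wall clock reaches the next multiple of n seconds;
    # after returning, any mtime recorded earlier is strictly in the past
    clock = time.time()
    start = int(clock) - (int(clock) % n)
    end = start + n
    time.sleep(end - clock)
    return end   # the caller may treat 'end' as "now", as above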
852 def _dirignore(self, f):
852 def _dirignore(self, f):
853 if f == '.':
853 if f == '.':
854 return False
854 return False
855 if self._ignore(f):
855 if self._ignore(f):
856 return True
856 return True
857 for p in util.finddirs(f):
857 for p in util.finddirs(f):
858 if self._ignore(p):
858 if self._ignore(p):
859 return True
859 return True
860 return False
860 return False
861
861
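util.finddirs, used above, yields each ancestor directory of a path; a rough standalone equivalent for '/'-separated paths:

def finddirs_sketch(path):
    # for 'a/b/c' this yields 'a/b', then 'a'
    pos = path.rfind('/')
    while pos != -1:
        yield path[:pos]
        pos = path.rfind('/', 0, pos)

# list(finddirs_sketch('a/b/c')) -> ['a/b', 'a']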
862 def _ignorefiles(self):
862 def _ignorefiles(self):
863 files = []
863 files = []
864 if os.path.exists(self._join('.hgignore')):
864 if os.path.exists(self._join('.hgignore')):
865 files.append(self._join('.hgignore'))
865 files.append(self._join('.hgignore'))
866 for name, path in self._ui.configitems("ui"):
866 for name, path in self._ui.configitems("ui"):
867 if name == 'ignore' or name.startswith('ignore.'):
867 if name == 'ignore' or name.startswith('ignore.'):
868 # we need to use os.path.join here rather than self._join
868 # we need to use os.path.join here rather than self._join
869 # because path is arbitrary and user-specified
869 # because path is arbitrary and user-specified
870 files.append(os.path.join(self._rootdir, util.expandpath(path)))
870 files.append(os.path.join(self._rootdir, util.expandpath(path)))
871 return files
871 return files
872
872
873 def _ignorefileandline(self, f):
873 def _ignorefileandline(self, f):
874 files = collections.deque(self._ignorefiles())
874 files = collections.deque(self._ignorefiles())
875 visited = set()
875 visited = set()
876 while files:
876 while files:
877 i = files.popleft()
877 i = files.popleft()
878 patterns = matchmod.readpatternfile(i, self._ui.warn,
878 patterns = matchmod.readpatternfile(i, self._ui.warn,
879 sourceinfo=True)
879 sourceinfo=True)
880 for pattern, lineno, line in patterns:
880 for pattern, lineno, line in patterns:
881 kind, p = matchmod._patsplit(pattern, 'glob')
881 kind, p = matchmod._patsplit(pattern, 'glob')
882 if kind == "subinclude":
882 if kind == "subinclude":
883 if p not in visited:
883 if p not in visited:
884 files.append(p)
884 files.append(p)
885 continue
885 continue
886 m = matchmod.match(self._root, '', [], [pattern],
886 m = matchmod.match(self._root, '', [], [pattern],
887 warn=self._ui.warn)
887 warn=self._ui.warn)
888 if m(f):
888 if m(f):
889 return (i, lineno, line)
889 return (i, lineno, line)
890 visited.add(i)
890 visited.add(i)
891 return (None, -1, "")
891 return (None, -1, "")
892
892
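Structurally this is a breadth-first scan over ignore files in which 'subinclude' patterns enqueue further files and a visited set guards against re-processing. A simplified sketch, with hypothetical readpatterns(path) and matches(pattern, f) callbacks standing in for the matchmod helpers:

import collections

def findignoresource(f, rootfiles, readpatterns, matches):
    files = collections.deque(rootfiles)
    visited = set()
    while files:
        i = files.popleft()
        # readpatterns(i) -> iterable of (kind, pattern, lineno, line)
        for kind, pattern, lineno, line in readpatterns(i):
            if kind == 'subinclude':
                if pattern not in visited:
                    files.append(pattern)
                continue
            if matches(pattern, f):
                return (i, lineno, line)
        visited.add(i)
    return (None, -1, "")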
893 def _walkexplicit(self, match, subrepos):
893 def _walkexplicit(self, match, subrepos):
894 '''Get stat data about the files explicitly specified by match.
894 '''Get stat data about the files explicitly specified by match.
895
895
896 Return a triple (results, dirsfound, dirsnotfound).
896 Return a triple (results, dirsfound, dirsnotfound).
897 - results is a mapping from filename to stat result. It also contains
897 - results is a mapping from filename to stat result. It also contains
898 listings mapping subrepos and .hg to None.
898 listings mapping subrepos and .hg to None.
899 - dirsfound is a list of files found to be directories.
899 - dirsfound is a list of files found to be directories.
900 - dirsnotfound is a list of files that the dirstate thinks are
900 - dirsnotfound is a list of files that the dirstate thinks are
901 directories and that were not found.'''
901 directories and that were not found.'''
902
902
903 def badtype(mode):
903 def badtype(mode):
904 kind = _('unknown')
904 kind = _('unknown')
905 if stat.S_ISCHR(mode):
905 if stat.S_ISCHR(mode):
906 kind = _('character device')
906 kind = _('character device')
907 elif stat.S_ISBLK(mode):
907 elif stat.S_ISBLK(mode):
908 kind = _('block device')
908 kind = _('block device')
909 elif stat.S_ISFIFO(mode):
909 elif stat.S_ISFIFO(mode):
910 kind = _('fifo')
910 kind = _('fifo')
911 elif stat.S_ISSOCK(mode):
911 elif stat.S_ISSOCK(mode):
912 kind = _('socket')
912 kind = _('socket')
913 elif stat.S_ISDIR(mode):
913 elif stat.S_ISDIR(mode):
914 kind = _('directory')
914 kind = _('directory')
915 return _('unsupported file type (type is %s)') % kind
915 return _('unsupported file type (type is %s)') % kind
916
916
917 matchedir = match.explicitdir
917 matchedir = match.explicitdir
918 badfn = match.bad
918 badfn = match.bad
919 dmap = self._map
919 dmap = self._map
920 lstat = os.lstat
920 lstat = os.lstat
921 getkind = stat.S_IFMT
921 getkind = stat.S_IFMT
922 dirkind = stat.S_IFDIR
922 dirkind = stat.S_IFDIR
923 regkind = stat.S_IFREG
923 regkind = stat.S_IFREG
924 lnkkind = stat.S_IFLNK
924 lnkkind = stat.S_IFLNK
925 join = self._join
925 join = self._join
926 dirsfound = []
926 dirsfound = []
927 foundadd = dirsfound.append
927 foundadd = dirsfound.append
928 dirsnotfound = []
928 dirsnotfound = []
929 notfoundadd = dirsnotfound.append
929 notfoundadd = dirsnotfound.append
930
930
931 if not match.isexact() and self._checkcase:
931 if not match.isexact() and self._checkcase:
932 normalize = self._normalize
932 normalize = self._normalize
933 else:
933 else:
934 normalize = None
934 normalize = None
935
935
936 files = sorted(match.files())
936 files = sorted(match.files())
937 subrepos.sort()
937 subrepos.sort()
938 i, j = 0, 0
938 i, j = 0, 0
939 while i < len(files) and j < len(subrepos):
939 while i < len(files) and j < len(subrepos):
940 subpath = subrepos[j] + "/"
940 subpath = subrepos[j] + "/"
941 if files[i] < subpath:
941 if files[i] < subpath:
942 i += 1
942 i += 1
943 continue
943 continue
944 while i < len(files) and files[i].startswith(subpath):
944 while i < len(files) and files[i].startswith(subpath):
945 del files[i]
945 del files[i]
946 j += 1
946 j += 1
947
947
948 if not files or '.' in files:
948 if not files or '.' in files:
949 files = ['.']
949 files = ['.']
950 results = dict.fromkeys(subrepos)
950 results = dict.fromkeys(subrepos)
951 results['.hg'] = None
951 results['.hg'] = None
952
952
953 alldirs = None
953 alldirs = None
954 for ff in files:
954 for ff in files:
955 # constructing the foldmap is expensive, so don't do it for the
955 # constructing the foldmap is expensive, so don't do it for the
956 # common case where files is ['.']
956 # common case where files is ['.']
957 if normalize and ff != '.':
957 if normalize and ff != '.':
958 nf = normalize(ff, False, True)
958 nf = normalize(ff, False, True)
959 else:
959 else:
960 nf = ff
960 nf = ff
961 if nf in results:
961 if nf in results:
962 continue
962 continue
963
963
964 try:
964 try:
965 st = lstat(join(nf))
965 st = lstat(join(nf))
966 kind = getkind(st.st_mode)
966 kind = getkind(st.st_mode)
967 if kind == dirkind:
967 if kind == dirkind:
968 if nf in dmap:
968 if nf in dmap:
969 # file replaced by dir on disk but still in dirstate
969 # file replaced by dir on disk but still in dirstate
970 results[nf] = None
970 results[nf] = None
971 if matchedir:
971 if matchedir:
972 matchedir(nf)
972 matchedir(nf)
973 foundadd((nf, ff))
973 foundadd((nf, ff))
974 elif kind == regkind or kind == lnkkind:
974 elif kind == regkind or kind == lnkkind:
975 results[nf] = st
975 results[nf] = st
976 else:
976 else:
977 badfn(ff, badtype(kind))
977 badfn(ff, badtype(kind))
978 if nf in dmap:
978 if nf in dmap:
979 results[nf] = None
979 results[nf] = None
980 except OSError as inst: # nf not found on disk - it is dirstate only
980 except OSError as inst: # nf not found on disk - it is dirstate only
981 if nf in dmap: # does it exactly match a missing file?
981 if nf in dmap: # does it exactly match a missing file?
982 results[nf] = None
982 results[nf] = None
983 else: # does it match a missing directory?
983 else: # does it match a missing directory?
984 if alldirs is None:
984 if alldirs is None:
985 alldirs = util.dirs(dmap)
985 alldirs = util.dirs(dmap)
986 if nf in alldirs:
986 if nf in alldirs:
987 if matchedir:
987 if matchedir:
988 matchedir(nf)
988 matchedir(nf)
989 notfoundadd(nf)
989 notfoundadd(nf)
990 else:
990 else:
991 badfn(ff, inst.strerror)
991 badfn(ff, inst.strerror)
992
992
993 # Case insensitive filesystems cannot rely on lstat() failing to detect
993 # Case insensitive filesystems cannot rely on lstat() failing to detect
994 # a case-only rename. Prune the stat object for any file that does not
994 # a case-only rename. Prune the stat object for any file that does not
995 # match the case in the filesystem, if there are multiple files that
995 # match the case in the filesystem, if there are multiple files that
996 # normalize to the same path.
996 # normalize to the same path.
997 if match.isexact() and self._checkcase:
997 if match.isexact() and self._checkcase:
998 normed = {}
998 normed = {}
999
999
1000 for f, st in results.iteritems():
1000 for f, st in results.iteritems():
1001 if st is None:
1001 if st is None:
1002 continue
1002 continue
1003
1003
1004 nc = util.normcase(f)
1004 nc = util.normcase(f)
1005 paths = normed.get(nc)
1005 paths = normed.get(nc)
1006
1006
1007 if paths is None:
1007 if paths is None:
1008 paths = set()
1008 paths = set()
1009 normed[nc] = paths
1009 normed[nc] = paths
1010
1010
1011 paths.add(f)
1011 paths.add(f)
1012
1012
1013 for norm, paths in normed.iteritems():
1013 for norm, paths in normed.iteritems():
1014 if len(paths) > 1:
1014 if len(paths) > 1:
1015 for path in paths:
1015 for path in paths:
1016 folded = self._discoverpath(path, norm, True, None,
1016 folded = self._discoverpath(path, norm, True, None,
1017 self._dirfoldmap)
1017 self._dirfoldmap)
1018 if path != folded:
1018 if path != folded:
1019 results[path] = None
1019 results[path] = None
1020
1020
1021 return results, dirsfound, dirsnotfound
1021 return results, dirsfound, dirsnotfound
1022
1022
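One detail worth isolating is the pruning of explicit files that fall inside subrepos: both lists are sorted and scanned with two indices, one linear pass in total. A standalone sketch:

def prunesubrepofiles(files, subrepos):
    # drop every file that lives under one of the subrepo paths
    files = sorted(files)
    subrepos = sorted(subrepos)
    i, j = 0, 0
    while i < len(files) and j < len(subrepos):
        subpath = subrepos[j] + '/'
        if files[i] < subpath:
            i += 1
            continue
        while i < len(files) and files[i].startswith(subpath):
            del files[i]
        j += 1
    return files

# prunesubrepofiles(['a', 'sub/x', 'sub/y', 'z'], ['sub']) -> ['a', 'z']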
1023 def walk(self, match, subrepos, unknown, ignored, full=True):
1023 def walk(self, match, subrepos, unknown, ignored, full=True):
1024 '''
1024 '''
1025 Walk recursively through the directory tree, finding all files
1025 Walk recursively through the directory tree, finding all files
1026 matched by match.
1026 matched by match.
1027
1027
1028 If full is False, maybe skip some known-clean files.
1028 If full is False, maybe skip some known-clean files.
1029
1029
1030 Return a dict mapping filename to stat-like object (either
1030 Return a dict mapping filename to stat-like object (either
1031 mercurial.osutil.stat instance or return value of os.stat()).
1031 mercurial.osutil.stat instance or return value of os.stat()).
1032
1032
1033 '''
1033 '''
1034 # full is a flag that extensions that hook into walk can use -- this
1034 # full is a flag that extensions that hook into walk can use -- this
1035 # implementation doesn't use it at all. This satisfies the contract
1035 # implementation doesn't use it at all. This satisfies the contract
1036 # because we only guarantee a "maybe".
1036 # because we only guarantee a "maybe".
1037
1037
1038 if ignored:
1038 if ignored:
1039 ignore = util.never
1039 ignore = util.never
1040 dirignore = util.never
1040 dirignore = util.never
1041 elif unknown:
1041 elif unknown:
1042 ignore = self._ignore
1042 ignore = self._ignore
1043 dirignore = self._dirignore
1043 dirignore = self._dirignore
1044 else:
1044 else:
1045 # if not unknown and not ignored, drop dir recursion and step 2
1045 # if not unknown and not ignored, drop dir recursion and step 2
1046 ignore = util.always
1046 ignore = util.always
1047 dirignore = util.always
1047 dirignore = util.always
1048
1048
1049 matchfn = match.matchfn
1049 matchfn = match.matchfn
1050 matchalways = match.always()
1050 matchalways = match.always()
1051 matchtdir = match.traversedir
1051 matchtdir = match.traversedir
1052 dmap = self._map
1052 dmap = self._map
1053 listdir = util.listdir
1053 listdir = util.listdir
1054 lstat = os.lstat
1054 lstat = os.lstat
1055 dirkind = stat.S_IFDIR
1055 dirkind = stat.S_IFDIR
1056 regkind = stat.S_IFREG
1056 regkind = stat.S_IFREG
1057 lnkkind = stat.S_IFLNK
1057 lnkkind = stat.S_IFLNK
1058 join = self._join
1058 join = self._join
1059
1059
1060 exact = skipstep3 = False
1060 exact = skipstep3 = False
1061 if match.isexact(): # match.exact
1061 if match.isexact(): # match.exact
1062 exact = True
1062 exact = True
1063 dirignore = util.always # skip step 2
1063 dirignore = util.always # skip step 2
1064 elif match.prefix(): # match.match, no patterns
1064 elif match.prefix(): # match.match, no patterns
1065 skipstep3 = True
1065 skipstep3 = True
1066
1066
1067 if not exact and self._checkcase:
1067 if not exact and self._checkcase:
1068 normalize = self._normalize
1068 normalize = self._normalize
1069 normalizefile = self._normalizefile
1069 normalizefile = self._normalizefile
1070 skipstep3 = False
1070 skipstep3 = False
1071 else:
1071 else:
1072 normalize = self._normalize
1072 normalize = self._normalize
1073 normalizefile = None
1073 normalizefile = None
1074
1074
1075 # step 1: find all explicit files
1075 # step 1: find all explicit files
1076 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
1076 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
1077
1077
1078 skipstep3 = skipstep3 and not (work or dirsnotfound)
1078 skipstep3 = skipstep3 and not (work or dirsnotfound)
1079 work = [d for d in work if not dirignore(d[0])]
1079 work = [d for d in work if not dirignore(d[0])]
1080
1080
1081 # step 2: visit subdirectories
1081 # step 2: visit subdirectories
1082 def traverse(work, alreadynormed):
1082 def traverse(work, alreadynormed):
1083 wadd = work.append
1083 wadd = work.append
1084 while work:
1084 while work:
1085 nd = work.pop()
1085 nd = work.pop()
1086 if not match.visitdir(nd):
1086 if not match.visitdir(nd):
1087 continue
1087 continue
1088 skip = None
1088 skip = None
1089 if nd == '.':
1089 if nd == '.':
1090 nd = ''
1090 nd = ''
1091 else:
1091 else:
1092 skip = '.hg'
1092 skip = '.hg'
1093 try:
1093 try:
1094 entries = listdir(join(nd), stat=True, skip=skip)
1094 entries = listdir(join(nd), stat=True, skip=skip)
1095 except OSError as inst:
1095 except OSError as inst:
1096 if inst.errno in (errno.EACCES, errno.ENOENT):
1096 if inst.errno in (errno.EACCES, errno.ENOENT):
1097 match.bad(self.pathto(nd), inst.strerror)
1097 match.bad(self.pathto(nd), inst.strerror)
1098 continue
1098 continue
1099 raise
1099 raise
1100 for f, kind, st in entries:
1100 for f, kind, st in entries:
1101 if normalizefile:
1101 if normalizefile:
1102 # even though f might be a directory, we're only
1102 # even though f might be a directory, we're only
1103 # interested in comparing it to files currently in the
1103 # interested in comparing it to files currently in the
1104 # dmap -- therefore normalizefile is enough
1104 # dmap -- therefore normalizefile is enough
1105 nf = normalizefile(nd and (nd + "/" + f) or f, True,
1105 nf = normalizefile(nd and (nd + "/" + f) or f, True,
1106 True)
1106 True)
1107 else:
1107 else:
1108 nf = nd and (nd + "/" + f) or f
1108 nf = nd and (nd + "/" + f) or f
1109 if nf not in results:
1109 if nf not in results:
1110 if kind == dirkind:
1110 if kind == dirkind:
1111 if not ignore(nf):
1111 if not ignore(nf):
1112 if matchtdir:
1112 if matchtdir:
1113 matchtdir(nf)
1113 matchtdir(nf)
1114 wadd(nf)
1114 wadd(nf)
1115 if nf in dmap and (matchalways or matchfn(nf)):
1115 if nf in dmap and (matchalways or matchfn(nf)):
1116 results[nf] = None
1116 results[nf] = None
1117 elif kind == regkind or kind == lnkkind:
1117 elif kind == regkind or kind == lnkkind:
1118 if nf in dmap:
1118 if nf in dmap:
1119 if matchalways or matchfn(nf):
1119 if matchalways or matchfn(nf):
1120 results[nf] = st
1120 results[nf] = st
1121 elif ((matchalways or matchfn(nf))
1121 elif ((matchalways or matchfn(nf))
1122 and not ignore(nf)):
1122 and not ignore(nf)):
1123 # unknown file -- normalize if necessary
1123 # unknown file -- normalize if necessary
1124 if not alreadynormed:
1124 if not alreadynormed:
1125 nf = normalize(nf, False, True)
1125 nf = normalize(nf, False, True)
1126 results[nf] = st
1126 results[nf] = st
1127 elif nf in dmap and (matchalways or matchfn(nf)):
1127 elif nf in dmap and (matchalways or matchfn(nf)):
1128 results[nf] = None
1128 results[nf] = None
1129
1129
1130 for nd, d in work:
1130 for nd, d in work:
1131 # alreadynormed means that traverse() doesn't have to do any
1131 # alreadynormed means that traverse() doesn't have to do any
1132 # expensive directory normalization
1132 # expensive directory normalization
1133 alreadynormed = not normalize or nd == d
1133 alreadynormed = not normalize or nd == d
1134 traverse([d], alreadynormed)
1134 traverse([d], alreadynormed)
1135
1135
1136 for s in subrepos:
1136 for s in subrepos:
1137 del results[s]
1137 del results[s]
1138 del results['.hg']
1138 del results['.hg']
1139
1139
1140 # step 3: visit remaining files from dmap
1140 # step 3: visit remaining files from dmap
1141 if not skipstep3 and not exact:
1141 if not skipstep3 and not exact:
1142 # If a dmap file is not in results yet, it was either
1142 # If a dmap file is not in results yet, it was either
1143 # a) not matching matchfn, b) ignored, c) missing, or d) under a
1143 # a) not matching matchfn, b) ignored, c) missing, or d) under a
1144 # symlink directory.
1144 # symlink directory.
1145 if not results and matchalways:
1145 if not results and matchalways:
1146 visit = [f for f in dmap]
1146 visit = [f for f in dmap]
1147 else:
1147 else:
1148 visit = [f for f in dmap if f not in results and matchfn(f)]
1148 visit = [f for f in dmap if f not in results and matchfn(f)]
1149 visit.sort()
1149 visit.sort()
1150
1150
1151 if unknown:
1151 if unknown:
1152 # unknown == True means we walked all dirs under the roots
1152 # unknown == True means we walked all dirs under the roots
1153 # that weren't ignored, and everything that matched was stat'ed
1153 # that weren't ignored, and everything that matched was stat'ed
1154 # and is already in results.
1154 # and is already in results.
1155 # The rest must thus be ignored or under a symlink.
1155 # The rest must thus be ignored or under a symlink.
1156 audit_path = pathutil.pathauditor(self._root)
1156 audit_path = pathutil.pathauditor(self._root)
1157
1157
1158 for nf in iter(visit):
1158 for nf in iter(visit):
1159 # If a stat for the same file was already added with a
1159 # If a stat for the same file was already added with a
1160 # different case, don't add one for this, since that would
1160 # different case, don't add one for this, since that would
1161 # make it appear as if the file exists under both names
1161 # make it appear as if the file exists under both names
1162 # on disk.
1162 # on disk.
1163 if (normalizefile and
1163 if (normalizefile and
1164 normalizefile(nf, True, True) in results):
1164 normalizefile(nf, True, True) in results):
1165 results[nf] = None
1165 results[nf] = None
1166 # Report ignored items in the dmap as long as they are not
1166 # Report ignored items in the dmap as long as they are not
1167 # under a symlink directory.
1167 # under a symlink directory.
1168 elif audit_path.check(nf):
1168 elif audit_path.check(nf):
1169 try:
1169 try:
1170 results[nf] = lstat(join(nf))
1170 results[nf] = lstat(join(nf))
1171 # file was just ignored, no links, and exists
1171 # file was just ignored, no links, and exists
1172 except OSError:
1172 except OSError:
1173 # file doesn't exist
1173 # file doesn't exist
1174 results[nf] = None
1174 results[nf] = None
1175 else:
1175 else:
1176 # It's either missing or under a symlink directory
1176 # It's either missing or under a symlink directory
1177 # which we report as missing in this case
1177 # which we report as missing in this case
1178 results[nf] = None
1178 results[nf] = None
1179 else:
1179 else:
1180 # We may not have walked the full directory tree above,
1180 # We may not have walked the full directory tree above,
1181 # so stat and check everything we missed.
1181 # so stat and check everything we missed.
1182 iv = iter(visit)
1182 iv = iter(visit)
1183 for st in util.statfiles([join(i) for i in visit]):
1183 for st in util.statfiles([join(i) for i in visit]):
1184 results[next(iv)] = st
1184 results[next(iv)] = st
1185 return results
1185 return results
1186
1186
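The final branch pairs a batched stat with the sorted visit list by advancing an iterator in lockstep; roughly, with a generator standing in for util.statfiles:

import os

def statfiles_sketch(paths):
    # stand-in for util.statfiles: one stat result (or None) per path
    for p in paths:
        try:
            yield os.lstat(p)
        except OSError:
            yield None

def finishwalk(join, visit, results):
    # 'join' maps a repo-relative name to an absolute path
    iv = iter(visit)
    for st in statfiles_sketch([join(f) for f in visit]):
        results[next(iv)] = st
    return results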
1187 def status(self, match, subrepos, ignored, clean, unknown):
1187 def status(self, match, subrepos, ignored, clean, unknown):
1188 '''Determine the status of the working copy relative to the
1188 '''Determine the status of the working copy relative to the
1189 dirstate and return a pair of (unsure, status), where status is of type
1189 dirstate and return a pair of (unsure, status), where status is of type
1190 scmutil.status and:
1190 scmutil.status and:
1191
1191
1192 unsure:
1192 unsure:
1193 files that might have been modified since the dirstate was
1193 files that might have been modified since the dirstate was
1194 written, but need to be read to be sure (size is the same
1194 written, but need to be read to be sure (size is the same
1195 but mtime differs)
1195 but mtime differs)
1196 status.modified:
1196 status.modified:
1197 files that have definitely been modified since the dirstate
1197 files that have definitely been modified since the dirstate
1198 was written (different size or mode)
1198 was written (different size or mode)
1199 status.clean:
1199 status.clean:
1200 files that have definitely not been modified since the
1200 files that have definitely not been modified since the
1201 dirstate was written
1201 dirstate was written
1202 '''
1202 '''
1203 listignored, listclean, listunknown = ignored, clean, unknown
1203 listignored, listclean, listunknown = ignored, clean, unknown
1204 lookup, modified, added, unknown, ignored = [], [], [], [], []
1204 lookup, modified, added, unknown, ignored = [], [], [], [], []
1205 removed, deleted, clean = [], [], []
1205 removed, deleted, clean = [], [], []
1206
1206
1207 dmap = self._map
1207 dmap = self._map
1208 ladd = lookup.append # aka "unsure"
1208 ladd = lookup.append # aka "unsure"
1209 madd = modified.append
1209 madd = modified.append
1210 aadd = added.append
1210 aadd = added.append
1211 uadd = unknown.append
1211 uadd = unknown.append
1212 iadd = ignored.append
1212 iadd = ignored.append
1213 radd = removed.append
1213 radd = removed.append
1214 dadd = deleted.append
1214 dadd = deleted.append
1215 cadd = clean.append
1215 cadd = clean.append
1216 mexact = match.exact
1216 mexact = match.exact
1217 dirignore = self._dirignore
1217 dirignore = self._dirignore
1218 checkexec = self._checkexec
1218 checkexec = self._checkexec
1219 copymap = self._copymap
1219 copymap = self._copymap
1220 lastnormaltime = self._lastnormaltime
1220 lastnormaltime = self._lastnormaltime
1221
1221
1222 # We need to do full walks when either
1222 # We need to do full walks when either
1223 # - we're listing all clean files, or
1223 # - we're listing all clean files, or
1224 # - match.traversedir does something, because match.traversedir should
1224 # - match.traversedir does something, because match.traversedir should
1225 # be called for every dir in the working dir
1225 # be called for every dir in the working dir
1226 full = listclean or match.traversedir is not None
1226 full = listclean or match.traversedir is not None
1227 for fn, st in self.walk(match, subrepos, listunknown, listignored,
1227 for fn, st in self.walk(match, subrepos, listunknown, listignored,
1228 full=full).iteritems():
1228 full=full).iteritems():
1229 if fn not in dmap:
1229 if fn not in dmap:
1230 if (listignored or mexact(fn)) and dirignore(fn):
1230 if (listignored or mexact(fn)) and dirignore(fn):
1231 if listignored:
1231 if listignored:
1232 iadd(fn)
1232 iadd(fn)
1233 else:
1233 else:
1234 uadd(fn)
1234 uadd(fn)
1235 continue
1235 continue
1236
1236
1237 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
1237 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
1238 # written like that for performance reasons. dmap[fn] is not a
1238 # written like that for performance reasons. dmap[fn] is not a
1239 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
1239 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
1240 # opcode has fast paths when the value to be unpacked is a tuple or
1240 # opcode has fast paths when the value to be unpacked is a tuple or
1241 # a list, but falls back to creating a full-fledged iterator in
1241 # a list, but falls back to creating a full-fledged iterator in
1242 # general. That is much slower than simply accessing and storing the
1242 # general. That is much slower than simply accessing and storing the
1243 # tuple members one by one.
1243 # tuple members one by one.
1244 t = dmap[fn]
1244 t = dmap[fn]
1245 state = t[0]
1245 state = t[0]
1246 mode = t[1]
1246 mode = t[1]
1247 size = t[2]
1247 size = t[2]
1248 time = t[3]
1248 time = t[3]
1249
1249
1250 if not st and state in "nma":
1250 if not st and state in "nma":
1251 dadd(fn)
1251 dadd(fn)
1252 elif state == 'n':
1252 elif state == 'n':
1253 if (size >= 0 and
1253 if (size >= 0 and
1254 ((size != st.st_size and size != st.st_size & _rangemask)
1254 ((size != st.st_size and size != st.st_size & _rangemask)
1255 or ((mode ^ st.st_mode) & 0o100 and checkexec))
1255 or ((mode ^ st.st_mode) & 0o100 and checkexec))
1256 or size == -2 # other parent
1256 or size == -2 # other parent
1257 or fn in copymap):
1257 or fn in copymap):
1258 madd(fn)
1258 madd(fn)
1259 elif time != st.st_mtime and time != st.st_mtime & _rangemask:
1259 elif time != st.st_mtime and time != st.st_mtime & _rangemask:
1260 ladd(fn)
1260 ladd(fn)
1261 elif st.st_mtime == lastnormaltime:
1261 elif st.st_mtime == lastnormaltime:
1262 # fn may have just been marked as normal and it may have
1262 # fn may have just been marked as normal and it may have
1263 # changed in the same second without changing its size.
1263 # changed in the same second without changing its size.
1264 # This can happen if we quickly do multiple commits.
1264 # This can happen if we quickly do multiple commits.
1265 # Force lookup, so we don't miss such a racy file change.
1265 # Force lookup, so we don't miss such a racy file change.
1266 ladd(fn)
1266 ladd(fn)
1267 elif listclean:
1267 elif listclean:
1268 cadd(fn)
1268 cadd(fn)
1269 elif state == 'm':
1269 elif state == 'm':
1270 madd(fn)
1270 madd(fn)
1271 elif state == 'a':
1271 elif state == 'a':
1272 aadd(fn)
1272 aadd(fn)
1273 elif state == 'r':
1273 elif state == 'r':
1274 radd(fn)
1274 radd(fn)
1275
1275
1276 return (lookup, scmutil.status(modified, added, removed, deleted,
1276 return (lookup, scmutil.status(modified, added, removed, deleted,
1277 unknown, ignored, clean))
1277 unknown, ignored, clean))
1278
1278
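The decision for state 'n' reads as a pure function of the dirstate entry and the fresh stat result; a hedged sketch, with copymap membership reduced to a boolean and assuming the module's usual _rangemask value:

_rangemask = 0x7fffffff

def classifynormal(mode, size, time, st, checkexec, copied, lastnormaltime):
    # returns 'deleted', 'modified', 'lookup' (unsure) or 'clean'
    if st is None:
        return 'deleted'
    if (size >= 0 and
        ((size != st.st_size and size != st.st_size & _rangemask)
         or ((mode ^ st.st_mode) & 0o100 and checkexec))
        or size == -2          # entry from the other merge parent
        or copied):
        return 'modified'
    if time != st.st_mtime and time != st.st_mtime & _rangemask:
        return 'lookup'        # same size, different mtime: read to be sure
    if st.st_mtime == lastnormaltime:
        return 'lookup'        # racy: marked normal within the same second
    return 'clean'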
1279 def matches(self, match):
1279 def matches(self, match):
1280 '''
1280 '''
1281 return files in the dirstate (in whatever state) filtered by match
1281 return files in the dirstate (in whatever state) filtered by match
1282 '''
1282 '''
1283 dmap = self._map
1283 dmap = self._map
1284 if match.always():
1284 if match.always():
1285 return dmap.keys()
1285 return dmap.keys()
1286 files = match.files()
1286 files = match.files()
1287 if match.isexact():
1287 if match.isexact():
1288 # fast path -- filter the other way around, since typically files is
1288 # fast path -- filter the other way around, since typically files is
1289 # much smaller than dmap
1289 # much smaller than dmap
1290 return [f for f in files if f in dmap]
1290 return [f for f in files if f in dmap]
1291 if match.prefix() and all(fn in dmap for fn in files):
1291 if match.prefix() and all(fn in dmap for fn in files):
1292 # fast path -- all the values are known to be files, so just return
1292 # fast path -- all the values are known to be files, so just return
1293 # that
1293 # that
1294 return list(files)
1294 return list(files)
1295 return [f for f in dmap if match(f)]
1295 return [f for f in dmap if match(f)]
1296
1296
1297 def _actualfilename(self, tr):
1297 def _actualfilename(self, tr):
1298 if tr:
1298 if tr:
1299 return self._pendingfilename
1299 return self._pendingfilename
1300 else:
1300 else:
1301 return self._filename
1301 return self._filename
1302
1302
1303 def savebackup(self, tr, suffix='', prefix=''):
1303 def savebackup(self, tr, backupname):
1304 '''Save current dirstate into backup file with suffix'''
1304 '''Save current dirstate into backup file'''
1305 assert len(suffix) > 0 or len(prefix) > 0
1306 filename = self._actualfilename(tr)
1305 filename = self._actualfilename(tr)
1306 assert backupname != filename
1307
1307
1308 # use '_writedirstate' instead of 'write' to write changes certainly,
1308 # use '_writedirstate' instead of 'write' to write changes certainly,
1309 # because the latter omits writing out if transaction is running.
1309 # because the latter omits writing out if transaction is running.
1310 # output file will be used to create backup of dirstate at this point.
1310 # output file will be used to create backup of dirstate at this point.
1311 if self._dirty or not self._opener.exists(filename):
1311 if self._dirty or not self._opener.exists(filename):
1312 self._writedirstate(self._opener(filename, "w", atomictemp=True,
1312 self._writedirstate(self._opener(filename, "w", atomictemp=True,
1313 checkambig=True))
1313 checkambig=True))
1314
1314
1315 if tr:
1315 if tr:
1316 # ensure that subsequent tr.writepending returns True for
1316 # ensure that subsequent tr.writepending returns True for
1317 # changes written out above, even if dirstate is never
1317 # changes written out above, even if dirstate is never
1318 # changed after this
1318 # changed after this
1319 tr.addfilegenerator('dirstate', (self._filename,),
1319 tr.addfilegenerator('dirstate', (self._filename,),
1320 self._writedirstate, location='plain')
1320 self._writedirstate, location='plain')
1321
1321
1322 # ensure that pending file written above is unlinked at
1322 # ensure that pending file written above is unlinked at
1323 # failure, even if tr.writepending isn't invoked until the
1323 # failure, even if tr.writepending isn't invoked until the
1324 # end of this transaction
1324 # end of this transaction
1325 tr.registertmp(filename, location='plain')
1325 tr.registertmp(filename, location='plain')
1326
1326
1327 backupname = prefix + self._filename + suffix
1328 assert backupname != filename
1329 self._opener.tryunlink(backupname)
1327 self._opener.tryunlink(backupname)
1330 # hardlink backup is okay because _writedirstate is always called
1328 # hardlink backup is okay because _writedirstate is always called
1331 # with an "atomictemp=True" file.
1329 # with an "atomictemp=True" file.
1332 util.copyfile(self._opener.join(filename),
1330 util.copyfile(self._opener.join(filename),
1333 self._opener.join(backupname), hardlink=True)
1331 self._opener.join(backupname), hardlink=True)
1334
1332
1335 def restorebackup(self, tr, suffix='', prefix=''):
1333 def restorebackup(self, tr, backupname):
1336 '''Restore dirstate by backup file with suffix'''
1334 '''Restore dirstate by backup file'''
1337 assert len(suffix) > 0 or len(prefix) > 0
1338 # this "invalidate()" prevents "wlock.release()" from writing
1335 # this "invalidate()" prevents "wlock.release()" from writing
1339 # changes of dirstate out after restoring from backup file
1336 # changes of dirstate out after restoring from backup file
1340 self.invalidate()
1337 self.invalidate()
1341 filename = self._actualfilename(tr)
1338 filename = self._actualfilename(tr)
1342 # using self._filename to avoid having "pending" in the backup filename
1339 self._opener.rename(backupname, filename, checkambig=True)
1343 self._opener.rename(prefix + self._filename + suffix, filename,
1344 checkambig=True)
1345
1340
1346 def clearbackup(self, tr, suffix='', prefix=''):
1341 def clearbackup(self, tr, backupname):
1347 '''Clear backup file with suffix'''
1342 '''Clear backup file'''
1348 assert len(suffix) > 0 or len(prefix) > 0
1343 self._opener.unlink(backupname)
1349 # using self._filename to avoid having "pending" in the backup filename
1350 self._opener.unlink(prefix + self._filename + suffix)
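After this change callers hand over a complete backup filename instead of a prefix/suffix pair. A hedged usage sketch of the new API, mirroring how dirstateguard below drives it (the helper and its naming scheme are hypothetical):

def withdirstatebackup(repo, name, operation):
    backupname = 'dirstate.backup.%s.%d' % (name, id(operation))
    repo.dirstate.savebackup(repo.currenttransaction(), backupname)
    try:
        result = operation()
    except Exception:
        # failure: put the saved dirstate back
        repo.dirstate.restorebackup(repo.currenttransaction(), backupname)
        raise
    # success: the backup is no longer needed
    repo.dirstate.clearbackup(repo.currenttransaction(), backupname)
    return result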
@@ -1,69 +1,68 b''
1 # dirstateguard.py - class to allow restoring dirstate after failure
1 # dirstateguard.py - class to allow restoring dirstate after failure
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 from .i18n import _
10 from .i18n import _
11
11
12 from . import (
12 from . import (
13 error,
13 error,
14 )
14 )
15
15
16 class dirstateguard(object):
16 class dirstateguard(object):
17 '''Restore dirstate at unexpected failure.
17 '''Restore dirstate at unexpected failure.
18
18
19 At construction time, this class does:
19 At construction time, this class does:
20
20
21 - write current ``repo.dirstate`` out, and
21 - write current ``repo.dirstate`` out, and
22 - save ``.hg/dirstate`` into the backup file
22 - save ``.hg/dirstate`` into the backup file
23
23
24 This restores ``.hg/dirstate`` from the backup file if ``release()``
24 This restores ``.hg/dirstate`` from the backup file if ``release()``
25 is invoked before ``close()``.
25 is invoked before ``close()``.
26
26
27 If ``close()`` is invoked first, this just removes the backup file.
27 If ``close()`` is invoked first, this just removes the backup file.
28 '''
28 '''
29
29
30 def __init__(self, repo, name):
30 def __init__(self, repo, name):
31 self._repo = repo
31 self._repo = repo
32 self._active = False
32 self._active = False
33 self._closed = False
33 self._closed = False
34 self._suffix = '.backup.%s.%d' % (name, id(self))
34 self._backupname = 'dirstate.backup.%s.%d' % (name, id(self))
35 repo.dirstate.savebackup(repo.currenttransaction(), self._suffix)
35 repo.dirstate.savebackup(repo.currenttransaction(), self._backupname)
36 self._active = True
36 self._active = True
37
37
38 def __del__(self):
38 def __del__(self):
39 if self._active: # still active
39 if self._active: # still active
40 # this may occur, even if this class is used correctly:
40 # this may occur, even if this class is used correctly:
41 # for example, releasing other resources like transaction
41 # for example, releasing other resources like transaction
42 # may raise exception before ``dirstateguard.release`` in
42 # may raise exception before ``dirstateguard.release`` in
43 # ``release(tr, ....)``.
43 # ``release(tr, ....)``.
44 self._abort()
44 self._abort()
45
45
46 def close(self):
46 def close(self):
47 if not self._active: # already inactivated
47 if not self._active: # already inactivated
48 msg = (_("can't close already inactivated backup: dirstate%s")
48 msg = (_("can't close already inactivated backup: %s")
49 % self._suffix)
49 % self._backupname)
50 raise error.Abort(msg)
50 raise error.Abort(msg)
51
51
52 self._repo.dirstate.clearbackup(self._repo.currenttransaction(),
52 self._repo.dirstate.clearbackup(self._repo.currenttransaction(),
53 self._suffix)
53 self._backupname)
54 self._active = False
54 self._active = False
55 self._closed = True
55 self._closed = True
56
56
57 def _abort(self):
57 def _abort(self):
58 self._repo.dirstate.restorebackup(self._repo.currenttransaction(),
58 self._repo.dirstate.restorebackup(self._repo.currenttransaction(),
59 self._suffix)
59 self._backupname)
60 self._active = False
60 self._active = False
61
61
62 def release(self):
62 def release(self):
63 if not self._closed:
63 if not self._closed:
64 if not self._active: # already inactivated
64 if not self._active: # already inactivated
65 msg = (_("can't release already inactivated backup:"
65 msg = (_("can't release already inactivated backup: %s")
66 " dirstate%s")
66 % self._backupname)
67 % self._suffix)
68 raise error.Abort(msg)
67 raise error.Abort(msg)
69 self._abort()
68 self._abort()
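A hedged sketch of the guard's intended lifecycle: close() on success (the backup is deleted), release() always (restores the dirstate if close() was never reached):

def guarded(repo, makechanges):
    guard = dirstateguard(repo, 'sketch')
    try:
        makechanges()
        guard.close()      # success path: just drop the backup file
    finally:
        guard.release()    # no-op after close(); restores otherwise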
@@ -1,2246 +1,2246 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import hashlib
11 import hashlib
12 import inspect
12 import inspect
13 import os
13 import os
14 import random
14 import random
15 import time
15 import time
16 import weakref
16 import weakref
17
17
18 from .i18n import _
18 from .i18n import _
19 from .node import (
19 from .node import (
20 hex,
20 hex,
21 nullid,
21 nullid,
22 short,
22 short,
23 )
23 )
24 from . import (
24 from . import (
25 bookmarks,
25 bookmarks,
26 branchmap,
26 branchmap,
27 bundle2,
27 bundle2,
28 changegroup,
28 changegroup,
29 changelog,
29 changelog,
30 color,
30 color,
31 context,
31 context,
32 dirstate,
32 dirstate,
33 dirstateguard,
33 dirstateguard,
34 encoding,
34 encoding,
35 error,
35 error,
36 exchange,
36 exchange,
37 extensions,
37 extensions,
38 filelog,
38 filelog,
39 hook,
39 hook,
40 lock as lockmod,
40 lock as lockmod,
41 manifest,
41 manifest,
42 match as matchmod,
42 match as matchmod,
43 merge as mergemod,
43 merge as mergemod,
44 mergeutil,
44 mergeutil,
45 namespaces,
45 namespaces,
46 obsolete,
46 obsolete,
47 pathutil,
47 pathutil,
48 peer,
48 peer,
49 phases,
49 phases,
50 pushkey,
50 pushkey,
51 pycompat,
51 pycompat,
52 repoview,
52 repoview,
53 revset,
53 revset,
54 revsetlang,
54 revsetlang,
55 scmutil,
55 scmutil,
56 sparse,
56 sparse,
57 store,
57 store,
58 subrepo,
58 subrepo,
59 tags as tagsmod,
59 tags as tagsmod,
60 transaction,
60 transaction,
61 txnutil,
61 txnutil,
62 util,
62 util,
63 vfs as vfsmod,
63 vfs as vfsmod,
64 )
64 )
65
65
66 release = lockmod.release
66 release = lockmod.release
67 urlerr = util.urlerr
67 urlerr = util.urlerr
68 urlreq = util.urlreq
68 urlreq = util.urlreq
69
69
70 # set of (path, vfs-location) tuples. vfs-location is:
70 # set of (path, vfs-location) tuples. vfs-location is:
71 # - 'plain' for vfs relative paths
71 # - 'plain' for vfs relative paths
72 # - '' for svfs relative paths
72 # - '' for svfs relative paths
73 _cachedfiles = set()
73 _cachedfiles = set()
74
74
75 class _basefilecache(scmutil.filecache):
75 class _basefilecache(scmutil.filecache):
76 """All filecache usage on repo are done for logic that should be unfiltered
76 """All filecache usage on repo are done for logic that should be unfiltered
77 """
77 """
78 def __get__(self, repo, type=None):
78 def __get__(self, repo, type=None):
79 if repo is None:
79 if repo is None:
80 return self
80 return self
81 return super(_basefilecache, self).__get__(repo.unfiltered(), type)
81 return super(_basefilecache, self).__get__(repo.unfiltered(), type)
82 def __set__(self, repo, value):
82 def __set__(self, repo, value):
83 return super(_basefilecache, self).__set__(repo.unfiltered(), value)
83 return super(_basefilecache, self).__set__(repo.unfiltered(), value)
84 def __delete__(self, repo):
84 def __delete__(self, repo):
85 return super(_basefilecache, self).__delete__(repo.unfiltered())
85 return super(_basefilecache, self).__delete__(repo.unfiltered())
86
86
87 class repofilecache(_basefilecache):
87 class repofilecache(_basefilecache):
88 """filecache for files in .hg but outside of .hg/store"""
88 """filecache for files in .hg but outside of .hg/store"""
89 def __init__(self, *paths):
89 def __init__(self, *paths):
90 super(repofilecache, self).__init__(*paths)
90 super(repofilecache, self).__init__(*paths)
91 for path in paths:
91 for path in paths:
92 _cachedfiles.add((path, 'plain'))
92 _cachedfiles.add((path, 'plain'))
93
93
94 def join(self, obj, fname):
94 def join(self, obj, fname):
95 return obj.vfs.join(fname)
95 return obj.vfs.join(fname)
96
96
97 class storecache(_basefilecache):
97 class storecache(_basefilecache):
98 """filecache for files in the store"""
98 """filecache for files in the store"""
99 def __init__(self, *paths):
99 def __init__(self, *paths):
100 super(storecache, self).__init__(*paths)
100 super(storecache, self).__init__(*paths)
101 for path in paths:
101 for path in paths:
102 _cachedfiles.add((path, ''))
102 _cachedfiles.add((path, ''))
103
103
104 def join(self, obj, fname):
104 def join(self, obj, fname):
105 return obj.sjoin(fname)
105 return obj.sjoin(fname)
106
106
107 def isfilecached(repo, name):
107 def isfilecached(repo, name):
108 """check if a repo has already cached "name" filecache-ed property
108 """check if a repo has already cached "name" filecache-ed property
109
109
110 This returns (cachedobj-or-None, iscached) tuple.
110 This returns (cachedobj-or-None, iscached) tuple.
111 """
111 """
112 cacheentry = repo.unfiltered()._filecache.get(name, None)
112 cacheentry = repo.unfiltered()._filecache.get(name, None)
113 if not cacheentry:
113 if not cacheentry:
114 return None, False
114 return None, False
115 return cacheentry.obj, True
115 return cacheentry.obj, True
116
116
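A small hedged usage sketch: probe the cache without forcing a load from disk (cachedorelse is a hypothetical helper name):

def cachedorelse(repo, name, default=None):
    # return the already-loaded filecache value, or 'default', without
    # triggering any filesystem access
    obj, cached = isfilecached(repo, name)
    return obj if cached else default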
117 class unfilteredpropertycache(util.propertycache):
117 class unfilteredpropertycache(util.propertycache):
118 """propertycache that apply to unfiltered repo only"""
118 """propertycache that apply to unfiltered repo only"""
119
119
120 def __get__(self, repo, type=None):
120 def __get__(self, repo, type=None):
121 unfi = repo.unfiltered()
121 unfi = repo.unfiltered()
122 if unfi is repo:
122 if unfi is repo:
123 return super(unfilteredpropertycache, self).__get__(unfi)
123 return super(unfilteredpropertycache, self).__get__(unfi)
124 return getattr(unfi, self.name)
124 return getattr(unfi, self.name)
125
125
126 class filteredpropertycache(util.propertycache):
126 class filteredpropertycache(util.propertycache):
127 """propertycache that must take filtering in account"""
127 """propertycache that must take filtering in account"""
128
128
129 def cachevalue(self, obj, value):
129 def cachevalue(self, obj, value):
130 object.__setattr__(obj, self.name, value)
130 object.__setattr__(obj, self.name, value)
131
131
132
132
133 def hasunfilteredcache(repo, name):
133 def hasunfilteredcache(repo, name):
134 """check if a repo has an unfilteredpropertycache value for <name>"""
134 """check if a repo has an unfilteredpropertycache value for <name>"""
135 return name in vars(repo.unfiltered())
135 return name in vars(repo.unfiltered())
136
136
137 def unfilteredmethod(orig):
137 def unfilteredmethod(orig):
138 """decorate method that always need to be run on unfiltered version"""
138 """decorate method that always need to be run on unfiltered version"""
139 def wrapper(repo, *args, **kwargs):
139 def wrapper(repo, *args, **kwargs):
140 return orig(repo.unfiltered(), *args, **kwargs)
140 return orig(repo.unfiltered(), *args, **kwargs)
141 return wrapper
141 return wrapper
142
142
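Usage is the ordinary decorator pattern; a hedged sketch with a toy class (the method name is hypothetical):

class examplerepo(object):
    def unfiltered(self):
        return self        # a real repo returns its unfiltered view

    @unfilteredmethod
    def _dosomething(self, arg):
        # the wrapper replaces 'self' with self.unfiltered()
        return arg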
143 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
143 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
144 'unbundle'}
144 'unbundle'}
145 legacycaps = moderncaps.union({'changegroupsubset'})
145 legacycaps = moderncaps.union({'changegroupsubset'})
146
146
147 class localpeer(peer.peerrepository):
147 class localpeer(peer.peerrepository):
148 '''peer for a local repo; reflects only the most recent API'''
148 '''peer for a local repo; reflects only the most recent API'''
149
149
150 def __init__(self, repo, caps=None):
150 def __init__(self, repo, caps=None):
151 if caps is None:
151 if caps is None:
152 caps = moderncaps.copy()
152 caps = moderncaps.copy()
153 peer.peerrepository.__init__(self)
153 peer.peerrepository.__init__(self)
154 self._repo = repo.filtered('served')
154 self._repo = repo.filtered('served')
155 self.ui = repo.ui
155 self.ui = repo.ui
156 self._caps = repo._restrictcapabilities(caps)
156 self._caps = repo._restrictcapabilities(caps)
157 self.requirements = repo.requirements
157 self.requirements = repo.requirements
158 self.supportedformats = repo.supportedformats
158 self.supportedformats = repo.supportedformats
159
159
160 def close(self):
160 def close(self):
161 self._repo.close()
161 self._repo.close()
162
162
163 def _capabilities(self):
163 def _capabilities(self):
164 return self._caps
164 return self._caps
165
165
166 def local(self):
166 def local(self):
167 return self._repo
167 return self._repo
168
168
169 def canpush(self):
169 def canpush(self):
170 return True
170 return True
171
171
172 def url(self):
172 def url(self):
173 return self._repo.url()
173 return self._repo.url()
174
174
175 def lookup(self, key):
175 def lookup(self, key):
176 return self._repo.lookup(key)
176 return self._repo.lookup(key)
177
177
178 def branchmap(self):
178 def branchmap(self):
179 return self._repo.branchmap()
179 return self._repo.branchmap()
180
180
181 def heads(self):
181 def heads(self):
182 return self._repo.heads()
182 return self._repo.heads()
183
183
184 def known(self, nodes):
184 def known(self, nodes):
185 return self._repo.known(nodes)
185 return self._repo.known(nodes)
186
186
187 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
187 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
188 **kwargs):
188 **kwargs):
189 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
189 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
190 common=common, bundlecaps=bundlecaps,
190 common=common, bundlecaps=bundlecaps,
191 **kwargs)
191 **kwargs)
192 cb = util.chunkbuffer(chunks)
192 cb = util.chunkbuffer(chunks)
193
193
194 if exchange.bundle2requested(bundlecaps):
194 if exchange.bundle2requested(bundlecaps):
195 # When requesting a bundle2, getbundle returns a stream to make the
195 # When requesting a bundle2, getbundle returns a stream to make the
196 # wire level function happier. We need to build a proper object
196 # wire level function happier. We need to build a proper object
197 # from it in local peer.
197 # from it in local peer.
198 return bundle2.getunbundler(self.ui, cb)
198 return bundle2.getunbundler(self.ui, cb)
199 else:
199 else:
200 return changegroup.getunbundler('01', cb, None)
200 return changegroup.getunbundler('01', cb, None)
201
201
    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def unbundle(self, cg, heads, url):
        """apply a bundle to the repo

        This function handles the repo locking itself."""
        try:
            try:
                cg = exchange.readbundle(self.ui, cg, None)
                ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object; turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant, but it allows a "simple" solution
                # for issue4594.
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(_('push failed:'), str(exc))

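    # Illustrative sketch (an assumption, not upstream code): a push would
    # hand unbundle() a changegroup read from the wire; 'peer', 'cg', and
    # 'remoteurl' are hypothetical names.
    #
    #   reply = peer.unbundle(cg, ['force'], remoteurl)
    #   # a bundle2 reply should then be processed so that any salvaged
    #   # server output reaches the user
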
    def lock(self):
        return self._repo.lock()

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        localpeer.__init__(self, repo, caps=legacycaps)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return changegroup.changegroup(self._repo, basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return changegroup.changegroupsubset(self._repo, bases, heads, source)

# Increment the sub-version when the revlog v2 format changes to lock out old
# clients.
REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'

class localrepository(object):

    supportedformats = {
        'revlogv1',
        'generaldelta',
        'treemanifest',
        'manifestv2',
        REVLOGV2_REQUIREMENT,
    }
    _basesupported = supportedformats | {
        'store',
        'fncache',
        'shared',
        'relshared',
        'dotencode',
    }
    openerreqs = {
        'revlogv1',
        'generaldelta',
        'treemanifest',
        'manifestv2',
    }

    # a set of (ui, featureset) functions.
    # only functions defined in modules of enabled extensions are invoked
    featuresetupfuncs = set()

    # set of prefixes of files which can be written without 'wlock'.
    # Extensions should extend this set when needed.
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assumes
        # wlock is not needed, so we keep them excluded for
        # now.
        'hgrc',
        'requires',
        # XXX cache is a complicated business; someone
        # should investigate this in depth at some point
        'cache/',
        # XXX shouldn't the dirstate be covered by the wlock?
        'dirstate',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line.
        'bisect.state',
    }

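    # Illustrative sketch (not upstream code): an extension that maintains a
    # lock-free state file of its own could extend the set from its
    # reposetup(); 'myextstate' is a hypothetical file name.
    #
    #   def reposetup(ui, repo):
    #       repo._wlockfreeprefix.add('myextstate')
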
    def __init__(self, baseui, path, create=False):
        self.requirements = set()
        self.filtername = None
        # wvfs: rooted at the repository root, used to access the working copy
        self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
        # vfs: rooted at .hg, used to access repo files outside of .hg/store
        self.vfs = None
        # svfs: usually rooted at .hg/store, used to access repository history
        # If this is a shared repository, this vfs may point to another
        # repository's .hg/store directory.
        self.svfs = None
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        # These auditors are not used by the vfs instances themselves;
        # at the time of writing, they are only used by basectx.match.
        self.auditor = pathutil.pathauditor(self.root, self._checknested)
        self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
                                                realfs=False)
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        self.vfs = vfsmod.vfs(self.path)
        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.vfs.join("hgrc"), self.root)
            self._loadextensions()
        except IOError:
            pass

        if self.featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            for setupfunc in self.featuresetupfuncs:
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported
        color.setup(self.ui)

        # Add compression engines.
        for name in util.compengines:
            engine = util.compengines[name]
            if engine.revlogheader():
                self.supported.add('exp-compression-%s' % name)

        if not self.vfs.isdir():
            if create:
                self.requirements = newreporequirements(self)

                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)

                if 'store' in self.requirements:
                    self.vfs.mkdir("store")

                # create an invalid changelog
                self.vfs.append(
                    "00changelog.i",
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                self.requirements = scmutil.readrequires(
                    self.vfs, self.supported)
            except IOError as inst:
                if inst.errno != errno.ENOENT:
                    raise

        self.sharedpath = self.path
        try:
            sharedpath = self.vfs.read("sharedpath").rstrip('\n')
            if 'relshared' in self.requirements:
                sharedpath = self.vfs.join(sharedpath)
            vfs = vfsmod.vfs(sharedpath, realpath=True)
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(
            self.requirements, self.sharedpath, vfsmod.vfs)
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else: # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)
        self._applyopenerreqs()
        if create:
            self._writerequirements()

        self._dirstatevalidatewarned = False

        self._branchcaches = {}
        self._revbranchcache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes
        # (used by the filecache decorator).
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # holds sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # Cache of types representing filtered repos.
        self._filteredrepotypes = weakref.WeakKeyDictionary()

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

    def _getvfsward(self, origfunc):
        """build a ward for self.vfs"""
        rref = weakref.ref(self)
        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if (repo is None
                or not util.safehasattr(repo, '_wlockref')
                or not util.safehasattr(repo, '_lockref')):
                return
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1:]
            if path.startswith('journal.'):
                # journal is covered by 'lock'
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn('write with no lock: "%s"' % path,
                                      stacklevel=2)
            elif repo._currentlock(repo._wlockref) is None:
                # the rest of the vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn('write with no wlock: "%s"' % path,
                                  stacklevel=2)
            return ret
        return checkvfs

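    # A note on the wards (illustrative, not upstream code): the ward wraps
    # the vfs audit hook, so once installed in __init__ under
    # devel.check-locks every write-mode access is checked, e.g.:
    #
    #   repo.vfs.audit = repo._getvfsward(repo.vfs.audit)
    #   repo.vfs.write('bookmarks', data)  # develwarn fires without wlock
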
    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs"""
        rref = weakref.ref(self)
        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, '_lockref'):
                return
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1:]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn('write with no lock: "%s"' % path,
                                  stacklevel=3)
            return ret
        return checksvfs

    def close(self):
        self._writecaches()

    def _loadextensions(self):
        extensions.loadall(self.ui)

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool('experimental', 'bundle2-advertise', True):
            caps = set(caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
            caps.add('bundle2=' + urlreq.quote(capsblob))
        return caps

    def _applyopenerreqs(self):
        self.svfs.options = dict((r, 1) for r in self.requirements
                                 if r in self.openerreqs)
        # experimental config: format.chunkcachesize
        chunkcachesize = self.ui.configint('format', 'chunkcachesize')
        if chunkcachesize is not None:
            self.svfs.options['chunkcachesize'] = chunkcachesize
        # experimental config: format.maxchainlen
        maxchainlen = self.ui.configint('format', 'maxchainlen')
        if maxchainlen is not None:
            self.svfs.options['maxchainlen'] = maxchainlen
        # experimental config: format.manifestcachesize
        manifestcachesize = self.ui.configint('format', 'manifestcachesize')
        if manifestcachesize is not None:
            self.svfs.options['manifestcachesize'] = manifestcachesize
        # experimental config: format.aggressivemergedeltas
        aggressivemergedeltas = self.ui.configbool('format',
                                                   'aggressivemergedeltas')
        self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
        self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
        chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan', -1)
        if 0 <= chainspan:
            self.svfs.options['maxdeltachainspan'] = chainspan

        for r in self.requirements:
            if r.startswith('exp-compression-'):
                self.svfs.options['compengine'] = r[len('exp-compression-'):]

        # TODO move "revlogv2" to openerreqs once finalized.
        if REVLOGV2_REQUIREMENT in self.requirements:
            self.svfs.options['revlogv2'] = True

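    # The options above map to hgrc settings; an illustrative configuration
    # (the values are made-up examples, not recommendations) would be:
    #
    #   [format]
    #   chunkcachesize = 65536
    #   maxchainlen = 1000
    #   aggressivemergedeltas = True
    #   [experimental]
    #   maxdeltachainspan = 16MB
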
    def _writerequirements(self):
        scmutil.writerequires(self.vfs, self.requirements)

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return the unfiltered version of the repository.

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name):
        """Return a filtered version of a repository"""
        # Python <3.4 easily leaks types via __mro__. See
        # https://bugs.python.org/issue17950. We cache dynamically
        # created types so this method doesn't leak on every
        # invocation.

        key = self.unfiltered().__class__
        if key not in self._filteredrepotypes:
            # Build a new type with the repoview mixin and the base
            # class of this repo. Give it a name containing the
            # filter name to aid debugging.
            bases = (repoview.repoview, key)
            cls = type(r'%sfilteredrepo' % name, bases, {})
            self._filteredrepotypes[key] = cls

        return self._filteredrepotypes[key](self, name)

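    # Illustrative usage (not upstream code): filter names are defined by
    # repoview; 'visible', which hides filtered revisions, is used further
    # down in this file (see cancopy()), e.g.:
    #
    #   visible = repo.filtered('visible')
    #   assert len(visible) <= len(repo.unfiltered())
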
    @repofilecache('bookmarks', 'bookmarks.current')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phaserevs and _phasesets depend on changelog. What we need is to
    # call _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in the filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @storecache('00changelog.i')
    def changelog(self):
        return changelog.changelog(self.svfs,
                                   trypending=txnutil.mayhavepending(self.root))

    def _constructmanifest(self):
        # This is a temporary function while we migrate from manifest to
        # manifestlog. It allows bundlerepo and unionrepo to intercept the
        # manifest creation.
        return manifest.manifestrevlog(self.svfs)

    @storecache('00manifest.i')
    def manifestlog(self):
        return manifest.manifestlog(self.svfs, self)

    @repofilecache('dirstate')
    def dirstate(self):
        sparsematchfn = lambda: sparse.matcher(self)

        return dirstate.dirstate(self.vfs, self.ui, self.root,
                                 self._dirstatevalidate, sparsematchfn)

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(_("warning: ignoring unknown"
                               " working parent %s!\n") % short(node))
            return nullid

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous, so the slice shouldn't include it
            return [context.changectx(self, i)
                    for i in xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        try:
            return context.changectx(self, changeid)
        except error.WdirUnsupported:
            return context.workingctx(self)

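    # Illustrative lookups (not upstream code), all routed through
    # __getitem__ above:
    #
    #   repo[None]    # workingctx for the working directory
    #   repo['tip']   # changectx resolved from a symbol
    #   repo[0:5]     # list of changectx, skipping filtered revisions
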
    def __contains__(self, changeid):
        """True if the given changeid exists

        error.LookupError is raised if an ambiguous node is specified.
        """
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a revset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        expr = revsetlang.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self)

    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]

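    # Illustrative sketch (not upstream code) of revs()/set(); the revset and
    # the file name are made up:
    #
    #   for ctx in repo.set('ancestors(%s) and file(%s)', 'tip', 'a.txt'):
    #       repo.ui.write('%s\n' % ctx.hex())
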
    def anyrevs(self, specs, user=False, localalias=None):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        '''
        if user:
            m = revset.matchany(self.ui, specs, repo=self,
                                localalias=localalias)
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)

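    # Illustrative sketch (not upstream code): expanding user aliases with a
    # local override; the alias name and definition are made up:
    #
    #   revs = repo.anyrevs(['mybase::'], user=True,
    #                       localalias={'mybase': 'min(branch(default))'})
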
    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

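    # Illustrative sketch (not upstream code): an extension firing a custom
    # hook; the hook name and keyword argument are made up:
    #
    #   repo.hook('myext-didsomething', throw=False, node=hex(newnode))
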
    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tag-related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

822
822
823 def _findtags(self):
823 def _findtags(self):
824 '''Do the hard work of finding tags. Return a pair of dicts
824 '''Do the hard work of finding tags. Return a pair of dicts
825 (tags, tagtypes) where tags maps tag name to node, and tagtypes
825 (tags, tagtypes) where tags maps tag name to node, and tagtypes
826 maps tag name to a string like \'global\' or \'local\'.
826 maps tag name to a string like \'global\' or \'local\'.
827 Subclasses or extensions are free to add their own tags, but
827 Subclasses or extensions are free to add their own tags, but
828 should be aware that the returned dicts will be retained for the
828 should be aware that the returned dicts will be retained for the
829 duration of the localrepo object.'''
829 duration of the localrepo object.'''
830
830
831 # XXX what tagtype should subclasses/extensions use? Currently
831 # XXX what tagtype should subclasses/extensions use? Currently
832 # mq and bookmarks add tags, but do not set the tagtype at all.
832 # mq and bookmarks add tags, but do not set the tagtype at all.
833 # Should each extension invent its own tag type? Should there
833 # Should each extension invent its own tag type? Should there
834 # be one tagtype for all such "virtual" tags? Or is the status
834 # be one tagtype for all such "virtual" tags? Or is the status
835 # quo fine?
835 # quo fine?
836
836
837
837
838 # map tag name to (node, hist)
838 # map tag name to (node, hist)
839 alltags = tagsmod.findglobaltags(self.ui, self)
839 alltags = tagsmod.findglobaltags(self.ui, self)
840 # map tag name to tag type
840 # map tag name to tag type
841 tagtypes = dict((tag, 'global') for tag in alltags)
841 tagtypes = dict((tag, 'global') for tag in alltags)
842
842
843 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
843 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
844
844
845 # Build the return dicts. Have to re-encode tag names because
845 # Build the return dicts. Have to re-encode tag names because
846 # the tags module always uses UTF-8 (in order not to lose info
846 # the tags module always uses UTF-8 (in order not to lose info
847 # writing to the cache), but the rest of Mercurial wants them in
847 # writing to the cache), but the rest of Mercurial wants them in
848 # local encoding.
848 # local encoding.
849 tags = {}
849 tags = {}
850 for (name, (node, hist)) in alltags.iteritems():
850 for (name, (node, hist)) in alltags.iteritems():
851 if node != nullid:
851 if node != nullid:
852 tags[encoding.tolocal(name)] = node
852 tags[encoding.tolocal(name)] = node
853 tags['tip'] = self.changelog.tip()
853 tags['tip'] = self.changelog.tip()
854 tagtypes = dict([(encoding.tolocal(name), value)
854 tagtypes = dict([(encoding.tolocal(name), value)
855 for (name, value) in tagtypes.iteritems()])
855 for (name, value) in tagtypes.iteritems()])
856 return (tags, tagtypes)
856 return (tags, tagtypes)
857
857
    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_("unknown branch '%s'") % branch)
            else:
                pass

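    # Illustrative usage (not upstream code):
    #
    #   repo.branchtip('default')            # raises RepoLookupError if absent
    #   repo.branchtip('gone', ignoremissing=True)  # returns None instead
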
    def lookup(self, key):
        return self[key].node()

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        cl = self.changelog
        nm = cl.nodemap
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool('phases', 'publish', True, untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing, we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return 'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.svfs, f)

    def changectx(self, changeid):
        return self[changeid]

    def setparents(self, p1, p2=nullid):
        with self.dirstate.parentchange():
            copies = self.dirstate.setparents(p1, p2)
            pctx = self[p1]
            if copies:
                # Adjust copy records; the dirstate cannot do it, as it
                # requires access to the parents' manifests. Preserve them
                # only for entries added to the first parent.
                for f in copies:
                    if f not in pctx and copies[f] in pctx:
                        self.dirstate.copy(copies[f], f)
            if p2 == nullid:
                for f, s in sorted(self.dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        self.dirstate.copy(None, f)

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

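    # Illustrative configuration (not upstream code) driving the filter
    # machinery above: hgrc [encode]/[decode] sections map file patterns to
    # commands; the patterns and commands below are made-up examples, and a
    # value of '!' disables a pattern, as handled in _loadfilter().
    #
    #   [encode]
    #   **.txt = pipe: dos2unix
    #   [decode]
    #   **.txt = pipe: unix2dos
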
    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (maybe decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data, backgroundclose=backgroundclose)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
        return len(data)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

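    # Illustrative sketch (not upstream code): callers must hold the repo
    # lock before opening a transaction; transaction() below enforces this
    # under devel.check-locks.
    #
    #   with repo.lock():
    #       with repo.transaction('my-operation') as tr:
    #           pass  # write store data; committed on exit, aborted on error
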
1088 def transaction(self, desc, report=None):
1088 def transaction(self, desc, report=None):
1089 if (self.ui.configbool('devel', 'all-warnings')
1089 if (self.ui.configbool('devel', 'all-warnings')
1090 or self.ui.configbool('devel', 'check-locks')):
1090 or self.ui.configbool('devel', 'check-locks')):
1091 if self._currentlock(self._lockref) is None:
1091 if self._currentlock(self._lockref) is None:
1092 raise error.ProgrammingError('transaction requires locking')
1092 raise error.ProgrammingError('transaction requires locking')
1093 tr = self.currenttransaction()
1093 tr = self.currenttransaction()
1094 if tr is not None:
1094 if tr is not None:
1095 return tr.nest()
1095 return tr.nest()
1096
1096
1097 # abort here if the journal already exists
1097 # abort here if the journal already exists
1098 if self.svfs.exists("journal"):
1098 if self.svfs.exists("journal"):
1099 raise error.RepoError(
1099 raise error.RepoError(
1100 _("abandoned transaction found"),
1100 _("abandoned transaction found"),
1101 hint=_("run 'hg recover' to clean up transaction"))
1101 hint=_("run 'hg recover' to clean up transaction"))
1102
1102
1103 idbase = "%.40f#%f" % (random.random(), time.time())
1103 idbase = "%.40f#%f" % (random.random(), time.time())
1104 ha = hex(hashlib.sha1(idbase).digest())
1104 ha = hex(hashlib.sha1(idbase).digest())
1105 txnid = 'TXN:' + ha
1105 txnid = 'TXN:' + ha
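        # txnid is thus 'TXN:' plus 40 hex digits, effectively unique per
        # transaction (e.g. 'TXN:2b7e1516...'; value illustrative)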
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite hard
        # to track these movements from a code perspective. So we fall back to
        # tracking at the repository level. One could envision tracking changes
        # to the '.hgtags' file through changegroup apply, but that fails to
        # cope with cases where a transaction exposes new heads without a
        # changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since this likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current focus
        # is on the behavior of the feature, so we disable it by default. The
        # flag will be removed when we are happy with the performance impact.
        #
        # Once this feature is no longer experimental, move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new, changed or deleted tags). In addition, the details of
        # these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file, as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
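        #
        # For illustration only (hypothetical hashes), a moved tag would show
        # up in tags.changes as an old/new pair such as:
        #
        #   -M 0123456789abcdef0123456789abcdef01234567 v1.2
        #   +M fedcba9876543210fedcba9876543210fedcba98 v1.2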
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags',
                                             False)
        if desc != 'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()
            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # note: we compare lists here.
                # As we do it only once, building a set would not be cheaper
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs['tag_moved'] = '1'
                    with repo.vfs('changes/tags.changes', 'w',
                                  atomictemp=True) as changesfile:
                        # note: we do not register the file to the
                        # transaction because we need it to still exist when
                        # the transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)
        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            reporef().hook('pretxnclose', throw=True,
                           txnname=desc, **pycompat.strkwargs(tr.hookargs))
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # this should be explicitly invoked here, because in-memory
                # changes aren't written out when the transaction closes if
                # tr.addfilegenerator (via dirstate.write or so) wasn't
                # invoked while the transaction was running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
-                repo.dirstate.restorebackup(None, prefix='journal.')
+                repo.dirstate.restorebackup(None, 'journal.dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn,
                                     checkambigfiles=_cachedfiles)
        tr.changes['revs'] = set()
        tr.changes['obsmarkers'] = set()

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As the fncache is used for streaming
        # clones, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hook():
                reporef().hook('txnclose', throw=False, txnname=desc,
                               **pycompat.strkwargs(hookargs))
            reporef()._afterlock(hook)
        tr.addfinalize('txnclose-hook', txnclosehook)
        tr.addpostclose('warms-cache', self._buildcacheupdater(tr))
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **tr2.hookargs)
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        return tr

    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
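    # undofiles() therefore yields e.g. (self.vfs, 'undo.dirstate') --
    # assuming undoname() swaps the 'journal'/'undo' prefixes -- which is
    # what _rollback() below reads back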
1262
1262
1263 @unfilteredmethod
1263 @unfilteredmethod
1264 def _writejournal(self, desc):
1264 def _writejournal(self, desc):
-        self.dirstate.savebackup(None, prefix='journal.')
+        self.dirstate.savebackup(None, 'journal.dirstate')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
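        # journal.desc thus holds the pre-transaction changelog length and
        # the transaction description, e.g. "42\ncommit\n" (values
        # illustrative); _rollback() parses the renamed copy from undo.desc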
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

    def recover(self):
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn,
                                     checkambigfiles=_cachedfiles)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = dirstateguard.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
                             checkambigfiles=_cachedfiles)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting already restored one
            dsguard.close()

-            self.dirstate.restorebackup(None, prefix='undo.')
+            self.dirstate.restorebackup(None, 'undo.dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating cache

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def updater(tr):
            repo = reporef()
            repo.updatecaches(tr)
        return updater

    @unfilteredmethod
    def updatecaches(self, tr=None):
        """warm appropriate caches

        If this function is called after a transaction closed, the transaction
        will be available in the 'tr' argument. This can be used to selectively
        update caches relevant to the changes in that transaction.
        """
        if tr is not None and tr.hookargs.get('source') == 'strip':
            # During strip, many caches are invalid but
            # a later call to `destroyed` will refresh them.
            return

        if tr is None or tr.changes['revs']:
            # updating the unfiltered branchmap should refresh all the others,
            self.ui.debug('updating the branch cache\n')
            branchmap.updatecache(self.filtered('served'))

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. incomplete fncache causes unintentional failure, but
        redundant one doesn't).
        '''
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extension should hook this to invalidate its caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)
        try:
            l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
                             acquirefn=acquirefn, desc=desc,
                             inheritchecker=inheritchecker,
                             parentlock=parentlock)
        except error.LockHeld as inst:
            if not wait:
                raise
            # show more details for new-style locks
            if ':' in inst.locker:
                host, pid = inst.locker.split(":", 1)
                self.ui.warn(
                    _("waiting for lock on %s held by process %r "
                      "on host %r\n") % (desc, pid, host))
            else:
                self.ui.warn(_("waiting for lock on %s held by %r\n") %
                             (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn=releasefn, acquirefn=acquirefn,
                             desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                'wlock cannot be inherited in the middle of a transaction')

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
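        # lock-ordering sketch (mirroring what commit() below does):
        #     wlock = repo.wlock()
        #     lock = repo.lock()
        #     ...
        #     lockmod.release(lock, wlock)  # release in reverse order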
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisitions would not cause a dead-lock, as they would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l

    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if manifest1.flags(fname) != fctx.flags():
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                 should record that bar descends from
            #                 bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense
            # to do (what does a copy from something not in your working copy
            # even mean?) and it causes bugs (eg, issue4476). Instead, we will
            # warn the user that copy information was dropped, so if they
            # didn't expect this outcome it can be fixed, but this is the
            # correct behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
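                # meta now resembles {'copy': 'foo', 'copyrev': '<40 hex
                # chars>'} (illustrative); flog.add() below stores it with
                # the new filelog revision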
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in status.modified, status.added, status.removed:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise error.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    dirtyreason = wctx.sub(s).dirtyreason(True)
                    if dirtyreason:
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise error.Abort(dirtyreason,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise error.Abort(
                            _("can't commit subrepos without .hgsub"))
                    status.modified.insert(0, '.hgsubstate')

            elif '.hgsub' in status.removed:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in (status.modified + status.added +
                                          status.removed)):
                    status.removed.insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (eg: histedit):
            # the temporary commit may have been stripped before the hook runs
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret

    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to the current repository.
        Revision information is passed via the context argument.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                mn = ctx.manifestnode()
                files = ctx.files()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = mctx.write(trp, linkrev,
                                p1.manifestnode(), p2.manifestnode(),
                                added, drop)
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit in its proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter parent changesets:
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, tr, targetphase, [n])
            tr.close()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated, causing those
        changes either to stay in memory (waiting for the next unlock) or to
        vanish completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        self.ui.deprecwarn('use repo[node].walk instead of repo.walk', '4.3')
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

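    # Illustrative usage sketch (not part of the original file): with a
    # repo object in hand, the default arguments compare the working
    # directory against its first parent, much like "hg status" does.
    #
    #     st = repo.status()
    #     for f in st.modified:
    #         repo.ui.write('M %s\n' % f)
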
    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the wlock
        couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meanwhile. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure they add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)

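    # Illustrative sketch (assumed extension code, not from this file): since
    # the list is emptied after each status run, a callback has to be
    # re-registered before every dirstate status; "fixup" is a hypothetical
    # name.
    #
    #     def fixup(wctx, status):
    #         # runs under the wlock once status fixups have happened
    #         wctx.repo().ui.debug('%d files modified\n' % len(status.modified))
    #
    #     repo.addpostdsstatus(fixup)
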
    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]

    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

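    # Illustrative sketch (hypothetical usage): without an argument, heads()
    # returns every head node, newest revision first; passing a start node
    # keeps only the heads reachable from it ("somenode" is hypothetical).
    #
    #     newest = repo.heads()[0]
    #     reachable = repo.heads(start=somenode)
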
    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

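    # Illustrative sketch (hypothetical usage): heads of the current branch
    # versus all heads, closed ones included, of a named branch.
    #
    #     current = repo.branchheads()
    #     everything = repo.branchheads('default', closed=True)
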
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            # follow first parents until we reach a merge or a root; that
            # stretch forms one linear branch segment
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            # walk first parents from top towards bottom, sampling nodes at
            # exponentially growing distances (1, 2, 4, 8, ... steps below
            # top)
            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

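    # Illustrative sketch (hypothetical node ids): for a linear chain of
    # changesets, the sample returned for a (top, bottom) pair holds the
    # nodes 1, 2, 4, 8, ... first-parent steps below top -- the spacing the
    # legacy wire protocol's "between" command relies on.
    #
    #     samples = repo.between([(topnode, bottomnode)])[0]
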
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the push
        command.
        """
        pass

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks object whose hooks are called with a pushop
        (carrying repo, remote and outgoing) before changesets are pushed.
        """
        return util.hooks()

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                      ret=ret)
        self._afterlock(runhook)
        return ret

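    # Illustrative sketch (hypothetical values): pushing a bookmark through
    # the pushkey mechanism; an empty old value means the key is being
    # created.
    #
    #     ok = repo.pushkey('bookmarks', 'feature-x', '', newnodehex)
    #     if not ok:
    #         repo.ui.warn('bookmark was not updated\n')
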
    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

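    # Illustrative sketch (hypothetical usage): namespaces mirror what the
    # wire protocol exposes, e.g. a mapping of bookmark names to hex nodes.
    #
    #     marks = repo.listkeys('bookmarks')
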
    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

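    # Illustrative sketch (hypothetical message): the text lands in
    # .hg/last-message.txt and the returned path is relative to the repo
    # root, suitable for telling the user where to recover it.
    #
    #     msgfn = repo.savecommitmessage('WIP: refactor\n')
    #     repo.ui.status('message saved in %s\n' % msgfn)
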
# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to the same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename cannot be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

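# Illustrative sketch (hypothetical file names): aftertrans builds a closure
# that a transaction can run after closing, renaming each (vfs, src, dest)
# triple, e.g. journal files becoming undo files.
#
#     cb = aftertrans([(repo.vfs, 'journal.dirstate', 'undo.dirstate')])
#     cb()  # performs the renames
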
def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

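# Illustrative example: the 'journal' prefix of the base name is rewritten
# to 'undo', keeping any directory part intact.
#
#     undoname('store/journal.phaseroots')  # -> 'store/undo.phaseroots'
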
def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True

def newreporequirements(repo):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    ui = repo.ui
    requirements = {'revlogv1'}
    if ui.configbool('format', 'usestore'):
        requirements.add('store')
        if ui.configbool('format', 'usefncache'):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode'):
                requirements.add('dotencode')

    compengine = ui.config('experimental', 'format.compression', 'zlib')
    if compengine not in util.compengines:
        raise error.Abort(_('compression engine %s defined by '
                            'experimental.format.compression not available') %
                          compengine,
                          hint=_('run "hg debuginstall" to list available '
                                 'compression engines'))

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine != 'zlib':
        requirements.add('exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('experimental', 'treemanifest', False):
        requirements.add('treemanifest')
    if ui.configbool('experimental', 'manifestv2', False):
        requirements.add('manifestv2')

    revlogv2 = ui.config('experimental', 'revlogv2')
    if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
        requirements.remove('revlogv1')
        # generaldelta is implied by revlogv2.
        requirements.discard('generaldelta')
        requirements.add(REVLOGV2_REQUIREMENT)

    return requirements
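
# Illustrative sketch (assumed extension code; 'exp-myext' is a hypothetical
# requirement name): an extension can wrap newreporequirements to stamp new
# repositories with its own requirement.
#
#     from mercurial import extensions, localrepo
#
#     def _wrapreqs(orig, repo):
#         reqs = orig(repo)
#         reqs.add('exp-myext')
#         return reqs
#
#     def uisetup(ui):
#         extensions.wrapfunction(localrepo, 'newreporequirements', _wrapreqs)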