changegroup: replace changegroupsubset with makechangegroup...
Durham Goode
r34099:f7d41b85 default
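The gist of the change, sketched outside the diff (the names bases, node and
cgversion are the locals of shelvedfile.writebundle below):

    # before: build the changegroup directly from roots and heads
    cg = changegroup.changegroupsubset(self.repo, bases, [node], 'shelve',
                                       version=cgversion)

    # after: first describe the outgoing set, then build the changegroup
    # from that description
    outgoing = discovery.outgoing(self.repo, missingroots=bases,
                                  missingheads=[node])
    cg = changegroup.makechangegroup(self.repo, outgoing, cgversion,
                                     'shelve')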
@@ -1,1043 +1,1047 @@
# shelve.py - save/restore working directory state
#
# Copyright 2013 Facebook, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""save and restore changes to the working directory

The "hg shelve" command saves changes made to the working directory
and reverts those changes, resetting the working directory to a clean
state.

Later on, the "hg unshelve" command restores the changes saved by "hg
shelve". Changes can be restored even after updating to a different
parent, in which case Mercurial's merge machinery will resolve any
conflicts if necessary.

You can have more than one shelved change outstanding at a time; each
shelved change has a distinct name. For details, see the help for "hg
shelve".
"""
from __future__ import absolute_import

import collections
import errno
import itertools

from mercurial.i18n import _
from mercurial import (
    bookmarks,
    bundle2,
    bundlerepo,
    changegroup,
    cmdutil,
+    discovery,
    error,
    exchange,
    hg,
    lock as lockmod,
    mdiff,
    merge,
    node as nodemod,
    patch,
    phases,
    registrar,
    repair,
    scmutil,
    templatefilters,
    util,
    vfs as vfsmod,
)

from . import (
    rebase,
)

cmdtable = {}
command = registrar.command(cmdtable)
# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = 'ships-with-hg-core'

backupdir = 'shelve-backup'
shelvedir = 'shelved'
shelvefileextensions = ['hg', 'patch', 'oshelve']
# universal extension is present in all types of shelves
patchextension = 'patch'

# we never need the user, so we use a
# generic user for all shelve operations
shelveuser = 'shelve@localhost'

class shelvedfile(object):
    """Helper for the file storing a single shelve

    Handles common functions on shelve files (.hg/.patch) using
    the vfs layer"""
    def __init__(self, repo, name, filetype=None):
        self.repo = repo
        self.name = name
        self.vfs = vfsmod.vfs(repo.vfs.join(shelvedir))
        self.backupvfs = vfsmod.vfs(repo.vfs.join(backupdir))
        self.ui = self.repo.ui
        if filetype:
            self.fname = name + '.' + filetype
        else:
            self.fname = name

    def exists(self):
        return self.vfs.exists(self.fname)

    def filename(self):
        return self.vfs.join(self.fname)

    def backupfilename(self):
        def gennames(base):
            yield base
            base, ext = base.rsplit('.', 1)
            for i in itertools.count(1):
                yield '%s-%d.%s' % (base, i, ext)

        name = self.backupvfs.join(self.fname)
        for n in gennames(name):
            if not self.backupvfs.exists(n):
                return n
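    # Illustrative sketch (not in the original file): for a shelve file named
    # "default.hg", gennames() probes backup names in this order until one is
    # unused in .hg/shelve-backup:
    #
    #   "default.hg", "default-1.hg", "default-2.hg", ...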

    def movetobackup(self):
        if not self.backupvfs.isdir():
            self.backupvfs.makedir()
        util.rename(self.filename(), self.backupfilename())

    def stat(self):
        return self.vfs.stat(self.fname)

    def opener(self, mode='rb'):
        try:
            return self.vfs(self.fname, mode)
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
            raise error.Abort(_("shelved change '%s' not found") % self.name)

    def applybundle(self):
        fp = self.opener()
        try:
            gen = exchange.readbundle(self.repo.ui, fp, self.fname, self.vfs)
            bundle2.applybundle(self.repo, gen, self.repo.currenttransaction(),
                                source='unshelve',
                                url='bundle:' + self.vfs.join(self.fname),
                                targetphase=phases.secret)
        finally:
            fp.close()

    def bundlerepo(self):
        return bundlerepo.bundlerepository(self.repo.baseui, self.repo.root,
                                           self.vfs.join(self.fname))
    def writebundle(self, bases, node):
        cgversion = changegroup.safeversion(self.repo)
        if cgversion == '01':
            btype = 'HG10BZ'
            compression = None
        else:
            btype = 'HG20'
            compression = 'BZ'

-        cg = changegroup.changegroupsubset(self.repo, bases, [node], 'shelve',
-                                           version=cgversion)
+        outgoing = discovery.outgoing(self.repo, missingroots=bases,
+                                      missingheads=[node])
+        cg = changegroup.makechangegroup(self.repo, outgoing, cgversion,
+                                         'shelve')
+
        bundle2.writebundle(self.ui, cg, self.fname, btype, self.vfs,
                            compression=compression)

    def writeobsshelveinfo(self, info):
        scmutil.simplekeyvaluefile(self.vfs, self.fname).write(info)

    def readobsshelveinfo(self):
        return scmutil.simplekeyvaluefile(self.vfs, self.fname).read()

class shelvedstate(object):
    """Handle persistence during unshelving operations.

    Handles saving and restoring a shelved state. Ensures that different
    versions of a shelved state are possible and handles them appropriately.
    """
    _version = 2
    _filename = 'shelvedstate'
    _keep = 'keep'
    _nokeep = 'nokeep'
    # colon is essential to differentiate from a real bookmark name
    _noactivebook = ':no-active-bookmark'

    @classmethod
    def _verifyandtransform(cls, d):
        """Some basic shelvestate syntactic verification and transformation"""
        try:
            d['originalwctx'] = nodemod.bin(d['originalwctx'])
            d['pendingctx'] = nodemod.bin(d['pendingctx'])
            d['parents'] = [nodemod.bin(h)
                            for h in d['parents'].split(' ')]
            d['nodestoremove'] = [nodemod.bin(h)
                                  for h in d['nodestoremove'].split(' ')]
        except (ValueError, TypeError, KeyError) as err:
            raise error.CorruptedState(str(err))

    @classmethod
    def _getversion(cls, repo):
        """Read version information from shelvestate file"""
        fp = repo.vfs(cls._filename)
        try:
            version = int(fp.readline().strip())
        except ValueError as err:
            raise error.CorruptedState(str(err))
        finally:
            fp.close()
        return version

    @classmethod
    def _readold(cls, repo):
        """Read the old position-based version of a shelvestate file"""
        # Order is important, because the old shelvestate file uses it
        # to determine values of fields (e.g. name is on the second line,
        # originalwctx is on the third and so forth). Please do not change.
        keys = ['version', 'name', 'originalwctx', 'pendingctx', 'parents',
                'nodestoremove', 'branchtorestore', 'keep', 'activebook']
        # this is executed only rarely, so it is not a big deal
        # that we open this file twice
        fp = repo.vfs(cls._filename)
        d = {}
        try:
            for key in keys:
                d[key] = fp.readline().strip()
        finally:
            fp.close()
        return d

    @classmethod
    def load(cls, repo):
        version = cls._getversion(repo)
        if version < cls._version:
            d = cls._readold(repo)
        elif version == cls._version:
            d = scmutil.simplekeyvaluefile(repo.vfs, cls._filename)\
                       .read(firstlinenonkeyval=True)
        else:
            raise error.Abort(_('this version of shelve is incompatible '
                                'with the version used in this repo'))

        cls._verifyandtransform(d)
        try:
            obj = cls()
            obj.name = d['name']
            obj.wctx = repo[d['originalwctx']]
            obj.pendingctx = repo[d['pendingctx']]
            obj.parents = d['parents']
            obj.nodestoremove = d['nodestoremove']
            obj.branchtorestore = d.get('branchtorestore', '')
            obj.keep = d.get('keep') == cls._keep
            obj.activebookmark = ''
            if d.get('activebook', '') != cls._noactivebook:
                obj.activebookmark = d.get('activebook', '')
        except (error.RepoLookupError, KeyError) as err:
            raise error.CorruptedState(str(err))

        return obj

    @classmethod
    def save(cls, repo, name, originalwctx, pendingctx, nodestoremove,
             branchtorestore, keep=False, activebook=''):
        info = {
            "name": name,
            "originalwctx": nodemod.hex(originalwctx.node()),
            "pendingctx": nodemod.hex(pendingctx.node()),
            "parents": ' '.join([nodemod.hex(p)
                                 for p in repo.dirstate.parents()]),
            "nodestoremove": ' '.join([nodemod.hex(n)
                                      for n in nodestoremove]),
            "branchtorestore": branchtorestore,
            "keep": cls._keep if keep else cls._nokeep,
            "activebook": activebook or cls._noactivebook
        }
        scmutil.simplekeyvaluefile(repo.vfs, cls._filename)\
               .write(info, firstline=str(cls._version))
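    # Illustrative sketch (not in the original file) of the resulting
    # .hg/shelvedstate contents, assuming simplekeyvaluefile's "key=value"
    # line format: the first line is the bare version number, the rest are
    # key/value pairs, e.g.
    #
    #   2
    #   name=default-01
    #   originalwctx=<40-hex node>
    #   pendingctx=<40-hex node>
    #   parents=<40-hex node> <40-hex node>
    #   nodestoremove=<40-hex node> <40-hex node>
    #   branchtorestore=
    #   keep=nokeep
    #   activebook=:no-active-bookmark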

    @classmethod
    def clear(cls, repo):
        repo.vfs.unlinkpath(cls._filename, ignoremissing=True)

def cleanupoldbackups(repo):
    vfs = vfsmod.vfs(repo.vfs.join(backupdir))
    maxbackups = repo.ui.configint('shelve', 'maxbackups', 10)
    hgfiles = [f for f in vfs.listdir()
               if f.endswith('.' + patchextension)]
    hgfiles = sorted([(vfs.stat(f).st_mtime, f) for f in hgfiles])
    if 0 < maxbackups and maxbackups < len(hgfiles):
        bordermtime = hgfiles[-maxbackups][0]
    else:
        bordermtime = None
    for mtime, f in hgfiles[:len(hgfiles) - maxbackups]:
        if mtime == bordermtime:
            # keep it, because timestamp can't decide exact order of backups
            continue
        base = f[:-(1 + len(patchextension))]
        for ext in shelvefileextensions:
            vfs.tryunlink(base + '.' + ext)
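# Illustrative walk-through (not in the original file): with maxbackups=3 and
# five backups whose .patch mtimes, sorted ascending, are [10, 30, 30, 30, 40],
# bordermtime = hgfiles[-3][0] = 30 and the deletion slice hgfiles[:2] holds
# the entries with mtimes 10 and 30. The mtime-10 backup is deleted; the
# mtime-30 backup in the slice is kept because it ties with the border, so
# four backups survive even though maxbackups is 3.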

def _backupactivebookmark(repo):
    activebookmark = repo._activebookmark
    if activebookmark:
        bookmarks.deactivate(repo)
    return activebookmark

def _restoreactivebookmark(repo, mark):
    if mark:
        bookmarks.activate(repo, mark)

def _aborttransaction(repo):
    '''Abort current transaction for shelve/unshelve, but keep dirstate
    '''
    tr = repo.currenttransaction()
    backupname = 'dirstate.shelve'
    repo.dirstate.savebackup(tr, backupname)
    tr.abort()
    repo.dirstate.restorebackup(None, backupname)

def createcmd(ui, repo, pats, opts):
    """subcommand that creates a new shelve"""
    with repo.wlock():
        cmdutil.checkunfinished(repo)
        return _docreatecmd(ui, repo, pats, opts)

def getshelvename(repo, parent, opts):
    """Decide on the name this shelve is going to have"""
    def gennames():
        yield label
        for i in itertools.count(1):
            yield '%s-%02d' % (label, i)
    name = opts.get('name')
    label = repo._activebookmark or parent.branch() or 'default'
    # slashes aren't allowed in filenames, therefore we rename it
    label = label.replace('/', '_')
    label = label.replace('\\', '_')
    # filenames must not start with '.' as it should not be hidden
    if label.startswith('.'):
        label = label.replace('.', '_', 1)

    if name:
        if shelvedfile(repo, name, patchextension).exists():
            e = _("a shelved change named '%s' already exists") % name
            raise error.Abort(e)

        # ensure we are not creating a subdirectory or a hidden file
        if '/' in name or '\\' in name:
            raise error.Abort(_('shelved change names can not contain slashes'))
        if name.startswith('.'):
            raise error.Abort(_("shelved change names can not start with '.'"))

    else:
        for n in gennames():
            if not shelvedfile(repo, n, patchextension).exists():
                name = n
                break

    return name
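# Illustrative sketch (not in the original file): with no --name given and an
# active bookmark "feature/x", the label is sanitized to "feature_x" and
# gennames() tries "feature_x", "feature_x-01", "feature_x-02", ... until a
# name with no existing .patch file is found.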

def mutableancestors(ctx):
    """return all mutable ancestors for ctx (included)

    Much faster than the revset ancestors(ctx) & draft()"""
    seen = {nodemod.nullrev}
    visit = collections.deque()
    visit.append(ctx)
    while visit:
        ctx = visit.popleft()
        yield ctx.node()
        for parent in ctx.parents():
            rev = parent.rev()
            if rev not in seen:
                seen.add(rev)
                if parent.mutable():
                    visit.append(parent)

def getcommitfunc(extra, interactive, editor=False):
    def commitfunc(ui, repo, message, match, opts):
        hasmq = util.safehasattr(repo, 'mq')
        if hasmq:
            saved, repo.mq.checkapplied = repo.mq.checkapplied, False
        overrides = {('phases', 'new-commit'): phases.secret}
        try:
            editor_ = False
            if editor:
                editor_ = cmdutil.getcommiteditor(editform='shelve.shelve',
                                                  **opts)
            with repo.ui.configoverride(overrides):
                return repo.commit(message, shelveuser, opts.get('date'),
                                   match, editor=editor_, extra=extra)
        finally:
            if hasmq:
                repo.mq.checkapplied = saved

    def interactivecommitfunc(ui, repo, *pats, **opts):
        match = scmutil.match(repo['.'], pats, {})
        message = opts['message']
        return commitfunc(ui, repo, message, match, opts)

    return interactivecommitfunc if interactive else commitfunc
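# Editor's note (not in the original file): the configoverride of
# ('phases', 'new-commit') to phases.secret means every temporary commit
# shelve creates is born in the secret phase, so it is ignored by push and
# pull for as long as it exists. The mq checkapplied dance merely keeps the
# commit from being rejected while mq patches are applied.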

def _nothingtoshelvemessaging(ui, repo, pats, opts):
    stat = repo.status(match=scmutil.match(repo[None], pats, opts))
    if stat.deleted:
        ui.status(_("nothing changed (%d missing files, see "
                    "'hg status')\n") % len(stat.deleted))
    else:
        ui.status(_("nothing changed\n"))

def _shelvecreatedcommit(repo, node, name):
    bases = list(mutableancestors(repo[node]))
    shelvedfile(repo, name, 'hg').writebundle(bases, node)
    cmdutil.export(repo, [node],
                   fp=shelvedfile(repo, name, patchextension).opener('wb'),
                   opts=mdiff.diffopts(git=True))
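# Illustrative sketch (not in the original file): a shelve named "default"
# therefore leaves two artifacts behind:
#
#   .hg/shelved/default.hg     - bundle holding the temporary commit plus the
#                                mutable ancestors it depends on
#   .hg/shelved/default.patch  - git-style export of the commit, read back by
#                                listcmd for --list/--patch/--stat display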

def _includeunknownfiles(repo, pats, opts, extra):
    s = repo.status(match=scmutil.match(repo[None], pats, opts),
                    unknown=True)
    if s.unknown:
        extra['shelve_unknown'] = '\0'.join(s.unknown)
        repo[None].add(s.unknown)

def _finishshelve(repo):
    _aborttransaction(repo)

def _docreatecmd(ui, repo, pats, opts):
    wctx = repo[None]
    parents = wctx.parents()
    if len(parents) > 1:
        raise error.Abort(_('cannot shelve while merging'))
    parent = parents[0]
    origbranch = wctx.branch()

    if parent.node() != nodemod.nullid:
        desc = "changes to: %s" % parent.description().split('\n', 1)[0]
    else:
        desc = '(changes in empty repository)'

    if not opts.get('message'):
        opts['message'] = desc

    lock = tr = activebookmark = None
    try:
        lock = repo.lock()

        # use an uncommitted transaction to generate the bundle to avoid
        # pull races. ensure we don't print the abort message to stderr.
        tr = repo.transaction('commit', report=lambda x: None)

        interactive = opts.get('interactive', False)
        includeunknown = (opts.get('unknown', False) and
                          not opts.get('addremove', False))

        name = getshelvename(repo, parent, opts)
        activebookmark = _backupactivebookmark(repo)
        extra = {}
        if includeunknown:
            _includeunknownfiles(repo, pats, opts, extra)

        if _iswctxonnewbranch(repo) and not _isbareshelve(pats, opts):
            # In non-bare shelve we don't store newly created branch
            # at bundled commit
            repo.dirstate.setbranch(repo['.'].branch())

        commitfunc = getcommitfunc(extra, interactive, editor=True)
        if not interactive:
            node = cmdutil.commit(ui, repo, commitfunc, pats, opts)
        else:
            node = cmdutil.dorecord(ui, repo, commitfunc, None,
                                    False, cmdutil.recordfilter, *pats,
                                    **opts)
        if not node:
            _nothingtoshelvemessaging(ui, repo, pats, opts)
            return 1

        _shelvecreatedcommit(repo, node, name)

        if ui.formatted():
            desc = util.ellipsis(desc, ui.termwidth())
        ui.status(_('shelved as %s\n') % name)
        hg.update(repo, parent.node())
        if origbranch != repo['.'].branch() and not _isbareshelve(pats, opts):
            repo.dirstate.setbranch(origbranch)

        _finishshelve(repo)
    finally:
        _restoreactivebookmark(repo, activebookmark)
        lockmod.release(tr, lock)

def _isbareshelve(pats, opts):
    return (not pats
            and not opts.get('interactive', False)
            and not opts.get('include', False)
            and not opts.get('exclude', False))

def _iswctxonnewbranch(repo):
    return repo[None].branch() != repo['.'].branch()

def cleanupcmd(ui, repo):
    """subcommand that deletes all shelves"""

    with repo.wlock():
        for (name, _type) in repo.vfs.readdir(shelvedir):
            suffix = name.rsplit('.', 1)[-1]
            if suffix in shelvefileextensions:
                shelvedfile(repo, name).movetobackup()
            cleanupoldbackups(repo)

def deletecmd(ui, repo, pats):
    """subcommand that deletes a specific shelve"""
    if not pats:
        raise error.Abort(_('no shelved changes specified!'))
    with repo.wlock():
        try:
            for name in pats:
                for suffix in shelvefileextensions:
                    shfile = shelvedfile(repo, name, suffix)
                    # patch file is necessary, as it should
                    # be present for any kind of shelve,
                    # but the .hg file is optional as in future we
                    # will add obsolete shelve which does not create a
                    # bundle
                    if shfile.exists() or suffix == patchextension:
                        shfile.movetobackup()
            cleanupoldbackups(repo)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            raise error.Abort(_("shelved change '%s' not found") % name)

def listshelves(repo):
    """return all shelves in repo as list of (time, filename)"""
    try:
        names = repo.vfs.readdir(shelvedir)
    except OSError as err:
        if err.errno != errno.ENOENT:
            raise
        return []
    info = []
    for (name, _type) in names:
        pfx, sfx = name.rsplit('.', 1)
        if not pfx or sfx != patchextension:
            continue
        st = shelvedfile(repo, name).stat()
        info.append((st.st_mtime, shelvedfile(repo, pfx).filename()))
    return sorted(info, reverse=True)

def listcmd(ui, repo, pats, opts):
    """subcommand that displays the list of shelves"""
    pats = set(pats)
    width = 80
    if not ui.plain():
        width = ui.termwidth()
    namelabel = 'shelve.newest'
    ui.pager('shelve')
    for mtime, name in listshelves(repo):
        sname = util.split(name)[1]
        if pats and sname not in pats:
            continue
        ui.write(sname, label=namelabel)
        namelabel = 'shelve.name'
        if ui.quiet:
            ui.write('\n')
            continue
        ui.write(' ' * (16 - len(sname)))
        used = 16
        age = '(%s)' % templatefilters.age(util.makedate(mtime), abbrev=True)
        ui.write(age, label='shelve.age')
        ui.write(' ' * (12 - len(age)))
        used += 12
        with open(name + '.' + patchextension, 'rb') as fp:
            while True:
                line = fp.readline()
                if not line:
                    break
                if not line.startswith('#'):
                    desc = line.rstrip()
                    if ui.formatted():
                        desc = util.ellipsis(desc, width - used)
                    ui.write(desc)
                    break
            ui.write('\n')
            if not (opts['patch'] or opts['stat']):
                continue
            difflines = fp.readlines()
            if opts['patch']:
                for chunk, label in patch.difflabel(iter, difflines):
                    ui.write(chunk, label=label)
            if opts['stat']:
                for chunk, label in patch.diffstatui(difflines, width=width):
                    ui.write(chunk, label=label)

def patchcmds(ui, repo, pats, opts, subcommand):
    """subcommand that displays shelves"""
    if len(pats) == 0:
        raise error.Abort(_("--%s expects at least one shelf") % subcommand)

    for shelfname in pats:
        if not shelvedfile(repo, shelfname, patchextension).exists():
            raise error.Abort(_("cannot find shelf %s") % shelfname)

    listcmd(ui, repo, pats, opts)

def checkparents(repo, state):
    """check parent while resuming an unshelve"""
    if state.parents != repo.dirstate.parents():
        raise error.Abort(_('working directory parents do not match unshelve '
                            'state'))

def pathtofiles(repo, files):
    cwd = repo.getcwd()
    return [repo.pathto(f, cwd) for f in files]

def unshelveabort(ui, repo, state, opts):
    """subcommand that aborts an in-progress unshelve"""
    with repo.lock():
        try:
            checkparents(repo, state)

            repo.vfs.rename('unshelverebasestate', 'rebasestate')
            try:
                rebase.rebase(ui, repo, **{
                    'abort' : True
                })
            except Exception:
                repo.vfs.rename('rebasestate', 'unshelverebasestate')
                raise

            mergefiles(ui, repo, state.wctx, state.pendingctx)
            repair.strip(ui, repo, state.nodestoremove, backup=False,
                         topic='shelve')
        finally:
            shelvedstate.clear(repo)
            ui.warn(_("unshelve of '%s' aborted\n") % state.name)

def mergefiles(ui, repo, wctx, shelvectx):
    """updates to wctx and merges the changes from shelvectx into the
    dirstate."""
    with ui.configoverride({('ui', 'quiet'): True}):
        hg.update(repo, wctx.node())
        files = []
        files.extend(shelvectx.files())
        files.extend(shelvectx.parents()[0].files())

        # revert will overwrite unknown files, so move them out of the way
        for file in repo.status(unknown=True).unknown:
            if file in files:
                util.rename(file, scmutil.origpath(ui, repo, file))
        ui.pushbuffer(True)
        cmdutil.revert(ui, repo, shelvectx, repo.dirstate.parents(),
                       *pathtofiles(repo, files),
                       **{'no_backup': True})
        ui.popbuffer()
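# Editor's note (not in the original file): a conflicting unknown file is
# renamed via scmutil.origpath before the revert, so with the default
# ui.origbackuppath an untracked "foo" is preserved as "foo.orig" rather
# than silently overwritten.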

def restorebranch(ui, repo, branchtorestore):
    if branchtorestore and branchtorestore != repo.dirstate.branch():
        repo.dirstate.setbranch(branchtorestore)
        ui.status(_('marked working directory as branch %s\n')
                  % branchtorestore)

def unshelvecleanup(ui, repo, name, opts):
    """remove related files after an unshelve"""
    if not opts.get('keep'):
        for filetype in shelvefileextensions:
            shfile = shelvedfile(repo, name, filetype)
            if shfile.exists():
                shfile.movetobackup()
        cleanupoldbackups(repo)

def unshelvecontinue(ui, repo, state, opts):
    """subcommand to continue an in-progress unshelve"""
    # We're finishing off a merge. First parent is our original
    # parent, second is the temporary "fake" commit we're unshelving.
    with repo.lock():
        checkparents(repo, state)
        ms = merge.mergestate.read(repo)
        if list(ms.unresolved()):
            raise error.Abort(
                _("unresolved conflicts, can't continue"),
                hint=_("see 'hg resolve', then 'hg unshelve --continue'"))

        repo.vfs.rename('unshelverebasestate', 'rebasestate')
        try:
            rebase.rebase(ui, repo, **{
                'continue' : True
            })
        except Exception:
            repo.vfs.rename('rebasestate', 'unshelverebasestate')
            raise

        shelvectx = repo['tip']
        if state.pendingctx not in shelvectx.parents():
            # rebase was a no-op, so it produced no child commit
            shelvectx = state.pendingctx
        else:
            # only strip the shelvectx if the rebase produced it
            state.nodestoremove.append(shelvectx.node())

        mergefiles(ui, repo, state.wctx, shelvectx)
        restorebranch(ui, repo, state.branchtorestore)

        repair.strip(ui, repo, state.nodestoremove, backup=False,
                     topic='shelve')
        _restoreactivebookmark(repo, state.activebookmark)
        shelvedstate.clear(repo)
        unshelvecleanup(ui, repo, state.name, opts)
        ui.status(_("unshelve of '%s' complete\n") % state.name)

def _commitworkingcopychanges(ui, repo, opts, tmpwctx):
    """Temporarily commit working copy changes before moving unshelve commit"""
    # Store pending changes in a commit and remember added in case a shelve
    # contains unknown files that are part of the pending change
    s = repo.status()
    addedbefore = frozenset(s.added)
    if not (s.modified or s.added or s.removed):
        return tmpwctx, addedbefore
    ui.status(_("temporarily committing pending changes "
                "(restore with 'hg unshelve --abort')\n"))
    commitfunc = getcommitfunc(extra=None, interactive=False,
                               editor=False)
    tempopts = {}
    tempopts['message'] = "pending changes temporary commit"
    tempopts['date'] = opts.get('date')
    with ui.configoverride({('ui', 'quiet'): True}):
        node = cmdutil.commit(ui, repo, commitfunc, [], tempopts)
    tmpwctx = repo[node]
    return tmpwctx, addedbefore

def _unshelverestorecommit(ui, repo, basename):
    """Recreate commit in the repository during the unshelve"""
    with ui.configoverride({('ui', 'quiet'): True}):
        shelvedfile(repo, basename, 'hg').applybundle()
    shelvectx = repo['tip']
    return repo, shelvectx

def _rebaserestoredcommit(ui, repo, opts, tr, oldtiprev, basename, pctx,
                          tmpwctx, shelvectx, branchtorestore,
                          activebookmark):
    """Rebase restored commit from its original location to a destination"""
    # If the shelve is not immediately on top of the commit
    # we'll be merging with, rebase it to be on top.
    if tmpwctx.node() == shelvectx.parents()[0].node():
        return shelvectx

    ui.status(_('rebasing shelved changes\n'))
    try:
        rebase.rebase(ui, repo, **{
            'rev': [shelvectx.rev()],
            'dest': str(tmpwctx.rev()),
            'keep': True,
            'tool': opts.get('tool', ''),
        })
    except error.InterventionRequired:
        tr.close()

        nodestoremove = [repo.changelog.node(rev)
                         for rev in xrange(oldtiprev, len(repo))]
        shelvedstate.save(repo, basename, pctx, tmpwctx, nodestoremove,
                          branchtorestore, opts.get('keep'), activebookmark)

        repo.vfs.rename('rebasestate', 'unshelverebasestate')
        raise error.InterventionRequired(
            _("unresolved conflicts (see 'hg resolve', then "
              "'hg unshelve --continue')"))

    # refresh ctx after rebase completes
    shelvectx = repo['tip']

    if tmpwctx not in shelvectx.parents():
        # rebase was a no-op, so it produced no child commit
        shelvectx = tmpwctx
    return shelvectx

def _forgetunknownfiles(repo, shelvectx, addedbefore):
    # Forget any files that were unknown before the shelve, unknown before
    # unshelve started, but are now added.
    shelveunknown = shelvectx.extra().get('shelve_unknown')
    if not shelveunknown:
        return
    shelveunknown = frozenset(shelveunknown.split('\0'))
    addedafter = frozenset(repo.status().added)
    toforget = (addedafter & shelveunknown) - addedbefore
    repo[None].forget(toforget)
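# Illustrative walk-through (not in the original file): suppose the shelve
# recorded shelve_unknown = {'a', 'b'}, the user had already added 'b' before
# running unshelve (addedbefore = {'b'}), and both files show up as added
# afterwards (addedafter = {'a', 'b'}). Then
#
#   toforget = ({'a', 'b'} & {'a', 'b'}) - {'b'} = {'a'}
#
# i.e. only 'a' reverts to unknown; the user's own 'hg add b' survives.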

def _finishunshelve(repo, oldtiprev, tr, activebookmark):
    _restoreactivebookmark(repo, activebookmark)
    # The transaction aborting will strip all the commits for us,
    # but it doesn't update the inmemory structures, so addchangegroup
    # hooks still fire and try to operate on the missing commits.
    # Clean up manually to prevent this.
    repo.unfiltered().changelog.strip(oldtiprev, tr)
    _aborttransaction(repo)

def _checkunshelveuntrackedproblems(ui, repo, shelvectx):
    """Check potential problems which may result from working
    copy having untracked changes."""
    wcdeleted = set(repo.status().deleted)
    shelvetouched = set(shelvectx.files())
    intersection = wcdeleted.intersection(shelvetouched)
    if intersection:
        m = _("shelved change touches missing files")
        hint = _("run hg status to see which files are missing")
        raise error.Abort(m, hint=hint)

@command('unshelve',
         [('a', 'abort', None,
           _('abort an incomplete unshelve operation')),
          ('c', 'continue', None,
           _('continue an incomplete unshelve operation')),
          ('k', 'keep', None,
           _('keep shelve after unshelving')),
          ('n', 'name', '',
           _('restore shelved change with given name'), _('NAME')),
          ('t', 'tool', '', _('specify merge tool')),
          ('', 'date', '',
           _('set date for temporary commits (DEPRECATED)'), _('DATE'))],
         _('hg unshelve [[-n] SHELVED]'))
def unshelve(ui, repo, *shelved, **opts):
    """restore a shelved change to the working directory

    This command accepts an optional name of a shelved change to
    restore. If none is given, the most recent shelved change is used.

    If a shelved change is applied successfully, the bundle that
    contains the shelved changes is moved to a backup location
    (.hg/shelve-backup).

    Since you can restore a shelved change on top of an arbitrary
    commit, it is possible that unshelving will result in a conflict
    between your changes and the commits you are unshelving onto. If
    this occurs, you must resolve the conflict, then use
    ``--continue`` to complete the unshelve operation. (The bundle
    will not be moved until you successfully complete the unshelve.)

    (Alternatively, you can use ``--abort`` to abandon an unshelve
    that causes a conflict. This reverts the unshelved changes, and
    leaves the bundle in place.)

    If a bare shelved change (when no files are specified, without the
    interactive, include and exclude options) was done on a newly
    created branch, unshelving restores that branch information to the
    working directory.

    After a successful unshelve, the shelved changes are stored in a
    backup directory. Only the N most recent backups are kept. N
    defaults to 10 but can be overridden using the ``shelve.maxbackups``
    configuration option.

    .. container:: verbose

       Timestamps in seconds are used to decide the order of backups.
       More than ``maxbackups`` backups are kept if identical timestamps
       prevent deciding their exact order, for safety.
    """
    with repo.wlock():
        return _dounshelve(ui, repo, *shelved, **opts)

def _dounshelve(ui, repo, *shelved, **opts):
    abortf = opts.get('abort')
    continuef = opts.get('continue')
    if not abortf and not continuef:
        cmdutil.checkunfinished(repo)
    shelved = list(shelved)
    if opts.get("name"):
        shelved.append(opts["name"])

    if abortf or continuef:
        if abortf and continuef:
            raise error.Abort(_('cannot use both abort and continue'))
        if shelved:
            raise error.Abort(_('cannot combine abort/continue with '
                                'naming a shelved change'))
        if abortf and opts.get('tool', False):
            ui.warn(_('tool option will be ignored\n'))

        try:
            state = shelvedstate.load(repo)
            if opts.get('keep') is None:
                opts['keep'] = state.keep
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
            cmdutil.wrongtooltocontinue(repo, _('unshelve'))
        except error.CorruptedState as err:
            ui.debug(str(err) + '\n')
            if continuef:
                msg = _('corrupted shelved state file')
                hint = _('please run hg unshelve --abort to abort unshelve '
                         'operation')
                raise error.Abort(msg, hint=hint)
            elif abortf:
                msg = _('could not read shelved state file, your working copy '
                        'may be in an unexpected state\nplease update to some '
                        'commit\n')
                ui.warn(msg)
                shelvedstate.clear(repo)
            return

    if abortf:
        return unshelveabort(ui, repo, state, opts)
    elif continuef:
        return unshelvecontinue(ui, repo, state, opts)
    elif len(shelved) > 1:
        raise error.Abort(_('can only unshelve one change at a time'))
    elif not shelved:
        shelved = listshelves(repo)
        if not shelved:
            raise error.Abort(_('no shelved changes to apply!'))
        basename = util.split(shelved[0][1])[1]
        ui.status(_("unshelving change '%s'\n") % basename)
    else:
897 basename = shelved[0]
901 basename = shelved[0]
898
902
899 if not shelvedfile(repo, basename, patchextension).exists():
903 if not shelvedfile(repo, basename, patchextension).exists():
900 raise error.Abort(_("shelved change '%s' not found") % basename)
904 raise error.Abort(_("shelved change '%s' not found") % basename)
901
905
902 lock = tr = None
906 lock = tr = None
903 try:
907 try:
904 lock = repo.lock()
908 lock = repo.lock()
905 tr = repo.transaction('unshelve', report=lambda x: None)
909 tr = repo.transaction('unshelve', report=lambda x: None)
906 oldtiprev = len(repo)
910 oldtiprev = len(repo)
907
911
908 pctx = repo['.']
912 pctx = repo['.']
909 tmpwctx = pctx
913 tmpwctx = pctx
910 # The goal is to have a commit structure like so:
914 # The goal is to have a commit structure like so:
911 # ...-> pctx -> tmpwctx -> shelvectx
915 # ...-> pctx -> tmpwctx -> shelvectx
912 # where tmpwctx is an optional commit with the user's pending changes
916 # where tmpwctx is an optional commit with the user's pending changes
913 # and shelvectx is the unshelved changes. Then we merge it all down
917 # and shelvectx is the unshelved changes. Then we merge it all down
914 # to the original pctx.
918 # to the original pctx.
915
919
916 activebookmark = _backupactivebookmark(repo)
920 activebookmark = _backupactivebookmark(repo)
917 overrides = {('ui', 'forcemerge'): opts.get('tool', '')}
921 overrides = {('ui', 'forcemerge'): opts.get('tool', '')}
918 with ui.configoverride(overrides, 'unshelve'):
922 with ui.configoverride(overrides, 'unshelve'):
919 tmpwctx, addedbefore = _commitworkingcopychanges(ui, repo, opts,
923 tmpwctx, addedbefore = _commitworkingcopychanges(ui, repo, opts,
920 tmpwctx)
924 tmpwctx)
921 repo, shelvectx = _unshelverestorecommit(ui, repo, basename)
925 repo, shelvectx = _unshelverestorecommit(ui, repo, basename)
922 _checkunshelveuntrackedproblems(ui, repo, shelvectx)
926 _checkunshelveuntrackedproblems(ui, repo, shelvectx)
923 branchtorestore = ''
927 branchtorestore = ''
924 if shelvectx.branch() != shelvectx.p1().branch():
928 if shelvectx.branch() != shelvectx.p1().branch():
925 branchtorestore = shelvectx.branch()
929 branchtorestore = shelvectx.branch()
926
930
927 shelvectx = _rebaserestoredcommit(ui, repo, opts, tr, oldtiprev,
931 shelvectx = _rebaserestoredcommit(ui, repo, opts, tr, oldtiprev,
928 basename, pctx, tmpwctx,
932 basename, pctx, tmpwctx,
929 shelvectx, branchtorestore,
933 shelvectx, branchtorestore,
930 activebookmark)
934 activebookmark)
931 mergefiles(ui, repo, pctx, shelvectx)
935 mergefiles(ui, repo, pctx, shelvectx)
932 restorebranch(ui, repo, branchtorestore)
936 restorebranch(ui, repo, branchtorestore)
933 _forgetunknownfiles(repo, shelvectx, addedbefore)
937 _forgetunknownfiles(repo, shelvectx, addedbefore)
934
938
935 shelvedstate.clear(repo)
939 shelvedstate.clear(repo)
936 _finishunshelve(repo, oldtiprev, tr, activebookmark)
940 _finishunshelve(repo, oldtiprev, tr, activebookmark)
937 unshelvecleanup(ui, repo, basename, opts)
941 unshelvecleanup(ui, repo, basename, opts)
938 finally:
942 finally:
939 if tr:
943 if tr:
940 tr.release()
944 tr.release()
941 lockmod.release(lock)
945 lockmod.release(lock)
942
946
943 @command('shelve',
947 @command('shelve',
944 [('A', 'addremove', None,
948 [('A', 'addremove', None,
945 _('mark new/missing files as added/removed before shelving')),
949 _('mark new/missing files as added/removed before shelving')),
946 ('u', 'unknown', None,
950 ('u', 'unknown', None,
947 _('store unknown files in the shelve')),
951 _('store unknown files in the shelve')),
948 ('', 'cleanup', None,
952 ('', 'cleanup', None,
949 _('delete all shelved changes')),
953 _('delete all shelved changes')),
950 ('', 'date', '',
954 ('', 'date', '',
951 _('shelve with the specified commit date'), _('DATE')),
955 _('shelve with the specified commit date'), _('DATE')),
952 ('d', 'delete', None,
956 ('d', 'delete', None,
953 _('delete the named shelved change(s)')),
957 _('delete the named shelved change(s)')),
954 ('e', 'edit', False,
958 ('e', 'edit', False,
955 _('invoke editor on commit messages')),
959 _('invoke editor on commit messages')),
956 ('l', 'list', None,
960 ('l', 'list', None,
957 _('list current shelves')),
961 _('list current shelves')),
958 ('m', 'message', '',
962 ('m', 'message', '',
959 _('use text as shelve message'), _('TEXT')),
963 _('use text as shelve message'), _('TEXT')),
960 ('n', 'name', '',
964 ('n', 'name', '',
961 _('use the given name for the shelved commit'), _('NAME')),
965 _('use the given name for the shelved commit'), _('NAME')),
962 ('p', 'patch', None,
966 ('p', 'patch', None,
963 _('show patch')),
967 _('show patch')),
964 ('i', 'interactive', None,
968 ('i', 'interactive', None,
965 _('interactive mode, only works while creating a shelve')),
969 _('interactive mode, only works while creating a shelve')),
966 ('', 'stat', None,
970 ('', 'stat', None,
967 _('output diffstat-style summary of changes'))] + cmdutil.walkopts,
971 _('output diffstat-style summary of changes'))] + cmdutil.walkopts,
968 _('hg shelve [OPTION]... [FILE]...'))
972 _('hg shelve [OPTION]... [FILE]...'))
969 def shelvecmd(ui, repo, *pats, **opts):
973 def shelvecmd(ui, repo, *pats, **opts):
970 '''save and set aside changes from the working directory
974 '''save and set aside changes from the working directory
971
975
972 Shelving takes files that "hg status" reports as not clean, saves
976 Shelving takes files that "hg status" reports as not clean, saves
973 the modifications to a bundle (a shelved change), and reverts the
977 the modifications to a bundle (a shelved change), and reverts the
974 files so that their state in the working directory becomes clean.
978 files so that their state in the working directory becomes clean.
975
979
976 To restore these changes to the working directory, use "hg
980 To restore these changes to the working directory, use "hg
977 unshelve"; this will work even if you switch to a different
981 unshelve"; this will work even if you switch to a different
978 commit.
982 commit.
979
983
980 When no files are specified, "hg shelve" saves all not-clean
984 When no files are specified, "hg shelve" saves all not-clean
981 files. If specific files or directories are named, only changes to
985 files. If specific files or directories are named, only changes to
982 those files are shelved.
986 those files are shelved.
983
987
984 In a bare shelve (when no files are specified, and the interactive,
988 In a bare shelve (when no files are specified, and the interactive,
985 include, and exclude options are not used), shelving remembers whether
989 include, and exclude options are not used), shelving remembers whether
986 the working directory was on a newly created branch, in other words on
990 the working directory was on a newly created branch, in other words on
987 a different branch than its first parent. In this situation, unshelving
991 a different branch than its first parent. In this situation, unshelving
988 restores branch information to the working directory.
992 restores branch information to the working directory.
989
993
990 Each shelved change has a name that makes it easier to find later.
994 Each shelved change has a name that makes it easier to find later.
991 The name of a shelved change defaults to being based on the active
995 The name of a shelved change defaults to being based on the active
992 bookmark, or if there is no active bookmark, the current named
996 bookmark, or if there is no active bookmark, the current named
993 branch. To specify a different name, use ``--name``.
997 branch. To specify a different name, use ``--name``.
994
998
995 To see a list of existing shelved changes, use the ``--list``
999 To see a list of existing shelved changes, use the ``--list``
996 option. For each shelved change, this will print its name, age,
1000 option. For each shelved change, this will print its name, age,
997 and description; use ``--patch`` or ``--stat`` for more details.
1001 and description; use ``--patch`` or ``--stat`` for more details.
998
1002
999 To delete specific shelved changes, use ``--delete``. To delete
1003 To delete specific shelved changes, use ``--delete``. To delete
1000 all shelved changes, use ``--cleanup``.
1004 all shelved changes, use ``--cleanup``.
1001 '''
1005 '''
1002 allowables = [
1006 allowables = [
1003 ('addremove', {'create'}), # 'create' is pseudo action
1007 ('addremove', {'create'}), # 'create' is pseudo action
1004 ('unknown', {'create'}),
1008 ('unknown', {'create'}),
1005 ('cleanup', {'cleanup'}),
1009 ('cleanup', {'cleanup'}),
1006 # ('date', {'create'}), # ignored for passing '--date "0 0"' in tests
1010 # ('date', {'create'}), # ignored for passing '--date "0 0"' in tests
1007 ('delete', {'delete'}),
1011 ('delete', {'delete'}),
1008 ('edit', {'create'}),
1012 ('edit', {'create'}),
1009 ('list', {'list'}),
1013 ('list', {'list'}),
1010 ('message', {'create'}),
1014 ('message', {'create'}),
1011 ('name', {'create'}),
1015 ('name', {'create'}),
1012 ('patch', {'patch', 'list'}),
1016 ('patch', {'patch', 'list'}),
1013 ('stat', {'stat', 'list'}),
1017 ('stat', {'stat', 'list'}),
1014 ]
1018 ]
1015 def checkopt(opt):
1019 def checkopt(opt):
1016 if opts.get(opt):
1020 if opts.get(opt):
1017 for i, allowable in allowables:
1021 for i, allowable in allowables:
1018 if opts[i] and opt not in allowable:
1022 if opts[i] and opt not in allowable:
1019 raise error.Abort(_("options '--%s' and '--%s' may not be "
1023 raise error.Abort(_("options '--%s' and '--%s' may not be "
1020 "used together") % (opt, i))
1024 "used together") % (opt, i))
1021 return True
1025 return True
1022 if checkopt('cleanup'):
1026 if checkopt('cleanup'):
1023 if pats:
1027 if pats:
1024 raise error.Abort(_("cannot specify names when using '--cleanup'"))
1028 raise error.Abort(_("cannot specify names when using '--cleanup'"))
1025 return cleanupcmd(ui, repo)
1029 return cleanupcmd(ui, repo)
1026 elif checkopt('delete'):
1030 elif checkopt('delete'):
1027 return deletecmd(ui, repo, pats)
1031 return deletecmd(ui, repo, pats)
1028 elif checkopt('list'):
1032 elif checkopt('list'):
1029 return listcmd(ui, repo, pats, opts)
1033 return listcmd(ui, repo, pats, opts)
1030 elif checkopt('patch'):
1034 elif checkopt('patch'):
1031 return patchcmds(ui, repo, pats, opts, subcommand='patch')
1035 return patchcmds(ui, repo, pats, opts, subcommand='patch')
1032 elif checkopt('stat'):
1036 elif checkopt('stat'):
1033 return patchcmds(ui, repo, pats, opts, subcommand='stat')
1037 return patchcmds(ui, repo, pats, opts, subcommand='stat')
1034 else:
1038 else:
1035 return createcmd(ui, repo, pats, opts)
1039 return createcmd(ui, repo, pats, opts)
1036
1040
1037 def extsetup(ui):
1041 def extsetup(ui):
1038 cmdutil.unfinishedstates.append(
1042 cmdutil.unfinishedstates.append(
1039 [shelvedstate._filename, False, False,
1043 [shelvedstate._filename, False, False,
1040 _('unshelve already in progress'),
1044 _('unshelve already in progress'),
1041 _("use 'hg unshelve --continue' or 'hg unshelve --abort'")])
1045 _("use 'hg unshelve --continue' or 'hg unshelve --abort'")])
1042 cmdutil.afterresolvedstates.append(
1046 cmdutil.afterresolvedstates.append(
1043 [shelvedstate._filename, _('hg unshelve --continue')])
1047 [shelvedstate._filename, _('hg unshelve --continue')])
@@ -1,1030 +1,1016
1 # changegroup.py - Mercurial changegroup manipulation functions
1 # changegroup.py - Mercurial changegroup manipulation functions
2 #
2 #
3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import os
10 import os
11 import struct
11 import struct
12 import tempfile
12 import tempfile
13 import weakref
13 import weakref
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 hex,
17 hex,
18 nullrev,
18 nullrev,
19 short,
19 short,
20 )
20 )
21
21
22 from . import (
22 from . import (
23 dagutil,
23 dagutil,
24 discovery,
24 discovery,
25 error,
25 error,
26 mdiff,
26 mdiff,
27 phases,
27 phases,
28 pycompat,
28 pycompat,
29 util,
29 util,
30 )
30 )
31
31
32 _CHANGEGROUPV1_DELTA_HEADER = "20s20s20s20s"
32 _CHANGEGROUPV1_DELTA_HEADER = "20s20s20s20s"
33 _CHANGEGROUPV2_DELTA_HEADER = "20s20s20s20s20s"
33 _CHANGEGROUPV2_DELTA_HEADER = "20s20s20s20s20s"
34 _CHANGEGROUPV3_DELTA_HEADER = ">20s20s20s20s20sH"
34 _CHANGEGROUPV3_DELTA_HEADER = ">20s20s20s20s20sH"
35
35
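# Worked example of the delta header formats above (throwaway values,
# for illustration only): a cg2 header is five 20-byte binary nodes --
# node, p1, p2, deltabase, and the linknode (cs) -- packed back to back,
# 100 bytes in total; the cg3 format appends a big-endian 16-bit flags
# field.
import struct
_example = struct.pack(_CHANGEGROUPV2_DELTA_HEADER,
                       '\x11' * 20, '\x22' * 20, '\x33' * 20,
                       '\x44' * 20, '\x55' * 20)
assert len(_example) == struct.calcsize(_CHANGEGROUPV2_DELTA_HEADER) == 100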
36 def readexactly(stream, n):
36 def readexactly(stream, n):
37 '''read n bytes from stream.read and abort if less was available'''
37 '''read n bytes from stream.read and abort if less was available'''
38 s = stream.read(n)
38 s = stream.read(n)
39 if len(s) < n:
39 if len(s) < n:
40 raise error.Abort(_("stream ended unexpectedly"
40 raise error.Abort(_("stream ended unexpectedly"
41 " (got %d bytes, expected %d)")
41 " (got %d bytes, expected %d)")
42 % (len(s), n))
42 % (len(s), n))
43 return s
43 return s
44
44
45 def getchunk(stream):
45 def getchunk(stream):
46 """return the next chunk from stream as a string"""
46 """return the next chunk from stream as a string"""
47 d = readexactly(stream, 4)
47 d = readexactly(stream, 4)
48 l = struct.unpack(">l", d)[0]
48 l = struct.unpack(">l", d)[0]
49 if l <= 4:
49 if l <= 4:
50 if l:
50 if l:
51 raise error.Abort(_("invalid chunk length %d") % l)
51 raise error.Abort(_("invalid chunk length %d") % l)
52 return ""
52 return ""
53 return readexactly(stream, l - 4)
53 return readexactly(stream, l - 4)
54
54
55 def chunkheader(length):
55 def chunkheader(length):
56 """return a changegroup chunk header (string)"""
56 """return a changegroup chunk header (string)"""
57 return struct.pack(">l", length + 4)
57 return struct.pack(">l", length + 4)
58
58
59 def closechunk():
59 def closechunk():
60 """return a changegroup chunk header (string) for a zero-length chunk"""
60 """return a changegroup chunk header (string) for a zero-length chunk"""
61 return struct.pack(">l", 0)
61 return struct.pack(">l", 0)
62
62
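# Self-contained round trip through the framing helpers above: a chunk is
# its payload prefixed by a 4-byte big-endian length that counts the four
# header bytes themselves, and a stored length <= 4 terminates a sequence.
import io
_buf = io.BytesIO(chunkheader(5) + b'hello' + closechunk())
assert getchunk(_buf) == b'hello'
assert getchunk(_buf) == b''   # the zero-length close chunk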
63 def writechunks(ui, chunks, filename, vfs=None):
63 def writechunks(ui, chunks, filename, vfs=None):
64 """Write chunks to a file and return its filename.
64 """Write chunks to a file and return its filename.
65
65
66 The stream is assumed to be a bundle file.
66 The stream is assumed to be a bundle file.
67 Existing files will not be overwritten.
67 Existing files will not be overwritten.
68 If no filename is specified, a temporary file is created.
68 If no filename is specified, a temporary file is created.
69 """
69 """
70 fh = None
70 fh = None
71 cleanup = None
71 cleanup = None
72 try:
72 try:
73 if filename:
73 if filename:
74 if vfs:
74 if vfs:
75 fh = vfs.open(filename, "wb")
75 fh = vfs.open(filename, "wb")
76 else:
76 else:
77 # Increase default buffer size because default is usually
77 # Increase default buffer size because default is usually
78 # small (4k is common on Linux).
78 # small (4k is common on Linux).
79 fh = open(filename, "wb", 131072)
79 fh = open(filename, "wb", 131072)
80 else:
80 else:
81 fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
81 fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
82 fh = os.fdopen(fd, pycompat.sysstr("wb"))
82 fh = os.fdopen(fd, pycompat.sysstr("wb"))
83 cleanup = filename
83 cleanup = filename
84 for c in chunks:
84 for c in chunks:
85 fh.write(c)
85 fh.write(c)
86 cleanup = None
86 cleanup = None
87 return filename
87 return filename
88 finally:
88 finally:
89 if fh is not None:
89 if fh is not None:
90 fh.close()
90 fh.close()
91 if cleanup is not None:
91 if cleanup is not None:
92 if filename and vfs:
92 if filename and vfs:
93 vfs.unlink(cleanup)
93 vfs.unlink(cleanup)
94 else:
94 else:
95 os.unlink(cleanup)
95 os.unlink(cleanup)
96
96
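# Minimal usage sketch of writechunks() above: the ui argument is unused
# on the code paths shown, so None suffices for illustration; passing
# filename=None spools the chunks to a tempfile and returns its path.
import os
_name = writechunks(None, [chunkheader(5), b'hello', closechunk()], None)
os.unlink(_name)   # the caller owns the returned file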
97 class cg1unpacker(object):
97 class cg1unpacker(object):
98 """Unpacker for cg1 changegroup streams.
98 """Unpacker for cg1 changegroup streams.
99
99
100 A changegroup unpacker handles the framing of the revision data in
100 A changegroup unpacker handles the framing of the revision data in
101 the wire format. Most consumers will want to use the apply()
101 the wire format. Most consumers will want to use the apply()
102 method to add the changes from the changegroup to a repository.
102 method to add the changes from the changegroup to a repository.
103
103
104 If you're forwarding a changegroup unmodified to another consumer,
104 If you're forwarding a changegroup unmodified to another consumer,
105 use getchunks(), which returns an iterator of changegroup
105 use getchunks(), which returns an iterator of changegroup
106 chunks. This is mostly useful for cases where you need to know the
106 chunks. This is mostly useful for cases where you need to know the
107 data stream has ended by observing the end of the changegroup.
107 data stream has ended by observing the end of the changegroup.
108
108
109 deltachunk() is useful only if you're applying delta data. Most
109 deltachunk() is useful only if you're applying delta data. Most
110 consumers should prefer apply() instead.
110 consumers should prefer apply() instead.
111
111
112 A few other public methods exist. Those are used only for
112 A few other public methods exist. Those are used only for
113 bundlerepo and some debug commands - their use is discouraged.
113 bundlerepo and some debug commands - their use is discouraged.
114 """
114 """
115 deltaheader = _CHANGEGROUPV1_DELTA_HEADER
115 deltaheader = _CHANGEGROUPV1_DELTA_HEADER
116 deltaheadersize = struct.calcsize(deltaheader)
116 deltaheadersize = struct.calcsize(deltaheader)
117 version = '01'
117 version = '01'
118 _grouplistcount = 1 # One list of files after the manifests
118 _grouplistcount = 1 # One list of files after the manifests
119
119
120 def __init__(self, fh, alg, extras=None):
120 def __init__(self, fh, alg, extras=None):
121 if alg is None:
121 if alg is None:
122 alg = 'UN'
122 alg = 'UN'
123 if alg not in util.compengines.supportedbundletypes:
123 if alg not in util.compengines.supportedbundletypes:
124 raise error.Abort(_('unknown stream compression type: %s')
124 raise error.Abort(_('unknown stream compression type: %s')
125 % alg)
125 % alg)
126 if alg == 'BZ':
126 if alg == 'BZ':
127 alg = '_truncatedBZ'
127 alg = '_truncatedBZ'
128
128
129 compengine = util.compengines.forbundletype(alg)
129 compengine = util.compengines.forbundletype(alg)
130 self._stream = compengine.decompressorreader(fh)
130 self._stream = compengine.decompressorreader(fh)
131 self._type = alg
131 self._type = alg
132 self.extras = extras or {}
132 self.extras = extras or {}
133 self.callback = None
133 self.callback = None
134
134
135 # These methods (compressed, read, seek, tell) all appear to only
135 # These methods (compressed, read, seek, tell) all appear to only
136 # be used by bundlerepo, but it's a little hard to tell.
136 # be used by bundlerepo, but it's a little hard to tell.
137 def compressed(self):
137 def compressed(self):
138 return self._type is not None and self._type != 'UN'
138 return self._type is not None and self._type != 'UN'
139 def read(self, l):
139 def read(self, l):
140 return self._stream.read(l)
140 return self._stream.read(l)
141 def seek(self, pos):
141 def seek(self, pos):
142 return self._stream.seek(pos)
142 return self._stream.seek(pos)
143 def tell(self):
143 def tell(self):
144 return self._stream.tell()
144 return self._stream.tell()
145 def close(self):
145 def close(self):
146 return self._stream.close()
146 return self._stream.close()
147
147
148 def _chunklength(self):
148 def _chunklength(self):
149 d = readexactly(self._stream, 4)
149 d = readexactly(self._stream, 4)
150 l = struct.unpack(">l", d)[0]
150 l = struct.unpack(">l", d)[0]
151 if l <= 4:
151 if l <= 4:
152 if l:
152 if l:
153 raise error.Abort(_("invalid chunk length %d") % l)
153 raise error.Abort(_("invalid chunk length %d") % l)
154 return 0
154 return 0
155 if self.callback:
155 if self.callback:
156 self.callback()
156 self.callback()
157 return l - 4
157 return l - 4
158
158
159 def changelogheader(self):
159 def changelogheader(self):
160 """v10 does not have a changelog header chunk"""
160 """v10 does not have a changelog header chunk"""
161 return {}
161 return {}
162
162
163 def manifestheader(self):
163 def manifestheader(self):
164 """v10 does not have a manifest header chunk"""
164 """v10 does not have a manifest header chunk"""
165 return {}
165 return {}
166
166
167 def filelogheader(self):
167 def filelogheader(self):
168 """return the header of the filelogs chunk, v10 only has the filename"""
168 """return the header of the filelogs chunk, v10 only has the filename"""
169 l = self._chunklength()
169 l = self._chunklength()
170 if not l:
170 if not l:
171 return {}
171 return {}
172 fname = readexactly(self._stream, l)
172 fname = readexactly(self._stream, l)
173 return {'filename': fname}
173 return {'filename': fname}
174
174
175 def _deltaheader(self, headertuple, prevnode):
175 def _deltaheader(self, headertuple, prevnode):
176 node, p1, p2, cs = headertuple
176 node, p1, p2, cs = headertuple
177 if prevnode is None:
177 if prevnode is None:
178 deltabase = p1
178 deltabase = p1
179 else:
179 else:
180 deltabase = prevnode
180 deltabase = prevnode
181 flags = 0
181 flags = 0
182 return node, p1, p2, deltabase, cs, flags
182 return node, p1, p2, deltabase, cs, flags
183
183
184 def deltachunk(self, prevnode):
184 def deltachunk(self, prevnode):
185 l = self._chunklength()
185 l = self._chunklength()
186 if not l:
186 if not l:
187 return {}
187 return {}
188 headerdata = readexactly(self._stream, self.deltaheadersize)
188 headerdata = readexactly(self._stream, self.deltaheadersize)
189 header = struct.unpack(self.deltaheader, headerdata)
189 header = struct.unpack(self.deltaheader, headerdata)
190 delta = readexactly(self._stream, l - self.deltaheadersize)
190 delta = readexactly(self._stream, l - self.deltaheadersize)
191 node, p1, p2, deltabase, cs, flags = self._deltaheader(header, prevnode)
191 node, p1, p2, deltabase, cs, flags = self._deltaheader(header, prevnode)
192 return {'node': node, 'p1': p1, 'p2': p2, 'cs': cs,
192 return {'node': node, 'p1': p1, 'p2': p2, 'cs': cs,
193 'deltabase': deltabase, 'delta': delta, 'flags': flags}
193 'deltabase': deltabase, 'delta': delta, 'flags': flags}
194
194
195 def getchunks(self):
195 def getchunks(self):
196 """returns all the chunks contains in the bundle
196 """returns all the chunks contains in the bundle
197
197
198 Used when you need to forward the binary stream to a file or another
198 Used when you need to forward the binary stream to a file or another
199 network API. To do so, it parses the changegroup data; otherwise it
199 network API. To do so, it parses the changegroup data; otherwise it
200 would block in the sshrepo case because it doesn't know where the stream ends.
200 would block in the sshrepo case because it doesn't know where the stream ends.
201 """
201 """
202 # For changegroup 1 and 2, we expect 3 parts: changelog, manifestlog,
202 # For changegroup 1 and 2, we expect 3 parts: changelog, manifestlog,
203 # and a list of filelogs. For changegroup 3, we expect 4 parts:
203 # and a list of filelogs. For changegroup 3, we expect 4 parts:
204 # changelog, manifestlog, a list of tree manifestlogs, and a list of
204 # changelog, manifestlog, a list of tree manifestlogs, and a list of
205 # filelogs.
205 # filelogs.
206 #
206 #
207 # Changelog and manifestlog parts are terminated with empty chunks. The
207 # Changelog and manifestlog parts are terminated with empty chunks. The
208 # tree and file parts are a list of entry sections. Each entry section
208 # tree and file parts are a list of entry sections. Each entry section
209 # is a series of chunks terminating in an empty chunk. The list of these
209 # is a series of chunks terminating in an empty chunk. The list of these
210 # entry sections is terminated in yet another empty chunk, so we know
210 # entry sections is terminated in yet another empty chunk, so we know
211 # we've reached the end of the tree/file list when we reach an empty
211 # we've reached the end of the tree/file list when we reach an empty
212 # chunk that was preceded by no non-empty chunks.
212 # chunk that was preceded by no non-empty chunks.
213
213
214 parts = 0
214 parts = 0
215 while parts < 2 + self._grouplistcount:
215 while parts < 2 + self._grouplistcount:
216 noentries = True
216 noentries = True
217 while True:
217 while True:
218 chunk = getchunk(self)
218 chunk = getchunk(self)
219 if not chunk:
219 if not chunk:
220 # The first two empty chunks represent the end of the
220 # The first two empty chunks represent the end of the
221 # changelog and the manifestlog portions. The remaining
221 # changelog and the manifestlog portions. The remaining
222 # empty chunks represent either A) the end of individual
222 # empty chunks represent either A) the end of individual
223 # tree or file entries in the file list, or B) the end of
223 # tree or file entries in the file list, or B) the end of
224 # the entire list. It's the end of the entire list if there
224 # the entire list. It's the end of the entire list if there
225 # were no entries (i.e. noentries is True).
225 # were no entries (i.e. noentries is True).
226 if parts < 2:
226 if parts < 2:
227 parts += 1
227 parts += 1
228 elif noentries:
228 elif noentries:
229 parts += 1
229 parts += 1
230 break
230 break
231 noentries = False
231 noentries = False
232 yield chunkheader(len(chunk))
232 yield chunkheader(len(chunk))
233 pos = 0
233 pos = 0
234 while pos < len(chunk):
234 while pos < len(chunk):
235 next = pos + 2**20
235 next = pos + 2**20
236 yield chunk[pos:next]
236 yield chunk[pos:next]
237 pos = next
237 pos = next
238 yield closechunk()
238 yield closechunk()
239
239
240 def _unpackmanifests(self, repo, revmap, trp, prog, numchanges):
240 def _unpackmanifests(self, repo, revmap, trp, prog, numchanges):
241 # We know that we'll never have more manifests than we had
241 # We know that we'll never have more manifests than we had
242 # changesets.
242 # changesets.
243 self.callback = prog(_('manifests'), numchanges)
243 self.callback = prog(_('manifests'), numchanges)
244 # no need to check for empty manifest group here:
244 # no need to check for empty manifest group here:
245 # if the result of the merge of 1 and 2 is the same in 3 and 4,
245 # if the result of the merge of 1 and 2 is the same in 3 and 4,
246 # no new manifest will be created and the manifest group will
246 # no new manifest will be created and the manifest group will
247 # be empty during the pull
247 # be empty during the pull
248 self.manifestheader()
248 self.manifestheader()
249 repo.manifestlog._revlog.addgroup(self, revmap, trp)
249 repo.manifestlog._revlog.addgroup(self, revmap, trp)
250 repo.ui.progress(_('manifests'), None)
250 repo.ui.progress(_('manifests'), None)
251 self.callback = None
251 self.callback = None
252
252
253 def apply(self, repo, tr, srctype, url, targetphase=phases.draft,
253 def apply(self, repo, tr, srctype, url, targetphase=phases.draft,
254 expectedtotal=None):
254 expectedtotal=None):
255 """Add the changegroup returned by source.read() to this repo.
255 """Add the changegroup returned by source.read() to this repo.
256 srctype is a string like 'push', 'pull', or 'unbundle'. url is
256 srctype is a string like 'push', 'pull', or 'unbundle'. url is
257 the URL of the repo where this changegroup is coming from.
257 the URL of the repo where this changegroup is coming from.
258
258
259 Return an integer summarizing the change to this repo:
259 Return an integer summarizing the change to this repo:
260 - nothing changed or no source: 0
260 - nothing changed or no source: 0
261 - more heads than before: 1+added heads (2..n)
261 - more heads than before: 1+added heads (2..n)
262 - fewer heads than before: -1-removed heads (-2..-n)
262 - fewer heads than before: -1-removed heads (-2..-n)
263 - number of heads stays the same: 1
263 - number of heads stays the same: 1
264 """
264 """
265 repo = repo.unfiltered()
265 repo = repo.unfiltered()
266 def csmap(x):
266 def csmap(x):
267 repo.ui.debug("add changeset %s\n" % short(x))
267 repo.ui.debug("add changeset %s\n" % short(x))
268 return len(cl)
268 return len(cl)
269
269
270 def revmap(x):
270 def revmap(x):
271 return cl.rev(x)
271 return cl.rev(x)
272
272
273 changesets = files = revisions = 0
273 changesets = files = revisions = 0
274
274
275 try:
275 try:
276 # The transaction may already carry source information. In this
276 # The transaction may already carry source information. In this
277 # case we use the top level data. We overwrite the argument
277 # case we use the top level data. We overwrite the argument
278 # because we need to use the top level value (if they exist)
278 # because we need to use the top level value (if they exist)
279 # in this function.
279 # in this function.
280 srctype = tr.hookargs.setdefault('source', srctype)
280 srctype = tr.hookargs.setdefault('source', srctype)
281 url = tr.hookargs.setdefault('url', url)
281 url = tr.hookargs.setdefault('url', url)
282 repo.hook('prechangegroup',
282 repo.hook('prechangegroup',
283 throw=True, **pycompat.strkwargs(tr.hookargs))
283 throw=True, **pycompat.strkwargs(tr.hookargs))
284
284
285 # write changelog data to temp files so concurrent readers
285 # write changelog data to temp files so concurrent readers
286 # will not see an inconsistent view
286 # will not see an inconsistent view
287 cl = repo.changelog
287 cl = repo.changelog
288 cl.delayupdate(tr)
288 cl.delayupdate(tr)
289 oldheads = set(cl.heads())
289 oldheads = set(cl.heads())
290
290
291 trp = weakref.proxy(tr)
291 trp = weakref.proxy(tr)
292 # pull off the changeset group
292 # pull off the changeset group
293 repo.ui.status(_("adding changesets\n"))
293 repo.ui.status(_("adding changesets\n"))
294 clstart = len(cl)
294 clstart = len(cl)
295 class prog(object):
295 class prog(object):
296 def __init__(self, step, total):
296 def __init__(self, step, total):
297 self._step = step
297 self._step = step
298 self._total = total
298 self._total = total
299 self._count = 1
299 self._count = 1
300 def __call__(self):
300 def __call__(self):
301 repo.ui.progress(self._step, self._count, unit=_('chunks'),
301 repo.ui.progress(self._step, self._count, unit=_('chunks'),
302 total=self._total)
302 total=self._total)
303 self._count += 1
303 self._count += 1
304 self.callback = prog(_('changesets'), expectedtotal)
304 self.callback = prog(_('changesets'), expectedtotal)
305
305
306 efiles = set()
306 efiles = set()
307 def onchangelog(cl, node):
307 def onchangelog(cl, node):
308 efiles.update(cl.readfiles(node))
308 efiles.update(cl.readfiles(node))
309
309
310 self.changelogheader()
310 self.changelogheader()
311 cgnodes = cl.addgroup(self, csmap, trp, addrevisioncb=onchangelog)
311 cgnodes = cl.addgroup(self, csmap, trp, addrevisioncb=onchangelog)
312 efiles = len(efiles)
312 efiles = len(efiles)
313
313
314 if not cgnodes:
314 if not cgnodes:
315 repo.ui.develwarn('applied empty changegroup',
315 repo.ui.develwarn('applied empty changegroup',
316 config='empty-changegroup')
316 config='empty-changegroup')
317 clend = len(cl)
317 clend = len(cl)
318 changesets = clend - clstart
318 changesets = clend - clstart
319 repo.ui.progress(_('changesets'), None)
319 repo.ui.progress(_('changesets'), None)
320 self.callback = None
320 self.callback = None
321
321
322 # pull off the manifest group
322 # pull off the manifest group
323 repo.ui.status(_("adding manifests\n"))
323 repo.ui.status(_("adding manifests\n"))
324 self._unpackmanifests(repo, revmap, trp, prog, changesets)
324 self._unpackmanifests(repo, revmap, trp, prog, changesets)
325
325
326 needfiles = {}
326 needfiles = {}
327 if repo.ui.configbool('server', 'validate'):
327 if repo.ui.configbool('server', 'validate'):
328 cl = repo.changelog
328 cl = repo.changelog
329 ml = repo.manifestlog
329 ml = repo.manifestlog
330 # validate incoming csets have their manifests
330 # validate incoming csets have their manifests
331 for cset in xrange(clstart, clend):
331 for cset in xrange(clstart, clend):
332 mfnode = cl.changelogrevision(cset).manifest
332 mfnode = cl.changelogrevision(cset).manifest
333 mfest = ml[mfnode].readdelta()
333 mfest = ml[mfnode].readdelta()
334 # store file cgnodes we must see
334 # store file cgnodes we must see
335 for f, n in mfest.iteritems():
335 for f, n in mfest.iteritems():
336 needfiles.setdefault(f, set()).add(n)
336 needfiles.setdefault(f, set()).add(n)
337
337
338 # process the files
338 # process the files
339 repo.ui.status(_("adding file changes\n"))
339 repo.ui.status(_("adding file changes\n"))
340 newrevs, newfiles = _addchangegroupfiles(
340 newrevs, newfiles = _addchangegroupfiles(
341 repo, self, revmap, trp, efiles, needfiles)
341 repo, self, revmap, trp, efiles, needfiles)
342 revisions += newrevs
342 revisions += newrevs
343 files += newfiles
343 files += newfiles
344
344
345 deltaheads = 0
345 deltaheads = 0
346 if oldheads:
346 if oldheads:
347 heads = cl.heads()
347 heads = cl.heads()
348 deltaheads = len(heads) - len(oldheads)
348 deltaheads = len(heads) - len(oldheads)
349 for h in heads:
349 for h in heads:
350 if h not in oldheads and repo[h].closesbranch():
350 if h not in oldheads and repo[h].closesbranch():
351 deltaheads -= 1
351 deltaheads -= 1
352 htext = ""
352 htext = ""
353 if deltaheads:
353 if deltaheads:
354 htext = _(" (%+d heads)") % deltaheads
354 htext = _(" (%+d heads)") % deltaheads
355
355
356 repo.ui.status(_("added %d changesets"
356 repo.ui.status(_("added %d changesets"
357 " with %d changes to %d files%s\n")
357 " with %d changes to %d files%s\n")
358 % (changesets, revisions, files, htext))
358 % (changesets, revisions, files, htext))
359 repo.invalidatevolatilesets()
359 repo.invalidatevolatilesets()
360
360
361 if changesets > 0:
361 if changesets > 0:
362 if 'node' not in tr.hookargs:
362 if 'node' not in tr.hookargs:
363 tr.hookargs['node'] = hex(cl.node(clstart))
363 tr.hookargs['node'] = hex(cl.node(clstart))
364 tr.hookargs['node_last'] = hex(cl.node(clend - 1))
364 tr.hookargs['node_last'] = hex(cl.node(clend - 1))
365 hookargs = dict(tr.hookargs)
365 hookargs = dict(tr.hookargs)
366 else:
366 else:
367 hookargs = dict(tr.hookargs)
367 hookargs = dict(tr.hookargs)
368 hookargs['node'] = hex(cl.node(clstart))
368 hookargs['node'] = hex(cl.node(clstart))
369 hookargs['node_last'] = hex(cl.node(clend - 1))
369 hookargs['node_last'] = hex(cl.node(clend - 1))
370 repo.hook('pretxnchangegroup',
370 repo.hook('pretxnchangegroup',
371 throw=True, **pycompat.strkwargs(hookargs))
371 throw=True, **pycompat.strkwargs(hookargs))
372
372
373 added = [cl.node(r) for r in xrange(clstart, clend)]
373 added = [cl.node(r) for r in xrange(clstart, clend)]
374 phaseall = None
374 phaseall = None
375 if srctype in ('push', 'serve'):
375 if srctype in ('push', 'serve'):
376 # Old servers can not push the boundary themselves.
376 # Old servers can not push the boundary themselves.
377 # New servers won't push the boundary if changeset already
377 # New servers won't push the boundary if changeset already
378 # exists locally as secret
378 # exists locally as secret
379 #
379 #
380 # We should not use 'added' here but the list of all changes in
380 # We should not use 'added' here but the list of all changes in
381 # the bundle
381 # the bundle
382 if repo.publishing():
382 if repo.publishing():
383 targetphase = phaseall = phases.public
383 targetphase = phaseall = phases.public
384 else:
384 else:
385 # closer target phase computation
385 # closer target phase computation
386
386
387 # Those changesets have been pushed from the
387 # Those changesets have been pushed from the
388 # outside, their phases are going to be pushed
388 # outside, their phases are going to be pushed
389 # alongside. Therefore `targetphase` is
389 # alongside. Therefore `targetphase` is
390 # ignored.
390 # ignored.
391 targetphase = phaseall = phases.draft
391 targetphase = phaseall = phases.draft
392 if added:
392 if added:
393 phases.registernew(repo, tr, targetphase, added)
393 phases.registernew(repo, tr, targetphase, added)
394 if phaseall is not None:
394 if phaseall is not None:
395 phases.advanceboundary(repo, tr, phaseall, cgnodes)
395 phases.advanceboundary(repo, tr, phaseall, cgnodes)
396
396
397 if changesets > 0:
397 if changesets > 0:
398
398
399 def runhooks():
399 def runhooks():
400 # These hooks run when the lock releases, not when the
400 # These hooks run when the lock releases, not when the
401 # transaction closes. So it's possible for the changelog
401 # transaction closes. So it's possible for the changelog
402 # to have changed since we last saw it.
402 # to have changed since we last saw it.
403 if clstart >= len(repo):
403 if clstart >= len(repo):
404 return
404 return
405
405
406 repo.hook("changegroup", **pycompat.strkwargs(hookargs))
406 repo.hook("changegroup", **pycompat.strkwargs(hookargs))
407
407
408 for n in added:
408 for n in added:
409 args = hookargs.copy()
409 args = hookargs.copy()
410 args['node'] = hex(n)
410 args['node'] = hex(n)
411 del args['node_last']
411 del args['node_last']
412 repo.hook("incoming", **pycompat.strkwargs(args))
412 repo.hook("incoming", **pycompat.strkwargs(args))
413
413
414 newheads = [h for h in repo.heads()
414 newheads = [h for h in repo.heads()
415 if h not in oldheads]
415 if h not in oldheads]
416 repo.ui.log("incoming",
416 repo.ui.log("incoming",
417 "%s incoming changes - new heads: %s\n",
417 "%s incoming changes - new heads: %s\n",
418 len(added),
418 len(added),
419 ', '.join([hex(c[:6]) for c in newheads]))
419 ', '.join([hex(c[:6]) for c in newheads]))
420
420
421 tr.addpostclose('changegroup-runhooks-%020i' % clstart,
421 tr.addpostclose('changegroup-runhooks-%020i' % clstart,
422 lambda tr: repo._afterlock(runhooks))
422 lambda tr: repo._afterlock(runhooks))
423 finally:
423 finally:
424 repo.ui.flush()
424 repo.ui.flush()
425 # never return 0 here:
425 # never return 0 here:
426 if deltaheads < 0:
426 if deltaheads < 0:
427 ret = deltaheads - 1
427 ret = deltaheads - 1
428 else:
428 else:
429 ret = deltaheads + 1
429 ret = deltaheads + 1
430 return ret
430 return ret
431
431
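# Plain-Python sketch of the return-value convention documented in
# apply() above: the result is never 0 once a changegroup was applied.
def _summarize(deltaheads):
    return deltaheads - 1 if deltaheads < 0 else deltaheads + 1

assert _summarize(0) == 1    # head count unchanged
assert _summarize(2) == 3    # two heads added
assert _summarize(-1) == -2  # one head removed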
432 class cg2unpacker(cg1unpacker):
432 class cg2unpacker(cg1unpacker):
433 """Unpacker for cg2 streams.
433 """Unpacker for cg2 streams.
434
434
435 cg2 streams add support for generaldelta, so the delta header
435 cg2 streams add support for generaldelta, so the delta header
436 format is slightly different. All other features about the data
436 format is slightly different. All other features about the data
437 remain the same.
437 remain the same.
438 """
438 """
439 deltaheader = _CHANGEGROUPV2_DELTA_HEADER
439 deltaheader = _CHANGEGROUPV2_DELTA_HEADER
440 deltaheadersize = struct.calcsize(deltaheader)
440 deltaheadersize = struct.calcsize(deltaheader)
441 version = '02'
441 version = '02'
442
442
443 def _deltaheader(self, headertuple, prevnode):
443 def _deltaheader(self, headertuple, prevnode):
444 node, p1, p2, deltabase, cs = headertuple
444 node, p1, p2, deltabase, cs = headertuple
445 flags = 0
445 flags = 0
446 return node, p1, p2, deltabase, cs, flags
446 return node, p1, p2, deltabase, cs, flags
447
447
448 class cg3unpacker(cg2unpacker):
448 class cg3unpacker(cg2unpacker):
449 """Unpacker for cg3 streams.
449 """Unpacker for cg3 streams.
450
450
451 cg3 streams add support for exchanging treemanifests and revlog
451 cg3 streams add support for exchanging treemanifests and revlog
452 flags. It adds the revlog flags to the delta header and an empty chunk
452 flags. It adds the revlog flags to the delta header and an empty chunk
453 separating manifests and files.
453 separating manifests and files.
454 """
454 """
455 deltaheader = _CHANGEGROUPV3_DELTA_HEADER
455 deltaheader = _CHANGEGROUPV3_DELTA_HEADER
456 deltaheadersize = struct.calcsize(deltaheader)
456 deltaheadersize = struct.calcsize(deltaheader)
457 version = '03'
457 version = '03'
458 _grouplistcount = 2 # One list of manifests and one list of files
458 _grouplistcount = 2 # One list of manifests and one list of files
459
459
460 def _deltaheader(self, headertuple, prevnode):
460 def _deltaheader(self, headertuple, prevnode):
461 node, p1, p2, deltabase, cs, flags = headertuple
461 node, p1, p2, deltabase, cs, flags = headertuple
462 return node, p1, p2, deltabase, cs, flags
462 return node, p1, p2, deltabase, cs, flags
463
463
464 def _unpackmanifests(self, repo, revmap, trp, prog, numchanges):
464 def _unpackmanifests(self, repo, revmap, trp, prog, numchanges):
465 super(cg3unpacker, self)._unpackmanifests(repo, revmap, trp, prog,
465 super(cg3unpacker, self)._unpackmanifests(repo, revmap, trp, prog,
466 numchanges)
466 numchanges)
467 for chunkdata in iter(self.filelogheader, {}):
467 for chunkdata in iter(self.filelogheader, {}):
468 # If we get here, there are directory manifests in the changegroup
468 # If we get here, there are directory manifests in the changegroup
469 d = chunkdata["filename"]
469 d = chunkdata["filename"]
470 repo.ui.debug("adding %s revisions\n" % d)
470 repo.ui.debug("adding %s revisions\n" % d)
471 dirlog = repo.manifestlog._revlog.dirlog(d)
471 dirlog = repo.manifestlog._revlog.dirlog(d)
472 if not dirlog.addgroup(self, revmap, trp):
472 if not dirlog.addgroup(self, revmap, trp):
473 raise error.Abort(_("received dir revlog group is empty"))
473 raise error.Abort(_("received dir revlog group is empty"))
474
474
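# The iter(callable, sentinel) idiom used above keeps calling
# self.filelogheader() until it returns {} (the sentinel). Plain-Python
# equivalent with throwaway data:
_headers = iter([{'filename': 'a'}, {'filename': 'dir/'}, {}])
for _chunkdata in iter(lambda: next(_headers), {}):
    pass   # sees the two real headers, stops cleanly at {}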
475 class headerlessfixup(object):
475 class headerlessfixup(object):
476 def __init__(self, fh, h):
476 def __init__(self, fh, h):
477 self._h = h
477 self._h = h
478 self._fh = fh
478 self._fh = fh
479 def read(self, n):
479 def read(self, n):
480 if self._h:
480 if self._h:
481 d, self._h = self._h[:n], self._h[n:]
481 d, self._h = self._h[:n], self._h[n:]
482 if len(d) < n:
482 if len(d) < n:
483 d += readexactly(self._fh, n - len(d))
483 d += readexactly(self._fh, n - len(d))
484 return d
484 return d
485 return readexactly(self._fh, n)
485 return readexactly(self._fh, n)
486
486
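# Minimal usage sketch of headerlessfixup above: when the first bytes of
# a stream were consumed to sniff its type, wrap the stream so those
# bytes are replayed before the remaining data (throwaway values):
import io
_fixed = headerlessfixup(io.BytesIO(b'NGROUP-DATA'), b'HG10UN-CHA')
assert _fixed.read(6) == b'HG10UN'
assert _fixed.read(9) == b'-CHANGROU'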
487 class cg1packer(object):
487 class cg1packer(object):
488 deltaheader = _CHANGEGROUPV1_DELTA_HEADER
488 deltaheader = _CHANGEGROUPV1_DELTA_HEADER
489 version = '01'
489 version = '01'
490 def __init__(self, repo, bundlecaps=None):
490 def __init__(self, repo, bundlecaps=None):
491 """Given a source repo, construct a bundler.
491 """Given a source repo, construct a bundler.
492
492
493 bundlecaps is optional and can be used to specify the set of
493 bundlecaps is optional and can be used to specify the set of
494 capabilities which can be used to build the bundle. While bundlecaps is
494 capabilities which can be used to build the bundle. While bundlecaps is
495 unused in core Mercurial, extensions rely on this feature to communicate
495 unused in core Mercurial, extensions rely on this feature to communicate
496 capabilities to customize the changegroup packer.
496 capabilities to customize the changegroup packer.
497 """
497 """
498 # Set of capabilities we can use to build the bundle.
498 # Set of capabilities we can use to build the bundle.
499 if bundlecaps is None:
499 if bundlecaps is None:
500 bundlecaps = set()
500 bundlecaps = set()
501 self._bundlecaps = bundlecaps
501 self._bundlecaps = bundlecaps
502 # experimental config: bundle.reorder
502 # experimental config: bundle.reorder
503 reorder = repo.ui.config('bundle', 'reorder')
503 reorder = repo.ui.config('bundle', 'reorder')
504 if reorder == 'auto':
504 if reorder == 'auto':
505 reorder = None
505 reorder = None
506 else:
506 else:
507 reorder = util.parsebool(reorder)
507 reorder = util.parsebool(reorder)
508 self._repo = repo
508 self._repo = repo
509 self._reorder = reorder
509 self._reorder = reorder
510 self._progress = repo.ui.progress
510 self._progress = repo.ui.progress
511 if self._repo.ui.verbose and not self._repo.ui.debugflag:
511 if self._repo.ui.verbose and not self._repo.ui.debugflag:
512 self._verbosenote = self._repo.ui.note
512 self._verbosenote = self._repo.ui.note
513 else:
513 else:
514 self._verbosenote = lambda s: None
514 self._verbosenote = lambda s: None
515
515
516 def close(self):
516 def close(self):
517 return closechunk()
517 return closechunk()
518
518
519 def fileheader(self, fname):
519 def fileheader(self, fname):
520 return chunkheader(len(fname)) + fname
520 return chunkheader(len(fname)) + fname
521
521
522 # Extracted both for clarity and for overriding in extensions.
522 # Extracted both for clarity and for overriding in extensions.
523 def _sortgroup(self, revlog, nodelist, lookup):
523 def _sortgroup(self, revlog, nodelist, lookup):
524 """Sort nodes for change group and turn them into revnums."""
524 """Sort nodes for change group and turn them into revnums."""
525 # for generaldelta revlogs, we linearize the revs; this will both be
525 # for generaldelta revlogs, we linearize the revs; this will both be
526 # much quicker and generate a much smaller bundle
526 # much quicker and generate a much smaller bundle
527 if (revlog._generaldelta and self._reorder is None) or self._reorder:
527 if (revlog._generaldelta and self._reorder is None) or self._reorder:
528 dag = dagutil.revlogdag(revlog)
528 dag = dagutil.revlogdag(revlog)
529 return dag.linearize(set(revlog.rev(n) for n in nodelist))
529 return dag.linearize(set(revlog.rev(n) for n in nodelist))
530 else:
530 else:
531 return sorted([revlog.rev(n) for n in nodelist])
531 return sorted([revlog.rev(n) for n in nodelist])
532
532
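# Boolean sketch of the reorder decision above: linearize when the revlog
# is generaldelta and no explicit preference is configured, or whenever
# reordering is explicitly requested.
def _shouldlinearize(generaldelta, reorder):
    return bool((generaldelta and reorder is None) or reorder)

assert _shouldlinearize(True, None)       # generaldelta default: reorder
assert not _shouldlinearize(False, None)  # plain revlog default: keep order
assert _shouldlinearize(False, True)      # explicit bundle.reorder=true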
533 def group(self, nodelist, revlog, lookup, units=None):
533 def group(self, nodelist, revlog, lookup, units=None):
534 """Calculate a delta group, yielding a sequence of changegroup chunks
534 """Calculate a delta group, yielding a sequence of changegroup chunks
535 (strings).
535 (strings).
536
536
537 Given a list of changeset revs, return a set of deltas and
537 Given a list of changeset revs, return a set of deltas and
538 metadata corresponding to nodes. The first delta is
538 metadata corresponding to nodes. The first delta is
539 first parent(nodelist[0]) -> nodelist[0], the receiver is
539 first parent(nodelist[0]) -> nodelist[0], the receiver is
540 guaranteed to have this parent as it has all history before
540 guaranteed to have this parent as it has all history before
541 these changesets. If the first parent is nullrev, the
541 these changesets. If the first parent is nullrev, the
542 changegroup starts with a full revision.
542 changegroup starts with a full revision.
543
543
544 If units is not None, progress detail will be generated; units specifies
544 If units is not None, progress detail will be generated; units specifies
545 the type of revlog that is touched (changelog, manifest, etc.).
545 the type of revlog that is touched (changelog, manifest, etc.).
546 """
546 """
547 # if we don't have any revisions touched by these changesets, bail
547 # if we don't have any revisions touched by these changesets, bail
548 if len(nodelist) == 0:
548 if len(nodelist) == 0:
549 yield self.close()
549 yield self.close()
550 return
550 return
551
551
552 revs = self._sortgroup(revlog, nodelist, lookup)
552 revs = self._sortgroup(revlog, nodelist, lookup)
553
553
554 # add the parent of the first rev
554 # add the parent of the first rev
555 p = revlog.parentrevs(revs[0])[0]
555 p = revlog.parentrevs(revs[0])[0]
556 revs.insert(0, p)
556 revs.insert(0, p)
557
557
558 # build deltas
558 # build deltas
559 total = len(revs) - 1
559 total = len(revs) - 1
560 msgbundling = _('bundling')
560 msgbundling = _('bundling')
561 for r in xrange(len(revs) - 1):
561 for r in xrange(len(revs) - 1):
562 if units is not None:
562 if units is not None:
563 self._progress(msgbundling, r + 1, unit=units, total=total)
563 self._progress(msgbundling, r + 1, unit=units, total=total)
564 prev, curr = revs[r], revs[r + 1]
564 prev, curr = revs[r], revs[r + 1]
565 linknode = lookup(revlog.node(curr))
565 linknode = lookup(revlog.node(curr))
566 for c in self.revchunk(revlog, curr, prev, linknode):
566 for c in self.revchunk(revlog, curr, prev, linknode):
567 yield c
567 yield c
568
568
569 if units is not None:
569 if units is not None:
570 self._progress(msgbundling, None)
570 self._progress(msgbundling, None)
571 yield self.close()
571 yield self.close()
572
572
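# Tiny illustration of the (prev, curr) pairing inside group() above,
# using plain values instead of revision numbers: with revs == [p, r0,
# r1, r2] the deltas emitted are p->r0, r0->r1, r1->r2.
_revs = ['p', 'r0', 'r1', 'r2']
_pairs = [(_revs[r], _revs[r + 1]) for r in xrange(len(_revs) - 1)]
assert _pairs == [('p', 'r0'), ('r0', 'r1'), ('r1', 'r2')]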
573 # filter any nodes that claim to be part of the known set
573 # filter any nodes that claim to be part of the known set
574 def prune(self, revlog, missing, commonrevs):
574 def prune(self, revlog, missing, commonrevs):
575 rr, rl = revlog.rev, revlog.linkrev
575 rr, rl = revlog.rev, revlog.linkrev
576 return [n for n in missing if rl(rr(n)) not in commonrevs]
576 return [n for n in missing if rl(rr(n)) not in commonrevs]
577
577
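# Plain-data sketch of prune() above: keep only the nodes whose linkrev
# is not already known to the other side (toy mapping, not a revlog):
_linkrevs = {'n1': 5, 'n2': 9, 'n3': 12}
_commonrevs = set(range(10))
assert [n for n in ['n1', 'n2', 'n3']
        if _linkrevs[n] not in _commonrevs] == ['n3']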
578 def _packmanifests(self, dir, mfnodes, lookuplinknode):
578 def _packmanifests(self, dir, mfnodes, lookuplinknode):
579 """Pack flat manifests into a changegroup stream."""
579 """Pack flat manifests into a changegroup stream."""
580 assert not dir
580 assert not dir
581 for chunk in self.group(mfnodes, self._repo.manifestlog._revlog,
581 for chunk in self.group(mfnodes, self._repo.manifestlog._revlog,
582 lookuplinknode, units=_('manifests')):
582 lookuplinknode, units=_('manifests')):
583 yield chunk
583 yield chunk
584
584
585 def _manifestsdone(self):
585 def _manifestsdone(self):
586 return ''
586 return ''
587
587
588 def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
588 def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
589 '''yield a sequence of changegroup chunks (strings)'''
589 '''yield a sequence of changegroup chunks (strings)'''
590 repo = self._repo
590 repo = self._repo
591 cl = repo.changelog
591 cl = repo.changelog
592
592
593 clrevorder = {}
593 clrevorder = {}
594 mfs = {} # needed manifests
594 mfs = {} # needed manifests
595 fnodes = {} # needed file nodes
595 fnodes = {} # needed file nodes
596 changedfiles = set()
596 changedfiles = set()
597
597
598 # Callback for the changelog, used to collect changed files and manifest
598 # Callback for the changelog, used to collect changed files and manifest
599 # nodes.
599 # nodes.
600 # Returns the linkrev node (identity in the changelog case).
600 # Returns the linkrev node (identity in the changelog case).
601 def lookupcl(x):
601 def lookupcl(x):
602 c = cl.read(x)
602 c = cl.read(x)
603 clrevorder[x] = len(clrevorder)
603 clrevorder[x] = len(clrevorder)
604 n = c[0]
604 n = c[0]
605 # record the first changeset introducing this manifest version
605 # record the first changeset introducing this manifest version
606 mfs.setdefault(n, x)
606 mfs.setdefault(n, x)
607 # Record a complete list of potentially-changed files in
607 # Record a complete list of potentially-changed files in
608 # this manifest.
608 # this manifest.
609 changedfiles.update(c[3])
609 changedfiles.update(c[3])
610 return x
610 return x
611
611
612 self._verbosenote(_('uncompressed size of bundle content:\n'))
612 self._verbosenote(_('uncompressed size of bundle content:\n'))
613 size = 0
613 size = 0
614 for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets')):
614 for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets')):
615 size += len(chunk)
615 size += len(chunk)
616 yield chunk
616 yield chunk
617 self._verbosenote(_('%8.i (changelog)\n') % size)
617 self._verbosenote(_('%8.i (changelog)\n') % size)
618
618
619 # We need to make sure that the linkrev in the changegroup refers to
619 # We need to make sure that the linkrev in the changegroup refers to
620 # the first changeset that introduced the manifest or file revision.
620 # the first changeset that introduced the manifest or file revision.
621 # The fastpath is usually safer than the slowpath, because the filelogs
621 # The fastpath is usually safer than the slowpath, because the filelogs
622 # are walked in revlog order.
622 # are walked in revlog order.
623 #
623 #
624 # When taking the slowpath with reorder=None and the manifest revlog
624 # When taking the slowpath with reorder=None and the manifest revlog
625 # uses generaldelta, the manifest may be walked in the "wrong" order.
625 # uses generaldelta, the manifest may be walked in the "wrong" order.
626 # Without 'clrevorder', we would get an incorrect linkrev (see fix in
626 # Without 'clrevorder', we would get an incorrect linkrev (see fix in
627 # cc0ff93d0c0c).
627 # cc0ff93d0c0c).
628 #
628 #
629 # When taking the fastpath, we are only vulnerable to reordering
629 # When taking the fastpath, we are only vulnerable to reordering
630 # of the changelog itself. The changelog never uses generaldelta, so
630 # of the changelog itself. The changelog never uses generaldelta, so
631 # it is only reordered when reorder=True. To handle this case, we
631 # it is only reordered when reorder=True. To handle this case, we
632 # simply take the slowpath, which already has the 'clrevorder' logic.
632 # simply take the slowpath, which already has the 'clrevorder' logic.
633 # This was also fixed in cc0ff93d0c0c.
633 # This was also fixed in cc0ff93d0c0c.
634 fastpathlinkrev = fastpathlinkrev and not self._reorder
634 fastpathlinkrev = fastpathlinkrev and not self._reorder
635 # Treemanifests don't work correctly with fastpathlinkrev
635 # Treemanifests don't work correctly with fastpathlinkrev
636 # either, because we don't discover which directory nodes to
636 # either, because we don't discover which directory nodes to
637 # send along with files. This could probably be fixed.
637 # send along with files. This could probably be fixed.
638 fastpathlinkrev = fastpathlinkrev and (
638 fastpathlinkrev = fastpathlinkrev and (
639 'treemanifest' not in repo.requirements)
639 'treemanifest' not in repo.requirements)
640
640
641 for chunk in self.generatemanifests(commonrevs, clrevorder,
641 for chunk in self.generatemanifests(commonrevs, clrevorder,
642 fastpathlinkrev, mfs, fnodes):
642 fastpathlinkrev, mfs, fnodes):
643 yield chunk
643 yield chunk
644 mfs.clear()
644 mfs.clear()
645 clrevs = set(cl.rev(x) for x in clnodes)
645 clrevs = set(cl.rev(x) for x in clnodes)
646
646
647 if not fastpathlinkrev:
647 if not fastpathlinkrev:
648 def linknodes(unused, fname):
648 def linknodes(unused, fname):
649 return fnodes.get(fname, {})
649 return fnodes.get(fname, {})
650 else:
650 else:
651 cln = cl.node
651 cln = cl.node
652 def linknodes(filerevlog, fname):
652 def linknodes(filerevlog, fname):
653 llr = filerevlog.linkrev
653 llr = filerevlog.linkrev
654 fln = filerevlog.node
654 fln = filerevlog.node
655 revs = ((r, llr(r)) for r in filerevlog)
655 revs = ((r, llr(r)) for r in filerevlog)
656 return dict((fln(r), cln(lr)) for r, lr in revs if lr in clrevs)
656 return dict((fln(r), cln(lr)) for r, lr in revs if lr in clrevs)
657
657
658 for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
658 for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
659 source):
659 source):
660 yield chunk
660 yield chunk
661
661
662 yield self.close()
662 yield self.close()
663
663
664 if clnodes:
664 if clnodes:
665 repo.hook('outgoing', node=hex(clnodes[0]), source=source)
665 repo.hook('outgoing', node=hex(clnodes[0]), source=source)
666
666
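A note on the bookkeeping in generate() above: lookupcl relies on dict.setdefault so that only the first changeset to introduce a manifest is recorded, while clrevorder remembers emission order for later tie-breaking. A minimal, self-contained sketch of that idiom (the node names are illustrative; nothing here is a Mercurial API):

mfs = {}
clrevorder = {}
emitted = [('cs0', 'mf-a'), ('cs1', 'mf-a'), ('cs2', 'mf-b')]
for cs, mfnode in emitted:
    clrevorder[cs] = len(clrevorder)  # remember emission order
    mfs.setdefault(mfnode, cs)        # first introducer wins
assert mfs == {'mf-a': 'cs0', 'mf-b': 'cs2'}
assert clrevorder == {'cs0': 0, 'cs1': 1, 'cs2': 2}
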
667 def generatemanifests(self, commonrevs, clrevorder, fastpathlinkrev, mfs,
667 def generatemanifests(self, commonrevs, clrevorder, fastpathlinkrev, mfs,
668 fnodes):
668 fnodes):
669 repo = self._repo
669 repo = self._repo
670 mfl = repo.manifestlog
670 mfl = repo.manifestlog
671 dirlog = mfl._revlog.dirlog
671 dirlog = mfl._revlog.dirlog
672 tmfnodes = {'': mfs}
672 tmfnodes = {'': mfs}
673
673
674 # Callback for the manifest, used to collect linkrevs for filelog
674 # Callback for the manifest, used to collect linkrevs for filelog
675 # revisions.
675 # revisions.
676 # Returns the linkrev node (collected in lookupcl).
676 # Returns the linkrev node (collected in lookupcl).
677 def makelookupmflinknode(dir):
677 def makelookupmflinknode(dir):
678 if fastpathlinkrev:
678 if fastpathlinkrev:
679 assert not dir
679 assert not dir
680 return mfs.__getitem__
680 return mfs.__getitem__
681
681
682 def lookupmflinknode(x):
682 def lookupmflinknode(x):
683 """Callback for looking up the linknode for manifests.
683 """Callback for looking up the linknode for manifests.
684
684
685 Returns the linkrev node for the specified manifest.
685 Returns the linkrev node for the specified manifest.
686
686
687 SIDE EFFECT:
687 SIDE EFFECT:
688
688
689 1) fclnodes gets populated with the list of relevant
689 1) fclnodes gets populated with the list of relevant
690 file nodes if we're not using fastpathlinkrev
690 file nodes if we're not using fastpathlinkrev
691 2) When treemanifests are in use, collects treemanifest nodes
691 2) When treemanifests are in use, collects treemanifest nodes
692 to send
692 to send
693
693
694 Note that this means manifests must be completely sent to
694 Note that this means manifests must be completely sent to
695 the client before you can trust the list of files and
695 the client before you can trust the list of files and
696 treemanifests to send.
696 treemanifests to send.
697 """
697 """
698 clnode = tmfnodes[dir][x]
698 clnode = tmfnodes[dir][x]
699 mdata = mfl.get(dir, x).readfast(shallow=True)
699 mdata = mfl.get(dir, x).readfast(shallow=True)
700 for p, n, fl in mdata.iterentries():
700 for p, n, fl in mdata.iterentries():
701 if fl == 't': # subdirectory manifest
701 if fl == 't': # subdirectory manifest
702 subdir = dir + p + '/'
702 subdir = dir + p + '/'
703 tmfclnodes = tmfnodes.setdefault(subdir, {})
703 tmfclnodes = tmfnodes.setdefault(subdir, {})
704 tmfclnode = tmfclnodes.setdefault(n, clnode)
704 tmfclnode = tmfclnodes.setdefault(n, clnode)
705 if clrevorder[clnode] < clrevorder[tmfclnode]:
705 if clrevorder[clnode] < clrevorder[tmfclnode]:
706 tmfclnodes[n] = clnode
706 tmfclnodes[n] = clnode
707 else:
707 else:
708 f = dir + p
708 f = dir + p
709 fclnodes = fnodes.setdefault(f, {})
709 fclnodes = fnodes.setdefault(f, {})
710 fclnode = fclnodes.setdefault(n, clnode)
710 fclnode = fclnodes.setdefault(n, clnode)
711 if clrevorder[clnode] < clrevorder[fclnode]:
711 if clrevorder[clnode] < clrevorder[fclnode]:
712 fclnodes[n] = clnode
712 fclnodes[n] = clnode
713 return clnode
713 return clnode
714 return lookupmflinknode
714 return lookupmflinknode
715
715
716 size = 0
716 size = 0
717 while tmfnodes:
717 while tmfnodes:
718 dir = min(tmfnodes)
718 dir = min(tmfnodes)
719 nodes = tmfnodes[dir]
719 nodes = tmfnodes[dir]
720 prunednodes = self.prune(dirlog(dir), nodes, commonrevs)
720 prunednodes = self.prune(dirlog(dir), nodes, commonrevs)
721 if not dir or prunednodes:
721 if not dir or prunednodes:
722 for x in self._packmanifests(dir, prunednodes,
722 for x in self._packmanifests(dir, prunednodes,
723 makelookupmflinknode(dir)):
723 makelookupmflinknode(dir)):
724 size += len(x)
724 size += len(x)
725 yield x
725 yield x
726 del tmfnodes[dir]
726 del tmfnodes[dir]
727 self._verbosenote(_('%8.i (manifests)\n') % size)
727 self._verbosenote(_('%8.i (manifests)\n') % size)
728 yield self._manifestsdone()
728 yield self._manifestsdone()
729
729
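generatemanifests() drains tmfnodes with a while loop rather than a plain iteration because visiting one directory's manifests can enqueue subdirectories (via lookupmflinknode). A self-contained sketch of that traversal, with invented directory names:

children = {'': ['a/', 'b/'], 'a/': ['a/x/'], 'b/': [], 'a/x/': []}
pending = {''}            # start with the root manifest
visited = []
while pending:
    d = min(pending)      # deterministic: smallest directory first
    pending.remove(d)
    visited.append(d)
    pending.update(children[d])  # subdirectories discovered while visiting
assert visited == ['', 'a/', 'a/x/', 'b/']
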
730 # The 'source' parameter is useful for extensions
730 # The 'source' parameter is useful for extensions
731 def generatefiles(self, changedfiles, linknodes, commonrevs, source):
731 def generatefiles(self, changedfiles, linknodes, commonrevs, source):
732 repo = self._repo
732 repo = self._repo
733 progress = self._progress
733 progress = self._progress
734 msgbundling = _('bundling')
734 msgbundling = _('bundling')
735
735
736 total = len(changedfiles)
736 total = len(changedfiles)
737 # for progress output
737 # for progress output
738 msgfiles = _('files')
738 msgfiles = _('files')
739 for i, fname in enumerate(sorted(changedfiles)):
739 for i, fname in enumerate(sorted(changedfiles)):
740 filerevlog = repo.file(fname)
740 filerevlog = repo.file(fname)
741 if not filerevlog:
741 if not filerevlog:
742 raise error.Abort(_("empty or missing revlog for %s") % fname)
742 raise error.Abort(_("empty or missing revlog for %s") % fname)
743
743
744 linkrevnodes = linknodes(filerevlog, fname)
744 linkrevnodes = linknodes(filerevlog, fname)
745 # Lookup table for filenodes; we collected the linkrev nodes above
745 # Lookup table for filenodes; we collected the linkrev nodes above
746 # in the fastpath case and with lookupmf in the slowpath case.
746 # in the fastpath case and with lookupmf in the slowpath case.
747 def lookupfilelog(x):
747 def lookupfilelog(x):
748 return linkrevnodes[x]
748 return linkrevnodes[x]
749
749
750 filenodes = self.prune(filerevlog, linkrevnodes, commonrevs)
750 filenodes = self.prune(filerevlog, linkrevnodes, commonrevs)
751 if filenodes:
751 if filenodes:
752 progress(msgbundling, i + 1, item=fname, unit=msgfiles,
752 progress(msgbundling, i + 1, item=fname, unit=msgfiles,
753 total=total)
753 total=total)
754 h = self.fileheader(fname)
754 h = self.fileheader(fname)
755 size = len(h)
755 size = len(h)
756 yield h
756 yield h
757 for chunk in self.group(filenodes, filerevlog, lookupfilelog):
757 for chunk in self.group(filenodes, filerevlog, lookupfilelog):
758 size += len(chunk)
758 size += len(chunk)
759 yield chunk
759 yield chunk
760 self._verbosenote(_('%8.i %s\n') % (size, fname))
760 self._verbosenote(_('%8.i %s\n') % (size, fname))
761 progress(msgbundling, None)
761 progress(msgbundling, None)
762
762
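Each file section emitted by generatefiles() above is a header chunk followed by the file's data chunks, with the running size reported per file; files whose nodes were all pruned contribute nothing at all. A stand-alone sketch of the pattern (the 'H:' header format is made up for illustration):

def emitfiles(files):
    for fname, chunks in sorted(files.items()):
        if not chunks:
            continue              # fully pruned: contribute nothing
        header = b'H:' + fname    # stand-in for fileheader(fname)
        size = len(header)
        yield header
        for chunk in chunks:
            size += len(chunk)
            yield chunk
        print('%8i %s' % (size, fname.decode()))  # per-file size note

out = list(emitfiles({b'a.txt': [b'd1', b'd2'], b'b.txt': []}))
assert out == [b'H:a.txt', b'd1', b'd2']
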
763 def deltaparent(self, revlog, rev, p1, p2, prev):
763 def deltaparent(self, revlog, rev, p1, p2, prev):
764 return prev
764 return prev
765
765
766 def revchunk(self, revlog, rev, prev, linknode):
766 def revchunk(self, revlog, rev, prev, linknode):
767 node = revlog.node(rev)
767 node = revlog.node(rev)
768 p1, p2 = revlog.parentrevs(rev)
768 p1, p2 = revlog.parentrevs(rev)
769 base = self.deltaparent(revlog, rev, p1, p2, prev)
769 base = self.deltaparent(revlog, rev, p1, p2, prev)
770
770
771 prefix = ''
771 prefix = ''
772 if revlog.iscensored(base) or revlog.iscensored(rev):
772 if revlog.iscensored(base) or revlog.iscensored(rev):
773 try:
773 try:
774 delta = revlog.revision(node, raw=True)
774 delta = revlog.revision(node, raw=True)
775 except error.CensoredNodeError as e:
775 except error.CensoredNodeError as e:
776 delta = e.tombstone
776 delta = e.tombstone
777 if base == nullrev:
777 if base == nullrev:
778 prefix = mdiff.trivialdiffheader(len(delta))
778 prefix = mdiff.trivialdiffheader(len(delta))
779 else:
779 else:
780 baselen = revlog.rawsize(base)
780 baselen = revlog.rawsize(base)
781 prefix = mdiff.replacediffheader(baselen, len(delta))
781 prefix = mdiff.replacediffheader(baselen, len(delta))
782 elif base == nullrev:
782 elif base == nullrev:
783 delta = revlog.revision(node, raw=True)
783 delta = revlog.revision(node, raw=True)
784 prefix = mdiff.trivialdiffheader(len(delta))
784 prefix = mdiff.trivialdiffheader(len(delta))
785 else:
785 else:
786 delta = revlog.revdiff(base, rev)
786 delta = revlog.revdiff(base, rev)
787 p1n, p2n = revlog.parents(node)
787 p1n, p2n = revlog.parents(node)
788 basenode = revlog.node(base)
788 basenode = revlog.node(base)
789 flags = revlog.flags(rev)
789 flags = revlog.flags(rev)
790 meta = self.builddeltaheader(node, p1n, p2n, basenode, linknode, flags)
790 meta = self.builddeltaheader(node, p1n, p2n, basenode, linknode, flags)
791 meta += prefix
791 meta += prefix
792 l = len(meta) + len(delta)
792 l = len(meta) + len(delta)
793 yield chunkheader(l)
793 yield chunkheader(l)
794 yield meta
794 yield meta
795 yield delta
795 yield delta
796 def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
796 def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
797 # do nothing with basenode, it is implicitly the previous one in HG10
797 # do nothing with basenode, it is implicitly the previous one in HG10
798 # do nothing with flags, it is implicitly 0 for cg1 and cg2
798 # do nothing with flags, it is implicitly 0 for cg1 and cg2
799 return struct.pack(self.deltaheader, node, p1n, p2n, linknode)
799 return struct.pack(self.deltaheader, node, p1n, p2n, linknode)
800
800
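revchunk() frames every delta as a length header followed by the payload. The sketch below reproduces that length-prefixed framing stand-alone; the +4 convention (the 4-byte big-endian header counts itself, and a zero-length header closes a group) follows Mercurial's chunkheader/closechunk helpers:

import struct

def chunkheader(payloadlen):
    # the advertised length covers the 4-byte header itself
    return struct.pack(">l", payloadlen + 4)

def frame(payload):
    yield chunkheader(len(payload))
    yield payload

chunks = b''.join(frame(b'meta+delta bytes'))
length = struct.unpack(">l", chunks[:4])[0]
assert chunks[4:length] == b'meta+delta bytes'
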
801 class cg2packer(cg1packer):
801 class cg2packer(cg1packer):
802 version = '02'
802 version = '02'
803 deltaheader = _CHANGEGROUPV2_DELTA_HEADER
803 deltaheader = _CHANGEGROUPV2_DELTA_HEADER
804
804
805 def __init__(self, repo, bundlecaps=None):
805 def __init__(self, repo, bundlecaps=None):
806 super(cg2packer, self).__init__(repo, bundlecaps)
806 super(cg2packer, self).__init__(repo, bundlecaps)
807 if self._reorder is None:
807 if self._reorder is None:
808 # Since generaldelta is directly supported by cg2, reordering
808 # Since generaldelta is directly supported by cg2, reordering
809 # generally doesn't help, so we disable it by default (treating
809 # generally doesn't help, so we disable it by default (treating
810 # bundle.reorder=auto just like bundle.reorder=False).
810 # bundle.reorder=auto just like bundle.reorder=False).
811 self._reorder = False
811 self._reorder = False
812
812
813 def deltaparent(self, revlog, rev, p1, p2, prev):
813 def deltaparent(self, revlog, rev, p1, p2, prev):
814 dp = revlog.deltaparent(rev)
814 dp = revlog.deltaparent(rev)
815 if dp == nullrev and revlog.storedeltachains:
815 if dp == nullrev and revlog.storedeltachains:
816 # Avoid sending full revisions when delta parent is null. Pick prev
816 # Avoid sending full revisions when delta parent is null. Pick prev
817 # in that case. It's tempting to pick p1 in this case, as p1 will
817 # in that case. It's tempting to pick p1 in this case, as p1 will
818 # be smaller in the common case. However, computing a delta against
818 # be smaller in the common case. However, computing a delta against
819 # p1 may require resolving the raw text of p1, which could be
819 # p1 may require resolving the raw text of p1, which could be
820 # expensive. The revlog caches should have prev cached, meaning
820 # expensive. The revlog caches should have prev cached, meaning
821 # less CPU for changegroup generation. There is likely room to add
821 # less CPU for changegroup generation. There is likely room to add
822 # a flag and/or config option to control this behavior.
822 # a flag and/or config option to control this behavior.
823 return prev
823 return prev
824 elif dp == nullrev:
824 elif dp == nullrev:
825 # revlog is configured to use full snapshot for a reason,
825 # revlog is configured to use full snapshot for a reason,
826 # stick to full snapshot.
826 # stick to full snapshot.
827 return nullrev
827 return nullrev
828 elif dp not in (p1, p2, prev):
828 elif dp not in (p1, p2, prev):
829 # Pick prev when we can't be sure remote has the base revision.
829 # Pick prev when we can't be sure remote has the base revision.
830 return prev
830 return prev
831 else:
831 else:
832 return dp
832 return dp
833
833
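The branches of cg2packer.deltaparent() above encode a small policy table. Restated as a stand-alone function with simplified inputs (nullrev inlined as -1, no real revlog involved):

nullrev = -1

def choosedeltaparent(dp, p1, p2, prev, storedeltachains=True):
    if dp == nullrev and storedeltachains:
        return prev      # avoid shipping a full text; prev is likely cached
    elif dp == nullrev:
        return nullrev   # revlog wants full snapshots, keep them full
    elif dp not in (p1, p2, prev):
        return prev      # remote may not have the stored delta base
    else:
        return dp        # safe to reuse the on-disk delta parent

assert choosedeltaparent(nullrev, 1, nullrev, 5) == 5
assert choosedeltaparent(nullrev, 1, nullrev, 5, storedeltachains=False) == nullrev
assert choosedeltaparent(3, 1, nullrev, 5) == 5
assert choosedeltaparent(1, 1, nullrev, 5) == 1
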
834 def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
834 def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
835 # Do nothing with flags, it is implicitly 0 in cg1 and cg2
835 # Do nothing with flags, it is implicitly 0 in cg1 and cg2
836 return struct.pack(self.deltaheader, node, p1n, p2n, basenode, linknode)
836 return struct.pack(self.deltaheader, node, p1n, p2n, basenode, linknode)
837
837
838 class cg3packer(cg2packer):
838 class cg3packer(cg2packer):
839 version = '03'
839 version = '03'
840 deltaheader = _CHANGEGROUPV3_DELTA_HEADER
840 deltaheader = _CHANGEGROUPV3_DELTA_HEADER
841
841
842 def _packmanifests(self, dir, mfnodes, lookuplinknode):
842 def _packmanifests(self, dir, mfnodes, lookuplinknode):
843 if dir:
843 if dir:
844 yield self.fileheader(dir)
844 yield self.fileheader(dir)
845
845
846 dirlog = self._repo.manifestlog._revlog.dirlog(dir)
846 dirlog = self._repo.manifestlog._revlog.dirlog(dir)
847 for chunk in self.group(mfnodes, dirlog, lookuplinknode,
847 for chunk in self.group(mfnodes, dirlog, lookuplinknode,
848 units=_('manifests')):
848 units=_('manifests')):
849 yield chunk
849 yield chunk
850
850
851 def _manifestsdone(self):
851 def _manifestsdone(self):
852 return self.close()
852 return self.close()
853
853
854 def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
854 def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
855 return struct.pack(
855 return struct.pack(
856 self.deltaheader, node, p1n, p2n, basenode, linknode, flags)
856 self.deltaheader, node, p1n, p2n, basenode, linknode, flags)
857
857
858 _packermap = {'01': (cg1packer, cg1unpacker),
858 _packermap = {'01': (cg1packer, cg1unpacker),
859 # cg2 adds support for exchanging generaldelta
859 # cg2 adds support for exchanging generaldelta
860 '02': (cg2packer, cg2unpacker),
860 '02': (cg2packer, cg2unpacker),
861 # cg3 adds support for exchanging revlog flags and treemanifests
861 # cg3 adds support for exchanging revlog flags and treemanifests
862 '03': (cg3packer, cg3unpacker),
862 '03': (cg3packer, cg3unpacker),
863 }
863 }
864
864
865 def allsupportedversions(repo):
865 def allsupportedversions(repo):
866 versions = set(_packermap.keys())
866 versions = set(_packermap.keys())
867 if not (repo.ui.configbool('experimental', 'changegroup3') or
867 if not (repo.ui.configbool('experimental', 'changegroup3') or
868 repo.ui.configbool('experimental', 'treemanifest') or
868 repo.ui.configbool('experimental', 'treemanifest') or
869 'treemanifest' in repo.requirements):
869 'treemanifest' in repo.requirements):
870 versions.discard('03')
870 versions.discard('03')
871 return versions
871 return versions
872
872
873 # Changegroup versions that can be applied to the repo
873 # Changegroup versions that can be applied to the repo
874 def supportedincomingversions(repo):
874 def supportedincomingversions(repo):
875 return allsupportedversions(repo)
875 return allsupportedversions(repo)
876
876
877 # Changegroup versions that can be created from the repo
877 # Changegroup versions that can be created from the repo
878 def supportedoutgoingversions(repo):
878 def supportedoutgoingversions(repo):
879 versions = allsupportedversions(repo)
879 versions = allsupportedversions(repo)
880 if 'treemanifest' in repo.requirements:
880 if 'treemanifest' in repo.requirements:
881 # Versions 01 and 02 support only flat manifests and it's just too
881 # Versions 01 and 02 support only flat manifests and it's just too
882 # expensive to convert between the flat manifest and tree manifest on
882 # expensive to convert between the flat manifest and tree manifest on
883 # the fly. Since tree manifests are hashed differently, all of history
883 # the fly. Since tree manifests are hashed differently, all of history
884 # would have to be converted. Instead, we simply don't even pretend to
884 # would have to be converted. Instead, we simply don't even pretend to
885 # support versions 01 and 02.
885 # support versions 01 and 02.
886 versions.discard('01')
886 versions.discard('01')
887 versions.discard('02')
887 versions.discard('02')
888 return versions
888 return versions
889
889
890 def safeversion(repo):
890 def safeversion(repo):
891 # Finds the smallest version that it's safe to assume clients of the repo
891 # Finds the smallest version that it's safe to assume clients of the repo
892 # will support. For example, all hg versions that support generaldelta also
892 # will support. For example, all hg versions that support generaldelta also
893 # support changegroup 02.
893 # support changegroup 02.
894 versions = supportedoutgoingversions(repo)
894 versions = supportedoutgoingversions(repo)
895 if 'generaldelta' in repo.requirements:
895 if 'generaldelta' in repo.requirements:
896 versions.discard('01')
896 versions.discard('01')
897 assert versions
897 assert versions
898 return min(versions)
898 return min(versions)
899
899
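The three helpers above negotiate a changegroup version in layers: every known version, minus what the repo cannot produce, minus what old clients cannot read. A self-contained sketch of that layering, with the config lookups reduced to plain arguments:

_allversions = {'01', '02', '03'}

def supportedoutgoing(requirements, changegroup3=False):
    versions = set(_allversions)
    if not changegroup3 and 'treemanifest' not in requirements:
        versions.discard('03')           # cg3 is opt-in unless required
    if 'treemanifest' in requirements:
        versions.discard('01')           # flat-manifest formats are out
        versions.discard('02')
    return versions

def safeversion(requirements):
    versions = supportedoutgoing(requirements)
    if 'generaldelta' in requirements:
        versions.discard('01')           # generaldelta clients know cg2
    assert versions
    return min(versions)

assert safeversion({'revlogv1'}) == '01'
assert safeversion({'revlogv1', 'generaldelta'}) == '02'
assert safeversion({'treemanifest'}) == '03'
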
900 def getbundler(version, repo, bundlecaps=None):
900 def getbundler(version, repo, bundlecaps=None):
901 assert version in supportedoutgoingversions(repo)
901 assert version in supportedoutgoingversions(repo)
902 return _packermap[version][0](repo, bundlecaps)
902 return _packermap[version][0](repo, bundlecaps)
903
903
904 def getunbundler(version, fh, alg, extras=None):
904 def getunbundler(version, fh, alg, extras=None):
905 return _packermap[version][1](fh, alg, extras=extras)
905 return _packermap[version][1](fh, alg, extras=extras)
906
906
907 def _changegroupinfo(repo, nodes, source):
907 def _changegroupinfo(repo, nodes, source):
908 if repo.ui.verbose or source == 'bundle':
908 if repo.ui.verbose or source == 'bundle':
909 repo.ui.status(_("%d changesets found\n") % len(nodes))
909 repo.ui.status(_("%d changesets found\n") % len(nodes))
910 if repo.ui.debugflag:
910 if repo.ui.debugflag:
911 repo.ui.debug("list of changesets:\n")
911 repo.ui.debug("list of changesets:\n")
912 for node in nodes:
912 for node in nodes:
913 repo.ui.debug("%s\n" % hex(node))
913 repo.ui.debug("%s\n" % hex(node))
914
914
915 def makestream(repo, outgoing, version, source, fastpath=False,
915 def makestream(repo, outgoing, version, source, fastpath=False,
916 bundlecaps=None):
916 bundlecaps=None):
917 bundler = getbundler(version, repo, bundlecaps=bundlecaps)
917 bundler = getbundler(version, repo, bundlecaps=bundlecaps)
918 return getsubsetraw(repo, outgoing, bundler, source, fastpath=fastpath)
918 return getsubsetraw(repo, outgoing, bundler, source, fastpath=fastpath)
919
919
920 def makechangegroup(repo, outgoing, version, source, fastpath=False,
920 def makechangegroup(repo, outgoing, version, source, fastpath=False,
921 bundlecaps=None):
921 bundlecaps=None):
922 cgstream = makestream(repo, outgoing, version, source,
922 cgstream = makestream(repo, outgoing, version, source,
923 fastpath=fastpath, bundlecaps=bundlecaps)
923 fastpath=fastpath, bundlecaps=bundlecaps)
924 return getunbundler(version, util.chunkbuffer(cgstream), None,
924 return getunbundler(version, util.chunkbuffer(cgstream), None,
925 {'clcount': len(outgoing.missing) })
925 {'clcount': len(outgoing.missing) })
926
926
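makechangegroup() wraps the raw chunk generator in util.chunkbuffer so getunbundler() can consume it through a file-like read() interface without materializing the whole bundle. A simplified stand-in for that layering (Mercurial's real chunkbuffer is more careful about buffering):

def makestream():
    for chunk in (b'changelog...', b'manifests...', b'files...'):
        yield chunk

class chunkbuffer(object):
    """Expose a generator of byte chunks through a read() interface."""
    def __init__(self, gen):
        self._iter = gen
        self._buf = b''

    def read(self, n):
        while len(self._buf) < n:
            try:
                self._buf += next(self._iter)
            except StopIteration:
                break
        data, self._buf = self._buf[:n], self._buf[n:]
        return data

fh = chunkbuffer(makestream())
assert fh.read(9) == b'changelog'
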
927 def getsubsetraw(repo, outgoing, bundler, source, fastpath=False):
927 def getsubsetraw(repo, outgoing, bundler, source, fastpath=False):
928 repo = repo.unfiltered()
928 repo = repo.unfiltered()
929 commonrevs = outgoing.common
929 commonrevs = outgoing.common
930 csets = outgoing.missing
930 csets = outgoing.missing
931 heads = outgoing.missingheads
931 heads = outgoing.missingheads
932 # We go through the fast path if we get told to, or if all the
932 # We go through the fast path if we get told to, or if all the
933 # unfiltered heads have been requested (since we then know that all
933 # unfiltered heads have been requested (since we then know that all
934 # linkrevs will be pulled by the client).
934 # linkrevs will be pulled by the client).
935 heads.sort()
935 heads.sort()
936 fastpathlinkrev = fastpath or (
936 fastpathlinkrev = fastpath or (
937 repo.filtername is None and heads == sorted(repo.heads()))
937 repo.filtername is None and heads == sorted(repo.heads()))
938
938
939 repo.hook('preoutgoing', throw=True, source=source)
939 repo.hook('preoutgoing', throw=True, source=source)
940 _changegroupinfo(repo, csets, source)
940 _changegroupinfo(repo, csets, source)
941 return bundler.generate(commonrevs, csets, fastpathlinkrev, source)
941 return bundler.generate(commonrevs, csets, fastpathlinkrev, source)
942
942
943 def changegroupsubset(repo, roots, heads, source, version='01'):
944 """Compute a changegroup consisting of all the nodes that are
945 descendants of any of the roots and ancestors of any of the heads.
946 Return a chunkbuffer object whose read() method will return
947 successive changegroup chunks.
948
949 It is fairly complex as determining which filenodes and which
950 manifest nodes need to be included for the changeset to be complete
951 is non-trivial.
952
953 Another wrinkle is doing the reverse, figuring out which changeset in
954 the changegroup a particular filenode or manifestnode belongs to.
955 """
956 outgoing = discovery.outgoing(repo, missingroots=roots, missingheads=heads)
957 return makechangegroup(repo, outgoing, version, source)
958
959 def getlocalchangegroupraw(repo, source, outgoing, bundlecaps=None,
943 def getlocalchangegroupraw(repo, source, outgoing, bundlecaps=None,
960 version='01'):
944 version='01'):
961 """Like getbundle, but taking a discovery.outgoing as an argument.
945 """Like getbundle, but taking a discovery.outgoing as an argument.
962
946
963 This is only implemented for local repos and reuses potentially
947 This is only implemented for local repos and reuses potentially
964 precomputed sets in outgoing. Returns a raw changegroup generator."""
948 precomputed sets in outgoing. Returns a raw changegroup generator."""
965 if not outgoing.missing:
949 if not outgoing.missing:
966 return None
950 return None
967 bundler = getbundler(version, repo, bundlecaps)
951 bundler = getbundler(version, repo, bundlecaps)
968 return getsubsetraw(repo, outgoing, bundler, source)
952 return getsubsetraw(repo, outgoing, bundler, source)
969
953
970 def getchangegroup(repo, source, outgoing, bundlecaps=None,
954 def getchangegroup(repo, source, outgoing, bundlecaps=None,
971 version='01'):
955 version='01'):
972 """Like getbundle, but taking a discovery.outgoing as an argument.
956 """Like getbundle, but taking a discovery.outgoing as an argument.
973
957
974 This is only implemented for local repos and reuses potentially
958 This is only implemented for local repos and reuses potentially
975 precomputed sets in outgoing."""
959 precomputed sets in outgoing."""
976 if not outgoing.missing:
960 if not outgoing.missing:
977 return None
961 return None
978 return makechangegroup(repo, outgoing, version, source,
962 return makechangegroup(repo, outgoing, version, source,
979 bundlecaps=bundlecaps)
963 bundlecaps=bundlecaps)
980
964
981 def getlocalchangegroup(repo, *args, **kwargs):
965 def getlocalchangegroup(repo, *args, **kwargs):
982 repo.ui.deprecwarn('getlocalchangegroup is deprecated, use getchangegroup',
966 repo.ui.deprecwarn('getlocalchangegroup is deprecated, use getchangegroup',
983 '4.3')
967 '4.3')
984 return getchangegroup(repo, *args, **kwargs)
968 return getchangegroup(repo, *args, **kwargs)
985
969
986 def changegroup(repo, basenodes, source):
970 def changegroup(repo, basenodes, source):
987 # to avoid a race we use changegroupsubset() (issue1320)
971 # to avoid a race we use changegroupsubset() (issue1320)
988 return changegroupsubset(repo, basenodes, repo.heads(), source)
972 outgoing = discovery.outgoing(repo, missingroots=basenodes,
973 missingheads=repo.heads())
974 return makechangegroup(repo, outgoing, '01', source)
989
975
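This inlining of discovery.outgoing() is the point of the changeset: describe the nodes to ship as missingroots/missingheads and let makechangegroup() do the rest. Per the removed changegroupsubset docstring, the set being described is the descendants of the roots intersected with the ancestors of the heads; a toy DAG makes that concrete (graph and node names invented):

parents = {'a': [], 'b': ['a'], 'c': ['b'], 'd': ['b'], 'e': ['c']}

def ancestors(node):               # node plus everything reachable upward
    seen, stack = set(), [node]
    while stack:
        n = stack.pop()
        if n not in seen:
            seen.add(n)
            stack.extend(parents[n])
    return seen

def descendants(node):             # node plus everything reachable downward
    return set(n for n in parents if node in ancestors(n))

roots, heads = {'b'}, {'e'}
missing = set.union(*(descendants(r) for r in roots)) & \
          set.union(*(ancestors(h) for h in heads))
assert missing == {'b', 'c', 'e'}
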
990 def _addchangegroupfiles(repo, source, revmap, trp, expectedfiles, needfiles):
976 def _addchangegroupfiles(repo, source, revmap, trp, expectedfiles, needfiles):
991 revisions = 0
977 revisions = 0
992 files = 0
978 files = 0
993 for chunkdata in iter(source.filelogheader, {}):
979 for chunkdata in iter(source.filelogheader, {}):
994 files += 1
980 files += 1
995 f = chunkdata["filename"]
981 f = chunkdata["filename"]
996 repo.ui.debug("adding %s revisions\n" % f)
982 repo.ui.debug("adding %s revisions\n" % f)
997 repo.ui.progress(_('files'), files, unit=_('files'),
983 repo.ui.progress(_('files'), files, unit=_('files'),
998 total=expectedfiles)
984 total=expectedfiles)
999 fl = repo.file(f)
985 fl = repo.file(f)
1000 o = len(fl)
986 o = len(fl)
1001 try:
987 try:
1002 if not fl.addgroup(source, revmap, trp):
988 if not fl.addgroup(source, revmap, trp):
1003 raise error.Abort(_("received file revlog group is empty"))
989 raise error.Abort(_("received file revlog group is empty"))
1004 except error.CensoredBaseError as e:
990 except error.CensoredBaseError as e:
1005 raise error.Abort(_("received delta base is censored: %s") % e)
991 raise error.Abort(_("received delta base is censored: %s") % e)
1006 revisions += len(fl) - o
992 revisions += len(fl) - o
1007 if f in needfiles:
993 if f in needfiles:
1008 needs = needfiles[f]
994 needs = needfiles[f]
1009 for new in xrange(o, len(fl)):
995 for new in xrange(o, len(fl)):
1010 n = fl.node(new)
996 n = fl.node(new)
1011 if n in needs:
997 if n in needs:
1012 needs.remove(n)
998 needs.remove(n)
1013 else:
999 else:
1014 raise error.Abort(
1000 raise error.Abort(
1015 _("received spurious file revlog entry"))
1001 _("received spurious file revlog entry"))
1016 if not needs:
1002 if not needs:
1017 del needfiles[f]
1003 del needfiles[f]
1018 repo.ui.progress(_('files'), None)
1004 repo.ui.progress(_('files'), None)
1019
1005
1020 for f, needs in needfiles.iteritems():
1006 for f, needs in needfiles.iteritems():
1021 fl = repo.file(f)
1007 fl = repo.file(f)
1022 for n in needs:
1008 for n in needs:
1023 try:
1009 try:
1024 fl.rev(n)
1010 fl.rev(n)
1025 except error.LookupError:
1011 except error.LookupError:
1026 raise error.Abort(
1012 raise error.Abort(
1027 _('missing file data for %s:%s - run hg verify') %
1013 _('missing file data for %s:%s - run hg verify') %
1028 (f, hex(n)))
1014 (f, hex(n)))
1029
1015
1030 return revisions, files
1016 return revisions, files
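_addchangegroupfiles() above checks off every expected file node as it arrives; anything still listed in needfiles afterwards means the incoming group was incomplete and triggers an abort. A minimal sketch of that bookkeeping:

needfiles = {'foo.txt': {'n1', 'n2'}}       # expected per-file nodes
received = {'foo.txt': ['n1', 'n2']}        # what the group delivered

for f, nodes in received.items():
    needs = needfiles.get(f, set())
    for n in nodes:
        needs.discard(n)                    # check off delivered nodes
    if not needs:
        needfiles.pop(f, None)

assert not needfiles, 'missing file data - run hg verify'
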
@@ -1,2294 +1,2297
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import hashlib
11 import hashlib
12 import inspect
12 import inspect
13 import os
13 import os
14 import random
14 import random
15 import time
15 import time
16 import weakref
16 import weakref
17
17
18 from .i18n import _
18 from .i18n import _
19 from .node import (
19 from .node import (
20 hex,
20 hex,
21 nullid,
21 nullid,
22 short,
22 short,
23 )
23 )
24 from . import (
24 from . import (
25 bookmarks,
25 bookmarks,
26 branchmap,
26 branchmap,
27 bundle2,
27 bundle2,
28 changegroup,
28 changegroup,
29 changelog,
29 changelog,
30 color,
30 color,
31 context,
31 context,
32 dirstate,
32 dirstate,
33 dirstateguard,
33 dirstateguard,
34 discovery,
34 encoding,
35 encoding,
35 error,
36 error,
36 exchange,
37 exchange,
37 extensions,
38 extensions,
38 filelog,
39 filelog,
39 hook,
40 hook,
40 lock as lockmod,
41 lock as lockmod,
41 manifest,
42 manifest,
42 match as matchmod,
43 match as matchmod,
43 merge as mergemod,
44 merge as mergemod,
44 mergeutil,
45 mergeutil,
45 namespaces,
46 namespaces,
46 obsolete,
47 obsolete,
47 pathutil,
48 pathutil,
48 peer,
49 peer,
49 phases,
50 phases,
50 pushkey,
51 pushkey,
51 pycompat,
52 pycompat,
52 repository,
53 repository,
53 repoview,
54 repoview,
54 revset,
55 revset,
55 revsetlang,
56 revsetlang,
56 scmutil,
57 scmutil,
57 sparse,
58 sparse,
58 store,
59 store,
59 subrepo,
60 subrepo,
60 tags as tagsmod,
61 tags as tagsmod,
61 transaction,
62 transaction,
62 txnutil,
63 txnutil,
63 util,
64 util,
64 vfs as vfsmod,
65 vfs as vfsmod,
65 )
66 )
66
67
67 release = lockmod.release
68 release = lockmod.release
68 urlerr = util.urlerr
69 urlerr = util.urlerr
69 urlreq = util.urlreq
70 urlreq = util.urlreq
70
71
71 # set of (path, vfs-location) tuples. vfs-location is:
72 # set of (path, vfs-location) tuples. vfs-location is:
72 # - 'plain' for vfs relative paths
73 # - 'plain' for vfs relative paths
73 # - '' for svfs relative paths
74 # - '' for svfs relative paths
74 _cachedfiles = set()
75 _cachedfiles = set()
75
76
76 class _basefilecache(scmutil.filecache):
77 class _basefilecache(scmutil.filecache):
77 """All filecache usage on repo are done for logic that should be unfiltered
78 """All filecache usage on repo are done for logic that should be unfiltered
78 """
79 """
79 def __get__(self, repo, type=None):
80 def __get__(self, repo, type=None):
80 if repo is None:
81 if repo is None:
81 return self
82 return self
82 return super(_basefilecache, self).__get__(repo.unfiltered(), type)
83 return super(_basefilecache, self).__get__(repo.unfiltered(), type)
83 def __set__(self, repo, value):
84 def __set__(self, repo, value):
84 return super(_basefilecache, self).__set__(repo.unfiltered(), value)
85 return super(_basefilecache, self).__set__(repo.unfiltered(), value)
85 def __delete__(self, repo):
86 def __delete__(self, repo):
86 return super(_basefilecache, self).__delete__(repo.unfiltered())
87 return super(_basefilecache, self).__delete__(repo.unfiltered())
87
88
88 class repofilecache(_basefilecache):
89 class repofilecache(_basefilecache):
89 """filecache for files in .hg but outside of .hg/store"""
90 """filecache for files in .hg but outside of .hg/store"""
90 def __init__(self, *paths):
91 def __init__(self, *paths):
91 super(repofilecache, self).__init__(*paths)
92 super(repofilecache, self).__init__(*paths)
92 for path in paths:
93 for path in paths:
93 _cachedfiles.add((path, 'plain'))
94 _cachedfiles.add((path, 'plain'))
94
95
95 def join(self, obj, fname):
96 def join(self, obj, fname):
96 return obj.vfs.join(fname)
97 return obj.vfs.join(fname)
97
98
98 class storecache(_basefilecache):
99 class storecache(_basefilecache):
99 """filecache for files in the store"""
100 """filecache for files in the store"""
100 def __init__(self, *paths):
101 def __init__(self, *paths):
101 super(storecache, self).__init__(*paths)
102 super(storecache, self).__init__(*paths)
102 for path in paths:
103 for path in paths:
103 _cachedfiles.add((path, ''))
104 _cachedfiles.add((path, ''))
104
105
105 def join(self, obj, fname):
106 def join(self, obj, fname):
106 return obj.sjoin(fname)
107 return obj.sjoin(fname)
107
108
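repofilecache and storecache above differ only in where the backing file lives (joined via the repo vfs versus the store). The core idea, recompute a cached property only when its backing file changes on disk, can be sketched stand-alone; the real scmutil.filecache also tracks size and inode and supports explicit invalidation, so this is a deliberate simplification:

import os
import tempfile

class simplefilecache(object):
    """Recompute a value only when the backing file's mtime changes."""
    def __init__(self, path, func):
        self.path, self.func = path, func
        self.stamp = self.value = None

    def get(self):
        stamp = os.stat(self.path).st_mtime
        if stamp != self.stamp:
            self.value, self.stamp = self.func(self.path), stamp
        return self.value

with tempfile.NamedTemporaryFile(delete=False) as f:
    f.write(b'data')
cache = simplefilecache(f.name, lambda p: open(p, 'rb').read())
assert cache.get() == b'data'
os.unlink(f.name)
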
108 def isfilecached(repo, name):
109 def isfilecached(repo, name):
109 """check if a repo has already cached "name" filecache-ed property
110 """check if a repo has already cached "name" filecache-ed property
110
111
111 This returns (cachedobj-or-None, iscached) tuple.
112 This returns (cachedobj-or-None, iscached) tuple.
112 """
113 """
113 cacheentry = repo.unfiltered()._filecache.get(name, None)
114 cacheentry = repo.unfiltered()._filecache.get(name, None)
114 if not cacheentry:
115 if not cacheentry:
115 return None, False
116 return None, False
116 return cacheentry.obj, True
117 return cacheentry.obj, True
117
118
118 class unfilteredpropertycache(util.propertycache):
119 class unfilteredpropertycache(util.propertycache):
119 """propertycache that apply to unfiltered repo only"""
120 """propertycache that apply to unfiltered repo only"""
120
121
121 def __get__(self, repo, type=None):
122 def __get__(self, repo, type=None):
122 unfi = repo.unfiltered()
123 unfi = repo.unfiltered()
123 if unfi is repo:
124 if unfi is repo:
124 return super(unfilteredpropertycache, self).__get__(unfi)
125 return super(unfilteredpropertycache, self).__get__(unfi)
125 return getattr(unfi, self.name)
126 return getattr(unfi, self.name)
126
127
127 class filteredpropertycache(util.propertycache):
128 class filteredpropertycache(util.propertycache):
128 """propertycache that must take filtering in account"""
129 """propertycache that must take filtering in account"""
129
130
130 def cachevalue(self, obj, value):
131 def cachevalue(self, obj, value):
131 object.__setattr__(obj, self.name, value)
132 object.__setattr__(obj, self.name, value)
132
133
133
134
134 def hasunfilteredcache(repo, name):
135 def hasunfilteredcache(repo, name):
135 """check if a repo has an unfilteredpropertycache value for <name>"""
136 """check if a repo has an unfilteredpropertycache value for <name>"""
136 return name in vars(repo.unfiltered())
137 return name in vars(repo.unfiltered())
137
138
138 def unfilteredmethod(orig):
139 def unfilteredmethod(orig):
139 """decorate method that always need to be run on unfiltered version"""
140 """decorate method that always need to be run on unfiltered version"""
140 def wrapper(repo, *args, **kwargs):
141 def wrapper(repo, *args, **kwargs):
141 return orig(repo.unfiltered(), *args, **kwargs)
142 return orig(repo.unfiltered(), *args, **kwargs)
142 return wrapper
143 return wrapper
143
144
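unfilteredmethod above is an ordinary decorator: the wrapper swaps the receiver for repo.unfiltered() before delegating. A runnable toy showing the effect (the Repo class here is invented for the demo):

def unfilteredmethod(orig):
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper

class Repo(object):
    def __init__(self, filtered=True):
        self.filtered = filtered

    def unfiltered(self):
        return Repo(filtered=False) if self.filtered else self

    @unfilteredmethod
    def whoami(self):
        return 'filtered' if self.filtered else 'unfiltered'

assert Repo().whoami() == 'unfiltered'
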
144 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
145 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
145 'unbundle'}
146 'unbundle'}
146 legacycaps = moderncaps.union({'changegroupsubset'})
147 legacycaps = moderncaps.union({'changegroupsubset'})
147
148
148 class localpeer(repository.peer):
149 class localpeer(repository.peer):
149 '''peer for a local repo; reflects only the most recent API'''
150 '''peer for a local repo; reflects only the most recent API'''
150
151
151 def __init__(self, repo, caps=None):
152 def __init__(self, repo, caps=None):
152 super(localpeer, self).__init__()
153 super(localpeer, self).__init__()
153
154
154 if caps is None:
155 if caps is None:
155 caps = moderncaps.copy()
156 caps = moderncaps.copy()
156 self._repo = repo.filtered('served')
157 self._repo = repo.filtered('served')
157 self._ui = repo.ui
158 self._ui = repo.ui
158 self._caps = repo._restrictcapabilities(caps)
159 self._caps = repo._restrictcapabilities(caps)
159
160
160 # Begin of _basepeer interface.
161 # Begin of _basepeer interface.
161
162
162 @util.propertycache
163 @util.propertycache
163 def ui(self):
164 def ui(self):
164 return self._ui
165 return self._ui
165
166
166 def url(self):
167 def url(self):
167 return self._repo.url()
168 return self._repo.url()
168
169
169 def local(self):
170 def local(self):
170 return self._repo
171 return self._repo
171
172
172 def peer(self):
173 def peer(self):
173 return self
174 return self
174
175
175 def canpush(self):
176 def canpush(self):
176 return True
177 return True
177
178
178 def close(self):
179 def close(self):
179 self._repo.close()
180 self._repo.close()
180
181
181 # End of _basepeer interface.
182 # End of _basepeer interface.
182
183
183 # Begin of _basewirecommands interface.
184 # Begin of _basewirecommands interface.
184
185
185 def branchmap(self):
186 def branchmap(self):
186 return self._repo.branchmap()
187 return self._repo.branchmap()
187
188
188 def capabilities(self):
189 def capabilities(self):
189 return self._caps
190 return self._caps
190
191
191 def debugwireargs(self, one, two, three=None, four=None, five=None):
192 def debugwireargs(self, one, two, three=None, four=None, five=None):
192 """Used to test argument passing over the wire"""
193 """Used to test argument passing over the wire"""
193 return "%s %s %s %s %s" % (one, two, three, four, five)
194 return "%s %s %s %s %s" % (one, two, three, four, five)
194
195
195 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
196 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
196 **kwargs):
197 **kwargs):
197 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
198 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
198 common=common, bundlecaps=bundlecaps,
199 common=common, bundlecaps=bundlecaps,
199 **kwargs)
200 **kwargs)
200 cb = util.chunkbuffer(chunks)
201 cb = util.chunkbuffer(chunks)
201
202
202 if exchange.bundle2requested(bundlecaps):
203 if exchange.bundle2requested(bundlecaps):
203 # When requesting a bundle2, getbundle returns a stream to make the
204 # When requesting a bundle2, getbundle returns a stream to make the
204 # wire level function happier. We need to build a proper object
205 # wire level function happier. We need to build a proper object
205 # from it in local peer.
206 # from it in local peer.
206 return bundle2.getunbundler(self.ui, cb)
207 return bundle2.getunbundler(self.ui, cb)
207 else:
208 else:
208 return changegroup.getunbundler('01', cb, None)
209 return changegroup.getunbundler('01', cb, None)
209
210
210 def heads(self):
211 def heads(self):
211 return self._repo.heads()
212 return self._repo.heads()
212
213
213 def known(self, nodes):
214 def known(self, nodes):
214 return self._repo.known(nodes)
215 return self._repo.known(nodes)
215
216
216 def listkeys(self, namespace):
217 def listkeys(self, namespace):
217 return self._repo.listkeys(namespace)
218 return self._repo.listkeys(namespace)
218
219
219 def lookup(self, key):
220 def lookup(self, key):
220 return self._repo.lookup(key)
221 return self._repo.lookup(key)
221
222
222 def pushkey(self, namespace, key, old, new):
223 def pushkey(self, namespace, key, old, new):
223 return self._repo.pushkey(namespace, key, old, new)
224 return self._repo.pushkey(namespace, key, old, new)
224
225
225 def stream_out(self):
226 def stream_out(self):
226 raise error.Abort(_('cannot perform stream clone against local '
227 raise error.Abort(_('cannot perform stream clone against local '
227 'peer'))
228 'peer'))
228
229
229 def unbundle(self, cg, heads, url):
230 def unbundle(self, cg, heads, url):
230 """apply a bundle on a repo
231 """apply a bundle on a repo
231
232
232 This function handles the repo locking itself."""
233 This function handles the repo locking itself."""
233 try:
234 try:
234 try:
235 try:
235 cg = exchange.readbundle(self.ui, cg, None)
236 cg = exchange.readbundle(self.ui, cg, None)
236 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
237 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
237 if util.safehasattr(ret, 'getchunks'):
238 if util.safehasattr(ret, 'getchunks'):
238 # This is a bundle20 object, turn it into an unbundler.
239 # This is a bundle20 object, turn it into an unbundler.
239 # This little dance should be dropped eventually when the
240 # This little dance should be dropped eventually when the
240 # API is finally improved.
241 # API is finally improved.
241 stream = util.chunkbuffer(ret.getchunks())
242 stream = util.chunkbuffer(ret.getchunks())
242 ret = bundle2.getunbundler(self.ui, stream)
243 ret = bundle2.getunbundler(self.ui, stream)
243 return ret
244 return ret
244 except Exception as exc:
245 except Exception as exc:
245 # If the exception contains output salvaged from a bundle2
246 # If the exception contains output salvaged from a bundle2
246 # reply, we need to make sure it is printed before continuing
247 # reply, we need to make sure it is printed before continuing
247 # to fail. So we build a bundle2 with such output and consume
248 # to fail. So we build a bundle2 with such output and consume
248 # it directly.
249 # it directly.
249 #
250 #
250 # This is not very elegant but allows a "simple" solution for
251 # This is not very elegant but allows a "simple" solution for
251 # issue4594
252 # issue4594
252 output = getattr(exc, '_bundle2salvagedoutput', ())
253 output = getattr(exc, '_bundle2salvagedoutput', ())
253 if output:
254 if output:
254 bundler = bundle2.bundle20(self._repo.ui)
255 bundler = bundle2.bundle20(self._repo.ui)
255 for out in output:
256 for out in output:
256 bundler.addpart(out)
257 bundler.addpart(out)
257 stream = util.chunkbuffer(bundler.getchunks())
258 stream = util.chunkbuffer(bundler.getchunks())
258 b = bundle2.getunbundler(self.ui, stream)
259 b = bundle2.getunbundler(self.ui, stream)
259 bundle2.processbundle(self._repo, b)
260 bundle2.processbundle(self._repo, b)
260 raise
261 raise
261 except error.PushRaced as exc:
262 except error.PushRaced as exc:
262 raise error.ResponseError(_('push failed:'), str(exc))
263 raise error.ResponseError(_('push failed:'), str(exc))
263
264
264 # End of _basewirecommands interface.
265 # End of _basewirecommands interface.
265
266
266 # Begin of peer interface.
267 # Begin of peer interface.
267
268
268 def iterbatch(self):
269 def iterbatch(self):
269 return peer.localiterbatcher(self)
270 return peer.localiterbatcher(self)
270
271
271 # End of peer interface.
272 # End of peer interface.
272
273
273 class locallegacypeer(repository.legacypeer, localpeer):
274 class locallegacypeer(repository.legacypeer, localpeer):
274 '''peer extension which implements legacy methods too; used for tests with
275 '''peer extension which implements legacy methods too; used for tests with
275 restricted capabilities'''
276 restricted capabilities'''
276
277
277 def __init__(self, repo):
278 def __init__(self, repo):
278 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
279 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
279
280
280 # Begin of baselegacywirecommands interface.
281 # Begin of baselegacywirecommands interface.
281
282
282 def between(self, pairs):
283 def between(self, pairs):
283 return self._repo.between(pairs)
284 return self._repo.between(pairs)
284
285
285 def branches(self, nodes):
286 def branches(self, nodes):
286 return self._repo.branches(nodes)
287 return self._repo.branches(nodes)
287
288
288 def changegroup(self, basenodes, source):
289 def changegroup(self, basenodes, source):
289 return changegroup.changegroup(self._repo, basenodes, source)
290 return changegroup.changegroup(self._repo, basenodes, source)
290
291
291 def changegroupsubset(self, bases, heads, source):
292 def changegroupsubset(self, bases, heads, source):
292 return changegroup.changegroupsubset(self._repo, bases, heads, source)
293 outgoing = discovery.outgoing(self._repo, missingroots=bases,
294 missingheads=heads)
295 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
293
296
294 # End of baselegacywirecommands interface.
297 # End of baselegacywirecommands interface.
295
298
296 # Increment the sub-version when the revlog v2 format changes to lock out old
299 # Increment the sub-version when the revlog v2 format changes to lock out old
297 # clients.
300 # clients.
298 REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'
301 REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'
299
302
300 class localrepository(object):
303 class localrepository(object):
301
304
302 supportedformats = {
305 supportedformats = {
303 'revlogv1',
306 'revlogv1',
304 'generaldelta',
307 'generaldelta',
305 'treemanifest',
308 'treemanifest',
306 'manifestv2',
309 'manifestv2',
307 REVLOGV2_REQUIREMENT,
310 REVLOGV2_REQUIREMENT,
308 }
311 }
309 _basesupported = supportedformats | {
312 _basesupported = supportedformats | {
310 'store',
313 'store',
311 'fncache',
314 'fncache',
312 'shared',
315 'shared',
313 'relshared',
316 'relshared',
314 'dotencode',
317 'dotencode',
315 'exp-sparse',
318 'exp-sparse',
316 }
319 }
317 openerreqs = {
320 openerreqs = {
318 'revlogv1',
321 'revlogv1',
319 'generaldelta',
322 'generaldelta',
320 'treemanifest',
323 'treemanifest',
321 'manifestv2',
324 'manifestv2',
322 }
325 }
323
326
324 # a list of (ui, featureset) functions.
327 # a list of (ui, featureset) functions.
325 # only functions defined in modules of enabled extensions are invoked
328 # only functions defined in modules of enabled extensions are invoked
326 featuresetupfuncs = set()
329 featuresetupfuncs = set()
327
330
328 # list of prefix for file which can be written without 'wlock'
331 # list of prefix for file which can be written without 'wlock'
329 # Extensions should extend this list when needed
332 # Extensions should extend this list when needed
330 _wlockfreeprefix = {
333 _wlockfreeprefix = {
331 # We might consider requiring 'wlock' for the next
334 # We might consider requiring 'wlock' for the next
332 # two, but pretty much all the existing code assumes
335 # two, but pretty much all the existing code assumes
333 # wlock is not needed so we keep them excluded for
336 # wlock is not needed so we keep them excluded for
334 # now.
337 # now.
335 'hgrc',
338 'hgrc',
336 'requires',
339 'requires',
337 # XXX cache is a complicated business; someone
340 # XXX cache is a complicated business; someone
338 # should investigate this in depth at some point
341 # should investigate this in depth at some point
339 'cache/',
342 'cache/',
340 # XXX shouldn't be dirstate covered by the wlock?
343 # XXX shouldn't be dirstate covered by the wlock?
341 'dirstate',
344 'dirstate',
342 # XXX bisect was still a bit too messy at the time
345 # XXX bisect was still a bit too messy at the time
343 # this changeset was introduced. Someone should fix
346 # this changeset was introduced. Someone should fix
344 # the remaining bit and drop this line
347 # the remaining bit and drop this line
345 'bisect.state',
348 'bisect.state',
346 }
349 }
347
350
348 def __init__(self, baseui, path, create=False):
351 def __init__(self, baseui, path, create=False):
349 self.requirements = set()
352 self.requirements = set()
350 self.filtername = None
353 self.filtername = None
351 # wvfs: rooted at the repository root, used to access the working copy
354 # wvfs: rooted at the repository root, used to access the working copy
352 self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
355 self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
353 # vfs: rooted at .hg, used to access repo files outside of .hg/store
356 # vfs: rooted at .hg, used to access repo files outside of .hg/store
354 self.vfs = None
357 self.vfs = None
355 # svfs: usually rooted at .hg/store, used to access repository history
358 # svfs: usually rooted at .hg/store, used to access repository history
356 # If this is a shared repository, this vfs may point to another
359 # If this is a shared repository, this vfs may point to another
357 # repository's .hg/store directory.
360 # repository's .hg/store directory.
358 self.svfs = None
361 self.svfs = None
359 self.root = self.wvfs.base
362 self.root = self.wvfs.base
360 self.path = self.wvfs.join(".hg")
363 self.path = self.wvfs.join(".hg")
361 self.origroot = path
364 self.origroot = path
362 # These auditors are not used by the vfs;
365 # These auditors are not used by the vfs;
363 # as of this writing they are only used by basectx.match
366 # as of this writing they are only used by basectx.match
364 self.auditor = pathutil.pathauditor(self.root, self._checknested)
367 self.auditor = pathutil.pathauditor(self.root, self._checknested)
365 self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
368 self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
366 realfs=False, cached=True)
369 realfs=False, cached=True)
367 self.baseui = baseui
370 self.baseui = baseui
368 self.ui = baseui.copy()
371 self.ui = baseui.copy()
369 self.ui.copy = baseui.copy # prevent copying repo configuration
372 self.ui.copy = baseui.copy # prevent copying repo configuration
370 self.vfs = vfsmod.vfs(self.path, cacheaudited=True)
373 self.vfs = vfsmod.vfs(self.path, cacheaudited=True)
371 if (self.ui.configbool('devel', 'all-warnings') or
374 if (self.ui.configbool('devel', 'all-warnings') or
372 self.ui.configbool('devel', 'check-locks')):
375 self.ui.configbool('devel', 'check-locks')):
373 self.vfs.audit = self._getvfsward(self.vfs.audit)
376 self.vfs.audit = self._getvfsward(self.vfs.audit)
374 # A list of callbacks to shape the phase if no data were found.
377 # A list of callbacks to shape the phase if no data were found.
375 # Callbacks are in the form: func(repo, roots) --> processed root.
378 # Callbacks are in the form: func(repo, roots) --> processed root.
376 # This list is to be filled by extensions during repo setup
379 # This list is to be filled by extensions during repo setup
377 self._phasedefaults = []
380 self._phasedefaults = []
378 try:
381 try:
379 self.ui.readconfig(self.vfs.join("hgrc"), self.root)
382 self.ui.readconfig(self.vfs.join("hgrc"), self.root)
380 self._loadextensions()
383 self._loadextensions()
381 except IOError:
384 except IOError:
382 pass
385 pass
383
386
384 if self.featuresetupfuncs:
387 if self.featuresetupfuncs:
385 self.supported = set(self._basesupported) # use private copy
388 self.supported = set(self._basesupported) # use private copy
386 extmods = set(m.__name__ for n, m
389 extmods = set(m.__name__ for n, m
387 in extensions.extensions(self.ui))
390 in extensions.extensions(self.ui))
388 for setupfunc in self.featuresetupfuncs:
391 for setupfunc in self.featuresetupfuncs:
389 if setupfunc.__module__ in extmods:
392 if setupfunc.__module__ in extmods:
390 setupfunc(self.ui, self.supported)
393 setupfunc(self.ui, self.supported)
391 else:
394 else:
392 self.supported = self._basesupported
395 self.supported = self._basesupported
393 color.setup(self.ui)
396 color.setup(self.ui)
394
397
395 # Add compression engines.
398 # Add compression engines.
396 for name in util.compengines:
399 for name in util.compengines:
397 engine = util.compengines[name]
400 engine = util.compengines[name]
398 if engine.revlogheader():
401 if engine.revlogheader():
399 self.supported.add('exp-compression-%s' % name)
402 self.supported.add('exp-compression-%s' % name)
400
403
401 if not self.vfs.isdir():
404 if not self.vfs.isdir():
402 if create:
405 if create:
403 self.requirements = newreporequirements(self)
406 self.requirements = newreporequirements(self)
404
407
405 if not self.wvfs.exists():
408 if not self.wvfs.exists():
406 self.wvfs.makedirs()
409 self.wvfs.makedirs()
407 self.vfs.makedir(notindexed=True)
410 self.vfs.makedir(notindexed=True)
408
411
409 if 'store' in self.requirements:
412 if 'store' in self.requirements:
410 self.vfs.mkdir("store")
413 self.vfs.mkdir("store")
411
414
412 # create an invalid changelog
415 # create an invalid changelog
413 self.vfs.append(
416 self.vfs.append(
414 "00changelog.i",
417 "00changelog.i",
415 '\0\0\0\2' # represents revlogv2
418 '\0\0\0\2' # represents revlogv2
416 ' dummy changelog to prevent using the old repo layout'
419 ' dummy changelog to prevent using the old repo layout'
417 )
420 )
418 else:
421 else:
419 raise error.RepoError(_("repository %s not found") % path)
422 raise error.RepoError(_("repository %s not found") % path)
420 elif create:
423 elif create:
421 raise error.RepoError(_("repository %s already exists") % path)
424 raise error.RepoError(_("repository %s already exists") % path)
422 else:
425 else:
423 try:
426 try:
424 self.requirements = scmutil.readrequires(
427 self.requirements = scmutil.readrequires(
425 self.vfs, self.supported)
428 self.vfs, self.supported)
426 except IOError as inst:
429 except IOError as inst:
427 if inst.errno != errno.ENOENT:
430 if inst.errno != errno.ENOENT:
428 raise
431 raise
429
432
430 cachepath = self.vfs.join('cache')
433 cachepath = self.vfs.join('cache')
431 self.sharedpath = self.path
434 self.sharedpath = self.path
432 try:
435 try:
433 sharedpath = self.vfs.read("sharedpath").rstrip('\n')
436 sharedpath = self.vfs.read("sharedpath").rstrip('\n')
434 if 'relshared' in self.requirements:
437 if 'relshared' in self.requirements:
435 sharedpath = self.vfs.join(sharedpath)
438 sharedpath = self.vfs.join(sharedpath)
436 vfs = vfsmod.vfs(sharedpath, realpath=True)
439 vfs = vfsmod.vfs(sharedpath, realpath=True)
437 cachepath = vfs.join('cache')
440 cachepath = vfs.join('cache')
438 s = vfs.base
441 s = vfs.base
439 if not vfs.exists():
442 if not vfs.exists():
440 raise error.RepoError(
443 raise error.RepoError(
441 _('.hg/sharedpath points to nonexistent directory %s') % s)
444 _('.hg/sharedpath points to nonexistent directory %s') % s)
442 self.sharedpath = s
445 self.sharedpath = s
443 except IOError as inst:
446 except IOError as inst:
444 if inst.errno != errno.ENOENT:
447 if inst.errno != errno.ENOENT:
445 raise
448 raise
446
449
447 if 'exp-sparse' in self.requirements and not sparse.enabled:
450 if 'exp-sparse' in self.requirements and not sparse.enabled:
448 raise error.RepoError(_('repository is using sparse feature but '
451 raise error.RepoError(_('repository is using sparse feature but '
449 'sparse is not enabled; enable the '
452 'sparse is not enabled; enable the '
450 '"sparse" extensions to access'))
453 '"sparse" extensions to access'))
451
454
452 self.store = store.store(
455 self.store = store.store(
453 self.requirements, self.sharedpath,
456 self.requirements, self.sharedpath,
454 lambda base: vfsmod.vfs(base, cacheaudited=True))
457 lambda base: vfsmod.vfs(base, cacheaudited=True))
455 self.spath = self.store.path
458 self.spath = self.store.path
456 self.svfs = self.store.vfs
459 self.svfs = self.store.vfs
457 self.sjoin = self.store.join
460 self.sjoin = self.store.join
458 self.vfs.createmode = self.store.createmode
461 self.vfs.createmode = self.store.createmode
459 self.cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
462 self.cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
460 self.cachevfs.createmode = self.store.createmode
463 self.cachevfs.createmode = self.store.createmode
461 if (self.ui.configbool('devel', 'all-warnings') or
464 if (self.ui.configbool('devel', 'all-warnings') or
462 self.ui.configbool('devel', 'check-locks')):
465 self.ui.configbool('devel', 'check-locks')):
463 if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
466 if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
464 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
467 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
465 else: # standard vfs
468 else: # standard vfs
466 self.svfs.audit = self._getsvfsward(self.svfs.audit)
469 self.svfs.audit = self._getsvfsward(self.svfs.audit)
467 self._applyopenerreqs()
470 self._applyopenerreqs()
468 if create:
471 if create:
469 self._writerequirements()
472 self._writerequirements()
470
473
471 self._dirstatevalidatewarned = False
474 self._dirstatevalidatewarned = False
472
475
473 self._branchcaches = {}
476 self._branchcaches = {}
474 self._revbranchcache = None
477 self._revbranchcache = None
475 self.filterpats = {}
478 self.filterpats = {}
476 self._datafilters = {}
479 self._datafilters = {}
477 self._transref = self._lockref = self._wlockref = None
480 self._transref = self._lockref = self._wlockref = None
478
481
479 # A cache for various files under .hg/ that tracks file changes,
482 # A cache for various files under .hg/ that tracks file changes,
480 # (used by the filecache decorator)
483 # (used by the filecache decorator)
481 #
484 #
482 # Maps a property name to its util.filecacheentry
485 # Maps a property name to its util.filecacheentry
483 self._filecache = {}
486 self._filecache = {}
484
487
485 # hold sets of revision to be filtered
488 # hold sets of revision to be filtered
486 # should be cleared when something might have changed the filter value:
489 # should be cleared when something might have changed the filter value:
487 # - new changesets,
490 # - new changesets,
488 # - phase change,
491 # - phase change,
489 # - new obsolescence marker,
492 # - new obsolescence marker,
490 # - working directory parent change,
493 # - working directory parent change,
491 # - bookmark changes
494 # - bookmark changes
492 self.filteredrevcache = {}
495 self.filteredrevcache = {}
493
496
494 # post-dirstate-status hooks
497 # post-dirstate-status hooks
495 self._postdsstatus = []
498 self._postdsstatus = []
496
499
497 # Cache of types representing filtered repos.
500 # Cache of types representing filtered repos.
498 self._filteredrepotypes = weakref.WeakKeyDictionary()
501 self._filteredrepotypes = weakref.WeakKeyDictionary()
499
502
500 # generic mapping between names and nodes
503 # generic mapping between names and nodes
501 self.names = namespaces.namespaces()
504 self.names = namespaces.namespaces()
502
505
503 # Key to signature value.
506 # Key to signature value.
504 self._sparsesignaturecache = {}
507 self._sparsesignaturecache = {}
505 # Signature to cached matcher instance.
508 # Signature to cached matcher instance.
506 self._sparsematchercache = {}
509 self._sparsematchercache = {}
507
510
508 def _getvfsward(self, origfunc):
511 def _getvfsward(self, origfunc):
509 """build a ward for self.vfs"""
512 """build a ward for self.vfs"""
510 rref = weakref.ref(self)
513 rref = weakref.ref(self)
511 def checkvfs(path, mode=None):
514 def checkvfs(path, mode=None):
512 ret = origfunc(path, mode=mode)
515 ret = origfunc(path, mode=mode)
513 repo = rref()
516 repo = rref()
514 if (repo is None
517 if (repo is None
515 or not util.safehasattr(repo, '_wlockref')
518 or not util.safehasattr(repo, '_wlockref')
516 or not util.safehasattr(repo, '_lockref')):
519 or not util.safehasattr(repo, '_lockref')):
517 return
520 return
518 if mode in (None, 'r', 'rb'):
521 if mode in (None, 'r', 'rb'):
519 return
522 return
520 if path.startswith(repo.path):
523 if path.startswith(repo.path):
521 # truncate name relative to the repository (.hg)
524 # truncate name relative to the repository (.hg)
522 path = path[len(repo.path) + 1:]
525 path = path[len(repo.path) + 1:]
523 if path.startswith('cache/'):
526 if path.startswith('cache/'):
524 msg = 'accessing cache with vfs instead of cachevfs: "%s"'
527 msg = 'accessing cache with vfs instead of cachevfs: "%s"'
525 repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
528 repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
526 if path.startswith('journal.'):
529 if path.startswith('journal.'):
527 # journal is covered by 'lock'
530 # journal is covered by 'lock'
528 if repo._currentlock(repo._lockref) is None:
531 if repo._currentlock(repo._lockref) is None:
529 repo.ui.develwarn('write with no lock: "%s"' % path,
532 repo.ui.develwarn('write with no lock: "%s"' % path,
530 stacklevel=2, config='check-locks')
533 stacklevel=2, config='check-locks')
531 elif repo._currentlock(repo._wlockref) is None:
534 elif repo._currentlock(repo._wlockref) is None:
532 # rest of vfs files are covered by 'wlock'
535 # rest of vfs files are covered by 'wlock'
533 #
536 #
534 # exclude special files
537 # exclude special files
535 for prefix in self._wlockfreeprefix:
538 for prefix in self._wlockfreeprefix:
536 if path.startswith(prefix):
539 if path.startswith(prefix):
537 return
540 return
538 repo.ui.develwarn('write with no wlock: "%s"' % path,
541 repo.ui.develwarn('write with no wlock: "%s"' % path,
539 stacklevel=2, config='check-locks')
542 stacklevel=2, config='check-locks')
540 return ret
543 return ret
541 return checkvfs
544 return checkvfs
542
545
543 def _getsvfsward(self, origfunc):
546 def _getsvfsward(self, origfunc):
544 """build a ward for self.svfs"""
547 """build a ward for self.svfs"""
545 rref = weakref.ref(self)
548 rref = weakref.ref(self)
546 def checksvfs(path, mode=None):
549 def checksvfs(path, mode=None):
547 ret = origfunc(path, mode=mode)
550 ret = origfunc(path, mode=mode)
548 repo = rref()
551 repo = rref()
549 if repo is None or not util.safehasattr(repo, '_lockref'):
552 if repo is None or not util.safehasattr(repo, '_lockref'):
550 return
553 return
551 if mode in (None, 'r', 'rb'):
554 if mode in (None, 'r', 'rb'):
552 return
555 return
553 if path.startswith(repo.sharedpath):
556 if path.startswith(repo.sharedpath):
554 # truncate name relative to the repository (.hg)
557 # truncate name relative to the repository (.hg)
555 path = path[len(repo.sharedpath) + 1:]
558 path = path[len(repo.sharedpath) + 1:]
556 if repo._currentlock(repo._lockref) is None:
559 if repo._currentlock(repo._lockref) is None:
557 repo.ui.develwarn('write with no lock: "%s"' % path,
560 repo.ui.develwarn('write with no lock: "%s"' % path,
558 stacklevel=3)
561 stacklevel=3)
559 return ret
562 return ret
560 return checksvfs
563 return checksvfs
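
    # Illustrative sketch (not from the original source): the wards above
    # only take effect when developer warnings are enabled, e.g. via::
    #
    #   [devel]
    #   all-warnings = True
    #
    # after which an unlocked store write triggers a develwarn like
    # 'write with no lock: "..."'.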

    def close(self):
        self._writecaches()

    def _loadextensions(self):
        extensions.loadall(self.ui)

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool('experimental', 'bundle2-advertise'):
            caps = set(caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
            caps.add('bundle2=' + urlreq.quote(capsblob))
        return caps

    def _applyopenerreqs(self):
        self.svfs.options = dict((r, 1) for r in self.requirements
                                 if r in self.openerreqs)
        # experimental config: format.chunkcachesize
        chunkcachesize = self.ui.configint('format', 'chunkcachesize')
        if chunkcachesize is not None:
            self.svfs.options['chunkcachesize'] = chunkcachesize
        # experimental config: format.maxchainlen
        maxchainlen = self.ui.configint('format', 'maxchainlen')
        if maxchainlen is not None:
            self.svfs.options['maxchainlen'] = maxchainlen
        # experimental config: format.manifestcachesize
        manifestcachesize = self.ui.configint('format', 'manifestcachesize')
        if manifestcachesize is not None:
            self.svfs.options['manifestcachesize'] = manifestcachesize
        # experimental config: format.aggressivemergedeltas
        aggressivemergedeltas = self.ui.configbool('format',
                                                   'aggressivemergedeltas')
        self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
        self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
        chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan', -1)
        if 0 <= chainspan:
            self.svfs.options['maxdeltachainspan'] = chainspan

        for r in self.requirements:
            if r.startswith('exp-compression-'):
                self.svfs.options['compengine'] = r[len('exp-compression-'):]

        # TODO move "revlogv2" to openerreqs once finalized.
        if REVLOGV2_REQUIREMENT in self.requirements:
            self.svfs.options['revlogv2'] = True
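
    # Illustrative sketch (hypothetical requirement name): a requirements
    # entry such as 'exp-compression-zstd' is mapped above to
    # self.svfs.options['compengine'] == 'zstd', i.e. the engine name is
    # whatever follows the 'exp-compression-' prefix.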

    def _writerequirements(self):
        scmutil.writerequires(self.vfs, self.requirements)

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name):
        """Return a filtered version of a repository"""
        # Python <3.4 easily leaks types via __mro__. See
        # https://bugs.python.org/issue17950. We cache dynamically
        # created types so this method doesn't leak on every
        # invocation.

        key = self.unfiltered().__class__
        if key not in self._filteredrepotypes:
            # Build a new type with the repoview mixin and the base
            # class of this repo. Give it a name containing the
            # filter name to aid debugging.
            bases = (repoview.repoview, key)
            cls = type(r'%sfilteredrepo' % name, bases, {})
            self._filteredrepotypes[key] = cls

        return self._filteredrepotypes[key](self, name)
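
    # Illustrative sketch (assuming the standard 'visible' filter): the
    # cached type keeps repeated calls cheap and yields debuggable names,
    # e.g. type(repo.filtered('visible')).__name__ == 'visiblefilteredrepo'.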

    @repofilecache('bookmarks', 'bookmarks.current')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phaserevs and _phasesets depend on changelog. what we need is to
    # call _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @storecache('00changelog.i')
    def changelog(self):
        return changelog.changelog(self.svfs,
                                   trypending=txnutil.mayhavepending(self.root))

    def _constructmanifest(self):
        # This is a temporary function while we migrate from manifest to
        # manifestlog. It allows bundlerepo and unionrepo to intercept the
        # manifest creation.
        return manifest.manifestrevlog(self.svfs)

    @storecache('00manifest.i')
    def manifestlog(self):
        return manifest.manifestlog(self.svfs, self)

    @repofilecache('dirstate')
    def dirstate(self):
        sparsematchfn = lambda: sparse.matcher(self)

        return dirstate.dirstate(self.vfs, self.ui, self.root,
                                 self._dirstatevalidate, sparsematchfn)

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(_("warning: ignoring unknown"
                               " working parent %s!\n") % short(node))
            return nullid

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [context.changectx(self, i)
                    for i in xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        try:
            return context.changectx(self, changeid)
        except error.WdirUnsupported:
            return context.workingctx(self)
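
    # Usage sketch (illustrative only): repo[None] yields the working
    # context, repo['tip'] a changectx, and repo[0:3] a list of changectx
    # objects with filtered revisions silently skipped.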

    def __contains__(self, changeid):
        """True if the given changeid exists

        error.LookupError is raised if an ambiguous node is specified.
        """
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a revset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        expr = revsetlang.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self)
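
    # Usage sketch (illustrative only): the %-formatting escapes arguments,
    # so repo.revs('ancestors(%d) and not %ln', rev, nodes) is safe where
    # manual string interpolation into the revset would not be.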

    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]

    def anyrevs(self, specs, user=False, localalias=None):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        '''
        if user:
            m = revset.matchany(self.ui, specs, repo=self,
                                localalias=localalias)
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)
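
    # Usage sketch (hypothetical alias name):
    #   repo.anyrevs(['mine()'], user=True,
    #                localalias={'mine': 'draft() and user("alice")'})
    # resolves 'mine' from localalias in preference to any user-level alias.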

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = dict((tag, 'global') for tag in alltags)

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_("unknown branch '%s'") % branch)
            else:
                pass
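
    # Usage sketch (illustrative only): repo.branchtip('default') returns
    # the tip node of that branch; an unknown branch either raises
    # RepoLookupError or, with ignoremissing=True, falls through and
    # returns None.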

    def lookup(self, key):
        return self[key].node()

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        cl = self.changelog
        nm = cl.nodemap
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool('phases', 'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return 'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.svfs, f)

    def changectx(self, changeid):
        return self[changeid]

    def setparents(self, p1, p2=nullid):
        with self.dirstate.parentchange():
            copies = self.dirstate.setparents(p1, p2)
            pctx = self[p1]
            if copies:
                # Adjust copy records, the dirstate cannot do it, it
                # requires access to parents manifests. Preserve them
                # only for entries added to first parent.
                for f in copies:
                    if f not in pctx and copies[f] in pctx:
                        self.dirstate.copy(copies[f], f)
            if p2 == nullid:
                for f, s in sorted(self.dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        self.dirstate.copy(None, f)

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]
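
    # Illustrative sketch of the configuration this reads (hypothetical
    # pattern and command)::
    #
    #   [encode]
    #   *.txt = dos2unix
    #
    # Each entry becomes a (matcher, filterfn, params) triple; a command of
    # '!' disables a pattern inherited from broader configuration.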

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False):
        """write ``data`` into ``filename`` in the working directory

        This returns length of written (maybe decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data, backgroundclose=backgroundclose)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
        return len(data)
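
    # Behavior sketch (illustrative only): flags 'l' writes ``data`` as a
    # symlink target, 'x' additionally sets the executable bit, and empty
    # flags produce a plain file write.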

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
                or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError('transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            scmutil.registersummarycallback(self, tr, desc)
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        idbase = "%.40f#%f" % (random.random(), time.time())
        ha = hex(hashlib.sha1(idbase).digest())
        txnid = 'TXN:' + ha
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
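
        # Illustrative note: txnid therefore has the form
        # 'TXN:<40 hex digits>'; hooks such as pretxnopen see it as the
        # HG_TXNID environment variable (assumption based on the usual
        # HG_-prefixed hook argument convention).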

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite hard
        # to track these movements from a code perspective. So we fall back to
        # tracking at the repository level. One could envision tracking changes
        # to the '.hgtags' file through changegroup apply, but that fails to
        # cope with cases where a transaction exposes new heads without a
        # changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since it likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current
        # focus is on the behavior of the feature, so we disable it by
        # default. The flag will be removed when we are happy with the
        # performance impact.
        #
        # Once this feature is no longer experimental, move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition the details of
        # these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
1189 tracktags = lambda x: None
1192 tracktags = lambda x: None
1190 # experimental config: experimental.hook-track-tags
1193 # experimental config: experimental.hook-track-tags
1191 shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
1194 shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
1192 if desc != 'strip' and shouldtracktags:
1195 if desc != 'strip' and shouldtracktags:
1193 oldheads = self.changelog.headrevs()
1196 oldheads = self.changelog.headrevs()
1194 def tracktags(tr2):
1197 def tracktags(tr2):
1195 repo = reporef()
1198 repo = reporef()
1196 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
1199 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
1197 newheads = repo.changelog.headrevs()
1200 newheads = repo.changelog.headrevs()
1198 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
1201 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
1199 # notes: we compare lists here.
1202 # notes: we compare lists here.
1200 # As we do it only once buiding set would not be cheaper
1203 # As we do it only once buiding set would not be cheaper
1201 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
1204 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
1202 if changes:
1205 if changes:
1203 tr2.hookargs['tag_moved'] = '1'
1206 tr2.hookargs['tag_moved'] = '1'
1204 with repo.vfs('changes/tags.changes', 'w',
1207 with repo.vfs('changes/tags.changes', 'w',
1205 atomictemp=True) as changesfile:
1208 atomictemp=True) as changesfile:
1206 # note: we do not register the file to the transaction
1209 # note: we do not register the file to the transaction
1207 # because we needs it to still exist on the transaction
1210 # because we needs it to still exist on the transaction
1208 # is close (for txnclose hooks)
1211 # is close (for txnclose hooks)
1209 tagsmod.writediff(changesfile, changes)
1212 tagsmod.writediff(changesfile, changes)
1210 def validate(tr2):
1213 def validate(tr2):
1211 """will run pre-closing hooks"""
1214 """will run pre-closing hooks"""
1212 # XXX the transaction API is a bit lacking here so we take a hacky
1215 # XXX the transaction API is a bit lacking here so we take a hacky
1213 # path for now
1216 # path for now
1214 #
1217 #
1215 # We cannot add this as a "pending" hooks since the 'tr.hookargs'
1218 # We cannot add this as a "pending" hooks since the 'tr.hookargs'
1216 # dict is copied before these run. In addition we needs the data
1219 # dict is copied before these run. In addition we needs the data
1217 # available to in memory hooks too.
1220 # available to in memory hooks too.
1218 #
1221 #
1219 # Moreover, we also need to make sure this runs before txnclose
1222 # Moreover, we also need to make sure this runs before txnclose
1220 # hooks and there is no "pending" mechanism that would execute
1223 # hooks and there is no "pending" mechanism that would execute
1221 # logic only if hooks are about to run.
1224 # logic only if hooks are about to run.
1222 #
1225 #
1223 # Fixing this limitation of the transaction is also needed to track
1226 # Fixing this limitation of the transaction is also needed to track
1224 # other families of changes (bookmarks, phases, obsolescence).
1227 # other families of changes (bookmarks, phases, obsolescence).
1225 #
1228 #
1226 # This will have to be fixed before we remove the experimental
1229 # This will have to be fixed before we remove the experimental
1227 # gating.
1230 # gating.
1228 tracktags(tr2)
1231 tracktags(tr2)
1229 reporef().hook('pretxnclose', throw=True,
1232 reporef().hook('pretxnclose', throw=True,
1230 txnname=desc, **pycompat.strkwargs(tr.hookargs))
1233 txnname=desc, **pycompat.strkwargs(tr.hookargs))
1231 def releasefn(tr, success):
1234 def releasefn(tr, success):
1232 repo = reporef()
1235 repo = reporef()
1233 if success:
1236 if success:
1234 # this should be explicitly invoked here, because
1237 # this should be explicitly invoked here, because
1235 # in-memory changes aren't written out at closing
1238 # in-memory changes aren't written out at closing
1236 # transaction, if tr.addfilegenerator (via
1239 # transaction, if tr.addfilegenerator (via
1237 # dirstate.write or so) isn't invoked while
1240 # dirstate.write or so) isn't invoked while
1238 # transaction running
1241 # transaction running
1239 repo.dirstate.write(None)
1242 repo.dirstate.write(None)
1240 else:
1243 else:
1241 # discard all changes (including ones already written
1244 # discard all changes (including ones already written
1242 # out) in this transaction
1245 # out) in this transaction
1243 repo.dirstate.restorebackup(None, 'journal.dirstate')
1246 repo.dirstate.restorebackup(None, 'journal.dirstate')
1244
1247
1245 repo.invalidate(clearfilecache=True)
1248 repo.invalidate(clearfilecache=True)
1246
1249
1247 tr = transaction.transaction(rp, self.svfs, vfsmap,
1250 tr = transaction.transaction(rp, self.svfs, vfsmap,
1248 "journal",
1251 "journal",
1249 "undo",
1252 "undo",
1250 aftertrans(renames),
1253 aftertrans(renames),
1251 self.store.createmode,
1254 self.store.createmode,
1252 validator=validate,
1255 validator=validate,
1253 releasefn=releasefn,
1256 releasefn=releasefn,
1254 checkambigfiles=_cachedfiles)
1257 checkambigfiles=_cachedfiles)
1255 tr.changes['revs'] = set()
1258 tr.changes['revs'] = set()
1256 tr.changes['obsmarkers'] = set()
1259 tr.changes['obsmarkers'] = set()
1257 tr.changes['phases'] = {}
1260 tr.changes['phases'] = {}
1258 tr.changes['bookmarks'] = {}
1261 tr.changes['bookmarks'] = {}
1259
1262
1260 tr.hookargs['txnid'] = txnid
1263 tr.hookargs['txnid'] = txnid
1261 # note: writing the fncache only during finalize mean that the file is
1264 # note: writing the fncache only during finalize mean that the file is
1262 # outdated when running hooks. As fncache is used for streaming clone,
1265 # outdated when running hooks. As fncache is used for streaming clone,
1263 # this is not expected to break anything that happen during the hooks.
1266 # this is not expected to break anything that happen during the hooks.
1264 tr.addfinalize('flush-fncache', self.store.write)
1267 tr.addfinalize('flush-fncache', self.store.write)
1265 def txnclosehook(tr2):
1268 def txnclosehook(tr2):
1266 """To be run if transaction is successful, will schedule a hook run
1269 """To be run if transaction is successful, will schedule a hook run
1267 """
1270 """
1268 # Don't reference tr2 in hook() so we don't hold a reference.
1271 # Don't reference tr2 in hook() so we don't hold a reference.
1269 # This reduces memory consumption when there are multiple
1272 # This reduces memory consumption when there are multiple
1270 # transactions per lock. This can likely go away if issue5045
1273 # transactions per lock. This can likely go away if issue5045
1271 # fixes the function accumulation.
1274 # fixes the function accumulation.
1272 hookargs = tr2.hookargs
1275 hookargs = tr2.hookargs
1273
1276
1274 def hook():
1277 def hook():
1275 reporef().hook('txnclose', throw=False, txnname=desc,
1278 reporef().hook('txnclose', throw=False, txnname=desc,
1276 **pycompat.strkwargs(hookargs))
1279 **pycompat.strkwargs(hookargs))
1277 reporef()._afterlock(hook)
1280 reporef()._afterlock(hook)
1278 tr.addfinalize('txnclose-hook', txnclosehook)
1281 tr.addfinalize('txnclose-hook', txnclosehook)
1279 tr.addpostclose('warms-cache', self._buildcacheupdater(tr))
1282 tr.addpostclose('warms-cache', self._buildcacheupdater(tr))
        def txnaborthook(tr2):
            """To be run if the transaction is aborted.
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **tr2.hookargs)
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if the transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr
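
    # Illustrative sketch (not part of the original file): the txnclose and
    # txnabort hooks scheduled above are what user configuration subscribes
    # to; a hedged example, where myhooks.ontxnclose is hypothetical:
    #
    #   [hooks]
    #   txnclose.notify = python:myhooks.ontxnclose
    #
    # Such a hook receives the transaction's hookargs (txnid, txnname, ...)
    # among its keyword arguments.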

    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
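
    # Illustrative note (not part of the original file): undoname() (defined
    # elsewhere in this module) maps each journal file to its rollback
    # counterpart, so the pairing is expected to look like:
    #
    #   ('journal', 'undo')
    #   ('journal.dirstate', 'undo.dirstate')
    #   ('journal.phaseroots', 'undo.phaseroots')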

    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, 'journal.dirstate')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

    def recover(self):
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn,
                                     checkambigfiles=_cachedfiles)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = dirstateguard.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
                             checkambigfiles=_cachedfiles)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting the already restored one
            dsguard.close()

            self.dirstate.restorebackup(None, 'undo.dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def _buildcacheupdater(self, newtransaction):
        """called during a transaction to build the callback updating caches

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def updater(tr):
            repo = reporef()
            repo.updatecaches(tr)
        return updater
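
    # Illustrative sketch (not part of the original file): an extension that
    # wants to warm its own cache after each transaction could wrap this
    # method; mywarmcache below is hypothetical.
    #
    #   from mercurial import extensions, localrepo
    #
    #   def wrapped(orig, repo, newtransaction):
    #       updater = orig(repo, newtransaction)
    #       def myupdater(tr):
    #           updater(tr)
    #           mywarmcache(repo, tr)
    #       return myupdater
    #
    #   extensions.wrapfunction(localrepo.localrepository,
    #                           '_buildcacheupdater', wrapped)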

    @unfilteredmethod
    def updatecaches(self, tr=None):
        """warm appropriate caches

        If this function is called after a transaction has closed, the
        transaction will be available in the 'tr' argument. This can be used
        to selectively update caches relevant to the changes in that
        transaction.
        """
        if tr is not None and tr.hookargs.get('source') == 'strip':
            # During strip, many caches are invalid but a
            # later call to `destroyed` will refresh them.
            return

        if tr is None or tr.changes['revs']:
            # updating the unfiltered branchmap should refresh all the others,
            self.ui.debug('updating the branch cache\n')
            branchmap.updatecache(self.filtered('served'))

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has been.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. an incomplete fncache causes unintentional failure, but
        a redundant one doesn't).
        '''
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue
            if (k == 'changelog' and
                self.currenttransaction() and
                self.changelog._delayed):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)
        try:
            l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
                             acquirefn=acquirefn, desc=desc,
                             inheritchecker=inheritchecker,
                             parentlock=parentlock)
        except error.LockHeld as inst:
            if not wait:
                raise
            # show more details for new-style locks
            if ':' in inst.locker:
                host, pid = inst.locker.split(":", 1)
                self.ui.warn(
                    _("waiting for lock on %s held by process %r "
                      "on host %r\n") % (desc, pid, host))
            else:
                self.ui.warn(_("waiting for lock on %s held by %r\n") %
                             (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout")),
                             releasefn=releasefn, acquirefn=acquirefn,
                             desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()
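
    # Illustrative sketch (not part of the original file): a typical use is
    # deferring work until every lock is released, e.g.:
    #
    #   def notify():
    #       repo.ui.note('repository fully unlocked\n')
    #   repo._afterlock(notify)  # runs immediately if no lock is held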

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                'wlock cannot be inherited in the middle of a transaction')

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause a dead-lock as it would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l
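
    # Illustrative sketch (not part of the original file): the documented
    # acquisition order, 'wlock' before 'lock', assuming both lock objects
    # are used as context managers:
    #
    #   with repo.wlock():
    #       with repo.lock():
    #           with repo.transaction('my-operation') as tr:
    #               pass  # mutate the store here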

    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if manifest1.flags(fname) != fctx.flags():
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense
            # to do (what does a copy from something not in your working copy
            # even mean?) and it causes bugs (e.g. issue4476). Instead, we
            # will warn the user that copy information was dropped, so if they
            # didn't expect this outcome it can be fixed, but this is the
            # correct behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
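
    # Illustrative note (not part of the original file): for a rename of
    # 'foo' to 'bar', the filelog metadata written above is expected to be
    # shaped like
    #
    #   meta = {'copy': 'foo', 'copyrev': '<40-char hex of foo revision>'}
    #
    # with fparent1 set to nullid so readers know to look up the copy data.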

    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in status.modified, status.added, status.removed:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise error.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    dirtyreason = wctx.sub(s).dirtyreason(True)
                    if dirtyreason:
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise error.Abort(dirtyreason,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise error.Abort(
                            _("can't commit subrepos without .hgsub"))
                    status.modified.insert(0, '.hgsubstate')

            elif '.hgsub' in status.removed:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in (status.modified + status.added +
                                          status.removed)):
                    status.removed.insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (e.g. histedit):
            # the temporary commit may get stripped before the hook is run
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret
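
    # Illustrative sketch (not part of the original file): a minimal caller
    # committing all working-directory changes:
    #
    #   node = repo.commit(text='fix bug', user='alice <alice@example.com>')
    #   if node is None:
    #       repo.ui.status('nothing changed\n')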

    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                mn = ctx.manifestnode()
                files = ctx.files()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = mctx.write(trp, linkrev,
                                p1.manifestnode(), p2.manifestnode(),
                                added, drop)
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit in the proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the phase boundary does not alter the parent
                # changesets: if a parent has a higher phase, the resulting
                # phase will be compliant anyway.
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.registernew(self, tr, targetphase, [n])
            tr.close()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated, causing those
        changes to stay in memory (waiting for the next unlock) or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()
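
    # Illustrative sketch (not part of the original file): strip-like callers
    # are expected to bracket history destruction with these two calls:
    #
    #   repo.destroying()
    #   ...                # remove revisions from the store
    #   repo.destroyed()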

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        self.ui.deprecwarn('use repo[node].walk instead of repo.walk', '4.3')
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)
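
    # Illustrative sketch (not part of the original file):
    #
    #   st = repo.status(ignored=True, unknown=True)
    #   for f in st.modified + st.added:
    #       repo.ui.write('changed: %s\n' % f)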

    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the wlock
        couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meanwhile. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure they add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)
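
    # Illustrative sketch (not part of the original file): an extension's
    # dirstate.status wrapper might register a one-shot callback like this
    # (poststatus is hypothetical):
    #
    #   def poststatus(wctx, status):
    #       wctx.repo().ui.note('%d files modified\n' % len(status.modified))
    #   repo.addpostdsstatus(poststatus)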

    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]
2105
2108
2106 def heads(self, start=None):
2109 def heads(self, start=None):
2107 if start is None:
2110 if start is None:
2108 cl = self.changelog
2111 cl = self.changelog
2109 headrevs = reversed(cl.headrevs())
2112 headrevs = reversed(cl.headrevs())
2110 return [cl.node(rev) for rev in headrevs]
2113 return [cl.node(rev) for rev in headrevs]
2111
2114
2112 heads = self.changelog.heads(start)
2115 heads = self.changelog.heads(start)
2113 # sort the output in rev descending order
2116 # sort the output in rev descending order
2114 return sorted(heads, key=self.changelog.rev, reverse=True)
2117 return sorted(heads, key=self.changelog.rev, reverse=True)
2115
2118
2116 def branchheads(self, branch=None, start=None, closed=False):
2119 def branchheads(self, branch=None, start=None, closed=False):
2117 '''return a (possibly filtered) list of heads for the given branch
2120 '''return a (possibly filtered) list of heads for the given branch
2118
2121
2119 Heads are returned in topological order, from newest to oldest.
2122 Heads are returned in topological order, from newest to oldest.
2120 If branch is None, use the dirstate branch.
2123 If branch is None, use the dirstate branch.
2121 If start is not None, return only heads reachable from start.
2124 If start is not None, return only heads reachable from start.
2122 If closed is True, return heads that are marked as closed as well.
2125 If closed is True, return heads that are marked as closed as well.
2123 '''
2126 '''
2124 if branch is None:
2127 if branch is None:
2125 branch = self[None].branch()
2128 branch = self[None].branch()
2126 branches = self.branchmap()
2129 branches = self.branchmap()
2127 if branch not in branches:
2130 if branch not in branches:
2128 return []
2131 return []
2129 # the cache returns heads ordered lowest to highest
2132 # the cache returns heads ordered lowest to highest
2130 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
2133 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
2131 if start is not None:
2134 if start is not None:
2132 # filter out the heads that cannot be reached from startrev
2135 # filter out the heads that cannot be reached from startrev
2133 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
2136 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
2134 bheads = [h for h in bheads if h in fbheads]
2137 bheads = [h for h in bheads if h in fbheads]
2135 return bheads
2138 return bheads
2136
2139
2137 def branches(self, nodes):
2140 def branches(self, nodes):
2138 if not nodes:
2141 if not nodes:
2139 nodes = [self.changelog.tip()]
2142 nodes = [self.changelog.tip()]
2140 b = []
2143 b = []
2141 for n in nodes:
2144 for n in nodes:
2142 t = n
2145 t = n
2143 while True:
2146 while True:
2144 p = self.changelog.parents(n)
2147 p = self.changelog.parents(n)
2145 if p[1] != nullid or p[0] == nullid:
2148 if p[1] != nullid or p[0] == nullid:
2146 b.append((t, n, p[0], p[1]))
2149 b.append((t, n, p[0], p[1]))
2147 break
2150 break
2148 n = p[0]
2151 n = p[0]
2149 return b
2152 return b
2150
2153
2151 def between(self, pairs):
2154 def between(self, pairs):
2152 r = []
2155 r = []
2153
2156
2154 for top, bottom in pairs:
2157 for top, bottom in pairs:
2155 n, l, i = top, [], 0
2158 n, l, i = top, [], 0
2156 f = 1
2159 f = 1
2157
2160
2158 while n != bottom and n != nullid:
2161 while n != bottom and n != nullid:
2159 p = self.changelog.parents(n)[0]
2162 p = self.changelog.parents(n)[0]
2160 if i == f:
2163 if i == f:
2161 l.append(n)
2164 l.append(n)
2162 f = f * 2
2165 f = f * 2
2163 n = p
2166 n = p
2164 i += 1
2167 i += 1
2165
2168
2166 r.append(l)
2169 r.append(l)
2167
2170
2168 return r
2171 return r
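    # Sketch of the sampling above: for each (top, bottom) pair a node is
    # recorded at distance i from top along the first-parent chain
    # whenever i == f, with f doubling each time, i.e. at distances
    # 1, 2, 4, 8, ...  A standalone model of the index arithmetic:
    #
    #     def sampledistances(length):
    #         f, i, out = 1, 0, []
    #         while i < length:
    #             if i == f:
    #                 out.append(i)
    #                 f *= 2
    #             i += 1
    #         return out   # sampledistances(20) -> [1, 2, 4, 8, 16]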
2169
2172
2170 def checkpush(self, pushop):
2173 def checkpush(self, pushop):
2171 """Extensions can override this function if additional checks have
2174 """Extensions can override this function if additional checks have
2172 to be performed before pushing, or call it if they override the push
2175 to be performed before pushing, or call it if they override the push
2173 command.
2176 command.
2174 """
2177 """
2175 pass
2178 pass
2176
2179
2177 @unfilteredpropertycache
2180 @unfilteredpropertycache
2178 def prepushoutgoinghooks(self):
2181 def prepushoutgoinghooks(self):
2179 """Return util.hooks consists of a pushop with repo, remote, outgoing
2182 """Return util.hooks consists of a pushop with repo, remote, outgoing
2180 methods, which are called before pushing changesets.
2183 methods, which are called before pushing changesets.
2181 """
2184 """
2182 return util.hooks()
2185 return util.hooks()
2183
2186
2184 def pushkey(self, namespace, key, old, new):
2187 def pushkey(self, namespace, key, old, new):
2185 try:
2188 try:
2186 tr = self.currenttransaction()
2189 tr = self.currenttransaction()
2187 hookargs = {}
2190 hookargs = {}
2188 if tr is not None:
2191 if tr is not None:
2189 hookargs.update(tr.hookargs)
2192 hookargs.update(tr.hookargs)
2190 hookargs['namespace'] = namespace
2193 hookargs['namespace'] = namespace
2191 hookargs['key'] = key
2194 hookargs['key'] = key
2192 hookargs['old'] = old
2195 hookargs['old'] = old
2193 hookargs['new'] = new
2196 hookargs['new'] = new
2194 self.hook('prepushkey', throw=True, **hookargs)
2197 self.hook('prepushkey', throw=True, **hookargs)
2195 except error.HookAbort as exc:
2198 except error.HookAbort as exc:
2196 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
2199 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
2197 if exc.hint:
2200 if exc.hint:
2198 self.ui.write_err(_("(%s)\n") % exc.hint)
2201 self.ui.write_err(_("(%s)\n") % exc.hint)
2199 return False
2202 return False
2200 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2203 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2201 ret = pushkey.push(self, namespace, key, old, new)
2204 ret = pushkey.push(self, namespace, key, old, new)
2202 def runhook():
2205 def runhook():
2203 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2206 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2204 ret=ret)
2207 ret=ret)
2205 self._afterlock(runhook)
2208 self._afterlock(runhook)
2206 return ret
2209 return ret
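    # Usage sketch (hypothetical values): bookmarks are a standard pushkey
    # namespace, with old/new passed as hex node strings ('' for an
    # absent key).  The prepushkey hook may veto the change (pushkey()
    # then returns False); the pushkey hook itself fires once the
    # surrounding lock is released, via _afterlock().
    #
    #     ok = repo.pushkey('bookmarks', 'stable', oldhex, newhex)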
2207
2210
2208 def listkeys(self, namespace):
2211 def listkeys(self, namespace):
2209 self.hook('prelistkeys', throw=True, namespace=namespace)
2212 self.hook('prelistkeys', throw=True, namespace=namespace)
2210 self.ui.debug('listing keys for "%s"\n' % namespace)
2213 self.ui.debug('listing keys for "%s"\n' % namespace)
2211 values = pushkey.list(self, namespace)
2214 values = pushkey.list(self, namespace)
2212 self.hook('listkeys', namespace=namespace, values=values)
2215 self.hook('listkeys', namespace=namespace, values=values)
2213 return values
2216 return values
2214
2217
2215 def debugwireargs(self, one, two, three=None, four=None, five=None):
2218 def debugwireargs(self, one, two, three=None, four=None, five=None):
2216 '''used to test argument passing over the wire'''
2219 '''used to test argument passing over the wire'''
2217 return "%s %s %s %s %s" % (one, two, three, four, five)
2220 return "%s %s %s %s %s" % (one, two, three, four, five)
2218
2221
2219 def savecommitmessage(self, text):
2222 def savecommitmessage(self, text):
2220 fp = self.vfs('last-message.txt', 'wb')
2223 fp = self.vfs('last-message.txt', 'wb')
2221 try:
2224 try:
2222 fp.write(text)
2225 fp.write(text)
2223 finally:
2226 finally:
2224 fp.close()
2227 fp.close()
2225 return self.pathto(fp.name[len(self.root) + 1:])
2228 return self.pathto(fp.name[len(self.root) + 1:])
2226
2229
2227 # used to avoid circular references so destructors work
2230 # used to avoid circular references so destructors work
2228 def aftertrans(files):
2231 def aftertrans(files):
2229 renamefiles = [tuple(t) for t in files]
2232 renamefiles = [tuple(t) for t in files]
2230 def a():
2233 def a():
2231 for vfs, src, dest in renamefiles:
2234 for vfs, src, dest in renamefiles:
2232 # if src and dest refer to the same file, vfs.rename is a no-op,
2235 # if src and dest refer to the same file, vfs.rename is a no-op,
2233 # leaving both src and dest on disk. delete dest to make sure
2236 # leaving both src and dest on disk. delete dest to make sure
2234 # the rename cannot be such a no-op.
2237 # the rename cannot be such a no-op.
2235 vfs.tryunlink(dest)
2238 vfs.tryunlink(dest)
2236 try:
2239 try:
2237 vfs.rename(src, dest)
2240 vfs.rename(src, dest)
2238 except OSError: # journal file does not yet exist
2241 except OSError: # journal file does not yet exist
2239 pass
2242 pass
2240 return a
2243 return a
2241
2244
2242 def undoname(fn):
2245 def undoname(fn):
2243 base, name = os.path.split(fn)
2246 base, name = os.path.split(fn)
2244 assert name.startswith('journal')
2247 assert name.startswith('journal')
2245 return os.path.join(base, name.replace('journal', 'undo', 1))
2248 return os.path.join(base, name.replace('journal', 'undo', 1))
2246
2249
2247 def instance(ui, path, create):
2250 def instance(ui, path, create):
2248 return localrepository(ui, util.urllocalpath(path), create)
2251 return localrepository(ui, util.urllocalpath(path), create)
2249
2252
2250 def islocal(path):
2253 def islocal(path):
2251 return True
2254 return True
2252
2255
2253 def newreporequirements(repo):
2256 def newreporequirements(repo):
2254 """Determine the set of requirements for a new local repository.
2257 """Determine the set of requirements for a new local repository.
2255
2258
2256 Extensions can wrap this function to specify custom requirements for
2259 Extensions can wrap this function to specify custom requirements for
2257 new repositories.
2260 new repositories.
2258 """
2261 """
2259 ui = repo.ui
2262 ui = repo.ui
2260 requirements = {'revlogv1'}
2263 requirements = {'revlogv1'}
2261 if ui.configbool('format', 'usestore'):
2264 if ui.configbool('format', 'usestore'):
2262 requirements.add('store')
2265 requirements.add('store')
2263 if ui.configbool('format', 'usefncache'):
2266 if ui.configbool('format', 'usefncache'):
2264 requirements.add('fncache')
2267 requirements.add('fncache')
2265 if ui.configbool('format', 'dotencode'):
2268 if ui.configbool('format', 'dotencode'):
2266 requirements.add('dotencode')
2269 requirements.add('dotencode')
2267
2270
2268 compengine = ui.config('experimental', 'format.compression')
2271 compengine = ui.config('experimental', 'format.compression')
2269 if compengine not in util.compengines:
2272 if compengine not in util.compengines:
2270 raise error.Abort(_('compression engine %s defined by '
2273 raise error.Abort(_('compression engine %s defined by '
2271 'experimental.format.compression not available') %
2274 'experimental.format.compression not available') %
2272 compengine,
2275 compengine,
2273 hint=_('run "hg debuginstall" to list available '
2276 hint=_('run "hg debuginstall" to list available '
2274 'compression engines'))
2277 'compression engines'))
2275
2278
2276 # zlib is the historical default and doesn't need an explicit requirement.
2279 # zlib is the historical default and doesn't need an explicit requirement.
2277 if compengine != 'zlib':
2280 if compengine != 'zlib':
2278 requirements.add('exp-compression-%s' % compengine)
2281 requirements.add('exp-compression-%s' % compengine)
2279
2282
2280 if scmutil.gdinitconfig(ui):
2283 if scmutil.gdinitconfig(ui):
2281 requirements.add('generaldelta')
2284 requirements.add('generaldelta')
2282 if ui.configbool('experimental', 'treemanifest'):
2285 if ui.configbool('experimental', 'treemanifest'):
2283 requirements.add('treemanifest')
2286 requirements.add('treemanifest')
2284 if ui.configbool('experimental', 'manifestv2'):
2287 if ui.configbool('experimental', 'manifestv2'):
2285 requirements.add('manifestv2')
2288 requirements.add('manifestv2')
2286
2289
2287 revlogv2 = ui.config('experimental', 'revlogv2')
2290 revlogv2 = ui.config('experimental', 'revlogv2')
2288 if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
2291 if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
2289 requirements.remove('revlogv1')
2292 requirements.remove('revlogv1')
2290 # generaldelta is implied by revlogv2.
2293 # generaldelta is implied by revlogv2.
2291 requirements.discard('generaldelta')
2294 requirements.discard('generaldelta')
2292 requirements.add(REVLOGV2_REQUIREMENT)
2295 requirements.add(REVLOGV2_REQUIREMENT)
2293
2296
2294 return requirements
2297 return requirements
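# Extension sketch: since newreporequirements() is documented as wrappable,
# an extension could add a custom requirement roughly like this
# ('exp-myextension' and the 'myext' config section are made-up names):
#
#     def wrapreqs(orig, repo):
#         requirements = orig(repo)
#         if repo.ui.configbool('myext', 'enabled'):
#             requirements.add('exp-myextension')
#         return requirements
#
#     # in the extension's uisetup():
#     extensions.wrapfunction(localrepo, 'newreporequirements', wrapreqs)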
@@ -1,1060 +1,1063
1 # wireproto.py - generic wire protocol support functions
1 # wireproto.py - generic wire protocol support functions
2 #
2 #
3 # Copyright 2005-2010 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2010 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import hashlib
10 import hashlib
11 import os
11 import os
12 import tempfile
12 import tempfile
13
13
14 from .i18n import _
14 from .i18n import _
15 from .node import (
15 from .node import (
16 bin,
16 bin,
17 hex,
17 hex,
18 nullid,
18 nullid,
19 )
19 )
20
20
21 from . import (
21 from . import (
22 bundle2,
22 bundle2,
23 changegroup as changegroupmod,
23 changegroup as changegroupmod,
24 discovery,
24 encoding,
25 encoding,
25 error,
26 error,
26 exchange,
27 exchange,
27 peer,
28 peer,
28 pushkey as pushkeymod,
29 pushkey as pushkeymod,
29 pycompat,
30 pycompat,
30 repository,
31 repository,
31 streamclone,
32 streamclone,
32 util,
33 util,
33 )
34 )
34
35
35 urlerr = util.urlerr
36 urlerr = util.urlerr
36 urlreq = util.urlreq
37 urlreq = util.urlreq
37
38
38 bundle2requiredmain = _('incompatible Mercurial client; bundle2 required')
39 bundle2requiredmain = _('incompatible Mercurial client; bundle2 required')
39 bundle2requiredhint = _('see https://www.mercurial-scm.org/wiki/'
40 bundle2requiredhint = _('see https://www.mercurial-scm.org/wiki/'
40 'IncompatibleClient')
41 'IncompatibleClient')
41 bundle2required = '%s\n(%s)\n' % (bundle2requiredmain, bundle2requiredhint)
42 bundle2required = '%s\n(%s)\n' % (bundle2requiredmain, bundle2requiredhint)
42
43
43 class abstractserverproto(object):
44 class abstractserverproto(object):
44 """abstract class that summarizes the protocol API
45 """abstract class that summarizes the protocol API
45
46
46 Used as reference and documentation.
47 Used as reference and documentation.
47 """
48 """
48
49
49 def getargs(self, args):
50 def getargs(self, args):
50 """return the value for arguments in <args>
51 """return the value for arguments in <args>
51
52
52 returns a list of values (same order as <args>)"""
53 returns a list of values (same order as <args>)"""
53 raise NotImplementedError()
54 raise NotImplementedError()
54
55
55 def getfile(self, fp):
56 def getfile(self, fp):
56 """write the whole content of a file into a file like object
57 """write the whole content of a file into a file like object
57
58
58 The file is in the form::
59 The file is in the form::
59
60
60 (<chunk-size>\n<chunk>)+0\n
61 (<chunk-size>\n<chunk>)+0\n
61
62
62 the chunk size is the ASCII representation of the integer.
63 the chunk size is the ASCII representation of the integer.
63 """
64 """
64 raise NotImplementedError()
65 raise NotImplementedError()
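    # Framing sketch for the (<chunk-size>\n<chunk>)+0\n format above;
    # standalone helpers, not part of any protocol implementation:
    #
    #     def encodechunks(data, size=4096):
    #         for i in range(0, len(data), size):
    #             chunk = data[i:i + size]
    #             yield '%d\n%s' % (len(chunk), chunk)
    #         yield '0\n'               # zero-length terminator
    #
    #     def decodechunks(fp):
    #         while True:
    #             length = int(fp.readline())
    #             if not length:
    #                 return
    #             yield fp.read(length)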
65
66
66 def redirect(self):
67 def redirect(self):
67 """may setup interception for stdout and stderr
68 """may setup interception for stdout and stderr
68
69
69 See also the `restore` method."""
70 See also the `restore` method."""
70 raise NotImplementedError()
71 raise NotImplementedError()
71
72
72 # If the `redirect` function does install interception, the `restore`
73 # If the `redirect` function does install interception, the `restore`
73 # function MUST be defined. If interception is not used, this function
74 # function MUST be defined. If interception is not used, this function
74 # MUST NOT be defined.
75 # MUST NOT be defined.
75 #
76 #
76 # left commented here on purpose
77 # left commented here on purpose
77 #
78 #
78 #def restore(self):
79 #def restore(self):
79 # """reinstall previous stdout and stderr and return intercepted stdout
80 # """reinstall previous stdout and stderr and return intercepted stdout
80 # """
81 # """
81 # raise NotImplementedError()
82 # raise NotImplementedError()
82
83
83 class remoteiterbatcher(peer.iterbatcher):
84 class remoteiterbatcher(peer.iterbatcher):
84 def __init__(self, remote):
85 def __init__(self, remote):
85 super(remoteiterbatcher, self).__init__()
86 super(remoteiterbatcher, self).__init__()
86 self._remote = remote
87 self._remote = remote
87
88
88 def __getattr__(self, name):
89 def __getattr__(self, name):
89 # Validate this method is batchable, since submit() only supports
90 # Validate this method is batchable, since submit() only supports
90 # batchable methods.
91 # batchable methods.
91 fn = getattr(self._remote, name)
92 fn = getattr(self._remote, name)
92 if not getattr(fn, 'batchable', None):
93 if not getattr(fn, 'batchable', None):
93 raise error.ProgrammingError('Attempted to batch a non-batchable '
94 raise error.ProgrammingError('Attempted to batch a non-batchable '
94 'call to %r' % name)
95 'call to %r' % name)
95
96
96 return super(remoteiterbatcher, self).__getattr__(name)
97 return super(remoteiterbatcher, self).__getattr__(name)
97
98
98 def submit(self):
99 def submit(self):
99 """Break the batch request into many patch calls and pipeline them.
100 """Break the batch request into many patch calls and pipeline them.
100
101
101 This is mostly valuable over http where request sizes can be
102 This is mostly valuable over http where request sizes can be
102 limited, but can be used in other places as well.
103 limited, but can be used in other places as well.
103 """
104 """
104 # 2-tuple of (command, arguments) that represents what will be
105 # 2-tuple of (command, arguments) that represents what will be
105 # sent over the wire.
106 # sent over the wire.
106 requests = []
107 requests = []
107
108
108 # 4-tuple of (command, final future, @batchable generator, remote
109 # 4-tuple of (command, final future, @batchable generator, remote
109 # future).
110 # future).
110 results = []
111 results = []
111
112
112 for command, args, opts, finalfuture in self.calls:
113 for command, args, opts, finalfuture in self.calls:
113 mtd = getattr(self._remote, command)
114 mtd = getattr(self._remote, command)
114 batchable = mtd.batchable(mtd.im_self, *args, **opts)
115 batchable = mtd.batchable(mtd.im_self, *args, **opts)
115
116
116 commandargs, fremote = next(batchable)
117 commandargs, fremote = next(batchable)
117 assert fremote
118 assert fremote
118 requests.append((command, commandargs))
119 requests.append((command, commandargs))
119 results.append((command, finalfuture, batchable, fremote))
120 results.append((command, finalfuture, batchable, fremote))
120
121
121 if requests:
122 if requests:
122 self._resultiter = self._remote._submitbatch(requests)
123 self._resultiter = self._remote._submitbatch(requests)
123
124
124 self._results = results
125 self._results = results
125
126
126 def results(self):
127 def results(self):
127 for command, finalfuture, batchable, remotefuture in self._results:
128 for command, finalfuture, batchable, remotefuture in self._results:
128 # Get the raw result, set it in the remote future, feed it
129 # Get the raw result, set it in the remote future, feed it
129 # back into the @batchable generator so it can be decoded, and
130 # back into the @batchable generator so it can be decoded, and
130 # set the result on the final future to this value.
131 # set the result on the final future to this value.
131 remoteresult = next(self._resultiter)
132 remoteresult = next(self._resultiter)
132 remotefuture.set(remoteresult)
133 remotefuture.set(remoteresult)
133 finalfuture.set(next(batchable))
134 finalfuture.set(next(batchable))
134
135
135 # Verify our @batchable generators only emit 2 values.
136 # Verify our @batchable generators only emit 2 values.
136 try:
137 try:
137 next(batchable)
138 next(batchable)
138 except StopIteration:
139 except StopIteration:
139 pass
140 pass
140 else:
141 else:
141 raise error.ProgrammingError('%s @batchable generator emitted '
142 raise error.ProgrammingError('%s @batchable generator emitted '
142 'unexpected value count' % command)
143 'unexpected value count' % command)
143
144
144 yield finalfuture.value
145 yield finalfuture.value
145
146
146 # Forward a couple of names from peer to make wireproto interactions
147 # Forward a couple of names from peer to make wireproto interactions
147 # slightly more sensible.
148 # slightly more sensible.
148 batchable = peer.batchable
149 batchable = peer.batchable
149 future = peer.future
150 future = peer.future
150
151
151 # list of nodes encoding / decoding
152 # list of nodes encoding / decoding
152
153
153 def decodelist(l, sep=' '):
154 def decodelist(l, sep=' '):
154 if l:
155 if l:
155 return map(bin, l.split(sep))
156 return map(bin, l.split(sep))
156 return []
157 return []
157
158
158 def encodelist(l, sep=' '):
159 def encodelist(l, sep=' '):
159 try:
160 try:
160 return sep.join(map(hex, l))
161 return sep.join(map(hex, l))
161 except TypeError:
162 except TypeError:
162 raise
163 raise
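# Round-trip sketch: nodes travel as separator-joined 40-character hex
# strings, and the empty string decodes to the empty list.  With the
# nullid imported above:
#
#     encodelist([nullid]) == '0' * 40
#     decodelist(encodelist([nullid, nullid])) == [nullid, nullid]
#     decodelist('') == []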
163
164
164 # batched call argument encoding
165 # batched call argument encoding
165
166
166 def escapearg(plain):
167 def escapearg(plain):
167 return (plain
168 return (plain
168 .replace(':', ':c')
169 .replace(':', ':c')
169 .replace(',', ':o')
170 .replace(',', ':o')
170 .replace(';', ':s')
171 .replace(';', ':s')
171 .replace('=', ':e'))
172 .replace('=', ':e'))
172
173
173 def unescapearg(escaped):
174 def unescapearg(escaped):
174 return (escaped
175 return (escaped
175 .replace(':e', '=')
176 .replace(':e', '=')
176 .replace(':s', ';')
177 .replace(':s', ';')
177 .replace(':o', ',')
178 .replace(':o', ',')
178 .replace(':c', ':'))
179 .replace(':c', ':'))
179
180
180 def encodebatchcmds(req):
181 def encodebatchcmds(req):
181 """Return a ``cmds`` argument value for the ``batch`` command."""
182 """Return a ``cmds`` argument value for the ``batch`` command."""
182 cmds = []
183 cmds = []
183 for op, argsdict in req:
184 for op, argsdict in req:
184 # Old servers didn't properly unescape argument names. So prevent
185 # Old servers didn't properly unescape argument names. So prevent
185 # the sending of argument names that may not be decoded properly by
186 # the sending of argument names that may not be decoded properly by
186 # servers.
187 # servers.
187 assert all(escapearg(k) == k for k in argsdict)
188 assert all(escapearg(k) == k for k in argsdict)
188
189
189 args = ','.join('%s=%s' % (escapearg(k), escapearg(v))
190 args = ','.join('%s=%s' % (escapearg(k), escapearg(v))
190 for k, v in argsdict.iteritems())
191 for k, v in argsdict.iteritems())
191 cmds.append('%s %s' % (op, args))
192 cmds.append('%s %s' % (op, args))
192
193
193 return ';'.join(cmds)
194 return ';'.join(cmds)
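# Worked example of the escaping and framing above (values checked against
# the replacement tables): escapearg('a=b;c') == 'a:eb:sc', and it
# round-trips through unescapearg().  For two commands,
#
#     encodebatchcmds([('heads', {}), ('known', {'nodes': 'abc'})])
#
# yields 'heads ;known nodes=abc': one ';'-separated entry per command,
# each of the form 'op <comma-separated k=v args>'.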
194
195
195 # mapping of options accepted by getbundle and their types
196 # mapping of options accepted by getbundle and their types
196 #
197 #
197 # Meant to be extended by extensions. It is the extensions' responsibility to ensure
198 # Meant to be extended by extensions. It is the extensions' responsibility to ensure
198 # such options are properly processed in exchange.getbundle.
199 # such options are properly processed in exchange.getbundle.
199 #
200 #
200 # supported types are:
201 # supported types are:
201 #
202 #
202 # :nodes: list of binary nodes
203 # :nodes: list of binary nodes
203 # :csv: list of comma-separated values
204 # :csv: list of comma-separated values
204 # :scsv: list of comma-separated values returned as a set
205 # :scsv: list of comma-separated values returned as a set
205 # :plain: string with no transformation needed.
206 # :plain: string with no transformation needed.
206 gboptsmap = {'heads': 'nodes',
207 gboptsmap = {'heads': 'nodes',
207 'common': 'nodes',
208 'common': 'nodes',
208 'obsmarkers': 'boolean',
209 'obsmarkers': 'boolean',
209 'bundlecaps': 'scsv',
210 'bundlecaps': 'scsv',
210 'listkeys': 'csv',
211 'listkeys': 'csv',
211 'cg': 'boolean',
212 'cg': 'boolean',
212 'cbattempted': 'boolean'}
213 'cbattempted': 'boolean'}
213
214
214 # client side
215 # client side
215
216
216 class wirepeer(repository.legacypeer):
217 class wirepeer(repository.legacypeer):
217 """Client-side interface for communicating with a peer repository.
218 """Client-side interface for communicating with a peer repository.
218
219
219 Methods commonly call wire protocol commands of the same name.
220 Methods commonly call wire protocol commands of the same name.
220
221
221 See also httppeer.py and sshpeer.py for protocol-specific
222 See also httppeer.py and sshpeer.py for protocol-specific
222 implementations of this interface.
223 implementations of this interface.
223 """
224 """
224 # Begin of basewirepeer interface.
225 # Begin of basewirepeer interface.
225
226
226 def iterbatch(self):
227 def iterbatch(self):
227 return remoteiterbatcher(self)
228 return remoteiterbatcher(self)
228
229
229 @batchable
230 @batchable
230 def lookup(self, key):
231 def lookup(self, key):
231 self.requirecap('lookup', _('look up remote revision'))
232 self.requirecap('lookup', _('look up remote revision'))
232 f = future()
233 f = future()
233 yield {'key': encoding.fromlocal(key)}, f
234 yield {'key': encoding.fromlocal(key)}, f
234 d = f.value
235 d = f.value
235 success, data = d[:-1].split(" ", 1)
236 success, data = d[:-1].split(" ", 1)
236 if int(success):
237 if int(success):
237 yield bin(data)
238 yield bin(data)
238 else:
239 else:
239 self._abort(error.RepoError(data))
240 self._abort(error.RepoError(data))
240
241
241 @batchable
242 @batchable
242 def heads(self):
243 def heads(self):
243 f = future()
244 f = future()
244 yield {}, f
245 yield {}, f
245 d = f.value
246 d = f.value
246 try:
247 try:
247 yield decodelist(d[:-1])
248 yield decodelist(d[:-1])
248 except ValueError:
249 except ValueError:
249 self._abort(error.ResponseError(_("unexpected response:"), d))
250 self._abort(error.ResponseError(_("unexpected response:"), d))
250
251
251 @batchable
252 @batchable
252 def known(self, nodes):
253 def known(self, nodes):
253 f = future()
254 f = future()
254 yield {'nodes': encodelist(nodes)}, f
255 yield {'nodes': encodelist(nodes)}, f
255 d = f.value
256 d = f.value
256 try:
257 try:
257 yield [bool(int(b)) for b in d]
258 yield [bool(int(b)) for b in d]
258 except ValueError:
259 except ValueError:
259 self._abort(error.ResponseError(_("unexpected response:"), d))
260 self._abort(error.ResponseError(_("unexpected response:"), d))
260
261
261 @batchable
262 @batchable
262 def branchmap(self):
263 def branchmap(self):
263 f = future()
264 f = future()
264 yield {}, f
265 yield {}, f
265 d = f.value
266 d = f.value
266 try:
267 try:
267 branchmap = {}
268 branchmap = {}
268 for branchpart in d.splitlines():
269 for branchpart in d.splitlines():
269 branchname, branchheads = branchpart.split(' ', 1)
270 branchname, branchheads = branchpart.split(' ', 1)
270 branchname = encoding.tolocal(urlreq.unquote(branchname))
271 branchname = encoding.tolocal(urlreq.unquote(branchname))
271 branchheads = decodelist(branchheads)
272 branchheads = decodelist(branchheads)
272 branchmap[branchname] = branchheads
273 branchmap[branchname] = branchheads
273 yield branchmap
274 yield branchmap
274 except TypeError:
275 except TypeError:
275 self._abort(error.ResponseError(_("unexpected response:"), d))
276 self._abort(error.ResponseError(_("unexpected response:"), d))
276
277
277 @batchable
278 @batchable
278 def listkeys(self, namespace):
279 def listkeys(self, namespace):
279 if not self.capable('pushkey'):
280 if not self.capable('pushkey'):
280 yield {}, None
281 yield {}, None
281 f = future()
282 f = future()
282 self.ui.debug('preparing listkeys for "%s"\n' % namespace)
283 self.ui.debug('preparing listkeys for "%s"\n' % namespace)
283 yield {'namespace': encoding.fromlocal(namespace)}, f
284 yield {'namespace': encoding.fromlocal(namespace)}, f
284 d = f.value
285 d = f.value
285 self.ui.debug('received listkey for "%s": %i bytes\n'
286 self.ui.debug('received listkey for "%s": %i bytes\n'
286 % (namespace, len(d)))
287 % (namespace, len(d)))
287 yield pushkeymod.decodekeys(d)
288 yield pushkeymod.decodekeys(d)
288
289
289 @batchable
290 @batchable
290 def pushkey(self, namespace, key, old, new):
291 def pushkey(self, namespace, key, old, new):
291 if not self.capable('pushkey'):
292 if not self.capable('pushkey'):
292 yield False, None
293 yield False, None
293 f = future()
294 f = future()
294 self.ui.debug('preparing pushkey for "%s:%s"\n' % (namespace, key))
295 self.ui.debug('preparing pushkey for "%s:%s"\n' % (namespace, key))
295 yield {'namespace': encoding.fromlocal(namespace),
296 yield {'namespace': encoding.fromlocal(namespace),
296 'key': encoding.fromlocal(key),
297 'key': encoding.fromlocal(key),
297 'old': encoding.fromlocal(old),
298 'old': encoding.fromlocal(old),
298 'new': encoding.fromlocal(new)}, f
299 'new': encoding.fromlocal(new)}, f
299 d = f.value
300 d = f.value
300 d, output = d.split('\n', 1)
301 d, output = d.split('\n', 1)
301 try:
302 try:
302 d = bool(int(d))
303 d = bool(int(d))
303 except ValueError:
304 except ValueError:
304 raise error.ResponseError(
305 raise error.ResponseError(
305 _('push failed (unexpected response):'), d)
306 _('push failed (unexpected response):'), d)
306 for l in output.splitlines(True):
307 for l in output.splitlines(True):
307 self.ui.status(_('remote: '), l)
308 self.ui.status(_('remote: '), l)
308 yield d
309 yield d
309
310
310 def stream_out(self):
311 def stream_out(self):
311 return self._callstream('stream_out')
312 return self._callstream('stream_out')
312
313
313 def getbundle(self, source, **kwargs):
314 def getbundle(self, source, **kwargs):
314 self.requirecap('getbundle', _('look up remote changes'))
315 self.requirecap('getbundle', _('look up remote changes'))
315 opts = {}
316 opts = {}
316 bundlecaps = kwargs.get('bundlecaps')
317 bundlecaps = kwargs.get('bundlecaps')
317 if bundlecaps is not None:
318 if bundlecaps is not None:
318 kwargs['bundlecaps'] = sorted(bundlecaps)
319 kwargs['bundlecaps'] = sorted(bundlecaps)
319 else:
320 else:
320 bundlecaps = () # kwargs could have it set to None
321 bundlecaps = () # kwargs could have it set to None
321 for key, value in kwargs.iteritems():
322 for key, value in kwargs.iteritems():
322 if value is None:
323 if value is None:
323 continue
324 continue
324 keytype = gboptsmap.get(key)
325 keytype = gboptsmap.get(key)
325 if keytype is None:
326 if keytype is None:
326 assert False, 'unexpected'
327 assert False, 'unexpected'
327 elif keytype == 'nodes':
328 elif keytype == 'nodes':
328 value = encodelist(value)
329 value = encodelist(value)
329 elif keytype in ('csv', 'scsv'):
330 elif keytype in ('csv', 'scsv'):
330 value = ','.join(value)
331 value = ','.join(value)
331 elif keytype == 'boolean':
332 elif keytype == 'boolean':
332 value = '%i' % bool(value)
333 value = '%i' % bool(value)
333 elif keytype != 'plain':
334 elif keytype != 'plain':
334 raise KeyError('unknown getbundle option type %s'
335 raise KeyError('unknown getbundle option type %s'
335 % keytype)
336 % keytype)
336 opts[key] = value
337 opts[key] = value
337 f = self._callcompressable("getbundle", **opts)
338 f = self._callcompressable("getbundle", **opts)
338 if any((cap.startswith('HG2') for cap in bundlecaps)):
339 if any((cap.startswith('HG2') for cap in bundlecaps)):
339 return bundle2.getunbundler(self.ui, f)
340 return bundle2.getunbundler(self.ui, f)
340 else:
341 else:
341 return changegroupmod.cg1unpacker(f, 'UN')
342 return changegroupmod.cg1unpacker(f, 'UN')
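    # Call sketch (hypothetical node lists) for a pull-style request;
    # every keyword must appear in gboptsmap, and any bundlecap starting
    # with 'HG2' switches the reply to a bundle2 unbundler:
    #
    #     bundle = remote.getbundle('pull', common=commonnodes,
    #                               heads=headnodes, cg=True,
    #                               bundlecaps={'HG20'})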
342
343
343 def unbundle(self, cg, heads, url):
344 def unbundle(self, cg, heads, url):
344 '''Send cg (a readable file-like object representing the
345 '''Send cg (a readable file-like object representing the
345 changegroup to push, typically a chunkbuffer object) to the
346 changegroup to push, typically a chunkbuffer object) to the
346 remote server as a bundle.
347 remote server as a bundle.
347
348
348 When pushing a bundle10 stream, return an integer indicating the
349 When pushing a bundle10 stream, return an integer indicating the
349 result of the push (see changegroup.apply()).
350 result of the push (see changegroup.apply()).
350
351
351 When pushing a bundle20 stream, return a bundle20 stream.
352 When pushing a bundle20 stream, return a bundle20 stream.
352
353
353 `url` is the url the client thinks it's pushing to, which is
354 `url` is the url the client thinks it's pushing to, which is
354 visible to hooks.
355 visible to hooks.
355 '''
356 '''
356
357
357 if heads != ['force'] and self.capable('unbundlehash'):
358 if heads != ['force'] and self.capable('unbundlehash'):
358 heads = encodelist(['hashed',
359 heads = encodelist(['hashed',
359 hashlib.sha1(''.join(sorted(heads))).digest()])
360 hashlib.sha1(''.join(sorted(heads))).digest()])
360 else:
361 else:
361 heads = encodelist(heads)
362 heads = encodelist(heads)
362
363
363 if util.safehasattr(cg, 'deltaheader'):
364 if util.safehasattr(cg, 'deltaheader'):
364 # this is a bundle10, so do the old-style call sequence
365 # this is a bundle10, so do the old-style call sequence
365 ret, output = self._callpush("unbundle", cg, heads=heads)
366 ret, output = self._callpush("unbundle", cg, heads=heads)
366 if ret == "":
367 if ret == "":
367 raise error.ResponseError(
368 raise error.ResponseError(
368 _('push failed:'), output)
369 _('push failed:'), output)
369 try:
370 try:
370 ret = int(ret)
371 ret = int(ret)
371 except ValueError:
372 except ValueError:
372 raise error.ResponseError(
373 raise error.ResponseError(
373 _('push failed (unexpected response):'), ret)
374 _('push failed (unexpected response):'), ret)
374
375
375 for l in output.splitlines(True):
376 for l in output.splitlines(True):
376 self.ui.status(_('remote: '), l)
377 self.ui.status(_('remote: '), l)
377 else:
378 else:
378 # bundle2 push. Send a stream, fetch a stream.
379 # bundle2 push. Send a stream, fetch a stream.
379 stream = self._calltwowaystream('unbundle', cg, heads=heads)
380 stream = self._calltwowaystream('unbundle', cg, heads=heads)
380 ret = bundle2.getunbundler(self.ui, stream)
381 ret = bundle2.getunbundler(self.ui, stream)
381 return ret
382 return ret
382
383
383 # End of basewirepeer interface.
384 # End of basewirepeer interface.
384
385
385 # Begin of baselegacywirepeer interface.
386 # Begin of baselegacywirepeer interface.
386
387
387 def branches(self, nodes):
388 def branches(self, nodes):
388 n = encodelist(nodes)
389 n = encodelist(nodes)
389 d = self._call("branches", nodes=n)
390 d = self._call("branches", nodes=n)
390 try:
391 try:
391 br = [tuple(decodelist(b)) for b in d.splitlines()]
392 br = [tuple(decodelist(b)) for b in d.splitlines()]
392 return br
393 return br
393 except ValueError:
394 except ValueError:
394 self._abort(error.ResponseError(_("unexpected response:"), d))
395 self._abort(error.ResponseError(_("unexpected response:"), d))
395
396
396 def between(self, pairs):
397 def between(self, pairs):
397 batch = 8 # avoid giant requests
398 batch = 8 # avoid giant requests
398 r = []
399 r = []
399 for i in xrange(0, len(pairs), batch):
400 for i in xrange(0, len(pairs), batch):
400 n = " ".join([encodelist(p, '-') for p in pairs[i:i + batch]])
401 n = " ".join([encodelist(p, '-') for p in pairs[i:i + batch]])
401 d = self._call("between", pairs=n)
402 d = self._call("between", pairs=n)
402 try:
403 try:
403 r.extend(l and decodelist(l) or [] for l in d.splitlines())
404 r.extend(l and decodelist(l) or [] for l in d.splitlines())
404 except ValueError:
405 except ValueError:
405 self._abort(error.ResponseError(_("unexpected response:"), d))
406 self._abort(error.ResponseError(_("unexpected response:"), d))
406 return r
407 return r
407
408
408 def changegroup(self, nodes, kind):
409 def changegroup(self, nodes, kind):
409 n = encodelist(nodes)
410 n = encodelist(nodes)
410 f = self._callcompressable("changegroup", roots=n)
411 f = self._callcompressable("changegroup", roots=n)
411 return changegroupmod.cg1unpacker(f, 'UN')
412 return changegroupmod.cg1unpacker(f, 'UN')
412
413
413 def changegroupsubset(self, bases, heads, kind):
414 def changegroupsubset(self, bases, heads, kind):
414 self.requirecap('changegroupsubset', _('look up remote changes'))
415 self.requirecap('changegroupsubset', _('look up remote changes'))
415 bases = encodelist(bases)
416 bases = encodelist(bases)
416 heads = encodelist(heads)
417 heads = encodelist(heads)
417 f = self._callcompressable("changegroupsubset",
418 f = self._callcompressable("changegroupsubset",
418 bases=bases, heads=heads)
419 bases=bases, heads=heads)
419 return changegroupmod.cg1unpacker(f, 'UN')
420 return changegroupmod.cg1unpacker(f, 'UN')
420
421
421 # End of baselegacywirepeer interface.
422 # End of baselegacywirepeer interface.
422
423
423 def _submitbatch(self, req):
424 def _submitbatch(self, req):
424 """run batch request <req> on the server
425 """run batch request <req> on the server
425
426
426 Returns an iterator of the raw responses from the server.
427 Returns an iterator of the raw responses from the server.
427 """
428 """
428 rsp = self._callstream("batch", cmds=encodebatchcmds(req))
429 rsp = self._callstream("batch", cmds=encodebatchcmds(req))
429 chunk = rsp.read(1024)
430 chunk = rsp.read(1024)
430 work = [chunk]
431 work = [chunk]
431 while chunk:
432 while chunk:
432 while ';' not in chunk and chunk:
433 while ';' not in chunk and chunk:
433 chunk = rsp.read(1024)
434 chunk = rsp.read(1024)
434 work.append(chunk)
435 work.append(chunk)
435 merged = ''.join(work)
436 merged = ''.join(work)
436 while ';' in merged:
437 while ';' in merged:
437 one, merged = merged.split(';', 1)
438 one, merged = merged.split(';', 1)
438 yield unescapearg(one)
439 yield unescapearg(one)
439 chunk = rsp.read(1024)
440 chunk = rsp.read(1024)
440 work = [merged, chunk]
441 work = [merged, chunk]
441 yield unescapearg(''.join(work))
442 yield unescapearg(''.join(work))
442
443
443 def _submitone(self, op, args):
444 def _submitone(self, op, args):
444 return self._call(op, **args)
445 return self._call(op, **args)
445
446
446 def debugwireargs(self, one, two, three=None, four=None, five=None):
447 def debugwireargs(self, one, two, three=None, four=None, five=None):
447 # don't pass optional arguments left at their default value
448 # don't pass optional arguments left at their default value
448 opts = {}
449 opts = {}
449 if three is not None:
450 if three is not None:
450 opts['three'] = three
451 opts['three'] = three
451 if four is not None:
452 if four is not None:
452 opts['four'] = four
453 opts['four'] = four
453 return self._call('debugwireargs', one=one, two=two, **opts)
454 return self._call('debugwireargs', one=one, two=two, **opts)
454
455
455 def _call(self, cmd, **args):
456 def _call(self, cmd, **args):
456 """execute <cmd> on the server
457 """execute <cmd> on the server
457
458
458 The command is expected to return a simple string.
459 The command is expected to return a simple string.
459
460
460 returns the server reply as a string."""
461 returns the server reply as a string."""
461 raise NotImplementedError()
462 raise NotImplementedError()
462
463
463 def _callstream(self, cmd, **args):
464 def _callstream(self, cmd, **args):
464 """execute <cmd> on the server
465 """execute <cmd> on the server
465
466
466 The command is expected to return a stream. Note that if the
467 The command is expected to return a stream. Note that if the
467 command doesn't return a stream, _callstream behaves
468 command doesn't return a stream, _callstream behaves
468 differently for ssh and http peers.
469 differently for ssh and http peers.
469
470
470 returns the server reply as a file like object.
471 returns the server reply as a file like object.
471 """
472 """
472 raise NotImplementedError()
473 raise NotImplementedError()
473
474
474 def _callcompressable(self, cmd, **args):
475 def _callcompressable(self, cmd, **args):
475 """execute <cmd> on the server
476 """execute <cmd> on the server
476
477
477 The command is expected to return a stream.
478 The command is expected to return a stream.
478
479
479 The stream may have been compressed in some implementations. This
480 The stream may have been compressed in some implementations. This
480 function takes care of the decompression. This is the only difference
481 function takes care of the decompression. This is the only difference
481 from _callstream.
482 from _callstream.
482
483
483 returns the server reply as a file like object.
484 returns the server reply as a file like object.
484 """
485 """
485 raise NotImplementedError()
486 raise NotImplementedError()
486
487
487 def _callpush(self, cmd, fp, **args):
488 def _callpush(self, cmd, fp, **args):
488 """execute a <cmd> on server
489 """execute a <cmd> on server
489
490
490 The command is expected to be related to a push. Push has a special
491 The command is expected to be related to a push. Push has a special
491 return method.
492 return method.
492
493
493 returns the server reply as a (ret, output) tuple. ret is either
494 returns the server reply as a (ret, output) tuple. ret is either
494 empty (error) or a stringified int.
495 empty (error) or a stringified int.
495 """
496 """
496 raise NotImplementedError()
497 raise NotImplementedError()
497
498
498 def _calltwowaystream(self, cmd, fp, **args):
499 def _calltwowaystream(self, cmd, fp, **args):
499 """execute <cmd> on server
500 """execute <cmd> on server
500
501
501 The command will send a stream to the server and get a stream in reply.
502 The command will send a stream to the server and get a stream in reply.
502 """
503 """
503 raise NotImplementedError()
504 raise NotImplementedError()
504
505
505 def _abort(self, exception):
506 def _abort(self, exception):
506 """clearly abort the wire protocol connection and raise the exception
507 """clearly abort the wire protocol connection and raise the exception
507 """
508 """
508 raise NotImplementedError()
509 raise NotImplementedError()
509
510
510 # server side
511 # server side
511
512
512 # a wire protocol command can either return a string or one of these classes.
513 # a wire protocol command can either return a string or one of these classes.
513 class streamres(object):
514 class streamres(object):
514 """wireproto reply: binary stream
515 """wireproto reply: binary stream
515
516
516 The call was successful and the result is a stream.
517 The call was successful and the result is a stream.
517
518
518 Accepts either a generator or an object with a ``read(size)`` method.
519 Accepts either a generator or an object with a ``read(size)`` method.
519
520
520 ``v1compressible`` indicates whether this data can be compressed to
521 ``v1compressible`` indicates whether this data can be compressed to
521 "version 1" clients (technically: HTTP peers using
522 "version 1" clients (technically: HTTP peers using
522 application/mercurial-0.1 media type). This flag should NOT be used on
523 application/mercurial-0.1 media type). This flag should NOT be used on
523 new commands because new clients should support a more modern compression
524 new commands because new clients should support a more modern compression
524 mechanism.
525 mechanism.
525 """
526 """
526 def __init__(self, gen=None, reader=None, v1compressible=False):
527 def __init__(self, gen=None, reader=None, v1compressible=False):
527 self.gen = gen
528 self.gen = gen
528 self.reader = reader
529 self.reader = reader
529 self.v1compressible = v1compressible
530 self.v1compressible = v1compressible
530
531
531 class pushres(object):
532 class pushres(object):
532 """wireproto reply: success with simple integer return
533 """wireproto reply: success with simple integer return
533
534
534 The call was successful and returned an integer contained in `self.res`.
535 The call was successful and returned an integer contained in `self.res`.
535 """
536 """
536 def __init__(self, res):
537 def __init__(self, res):
537 self.res = res
538 self.res = res
538
539
539 class pusherr(object):
540 class pusherr(object):
540 """wireproto reply: failure
541 """wireproto reply: failure
541
542
542 The call failed. The `self.res` attribute contains the error message.
543 The call failed. The `self.res` attribute contains the error message.
543 """
544 """
544 def __init__(self, res):
545 def __init__(self, res):
545 self.res = res
546 self.res = res
546
547
547 class ooberror(object):
548 class ooberror(object):
548 """wireproto reply: failure of a batch of operation
549 """wireproto reply: failure of a batch of operation
549
550
550 Something failed during a batch call. The error message is stored in
551 Something failed during a batch call. The error message is stored in
551 `self.message`.
552 `self.message`.
552 """
553 """
553 def __init__(self, message):
554 def __init__(self, message):
554 self.message = message
555 self.message = message
555
556
556 def getdispatchrepo(repo, proto, command):
557 def getdispatchrepo(repo, proto, command):
557 """Obtain the repo used for processing wire protocol commands.
558 """Obtain the repo used for processing wire protocol commands.
558
559
559 The intent of this function is to serve as a monkeypatch point for
560 The intent of this function is to serve as a monkeypatch point for
560 extensions that need commands to operate on different repo views under
561 extensions that need commands to operate on different repo views under
561 specialized circumstances.
562 specialized circumstances.
562 """
563 """
563 return repo.filtered('served')
564 return repo.filtered('served')
564
565
565 def dispatch(repo, proto, command):
566 def dispatch(repo, proto, command):
566 repo = getdispatchrepo(repo, proto, command)
567 repo = getdispatchrepo(repo, proto, command)
567 func, spec = commands[command]
568 func, spec = commands[command]
568 args = proto.getargs(spec)
569 args = proto.getargs(spec)
569 return func(repo, proto, *args)
570 return func(repo, proto, *args)
570
571
571 def options(cmd, keys, others):
572 def options(cmd, keys, others):
572 opts = {}
573 opts = {}
573 for k in keys:
574 for k in keys:
574 if k in others:
575 if k in others:
575 opts[k] = others[k]
576 opts[k] = others[k]
576 del others[k]
577 del others[k]
577 if others:
578 if others:
578 util.stderr.write("warning: %s ignored unexpected arguments %s\n"
579 util.stderr.write("warning: %s ignored unexpected arguments %s\n"
579 % (cmd, ",".join(others)))
580 % (cmd, ",".join(others)))
580 return opts
581 return opts
581
582
582 def bundle1allowed(repo, action):
583 def bundle1allowed(repo, action):
583 """Whether a bundle1 operation is allowed from the server.
584 """Whether a bundle1 operation is allowed from the server.
584
585
585 Priority is:
586 Priority is:
586
587
587 1. server.bundle1gd.<action> (if generaldelta active)
588 1. server.bundle1gd.<action> (if generaldelta active)
588 2. server.bundle1.<action>
589 2. server.bundle1.<action>
589 3. server.bundle1gd (if generaldelta active)
590 3. server.bundle1gd (if generaldelta active)
590 4. server.bundle1
591 4. server.bundle1
591 """
592 """
592 ui = repo.ui
593 ui = repo.ui
593 gd = 'generaldelta' in repo.requirements
594 gd = 'generaldelta' in repo.requirements
594
595
595 if gd:
596 if gd:
596 v = ui.configbool('server', 'bundle1gd.%s' % action, None)
597 v = ui.configbool('server', 'bundle1gd.%s' % action, None)
597 if v is not None:
598 if v is not None:
598 return v
599 return v
599
600
600 v = ui.configbool('server', 'bundle1.%s' % action, None)
601 v = ui.configbool('server', 'bundle1.%s' % action, None)
601 if v is not None:
602 if v is not None:
602 return v
603 return v
603
604
604 if gd:
605 if gd:
605 v = ui.configbool('server', 'bundle1gd')
606 v = ui.configbool('server', 'bundle1gd')
606 if v is not None:
607 if v is not None:
607 return v
608 return v
608
609
609 return ui.configbool('server', 'bundle1')
610 return ui.configbool('server', 'bundle1')
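# Config sketch illustrating the priority order documented above.  Given
# an hgrc of:
#
#     [server]
#     bundle1 = True
#     bundle1.push = False
#     bundle1gd.push = True
#
# a generaldelta repo gets bundle1allowed(repo, 'push') -> True (rule 1),
# a non-generaldelta repo -> False (rule 2), and either repo gets
# bundle1allowed(repo, 'pull') -> True (rule 4 fallback).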
610
611
611 def supportedcompengines(ui, proto, role):
612 def supportedcompengines(ui, proto, role):
612 """Obtain the list of supported compression engines for a request."""
613 """Obtain the list of supported compression engines for a request."""
613 assert role in (util.CLIENTROLE, util.SERVERROLE)
614 assert role in (util.CLIENTROLE, util.SERVERROLE)
614
615
615 compengines = util.compengines.supportedwireengines(role)
616 compengines = util.compengines.supportedwireengines(role)
616
617
617 # Allow config to override default list and ordering.
618 # Allow config to override default list and ordering.
618 if role == util.SERVERROLE:
619 if role == util.SERVERROLE:
619 configengines = ui.configlist('server', 'compressionengines')
620 configengines = ui.configlist('server', 'compressionengines')
620 config = 'server.compressionengines'
621 config = 'server.compressionengines'
621 else:
622 else:
622 # This is currently implemented mainly to facilitate testing. In most
623 # This is currently implemented mainly to facilitate testing. In most
623 # cases, the server should be in charge of choosing a compression engine
624 # cases, the server should be in charge of choosing a compression engine
624 # because a server has the most to lose from a sub-optimal choice. (e.g.
625 # because a server has the most to lose from a sub-optimal choice. (e.g.
625 # CPU DoS due to an expensive engine or a network DoS due to poor
626 # CPU DoS due to an expensive engine or a network DoS due to poor
626 # compression ratio).
627 # compression ratio).
627 configengines = ui.configlist('experimental',
628 configengines = ui.configlist('experimental',
628 'clientcompressionengines')
629 'clientcompressionengines')
629 config = 'experimental.clientcompressionengines'
630 config = 'experimental.clientcompressionengines'
630
631
631 # No explicit config. Filter out the ones that aren't supposed to be
632 # No explicit config. Filter out the ones that aren't supposed to be
632 # advertised and return default ordering.
633 # advertised and return default ordering.
633 if not configengines:
634 if not configengines:
634 attr = 'serverpriority' if role == util.SERVERROLE else 'clientpriority'
635 attr = 'serverpriority' if role == util.SERVERROLE else 'clientpriority'
635 return [e for e in compengines
636 return [e for e in compengines
636 if getattr(e.wireprotosupport(), attr) > 0]
637 if getattr(e.wireprotosupport(), attr) > 0]
637
638
638 # If compression engines are listed in the config, assume there is a good
639 # If compression engines are listed in the config, assume there is a good
639 # reason for it (like server operators wanting to achieve specific
640 # reason for it (like server operators wanting to achieve specific
640 # performance characteristics). So fail fast if the config references
641 # performance characteristics). So fail fast if the config references
641 # unusable compression engines.
642 # unusable compression engines.
642 validnames = set(e.name() for e in compengines)
643 validnames = set(e.name() for e in compengines)
643 invalidnames = set(e for e in configengines if e not in validnames)
644 invalidnames = set(e for e in configengines if e not in validnames)
644 if invalidnames:
645 if invalidnames:
645 raise error.Abort(_('invalid compression engine defined in %s: %s') %
646 raise error.Abort(_('invalid compression engine defined in %s: %s') %
646 (config, ', '.join(sorted(invalidnames))))
647 (config, ', '.join(sorted(invalidnames))))
647
648
648 compengines = [e for e in compengines if e.name() in configengines]
649 compengines = [e for e in compengines if e.name() in configengines]
649 compengines = sorted(compengines,
650 compengines = sorted(compengines,
650 key=lambda e: configengines.index(e.name()))
651 key=lambda e: configengines.index(e.name()))
651
652
652 if not compengines:
653 if not compengines:
653 raise error.Abort(_('%s config option does not specify any known '
654 raise error.Abort(_('%s config option does not specify any known '
654 'compression engines') % config,
655 'compression engines') % config,
655 hint=_('usable compression engines: %s') %
656 hint=_('usable compression engines: %s') %
656 ', '.join(sorted(validnames)))
657 ', '.join(sorted(validnames)))
657
658
658 return compengines
659 return compengines
659
660
660 # list of commands
661 # list of commands
661 commands = {}
662 commands = {}
662
663
663 def wireprotocommand(name, args=''):
664 def wireprotocommand(name, args=''):
664 """decorator for wire protocol command"""
665 """decorator for wire protocol command"""
665 def register(func):
666 def register(func):
666 commands[name] = (func, args)
667 commands[name] = (func, args)
667 return func
668 return func
668 return register
669 return register
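# Registration sketch (made-up command name): the decorator simply records
# (func, argspec) in the commands dict under the wire name, so a minimal
# read-only command looks like:
#
#     @wireprotocommand('echo', 'text')
#     def echo(repo, proto, text):
#         return text                  # plain-string reply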
669
670
670 @wireprotocommand('batch', 'cmds *')
671 @wireprotocommand('batch', 'cmds *')
671 def batch(repo, proto, cmds, others):
672 def batch(repo, proto, cmds, others):
672 repo = repo.filtered("served")
673 repo = repo.filtered("served")
673 res = []
674 res = []
674 for pair in cmds.split(';'):
675 for pair in cmds.split(';'):
675 op, args = pair.split(' ', 1)
676 op, args = pair.split(' ', 1)
676 vals = {}
677 vals = {}
677 for a in args.split(','):
678 for a in args.split(','):
678 if a:
679 if a:
679 n, v = a.split('=')
680 n, v = a.split('=')
680 vals[unescapearg(n)] = unescapearg(v)
681 vals[unescapearg(n)] = unescapearg(v)
681 func, spec = commands[op]
682 func, spec = commands[op]
682 if spec:
683 if spec:
683 keys = spec.split()
684 keys = spec.split()
684 data = {}
685 data = {}
685 for k in keys:
686 for k in keys:
686 if k == '*':
687 if k == '*':
687 star = {}
688 star = {}
688 for key in vals.keys():
689 for key in vals.keys():
689 if key not in keys:
690 if key not in keys:
690 star[key] = vals[key]
691 star[key] = vals[key]
691 data['*'] = star
692 data['*'] = star
692 else:
693 else:
693 data[k] = vals[k]
694 data[k] = vals[k]
694 result = func(repo, proto, *[data[k] for k in keys])
695 result = func(repo, proto, *[data[k] for k in keys])
695 else:
696 else:
696 result = func(repo, proto)
697 result = func(repo, proto)
697 if isinstance(result, ooberror):
698 if isinstance(result, ooberror):
698 return result
699 return result
699 res.append(escapearg(result))
700 res.append(escapearg(result))
700 return ';'.join(res)
701 return ';'.join(res)
701
702
702 @wireprotocommand('between', 'pairs')
703 @wireprotocommand('between', 'pairs')
703 def between(repo, proto, pairs):
704 def between(repo, proto, pairs):
704 pairs = [decodelist(p, '-') for p in pairs.split(" ")]
705 pairs = [decodelist(p, '-') for p in pairs.split(" ")]
705 r = []
706 r = []
706 for b in repo.between(pairs):
707 for b in repo.between(pairs):
707 r.append(encodelist(b) + "\n")
708 r.append(encodelist(b) + "\n")
708 return "".join(r)
709 return "".join(r)
709
710
710 @wireprotocommand('branchmap')
711 @wireprotocommand('branchmap')
711 def branchmap(repo, proto):
712 def branchmap(repo, proto):
712 branchmap = repo.branchmap()
713 branchmap = repo.branchmap()
713 heads = []
714 heads = []
714 for branch, nodes in branchmap.iteritems():
715 for branch, nodes in branchmap.iteritems():
715 branchname = urlreq.quote(encoding.fromlocal(branch))
716 branchname = urlreq.quote(encoding.fromlocal(branch))
716 branchnodes = encodelist(nodes)
717 branchnodes = encodelist(nodes)
717 heads.append('%s %s' % (branchname, branchnodes))
718 heads.append('%s %s' % (branchname, branchnodes))
718 return '\n'.join(heads)
719 return '\n'.join(heads)
719
720
720 @wireprotocommand('branches', 'nodes')
721 @wireprotocommand('branches', 'nodes')
721 def branches(repo, proto, nodes):
722 def branches(repo, proto, nodes):
722 nodes = decodelist(nodes)
723 nodes = decodelist(nodes)
723 r = []
724 r = []
724 for b in repo.branches(nodes):
725 for b in repo.branches(nodes):
725 r.append(encodelist(b) + "\n")
726 r.append(encodelist(b) + "\n")
726 return "".join(r)
727 return "".join(r)

@wireprotocommand('clonebundles', '')
def clonebundles(repo, proto):
    """Server command for returning info for available bundles to seed clones.

    Clients will parse this response and determine what bundle to fetch.

    Extensions may wrap this command to filter or dynamically emit data
    depending on the request. e.g. you could advertise URLs for the closest
    data center given the client's IP address.
    """
    return repo.vfs.tryread('clonebundles.manifest')
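
# Illustration (assumed manifest layout, not defined in this file): the file
# read above is plain text with one bundle per line, a URL optionally
# followed by key=value attributes, e.g.
#
#   https://example.com/full.hg BUNDLESPEC=gzip-v2
#   https://mirror.example.com/full.hg BUNDLESPEC=gzip-v2 REQUIRESNI=true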

wireprotocaps = ['lookup', 'changegroupsubset', 'branchmap', 'pushkey',
                 'known', 'getbundle', 'unbundlehash', 'batch']

def _capabilities(repo, proto):
    """return a list of capabilities for a repo

    This function exists to allow extensions to easily wrap capabilities
    computation

    - returns a list: easy to alter
    - changes made here are propagated to both the `capabilities` and
      `hello` commands without any other action needed.
    """
    # copy to prevent modification of the global list
    caps = list(wireprotocaps)
    if streamclone.allowservergeneration(repo):
        if repo.ui.configbool('server', 'preferuncompressed'):
            caps.append('stream-preferred')
        requiredformats = repo.requirements & repo.supportedformats
        # if our local revlogs are just revlogv1, add 'stream' cap
        if not requiredformats - {'revlogv1'}:
            caps.append('stream')
        # otherwise, add 'streamreqs' detailing our local revlog format
        else:
            caps.append('streamreqs=%s' % ','.join(sorted(requiredformats)))
    if repo.ui.configbool('experimental', 'bundle2-advertise'):
        capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
        caps.append('bundle2=' + urlreq.quote(capsblob))
    caps.append('unbundle=%s' % ','.join(bundle2.bundlepriority))

    if proto.name == 'http':
        caps.append('httpheader=%d' %
                    repo.ui.configint('server', 'maxhttpheaderlen'))
        if repo.ui.configbool('experimental', 'httppostargs'):
            caps.append('httppostargs')

        # FUTURE advertise 0.2rx once support is implemented
        # FUTURE advertise minrx and mintx after consulting config option
        caps.append('httpmediatype=0.1rx,0.1tx,0.2tx')

        compengines = supportedcompengines(repo.ui, proto, util.SERVERROLE)
        if compengines:
            comptypes = ','.join(urlreq.quote(e.wireprotosupport().name)
                                 for e in compengines)
            caps.append('compression=%s' % comptypes)

    return caps
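
# Illustration (hypothetical values): once joined with spaces by the
# 'capabilities' command below, the result might look like
#
#   lookup changegroupsubset branchmap pushkey known getbundle unbundlehash
#   batch stream bundle2=HG20%0A... unbundle=HG10GZ,HG10BZ,HG10UN
#   httpheader=1024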

# If you are writing an extension and consider wrapping this function, wrap
# `_capabilities` instead.
@wireprotocommand('capabilities')
def capabilities(repo, proto):
    return ' '.join(_capabilities(repo, proto))

@wireprotocommand('changegroup', 'roots')
def changegroup(repo, proto, roots):
    nodes = decodelist(roots)
    cg = changegroupmod.changegroup(repo, nodes, 'serve')
    return streamres(reader=cg, v1compressible=True)

@wireprotocommand('changegroupsubset', 'bases heads')
def changegroupsubset(repo, proto, bases, heads):
    bases = decodelist(bases)
    heads = decodelist(heads)
    outgoing = discovery.outgoing(repo, missingroots=bases,
                                  missingheads=heads)
    cg = changegroupmod.makechangegroup(repo, outgoing, '01', 'serve')
    return streamres(reader=cg, v1compressible=True)
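
# Note (added commentary): this is the call site the commit message refers
# to. The old changegroupsubset(repo, bases, heads, 'serve') helper computed
# the same revision range; expressing it as a discovery.outgoing object with
# missingroots/missingheads lets the shared makechangegroup() path produce
# the version '01' changegroup instead of going through a dedicated helper.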

@wireprotocommand('debugwireargs', 'one two *')
def debugwireargs(repo, proto, one, two, others):
    # only accept optional args from the known set
    opts = options('debugwireargs', ['three', 'four'], others)
    return repo.debugwireargs(one, two, **opts)

@wireprotocommand('getbundle', '*')
def getbundle(repo, proto, others):
    opts = options('getbundle', gboptsmap.keys(), others)
    for k, v in opts.iteritems():
        keytype = gboptsmap[k]
        if keytype == 'nodes':
            opts[k] = decodelist(v)
        elif keytype == 'csv':
            opts[k] = list(v.split(','))
        elif keytype == 'scsv':
            opts[k] = set(v.split(','))
        elif keytype == 'boolean':
            # The client serializes False as '0', which is a non-empty
            # string and would otherwise evaluate as a True bool.
            if v == '0':
                opts[k] = False
            else:
                opts[k] = bool(v)
        elif keytype != 'plain':
            raise KeyError('unknown getbundle option type %s'
                           % keytype)

    if not bundle1allowed(repo, 'pull'):
        if not exchange.bundle2requested(opts.get('bundlecaps')):
            if proto.name == 'http':
                return ooberror(bundle2required)
            raise error.Abort(bundle2requiredmain,
                              hint=bundle2requiredhint)

    try:
        if repo.ui.configbool('server', 'disablefullbundle'):
            # Check to see if this is a full clone.
            clheads = set(repo.changelog.heads())
            heads = set(opts.get('heads', set()))
            common = set(opts.get('common', set()))
            common.discard(nullid)
            if not common and clheads == heads:
                raise error.Abort(
                    _('server has pull-based clones disabled'),
                    hint=_('remove --pull if specified or upgrade Mercurial'))

        chunks = exchange.getbundlechunks(repo, 'serve', **opts)
    except error.Abort as exc:
        # cleanly forward Abort error to the client
        if not exchange.bundle2requested(opts.get('bundlecaps')):
            if proto.name == 'http':
                return ooberror(str(exc) + '\n')
            raise # cannot do better for bundle1 + ssh
        # a bundle2 request expects a bundle2 reply
        bundler = bundle2.bundle20(repo.ui)
        manargs = [('message', str(exc))]
        advargs = []
        if exc.hint is not None:
            advargs.append(('hint', exc.hint))
        bundler.addpart(bundle2.bundlepart('error:abort',
                                           manargs, advargs))
        return streamres(gen=bundler.getchunks(), v1compressible=True)
    return streamres(gen=chunks, v1compressible=True)
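
# Illustration (hypothetical request; gboptsmap itself is defined elsewhere
# in this module): with a map such as {'heads': 'nodes', 'common': 'nodes',
# 'bundlecaps': 'scsv', 'listkeys': 'csv', 'cg': 'boolean'}, wire arguments
# like heads='<hex><hex>', bundlecaps='HG20,bundle2=...' and cg='0' decode
# above into binary node lists, a set of strings, and cg=False.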

@wireprotocommand('heads')
def heads(repo, proto):
    h = repo.heads()
    return encodelist(h) + "\n"

@wireprotocommand('hello')
def hello(repo, proto):
    '''the hello command returns a set of lines describing various
    interesting things about the server, in an RFC822-like format.
    Currently the only one defined is "capabilities", which
    consists of a line in the form:

    capabilities: space separated list of tokens
    '''
    return "capabilities: %s\n" % (capabilities(repo, proto))
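
# Illustration: a hello response is a single RFC822-style line, e.g.
#
#   capabilities: lookup branchmap pushkey known getbundle batch
#
# (token list shortened; see _capabilities above for what is advertised).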

@wireprotocommand('listkeys', 'namespace')
def listkeys(repo, proto, namespace):
    d = repo.listkeys(encoding.tolocal(namespace)).items()
    return pushkeymod.encodekeys(d)

@wireprotocommand('lookup', 'key')
def lookup(repo, proto, key):
    try:
        k = encoding.tolocal(key)
        c = repo[k]
        r = c.hex()
        success = 1
    except Exception as inst:
        r = str(inst)
        success = 0
    return "%s %s\n" % (success, r)
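
# Illustration: on success the response is "1 <40-digit hex node>\n"; on
# failure it is "0 <error message>\n", for example
# "0 unknown revision 'doesnotexist'\n".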

@wireprotocommand('known', 'nodes *')
def known(repo, proto, nodes, others):
    return ''.join(b and "1" or "0" for b in repo.known(decodelist(nodes)))
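
# Illustration: 'nodes' is a space-separated hex list (see decodelist) and
# the response carries one '1'/'0' character per node, in order; querying
# three nodes of which only the first and third are known returns "101".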

@wireprotocommand('pushkey', 'namespace key old new')
def pushkey(repo, proto, namespace, key, old, new):
    # compatibility with pre-1.8 clients which were accidentally
    # sending raw binary nodes rather than utf-8-encoded hex
    if len(new) == 20 and util.escapestr(new) != new:
        # looks like it could be a binary node
        try:
            new.decode('utf-8')
            new = encoding.tolocal(new) # but cleanly decodes as UTF-8
        except UnicodeDecodeError:
            pass # binary, leave unmodified
    else:
        new = encoding.tolocal(new) # normal path

    if util.safehasattr(proto, 'restore'):

        proto.redirect()

        try:
            r = repo.pushkey(encoding.tolocal(namespace), encoding.tolocal(key),
                             encoding.tolocal(old), new) or False
        except error.Abort:
            r = False

        output = proto.restore()

        return '%s\n%s' % (int(r), output)

    r = repo.pushkey(encoding.tolocal(namespace), encoding.tolocal(key),
                     encoding.tolocal(old), new)
    return '%s\n' % int(r)
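
# Illustration (hypothetical values): pushing a bookmark uses
# namespace='bookmarks', key=<bookmark name>, and old/new as UTF-8 hex
# nodes; the reply is '1\n' on success or '0\n' on failure, with any
# captured server output appended in the proto.restore() branch above.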

@wireprotocommand('stream_out')
def stream(repo, proto):
    '''If the server supports streaming clone, it advertises the "stream"
    capability with a value representing the version and flags of the repo
    it is serving. The client checks whether it understands the format.
    '''
    if not streamclone.allowservergeneration(repo):
        return '1\n'

    def getstream(it):
        yield '0\n'
        for chunk in it:
            yield chunk

    try:
        # LockError may be raised before the first result is yielded. Don't
        # emit output until we're sure we got the lock successfully.
        it = streamclone.generatev1wireproto(repo)
        return streamres(gen=getstream(it))
    except error.LockError:
        return '2\n'
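
# Response codes used above: '0\n' followed by the raw stream data on
# success, '1\n' when this server does not allow streaming clones, and
# '2\n' when the repository lock could not be acquired.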

@wireprotocommand('unbundle', 'heads')
def unbundle(repo, proto, heads):
    their_heads = decodelist(heads)

    try:
        proto.redirect()

        exchange.check_heads(repo, their_heads, 'preparing changes')

        # write bundle data to a temporary file because it can be big
        fd, tempname = tempfile.mkstemp(prefix='hg-unbundle-')
        fp = os.fdopen(fd, pycompat.sysstr('wb+'))
        r = 0
        try:
            proto.getfile(fp)
            fp.seek(0)
            gen = exchange.readbundle(repo.ui, fp, None)
            if (isinstance(gen, changegroupmod.cg1unpacker)
                and not bundle1allowed(repo, 'push')):
                if proto.name == 'http':
                    # need to special-case http because stderr does not get
                    # to the http client on a failed push, so we need to
                    # abuse some other error type to make sure the message
                    # gets to the user.
                    return ooberror(bundle2required)
                raise error.Abort(bundle2requiredmain,
                                  hint=bundle2requiredhint)

            r = exchange.unbundle(repo, gen, their_heads, 'serve',
                                  proto._client())
            if util.safehasattr(r, 'addpart'):
                # The return looks streamable, we are in the bundle2 case
                # and should return a stream.
                return streamres(gen=r.getchunks())
            return pushres(r)

        finally:
            fp.close()
            os.unlink(tempname)

    except (error.BundleValueError, error.Abort, error.PushRaced) as exc:
        # handle non-bundle2 case first
        if not getattr(exc, 'duringunbundle2', False):
            try:
                raise
            except error.Abort:
                # The old code we moved here used util.stderr directly; we
                # kept that to minimise the change. This needs to be moved
                # to something proper. Feel free to do it.
                util.stderr.write("abort: %s\n" % exc)
                if exc.hint is not None:
                    util.stderr.write("(%s)\n" % exc.hint)
                return pushres(0)
            except error.PushRaced:
                return pusherr(str(exc))

        bundler = bundle2.bundle20(repo.ui)
        for out in getattr(exc, '_bundle2salvagedoutput', ()):
            bundler.addpart(out)
        try:
            try:
                raise
            except error.PushkeyFailed as exc:
                # check client caps
                remotecaps = getattr(exc, '_replycaps', None)
                if (remotecaps is not None
                    and 'pushkey' not in remotecaps.get('error', ())):
                    # no pushkey error support on the remote side; fall back
                    # to the Abort handler.
                    raise
                part = bundler.newpart('error:pushkey')
                part.addparam('in-reply-to', exc.partid)
                if exc.namespace is not None:
                    part.addparam('namespace', exc.namespace, mandatory=False)
                if exc.key is not None:
                    part.addparam('key', exc.key, mandatory=False)
                if exc.new is not None:
                    part.addparam('new', exc.new, mandatory=False)
                if exc.old is not None:
                    part.addparam('old', exc.old, mandatory=False)
                if exc.ret is not None:
                    part.addparam('ret', exc.ret, mandatory=False)
        except error.BundleValueError as exc:
            errpart = bundler.newpart('error:unsupportedcontent')
            if exc.parttype is not None:
                errpart.addparam('parttype', exc.parttype)
            if exc.params:
                errpart.addparam('params', '\0'.join(exc.params))
        except error.Abort as exc:
            manargs = [('message', str(exc))]
            advargs = []
            if exc.hint is not None:
                advargs.append(('hint', exc.hint))
            bundler.addpart(bundle2.bundlepart('error:abort',
                                               manargs, advargs))
        except error.PushRaced as exc:
            bundler.newpart('error:pushraced', [('message', str(exc))])
        return streamres(gen=bundler.getchunks())