##// END OF EJS Templates
shelve: use cg3 for treemanifests...
Martin von Zweigbergk -
r27931:1289a122 stable
parent child Browse files
Show More
@@ -1,859 +1,859 b''
1 # shelve.py - save/restore working directory state
1 # shelve.py - save/restore working directory state
2 #
2 #
3 # Copyright 2013 Facebook, Inc.
3 # Copyright 2013 Facebook, Inc.
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 """save and restore changes to the working directory
8 """save and restore changes to the working directory
9
9
10 The "hg shelve" command saves changes made to the working directory
10 The "hg shelve" command saves changes made to the working directory
11 and reverts those changes, resetting the working directory to a clean
11 and reverts those changes, resetting the working directory to a clean
12 state.
12 state.
13
13
14 Later on, the "hg unshelve" command restores the changes saved by "hg
14 Later on, the "hg unshelve" command restores the changes saved by "hg
15 shelve". Changes can be restored even after updating to a different
15 shelve". Changes can be restored even after updating to a different
16 parent, in which case Mercurial's merge machinery will resolve any
16 parent, in which case Mercurial's merge machinery will resolve any
17 conflicts if necessary.
17 conflicts if necessary.
18
18
19 You can have more than one shelved change outstanding at a time; each
19 You can have more than one shelved change outstanding at a time; each
20 shelved change has a distinct name. For details, see the help for "hg
20 shelved change has a distinct name. For details, see the help for "hg
21 shelve".
21 shelve".
22 """
22 """
23
23
24 import collections
24 import collections
25 import itertools
25 import itertools
26 from mercurial.i18n import _
26 from mercurial.i18n import _
27 from mercurial.node import nullid, nullrev, bin, hex
27 from mercurial.node import nullid, nullrev, bin, hex
28 from mercurial import changegroup, cmdutil, scmutil, phases, commands
28 from mercurial import changegroup, cmdutil, scmutil, phases, commands
29 from mercurial import error, hg, mdiff, merge, patch, repair, util
29 from mercurial import error, hg, mdiff, merge, patch, repair, util
30 from mercurial import templatefilters, exchange, bundlerepo, bundle2
30 from mercurial import templatefilters, exchange, bundlerepo, bundle2
31 from mercurial import lock as lockmod
31 from mercurial import lock as lockmod
32 from hgext import rebase
32 from hgext import rebase
33 import errno
33 import errno
34
34
35 cmdtable = {}
35 cmdtable = {}
36 command = cmdutil.command(cmdtable)
36 command = cmdutil.command(cmdtable)
37 # Note for extension authors: ONLY specify testedwith = 'internal' for
37 # Note for extension authors: ONLY specify testedwith = 'internal' for
38 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
38 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
39 # be specifying the version(s) of Mercurial they are tested with, or
39 # be specifying the version(s) of Mercurial they are tested with, or
40 # leave the attribute unspecified.
40 # leave the attribute unspecified.
41 testedwith = 'internal'
41 testedwith = 'internal'
42
42
43 backupdir = 'shelve-backup'
43 backupdir = 'shelve-backup'
44
44
class shelvedfile(object):
    """Helper for the file storing a single shelve

    Handles common functions on shelve files (.hg/.patch) using
    the vfs layer"""
    def __init__(self, repo, name, filetype=None):
        self.repo = repo
        self.name = name
        self.vfs = scmutil.vfs(repo.join('shelved'))
        self.backupvfs = scmutil.vfs(repo.join(backupdir))
        self.ui = self.repo.ui
        if filetype:
            self.fname = name + '.' + filetype
        else:
            self.fname = name

    def exists(self):
        return self.vfs.exists(self.fname)

    def filename(self):
        return self.vfs.join(self.fname)

    def backupfilename(self):
        """Return a path in the backup dir that does not exist yet.

        Tries the plain name first, then name-1.ext, name-2.ext, ..."""
        def gennames(base):
            yield base
            base, ext = base.rsplit('.', 1)
            for i in itertools.count(1):
                yield '%s-%d.%s' % (base, i, ext)

        name = self.backupvfs.join(self.fname)
        for n in gennames(name):
            if not self.backupvfs.exists(n):
                return n

    def movetobackup(self):
        """Move this shelve file into the backup directory."""
        if not self.backupvfs.isdir():
            self.backupvfs.makedir()
        util.rename(self.filename(), self.backupfilename())

    def stat(self):
        return self.vfs.stat(self.fname)

    def opener(self, mode='rb'):
        """Open the shelve file; abort with a user-facing message if the
        named shelve does not exist."""
        try:
            return self.vfs(self.fname, mode)
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
            raise error.Abort(_("shelved change '%s' not found") % self.name)

    def applybundle(self):
        """Apply the stored bundle to the repo, as secret-phase changesets."""
        fp = self.opener()
        try:
            gen = exchange.readbundle(self.repo.ui, fp, self.fname, self.vfs)
            # legacy (non-bundle2) bundles know how to apply themselves;
            # bundle2 payloads go through the bundle2 machinery below
            if not isinstance(gen, bundle2.unbundle20):
                gen.apply(self.repo, 'unshelve',
                          'bundle:' + self.vfs.join(self.fname),
                          targetphase=phases.secret)
            if isinstance(gen, bundle2.unbundle20):
                bundle2.applybundle(self.repo, gen,
                                    self.repo.currenttransaction(),
                                    source='unshelve',
                                    url='bundle:' + self.vfs.join(self.fname))
        finally:
            fp.close()

    def bundlerepo(self):
        return bundlerepo.bundlerepository(self.repo.baseui, self.repo.root,
                                           self.vfs.join(self.fname))
    def writebundle(self, bases, node):
        """Write a bundle containing the shelved commit to this file.

        The changegroup version must match what the repo requires:
        hard-coding cg1/cg2 breaks repos whose manifests can only be
        exchanged as cg3 (e.g. treemanifest repos), so ask changegroup
        for the lowest safe version instead.
        """
        cgversion = changegroup.safeversion(self.repo)
        if cgversion == '01':
            # cg1 only fits in the legacy HG10 bundle container
            btype = 'HG10BZ'
            compression = None
        else:
            # cg2 and later require a bundle2 (HG20) container
            btype = 'HG20'
            compression = 'BZ'

        cg = changegroup.changegroupsubset(self.repo, bases, [node], 'shelve',
                                           version=cgversion)
        changegroup.writebundle(self.ui, cg, self.fname, btype, self.vfs,
                                compression=compression)
127
127
class shelvedstate(object):
    """Handle persistence during unshelving operations.

    Handles saving and restoring a shelved state. Ensures that different
    versions of a shelved state are possible and handles them appropriately.
    """
    # bump _version whenever the on-disk line layout in load/save changes
    _version = 1
    _filename = 'shelvedstate'

    @classmethod
    def load(cls, repo):
        """Read .hg/shelvedstate and return a populated shelvedstate.

        Raises error.Abort when the file was written by an incompatible
        version of this extension.
        """
        fp = repo.vfs(cls._filename)
        try:
            version = int(fp.readline().strip())

            if version != cls._version:
                raise error.Abort(_('this version of shelve is incompatible '
                                    'with the version used in this repo'))
            # fixed line order, mirrored in save(): shelve name, wctx hash,
            # pendingctx hash, space-separated parents, space-separated
            # nodes to strip on abort
            name = fp.readline().strip()
            wctx = fp.readline().strip()
            pendingctx = fp.readline().strip()
            parents = [bin(h) for h in fp.readline().split()]
            stripnodes = [bin(h) for h in fp.readline().split()]
        finally:
            fp.close()

        obj = cls()
        obj.name = name
        obj.wctx = repo[bin(wctx)]
        obj.pendingctx = repo[bin(pendingctx)]
        obj.parents = parents
        obj.stripnodes = stripnodes

        return obj

    @classmethod
    def save(cls, repo, name, originalwctx, pendingctx, stripnodes):
        """Write the unshelve state file; line order must match load()."""
        fp = repo.vfs(cls._filename, 'wb')
        fp.write('%i\n' % cls._version)
        fp.write('%s\n' % name)
        fp.write('%s\n' % hex(originalwctx.node()))
        fp.write('%s\n' % hex(pendingctx.node()))
        fp.write('%s\n' % ' '.join([hex(p) for p in repo.dirstate.parents()]))
        fp.write('%s\n' % ' '.join([hex(n) for n in stripnodes]))
        fp.close()

    @classmethod
    def clear(cls, repo):
        """Remove the state file; a missing file is not an error."""
        util.unlinkpath(repo.join(cls._filename), ignoremissing=True)
177
177
def cleanupoldbackups(repo):
    """Trim the shelve-backup directory to 'shelve.maxbackups' entries.

    Backups are ordered by mtime and the oldest surplus ones are removed
    (both the .hg bundle and its companion .patch file).
    """
    vfs = scmutil.vfs(repo.join(backupdir))
    maxbackups = repo.ui.configint('shelve', 'maxbackups', 10)
    hgfiles = [f for f in vfs.listdir() if f.endswith('.hg')]
    # oldest first, so the slice below selects the files to delete
    hgfiles = sorted([(vfs.stat(f).st_mtime, f) for f in hgfiles])
    if 0 < maxbackups and maxbackups < len(hgfiles):
        bordermtime = hgfiles[-maxbackups][0]
    else:
        bordermtime = None
    for mtime, f in hgfiles[:len(hgfiles) - maxbackups]:
        if mtime == bordermtime:
            # keep it, because timestamp can't decide exact order of backups
            continue
        base = f[:-3]  # strip the trailing '.hg'
        for ext in 'hg patch'.split():
            try:
                vfs.unlink(base + '.' + ext)
            except OSError as err:
                # a companion file may legitimately be absent already
                if err.errno != errno.ENOENT:
                    raise
198
198
def _aborttransaction(repo):
    '''Abort current transaction for shelve/unshelve, but keep dirstate
    '''
    backupname = 'dirstate.shelve'
    dirstatebackup = None
    try:
        # create backup of (un)shelved dirstate, because aborting transaction
        # should restore dirstate to one at the beginning of the
        # transaction, which doesn't include the result of (un)shelving
        fp = repo.vfs.open(backupname, "w")
        dirstatebackup = backupname
        # clearing _dirty/_dirtypl of dirstate by _writedirstate below
        # is unintentional. but it doesn't cause problem in this case,
        # because no code path refers them until transaction is aborted.
        repo.dirstate._writedirstate(fp) # write in-memory changes forcibly

        tr = repo.currenttransaction()
        tr.abort()

        # restore to backuped dirstate
        repo.vfs.rename(dirstatebackup, 'dirstate')
        dirstatebackup = None
    finally:
        # non-None only when something above raised before the rename;
        # drop the now-stale backup file
        if dirstatebackup:
            repo.vfs.unlink(dirstatebackup)
224
224
def createcmd(ui, repo, pats, opts):
    """subcommand that creates a new shelve"""
    with repo.wlock():
        # refuse to shelve while another multi-step operation is pending
        cmdutil.checkunfinished(repo)
        return _docreatecmd(ui, repo, pats, opts)
230
230
def _docreatecmd(ui, repo, pats, opts):
    """Create a new shelve: commit the changes as a hidden secret commit,
    write it out as a bundle plus a patch, then reset the working copy.

    Returns 1 when there is nothing to shelve.
    """
    def mutableancestors(ctx):
        """return all mutable ancestors for ctx (included)

        Much faster than the revset ancestors(ctx) & draft()"""
        seen = set([nullrev])
        visit = collections.deque()
        visit.append(ctx)
        while visit:
            ctx = visit.popleft()
            yield ctx.node()
            for parent in ctx.parents():
                rev = parent.rev()
                if rev not in seen:
                    seen.add(rev)
                    if parent.mutable():
                        visit.append(parent)

    wctx = repo[None]
    parents = wctx.parents()
    if len(parents) > 1:
        raise error.Abort(_('cannot shelve while merging'))
    parent = parents[0]

    # we never need the user, so we use a generic user for all shelve operations
    user = 'shelve@localhost'
    label = repo._activebookmark or parent.branch() or 'default'

    # slashes aren't allowed in filenames, therefore we rename it
    label = label.replace('/', '_')

    def gennames():
        # candidate shelve names: the label itself, then label-01..label-99
        yield label
        for i in xrange(1, 100):
            yield '%s-%02d' % (label, i)

    if parent.node() != nullid:
        desc = "changes to: %s" % parent.description().split('\n', 1)[0]
    else:
        desc = '(changes in empty repository)'

    if not opts['message']:
        opts['message'] = desc

    name = opts['name']

    lock = tr = None
    try:
        lock = repo.lock()

        # use an uncommitted transaction to generate the bundle to avoid
        # pull races. ensure we don't print the abort message to stderr.
        tr = repo.transaction('commit', report=lambda x: None)

        if name:
            if shelvedfile(repo, name, 'hg').exists():
                raise error.Abort(_("a shelved change named '%s' already exists"
                                   ) % name)
        else:
            # pick the first free auto-generated name
            for n in gennames():
                if not shelvedfile(repo, n, 'hg').exists():
                    name = n
                    break
            else:
                raise error.Abort(_("too many shelved changes named '%s'") %
                                  label)

        # ensure we are not creating a subdirectory or a hidden file
        if '/' in name or '\\' in name:
            raise error.Abort(_('shelved change names may not contain slashes'))
        if name.startswith('.'):
            raise error.Abort(_("shelved change names may not start with '.'"))
        interactive = opts.get('interactive', False)
        includeunknown = (opts.get('unknown', False) and
                          not opts.get('addremove', False))

        extra={}
        if includeunknown:
            # record which files were unknown so unshelve can un-add them
            s = repo.status(match=scmutil.match(repo[None], pats, opts),
                            unknown=True)
            if s.unknown:
                extra['shelve_unknown'] = '\0'.join(s.unknown)
                repo[None].add(s.unknown)

        def commitfunc(ui, repo, message, match, opts):
            # temporarily disable mq's commit guard and force the new
            # commit into the secret phase so it stays local
            hasmq = util.safehasattr(repo, 'mq')
            if hasmq:
                saved, repo.mq.checkapplied = repo.mq.checkapplied, False
            backup = repo.ui.backupconfig('phases', 'new-commit')
            try:
                repo.ui.setconfig('phases', 'new-commit', phases.secret)
                editor = cmdutil.getcommiteditor(editform='shelve.shelve',
                                                 **opts)
                return repo.commit(message, user, opts.get('date'), match,
                                   editor=editor, extra=extra)
            finally:
                repo.ui.restoreconfig(backup)
                if hasmq:
                    repo.mq.checkapplied = saved

        def interactivecommitfunc(ui, repo, *pats, **opts):
            match = scmutil.match(repo['.'], pats, {})
            message = opts['message']
            return commitfunc(ui, repo, message, match, opts)
        if not interactive:
            node = cmdutil.commit(ui, repo, commitfunc, pats, opts)
        else:
            node = cmdutil.dorecord(ui, repo, interactivecommitfunc, None,
                                    False, cmdutil.recordfilter, *pats, **opts)
        if not node:
            # nothing was committed: explain why and bail out
            stat = repo.status(match=scmutil.match(repo[None], pats, opts))
            if stat.deleted:
                ui.status(_("nothing changed (%d missing files, see "
                            "'hg status')\n") % len(stat.deleted))
            else:
                ui.status(_("nothing changed\n"))
            return 1

        bases = list(mutableancestors(repo[node]))
        shelvedfile(repo, name, 'hg').writebundle(bases, node)
        cmdutil.export(repo, [node],
                       fp=shelvedfile(repo, name, 'patch').opener('wb'),
                       opts=mdiff.diffopts(git=True))


        if ui.formatted():
            desc = util.ellipsis(desc, ui.termwidth())
        ui.status(_('shelved as %s\n') % name)
        # reset the working copy to the pre-shelve parent
        hg.update(repo, parent.node())

        # throw away the transaction (and with it the temporary commit)
        # while keeping the dirstate we just produced
        _aborttransaction(repo)
    finally:
        lockmod.release(tr, lock)
364
364
def cleanupcmd(ui, repo):
    """subcommand that deletes all shelves"""

    with repo.wlock():
        for (entry, _kind) in repo.vfs.readdir('shelved'):
            ext = entry.rsplit('.', 1)[-1]
            if ext not in ('hg', 'patch'):
                continue
            shelvedfile(repo, entry).movetobackup()
        cleanupoldbackups(repo)
374
374
def deletecmd(ui, repo, pats):
    """subcommand that deletes a specific shelve"""
    if not pats:
        raise error.Abort(_('no shelved changes specified!'))
    with repo.wlock():
        try:
            for name in pats:
                for suffix in ('hg', 'patch'):
                    shelvedfile(repo, name, suffix).movetobackup()
            cleanupoldbackups(repo)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            raise error.Abort(_("shelved change '%s' not found") % name)
389
389
def listshelves(repo):
    """return all shelves in repo as list of (time, filename)"""
    try:
        entries = repo.vfs.readdir('shelved')
    except OSError as err:
        if err.errno != errno.ENOENT:
            raise
        return []
    shelves = []
    for (entry, _kind) in entries:
        base, ext = entry.rsplit('.', 1)
        if ext != 'patch' or not base:
            continue
        mtime = shelvedfile(repo, entry).stat().st_mtime
        shelves.append((mtime, shelvedfile(repo, base).filename()))
    return sorted(shelves, reverse=True)
406
406
def listcmd(ui, repo, pats, opts):
    """subcommand that displays the list of shelves"""
    pats = set(pats)
    width = 80
    if not ui.plain():
        width = ui.termwidth()
    # the newest shelve gets a distinct label; all later rows use the
    # plain name label
    namelabel = 'shelve.newest'
    for mtime, name in listshelves(repo):
        sname = util.split(name)[1]
        if pats and sname not in pats:
            continue
        ui.write(sname, label=namelabel)
        namelabel = 'shelve.name'
        if ui.quiet:
            ui.write('\n')
            continue
        # pad columns: 16 chars for the name, 12 for the age
        ui.write(' ' * (16 - len(sname)))
        used = 16
        age = '(%s)' % templatefilters.age(util.makedate(mtime), abbrev=True)
        ui.write(age, label='shelve.age')
        ui.write(' ' * (12 - len(age)))
        used += 12
        with open(name + '.patch', 'rb') as fp:
            # first non-'#' line of the patch file is its description
            while True:
                line = fp.readline()
                if not line:
                    break
                if not line.startswith('#'):
                    desc = line.rstrip()
                    if ui.formatted():
                        desc = util.ellipsis(desc, width - used)
                    ui.write(desc)
                    break
            ui.write('\n')
            if not (opts['patch'] or opts['stat']):
                continue
            # remainder of the file is the diff itself
            difflines = fp.readlines()
            if opts['patch']:
                for chunk, label in patch.difflabel(iter, difflines):
                    ui.write(chunk, label=label)
            if opts['stat']:
                for chunk, label in patch.diffstatui(difflines, width=width,
                                                     git=True):
                    ui.write(chunk, label=label)
451
451
def singlepatchcmds(ui, repo, pats, opts, subcommand):
    """Display exactly one shelf (backs the --patch/--stat options)."""
    if len(pats) != 1:
        raise error.Abort(_("--%s expects a single shelf") % subcommand)
    target = pats[0]
    if not shelvedfile(repo, target, 'patch').exists():
        raise error.Abort(_("cannot find shelf %s") % target)
    listcmd(ui, repo, pats, opts)
462
462
def checkparents(repo, state):
    """Abort when the working directory parents no longer match the
    parents recorded in the unshelve state."""
    if repo.dirstate.parents() != state.parents:
        raise error.Abort(_('working directory parents do not match unshelve '
                           'state'))
468
468
def pathtofiles(repo, files):
    """Translate repo-relative paths into paths relative to the cwd."""
    base = repo.getcwd()
    result = []
    for f in files:
        result.append(repo.pathto(f, base))
    return result
472
472
def unshelveabort(ui, repo, state, opts):
    """subcommand that abort an in-progress unshelve"""
    with repo.lock():
        try:
            checkparents(repo, state)

            # the stashed rebase state is renamed into place so the rebase
            # machinery can abort it; renamed back if the abort itself fails
            util.rename(repo.join('unshelverebasestate'),
                        repo.join('rebasestate'))
            try:
                rebase.rebase(ui, repo, **{
                    'abort' : True
                })
            except Exception:
                util.rename(repo.join('rebasestate'),
                            repo.join('unshelverebasestate'))
                raise

            mergefiles(ui, repo, state.wctx, state.pendingctx)
            # remove the temporary commits created by unshelve
            repair.strip(ui, repo, state.stripnodes, backup=False,
                         topic='shelve')
        finally:
            shelvedstate.clear(repo)
            ui.warn(_("unshelve of '%s' aborted\n") % state.name)
496
496
def mergefiles(ui, repo, wctx, shelvectx):
    """updates to wctx and merges the changes from shelvectx into the
    dirstate."""
    oldquiet = ui.quiet
    try:
        # silence the update/revert chatter below
        ui.quiet = True
        hg.update(repo, wctx.node())
        files = []
        files.extend(shelvectx.files())
        files.extend(shelvectx.parents()[0].files())

        # revert will overwrite unknown files, so move them out of the way
        for file in repo.status(unknown=True).unknown:
            if file in files:
                util.rename(file, scmutil.origpath(ui, repo, file))
        ui.pushbuffer(True)
        cmdutil.revert(ui, repo, shelvectx, repo.dirstate.parents(),
                       *pathtofiles(repo, files),
                       **{'no_backup': True})
        ui.popbuffer()
    finally:
        ui.quiet = oldquiet
519
519
520 def unshelvecleanup(ui, repo, name, opts):
520 def unshelvecleanup(ui, repo, name, opts):
521 """remove related files after an unshelve"""
521 """remove related files after an unshelve"""
522 if not opts['keep']:
522 if not opts['keep']:
523 for filetype in 'hg patch'.split():
523 for filetype in 'hg patch'.split():
524 shelvedfile(repo, name, filetype).movetobackup()
524 shelvedfile(repo, name, filetype).movetobackup()
525 cleanupoldbackups(repo)
525 cleanupoldbackups(repo)
526
526
527 def unshelvecontinue(ui, repo, state, opts):
527 def unshelvecontinue(ui, repo, state, opts):
528 """subcommand to continue an in-progress unshelve"""
528 """subcommand to continue an in-progress unshelve"""
529 # We're finishing off a merge. First parent is our original
529 # We're finishing off a merge. First parent is our original
530 # parent, second is the temporary "fake" commit we're unshelving.
530 # parent, second is the temporary "fake" commit we're unshelving.
531 with repo.lock():
531 with repo.lock():
532 checkparents(repo, state)
532 checkparents(repo, state)
533 ms = merge.mergestate.read(repo)
533 ms = merge.mergestate.read(repo)
534 if [f for f in ms if ms[f] == 'u']:
534 if [f for f in ms if ms[f] == 'u']:
535 raise error.Abort(
535 raise error.Abort(
536 _("unresolved conflicts, can't continue"),
536 _("unresolved conflicts, can't continue"),
537 hint=_("see 'hg resolve', then 'hg unshelve --continue'"))
537 hint=_("see 'hg resolve', then 'hg unshelve --continue'"))
538
538
539 util.rename(repo.join('unshelverebasestate'),
539 util.rename(repo.join('unshelverebasestate'),
540 repo.join('rebasestate'))
540 repo.join('rebasestate'))
541 try:
541 try:
542 rebase.rebase(ui, repo, **{
542 rebase.rebase(ui, repo, **{
543 'continue' : True
543 'continue' : True
544 })
544 })
545 except Exception:
545 except Exception:
546 util.rename(repo.join('rebasestate'),
546 util.rename(repo.join('rebasestate'),
547 repo.join('unshelverebasestate'))
547 repo.join('unshelverebasestate'))
548 raise
548 raise
549
549
550 shelvectx = repo['tip']
550 shelvectx = repo['tip']
551 if not shelvectx in state.pendingctx.children():
551 if not shelvectx in state.pendingctx.children():
552 # rebase was a no-op, so it produced no child commit
552 # rebase was a no-op, so it produced no child commit
553 shelvectx = state.pendingctx
553 shelvectx = state.pendingctx
554 else:
554 else:
555 # only strip the shelvectx if the rebase produced it
555 # only strip the shelvectx if the rebase produced it
556 state.stripnodes.append(shelvectx.node())
556 state.stripnodes.append(shelvectx.node())
557
557
558 mergefiles(ui, repo, state.wctx, shelvectx)
558 mergefiles(ui, repo, state.wctx, shelvectx)
559
559
560 repair.strip(ui, repo, state.stripnodes, backup=False, topic='shelve')
560 repair.strip(ui, repo, state.stripnodes, backup=False, topic='shelve')
561 shelvedstate.clear(repo)
561 shelvedstate.clear(repo)
562 unshelvecleanup(ui, repo, state.name, opts)
562 unshelvecleanup(ui, repo, state.name, opts)
563 ui.status(_("unshelve of '%s' complete\n") % state.name)
563 ui.status(_("unshelve of '%s' complete\n") % state.name)
564
564
565 @command('unshelve',
565 @command('unshelve',
566 [('a', 'abort', None,
566 [('a', 'abort', None,
567 _('abort an incomplete unshelve operation')),
567 _('abort an incomplete unshelve operation')),
568 ('c', 'continue', None,
568 ('c', 'continue', None,
569 _('continue an incomplete unshelve operation')),
569 _('continue an incomplete unshelve operation')),
570 ('k', 'keep', None,
570 ('k', 'keep', None,
571 _('keep shelve after unshelving')),
571 _('keep shelve after unshelving')),
572 ('t', 'tool', '', _('specify merge tool')),
572 ('t', 'tool', '', _('specify merge tool')),
573 ('', 'date', '',
573 ('', 'date', '',
574 _('set date for temporary commits (DEPRECATED)'), _('DATE'))],
574 _('set date for temporary commits (DEPRECATED)'), _('DATE'))],
575 _('hg unshelve [SHELVED]'))
575 _('hg unshelve [SHELVED]'))
576 def unshelve(ui, repo, *shelved, **opts):
576 def unshelve(ui, repo, *shelved, **opts):
577 """restore a shelved change to the working directory
577 """restore a shelved change to the working directory
578
578
579 This command accepts an optional name of a shelved change to
579 This command accepts an optional name of a shelved change to
580 restore. If none is given, the most recent shelved change is used.
580 restore. If none is given, the most recent shelved change is used.
581
581
582 If a shelved change is applied successfully, the bundle that
582 If a shelved change is applied successfully, the bundle that
583 contains the shelved changes is moved to a backup location
583 contains the shelved changes is moved to a backup location
584 (.hg/shelve-backup).
584 (.hg/shelve-backup).
585
585
586 Since you can restore a shelved change on top of an arbitrary
586 Since you can restore a shelved change on top of an arbitrary
587 commit, it is possible that unshelving will result in a conflict
587 commit, it is possible that unshelving will result in a conflict
588 between your changes and the commits you are unshelving onto. If
588 between your changes and the commits you are unshelving onto. If
589 this occurs, you must resolve the conflict, then use
589 this occurs, you must resolve the conflict, then use
590 ``--continue`` to complete the unshelve operation. (The bundle
590 ``--continue`` to complete the unshelve operation. (The bundle
591 will not be moved until you successfully complete the unshelve.)
591 will not be moved until you successfully complete the unshelve.)
592
592
593 (Alternatively, you can use ``--abort`` to abandon an unshelve
593 (Alternatively, you can use ``--abort`` to abandon an unshelve
594 that causes a conflict. This reverts the unshelved changes, and
594 that causes a conflict. This reverts the unshelved changes, and
595 leaves the bundle in place.)
595 leaves the bundle in place.)
596
596
597 After a successful unshelve, the shelved changes are stored in a
597 After a successful unshelve, the shelved changes are stored in a
598 backup directory. Only the N most recent backups are kept. N
598 backup directory. Only the N most recent backups are kept. N
599 defaults to 10 but can be overridden using the ``shelve.maxbackups``
599 defaults to 10 but can be overridden using the ``shelve.maxbackups``
600 configuration option.
600 configuration option.
601
601
602 .. container:: verbose
602 .. container:: verbose
603
603
604 Timestamp in seconds is used to decide order of backups. More
604 Timestamp in seconds is used to decide order of backups. More
605 than ``maxbackups`` backups are kept, if same timestamp
605 than ``maxbackups`` backups are kept, if same timestamp
606 prevents from deciding exact order of them, for safety.
606 prevents from deciding exact order of them, for safety.
607 """
607 """
608 with repo.wlock():
608 with repo.wlock():
609 return _dounshelve(ui, repo, *shelved, **opts)
609 return _dounshelve(ui, repo, *shelved, **opts)
610
610
611 def _dounshelve(ui, repo, *shelved, **opts):
611 def _dounshelve(ui, repo, *shelved, **opts):
612 abortf = opts['abort']
612 abortf = opts['abort']
613 continuef = opts['continue']
613 continuef = opts['continue']
614 if not abortf and not continuef:
614 if not abortf and not continuef:
615 cmdutil.checkunfinished(repo)
615 cmdutil.checkunfinished(repo)
616
616
617 if abortf or continuef:
617 if abortf or continuef:
618 if abortf and continuef:
618 if abortf and continuef:
619 raise error.Abort(_('cannot use both abort and continue'))
619 raise error.Abort(_('cannot use both abort and continue'))
620 if shelved:
620 if shelved:
621 raise error.Abort(_('cannot combine abort/continue with '
621 raise error.Abort(_('cannot combine abort/continue with '
622 'naming a shelved change'))
622 'naming a shelved change'))
623 if abortf and opts.get('tool', False):
623 if abortf and opts.get('tool', False):
624 ui.warn(_('tool option will be ignored\n'))
624 ui.warn(_('tool option will be ignored\n'))
625
625
626 try:
626 try:
627 state = shelvedstate.load(repo)
627 state = shelvedstate.load(repo)
628 except IOError as err:
628 except IOError as err:
629 if err.errno != errno.ENOENT:
629 if err.errno != errno.ENOENT:
630 raise
630 raise
631 raise error.Abort(_('no unshelve operation underway'))
631 raise error.Abort(_('no unshelve operation underway'))
632
632
633 if abortf:
633 if abortf:
634 return unshelveabort(ui, repo, state, opts)
634 return unshelveabort(ui, repo, state, opts)
635 elif continuef:
635 elif continuef:
636 return unshelvecontinue(ui, repo, state, opts)
636 return unshelvecontinue(ui, repo, state, opts)
637 elif len(shelved) > 1:
637 elif len(shelved) > 1:
638 raise error.Abort(_('can only unshelve one change at a time'))
638 raise error.Abort(_('can only unshelve one change at a time'))
639 elif not shelved:
639 elif not shelved:
640 shelved = listshelves(repo)
640 shelved = listshelves(repo)
641 if not shelved:
641 if not shelved:
642 raise error.Abort(_('no shelved changes to apply!'))
642 raise error.Abort(_('no shelved changes to apply!'))
643 basename = util.split(shelved[0][1])[1]
643 basename = util.split(shelved[0][1])[1]
644 ui.status(_("unshelving change '%s'\n") % basename)
644 ui.status(_("unshelving change '%s'\n") % basename)
645 else:
645 else:
646 basename = shelved[0]
646 basename = shelved[0]
647
647
648 if not shelvedfile(repo, basename, 'patch').exists():
648 if not shelvedfile(repo, basename, 'patch').exists():
649 raise error.Abort(_("shelved change '%s' not found") % basename)
649 raise error.Abort(_("shelved change '%s' not found") % basename)
650
650
651 oldquiet = ui.quiet
651 oldquiet = ui.quiet
652 lock = tr = None
652 lock = tr = None
653 forcemerge = ui.backupconfig('ui', 'forcemerge')
653 forcemerge = ui.backupconfig('ui', 'forcemerge')
654 try:
654 try:
655 ui.setconfig('ui', 'forcemerge', opts.get('tool', ''), 'unshelve')
655 ui.setconfig('ui', 'forcemerge', opts.get('tool', ''), 'unshelve')
656 lock = repo.lock()
656 lock = repo.lock()
657
657
658 tr = repo.transaction('unshelve', report=lambda x: None)
658 tr = repo.transaction('unshelve', report=lambda x: None)
659 oldtiprev = len(repo)
659 oldtiprev = len(repo)
660
660
661 pctx = repo['.']
661 pctx = repo['.']
662 tmpwctx = pctx
662 tmpwctx = pctx
663 # The goal is to have a commit structure like so:
663 # The goal is to have a commit structure like so:
664 # ...-> pctx -> tmpwctx -> shelvectx
664 # ...-> pctx -> tmpwctx -> shelvectx
665 # where tmpwctx is an optional commit with the user's pending changes
665 # where tmpwctx is an optional commit with the user's pending changes
666 # and shelvectx is the unshelved changes. Then we merge it all down
666 # and shelvectx is the unshelved changes. Then we merge it all down
667 # to the original pctx.
667 # to the original pctx.
668
668
669 # Store pending changes in a commit and remember added in case a shelve
669 # Store pending changes in a commit and remember added in case a shelve
670 # contains unknown files that are part of the pending change
670 # contains unknown files that are part of the pending change
671 s = repo.status()
671 s = repo.status()
672 addedbefore = frozenset(s.added)
672 addedbefore = frozenset(s.added)
673 if s.modified or s.added or s.removed or s.deleted:
673 if s.modified or s.added or s.removed or s.deleted:
674 ui.status(_("temporarily committing pending changes "
674 ui.status(_("temporarily committing pending changes "
675 "(restore with 'hg unshelve --abort')\n"))
675 "(restore with 'hg unshelve --abort')\n"))
676 def commitfunc(ui, repo, message, match, opts):
676 def commitfunc(ui, repo, message, match, opts):
677 hasmq = util.safehasattr(repo, 'mq')
677 hasmq = util.safehasattr(repo, 'mq')
678 if hasmq:
678 if hasmq:
679 saved, repo.mq.checkapplied = repo.mq.checkapplied, False
679 saved, repo.mq.checkapplied = repo.mq.checkapplied, False
680
680
681 backup = repo.ui.backupconfig('phases', 'new-commit')
681 backup = repo.ui.backupconfig('phases', 'new-commit')
682 try:
682 try:
683 repo.ui.setconfig('phases', 'new-commit', phases.secret)
683 repo.ui.setconfig('phases', 'new-commit', phases.secret)
684 return repo.commit(message, 'shelve@localhost',
684 return repo.commit(message, 'shelve@localhost',
685 opts.get('date'), match)
685 opts.get('date'), match)
686 finally:
686 finally:
687 repo.ui.restoreconfig(backup)
687 repo.ui.restoreconfig(backup)
688 if hasmq:
688 if hasmq:
689 repo.mq.checkapplied = saved
689 repo.mq.checkapplied = saved
690
690
691 tempopts = {}
691 tempopts = {}
692 tempopts['message'] = "pending changes temporary commit"
692 tempopts['message'] = "pending changes temporary commit"
693 tempopts['date'] = opts.get('date')
693 tempopts['date'] = opts.get('date')
694 ui.quiet = True
694 ui.quiet = True
695 node = cmdutil.commit(ui, repo, commitfunc, [], tempopts)
695 node = cmdutil.commit(ui, repo, commitfunc, [], tempopts)
696 tmpwctx = repo[node]
696 tmpwctx = repo[node]
697
697
698 ui.quiet = True
698 ui.quiet = True
699 shelvedfile(repo, basename, 'hg').applybundle()
699 shelvedfile(repo, basename, 'hg').applybundle()
700
700
701 ui.quiet = oldquiet
701 ui.quiet = oldquiet
702
702
703 shelvectx = repo['tip']
703 shelvectx = repo['tip']
704
704
705 # If the shelve is not immediately on top of the commit
705 # If the shelve is not immediately on top of the commit
706 # we'll be merging with, rebase it to be on top.
706 # we'll be merging with, rebase it to be on top.
707 if tmpwctx.node() != shelvectx.parents()[0].node():
707 if tmpwctx.node() != shelvectx.parents()[0].node():
708 ui.status(_('rebasing shelved changes\n'))
708 ui.status(_('rebasing shelved changes\n'))
709 try:
709 try:
710 rebase.rebase(ui, repo, **{
710 rebase.rebase(ui, repo, **{
711 'rev' : [shelvectx.rev()],
711 'rev' : [shelvectx.rev()],
712 'dest' : str(tmpwctx.rev()),
712 'dest' : str(tmpwctx.rev()),
713 'keep' : True,
713 'keep' : True,
714 'tool' : opts.get('tool', ''),
714 'tool' : opts.get('tool', ''),
715 })
715 })
716 except error.InterventionRequired:
716 except error.InterventionRequired:
717 tr.close()
717 tr.close()
718
718
719 stripnodes = [repo.changelog.node(rev)
719 stripnodes = [repo.changelog.node(rev)
720 for rev in xrange(oldtiprev, len(repo))]
720 for rev in xrange(oldtiprev, len(repo))]
721 shelvedstate.save(repo, basename, pctx, tmpwctx, stripnodes)
721 shelvedstate.save(repo, basename, pctx, tmpwctx, stripnodes)
722
722
723 util.rename(repo.join('rebasestate'),
723 util.rename(repo.join('rebasestate'),
724 repo.join('unshelverebasestate'))
724 repo.join('unshelverebasestate'))
725 raise error.InterventionRequired(
725 raise error.InterventionRequired(
726 _("unresolved conflicts (see 'hg resolve', then "
726 _("unresolved conflicts (see 'hg resolve', then "
727 "'hg unshelve --continue')"))
727 "'hg unshelve --continue')"))
728
728
729 # refresh ctx after rebase completes
729 # refresh ctx after rebase completes
730 shelvectx = repo['tip']
730 shelvectx = repo['tip']
731
731
732 if not shelvectx in tmpwctx.children():
732 if not shelvectx in tmpwctx.children():
733 # rebase was a no-op, so it produced no child commit
733 # rebase was a no-op, so it produced no child commit
734 shelvectx = tmpwctx
734 shelvectx = tmpwctx
735
735
736 mergefiles(ui, repo, pctx, shelvectx)
736 mergefiles(ui, repo, pctx, shelvectx)
737
737
738 # Forget any files that were unknown before the shelve, unknown before
738 # Forget any files that were unknown before the shelve, unknown before
739 # unshelve started, but are now added.
739 # unshelve started, but are now added.
740 shelveunknown = shelvectx.extra().get('shelve_unknown')
740 shelveunknown = shelvectx.extra().get('shelve_unknown')
741 if shelveunknown:
741 if shelveunknown:
742 shelveunknown = frozenset(shelveunknown.split('\0'))
742 shelveunknown = frozenset(shelveunknown.split('\0'))
743 addedafter = frozenset(repo.status().added)
743 addedafter = frozenset(repo.status().added)
744 toforget = (addedafter & shelveunknown) - addedbefore
744 toforget = (addedafter & shelveunknown) - addedbefore
745 repo[None].forget(toforget)
745 repo[None].forget(toforget)
746
746
747 shelvedstate.clear(repo)
747 shelvedstate.clear(repo)
748
748
749 # The transaction aborting will strip all the commits for us,
749 # The transaction aborting will strip all the commits for us,
750 # but it doesn't update the inmemory structures, so addchangegroup
750 # but it doesn't update the inmemory structures, so addchangegroup
751 # hooks still fire and try to operate on the missing commits.
751 # hooks still fire and try to operate on the missing commits.
752 # Clean up manually to prevent this.
752 # Clean up manually to prevent this.
753 repo.unfiltered().changelog.strip(oldtiprev, tr)
753 repo.unfiltered().changelog.strip(oldtiprev, tr)
754
754
755 unshelvecleanup(ui, repo, basename, opts)
755 unshelvecleanup(ui, repo, basename, opts)
756
756
757 _aborttransaction(repo)
757 _aborttransaction(repo)
758 finally:
758 finally:
759 ui.quiet = oldquiet
759 ui.quiet = oldquiet
760 if tr:
760 if tr:
761 tr.release()
761 tr.release()
762 lockmod.release(lock)
762 lockmod.release(lock)
763 ui.restoreconfig(forcemerge)
763 ui.restoreconfig(forcemerge)
764
764
765 @command('shelve',
765 @command('shelve',
766 [('A', 'addremove', None,
766 [('A', 'addremove', None,
767 _('mark new/missing files as added/removed before shelving')),
767 _('mark new/missing files as added/removed before shelving')),
768 ('u', 'unknown', None,
768 ('u', 'unknown', None,
769 _('store unknown files in the shelve')),
769 _('store unknown files in the shelve')),
770 ('', 'cleanup', None,
770 ('', 'cleanup', None,
771 _('delete all shelved changes')),
771 _('delete all shelved changes')),
772 ('', 'date', '',
772 ('', 'date', '',
773 _('shelve with the specified commit date'), _('DATE')),
773 _('shelve with the specified commit date'), _('DATE')),
774 ('d', 'delete', None,
774 ('d', 'delete', None,
775 _('delete the named shelved change(s)')),
775 _('delete the named shelved change(s)')),
776 ('e', 'edit', False,
776 ('e', 'edit', False,
777 _('invoke editor on commit messages')),
777 _('invoke editor on commit messages')),
778 ('l', 'list', None,
778 ('l', 'list', None,
779 _('list current shelves')),
779 _('list current shelves')),
780 ('m', 'message', '',
780 ('m', 'message', '',
781 _('use text as shelve message'), _('TEXT')),
781 _('use text as shelve message'), _('TEXT')),
782 ('n', 'name', '',
782 ('n', 'name', '',
783 _('use the given name for the shelved commit'), _('NAME')),
783 _('use the given name for the shelved commit'), _('NAME')),
784 ('p', 'patch', None,
784 ('p', 'patch', None,
785 _('show patch')),
785 _('show patch')),
786 ('i', 'interactive', None,
786 ('i', 'interactive', None,
787 _('interactive mode, only works while creating a shelve')),
787 _('interactive mode, only works while creating a shelve')),
788 ('', 'stat', None,
788 ('', 'stat', None,
789 _('output diffstat-style summary of changes'))] + commands.walkopts,
789 _('output diffstat-style summary of changes'))] + commands.walkopts,
790 _('hg shelve [OPTION]... [FILE]...'))
790 _('hg shelve [OPTION]... [FILE]...'))
791 def shelvecmd(ui, repo, *pats, **opts):
791 def shelvecmd(ui, repo, *pats, **opts):
792 '''save and set aside changes from the working directory
792 '''save and set aside changes from the working directory
793
793
794 Shelving takes files that "hg status" reports as not clean, saves
794 Shelving takes files that "hg status" reports as not clean, saves
795 the modifications to a bundle (a shelved change), and reverts the
795 the modifications to a bundle (a shelved change), and reverts the
796 files so that their state in the working directory becomes clean.
796 files so that their state in the working directory becomes clean.
797
797
798 To restore these changes to the working directory, using "hg
798 To restore these changes to the working directory, using "hg
799 unshelve"; this will work even if you switch to a different
799 unshelve"; this will work even if you switch to a different
800 commit.
800 commit.
801
801
802 When no files are specified, "hg shelve" saves all not-clean
802 When no files are specified, "hg shelve" saves all not-clean
803 files. If specific files or directories are named, only changes to
803 files. If specific files or directories are named, only changes to
804 those files are shelved.
804 those files are shelved.
805
805
806 Each shelved change has a name that makes it easier to find later.
806 Each shelved change has a name that makes it easier to find later.
807 The name of a shelved change defaults to being based on the active
807 The name of a shelved change defaults to being based on the active
808 bookmark, or if there is no active bookmark, the current named
808 bookmark, or if there is no active bookmark, the current named
809 branch. To specify a different name, use ``--name``.
809 branch. To specify a different name, use ``--name``.
810
810
811 To see a list of existing shelved changes, use the ``--list``
811 To see a list of existing shelved changes, use the ``--list``
812 option. For each shelved change, this will print its name, age,
812 option. For each shelved change, this will print its name, age,
813 and description; use ``--patch`` or ``--stat`` for more details.
813 and description; use ``--patch`` or ``--stat`` for more details.
814
814
815 To delete specific shelved changes, use ``--delete``. To delete
815 To delete specific shelved changes, use ``--delete``. To delete
816 all shelved changes, use ``--cleanup``.
816 all shelved changes, use ``--cleanup``.
817 '''
817 '''
818 allowables = [
818 allowables = [
819 ('addremove', set(['create'])), # 'create' is pseudo action
819 ('addremove', set(['create'])), # 'create' is pseudo action
820 ('unknown', set(['create'])),
820 ('unknown', set(['create'])),
821 ('cleanup', set(['cleanup'])),
821 ('cleanup', set(['cleanup'])),
822 # ('date', set(['create'])), # ignored for passing '--date "0 0"' in tests
822 # ('date', set(['create'])), # ignored for passing '--date "0 0"' in tests
823 ('delete', set(['delete'])),
823 ('delete', set(['delete'])),
824 ('edit', set(['create'])),
824 ('edit', set(['create'])),
825 ('list', set(['list'])),
825 ('list', set(['list'])),
826 ('message', set(['create'])),
826 ('message', set(['create'])),
827 ('name', set(['create'])),
827 ('name', set(['create'])),
828 ('patch', set(['patch', 'list'])),
828 ('patch', set(['patch', 'list'])),
829 ('stat', set(['stat', 'list'])),
829 ('stat', set(['stat', 'list'])),
830 ]
830 ]
831 def checkopt(opt):
831 def checkopt(opt):
832 if opts[opt]:
832 if opts[opt]:
833 for i, allowable in allowables:
833 for i, allowable in allowables:
834 if opts[i] and opt not in allowable:
834 if opts[i] and opt not in allowable:
835 raise error.Abort(_("options '--%s' and '--%s' may not be "
835 raise error.Abort(_("options '--%s' and '--%s' may not be "
836 "used together") % (opt, i))
836 "used together") % (opt, i))
837 return True
837 return True
838 if checkopt('cleanup'):
838 if checkopt('cleanup'):
839 if pats:
839 if pats:
840 raise error.Abort(_("cannot specify names when using '--cleanup'"))
840 raise error.Abort(_("cannot specify names when using '--cleanup'"))
841 return cleanupcmd(ui, repo)
841 return cleanupcmd(ui, repo)
842 elif checkopt('delete'):
842 elif checkopt('delete'):
843 return deletecmd(ui, repo, pats)
843 return deletecmd(ui, repo, pats)
844 elif checkopt('list'):
844 elif checkopt('list'):
845 return listcmd(ui, repo, pats, opts)
845 return listcmd(ui, repo, pats, opts)
846 elif checkopt('patch'):
846 elif checkopt('patch'):
847 return singlepatchcmds(ui, repo, pats, opts, subcommand='patch')
847 return singlepatchcmds(ui, repo, pats, opts, subcommand='patch')
848 elif checkopt('stat'):
848 elif checkopt('stat'):
849 return singlepatchcmds(ui, repo, pats, opts, subcommand='stat')
849 return singlepatchcmds(ui, repo, pats, opts, subcommand='stat')
850 else:
850 else:
851 return createcmd(ui, repo, pats, opts)
851 return createcmd(ui, repo, pats, opts)
852
852
853 def extsetup(ui):
853 def extsetup(ui):
854 cmdutil.unfinishedstates.append(
854 cmdutil.unfinishedstates.append(
855 [shelvedstate._filename, False, False,
855 [shelvedstate._filename, False, False,
856 _('unshelve already in progress'),
856 _('unshelve already in progress'),
857 _("use 'hg unshelve --continue' or 'hg unshelve --abort'")])
857 _("use 'hg unshelve --continue' or 'hg unshelve --abort'")])
858 cmdutil.afterresolvedstates.append(
858 cmdutil.afterresolvedstates.append(
859 [shelvedstate._filename, _('hg unshelve --continue')])
859 [shelvedstate._filename, _('hg unshelve --continue')])
@@ -1,1136 +1,1137 b''
1 # changegroup.py - Mercurial changegroup manipulation functions
1 # changegroup.py - Mercurial changegroup manipulation functions
2 #
2 #
3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import os
10 import os
11 import struct
11 import struct
12 import tempfile
12 import tempfile
13 import weakref
13 import weakref
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 hex,
17 hex,
18 nullid,
18 nullid,
19 nullrev,
19 nullrev,
20 short,
20 short,
21 )
21 )
22
22
23 from . import (
23 from . import (
24 branchmap,
24 branchmap,
25 dagutil,
25 dagutil,
26 discovery,
26 discovery,
27 error,
27 error,
28 mdiff,
28 mdiff,
29 phases,
29 phases,
30 util,
30 util,
31 )
31 )
32
32
33 _CHANGEGROUPV1_DELTA_HEADER = "20s20s20s20s"
33 _CHANGEGROUPV1_DELTA_HEADER = "20s20s20s20s"
34 _CHANGEGROUPV2_DELTA_HEADER = "20s20s20s20s20s"
34 _CHANGEGROUPV2_DELTA_HEADER = "20s20s20s20s20s"
35 _CHANGEGROUPV3_DELTA_HEADER = ">20s20s20s20s20sH"
35 _CHANGEGROUPV3_DELTA_HEADER = ">20s20s20s20s20sH"
36
36
37 def readexactly(stream, n):
37 def readexactly(stream, n):
38 '''read n bytes from stream.read and abort if less was available'''
38 '''read n bytes from stream.read and abort if less was available'''
39 s = stream.read(n)
39 s = stream.read(n)
40 if len(s) < n:
40 if len(s) < n:
41 raise error.Abort(_("stream ended unexpectedly"
41 raise error.Abort(_("stream ended unexpectedly"
42 " (got %d bytes, expected %d)")
42 " (got %d bytes, expected %d)")
43 % (len(s), n))
43 % (len(s), n))
44 return s
44 return s
45
45
46 def getchunk(stream):
46 def getchunk(stream):
47 """return the next chunk from stream as a string"""
47 """return the next chunk from stream as a string"""
48 d = readexactly(stream, 4)
48 d = readexactly(stream, 4)
49 l = struct.unpack(">l", d)[0]
49 l = struct.unpack(">l", d)[0]
50 if l <= 4:
50 if l <= 4:
51 if l:
51 if l:
52 raise error.Abort(_("invalid chunk length %d") % l)
52 raise error.Abort(_("invalid chunk length %d") % l)
53 return ""
53 return ""
54 return readexactly(stream, l - 4)
54 return readexactly(stream, l - 4)
55
55
56 def chunkheader(length):
56 def chunkheader(length):
57 """return a changegroup chunk header (string)"""
57 """return a changegroup chunk header (string)"""
58 return struct.pack(">l", length + 4)
58 return struct.pack(">l", length + 4)
59
59
60 def closechunk():
60 def closechunk():
61 """return a changegroup chunk header (string) for a zero-length chunk"""
61 """return a changegroup chunk header (string) for a zero-length chunk"""
62 return struct.pack(">l", 0)
62 return struct.pack(">l", 0)
63
63
64 def combineresults(results):
64 def combineresults(results):
65 """logic to combine 0 or more addchangegroup results into one"""
65 """logic to combine 0 or more addchangegroup results into one"""
66 changedheads = 0
66 changedheads = 0
67 result = 1
67 result = 1
68 for ret in results:
68 for ret in results:
69 # If any changegroup result is 0, return 0
69 # If any changegroup result is 0, return 0
70 if ret == 0:
70 if ret == 0:
71 result = 0
71 result = 0
72 break
72 break
73 if ret < -1:
73 if ret < -1:
74 changedheads += ret + 1
74 changedheads += ret + 1
75 elif ret > 1:
75 elif ret > 1:
76 changedheads += ret - 1
76 changedheads += ret - 1
77 if changedheads > 0:
77 if changedheads > 0:
78 result = 1 + changedheads
78 result = 1 + changedheads
79 elif changedheads < 0:
79 elif changedheads < 0:
80 result = -1 + changedheads
80 result = -1 + changedheads
81 return result
81 return result
82
82
83 bundletypes = {
83 bundletypes = {
84 "": ("", None), # only when using unbundle on ssh and old http servers
84 "": ("", None), # only when using unbundle on ssh and old http servers
85 # since the unification ssh accepts a header but there
85 # since the unification ssh accepts a header but there
86 # is no capability signaling it.
86 # is no capability signaling it.
87 "HG20": (), # special-cased below
87 "HG20": (), # special-cased below
88 "HG10UN": ("HG10UN", None),
88 "HG10UN": ("HG10UN", None),
89 "HG10BZ": ("HG10", 'BZ'),
89 "HG10BZ": ("HG10", 'BZ'),
90 "HG10GZ": ("HG10GZ", 'GZ'),
90 "HG10GZ": ("HG10GZ", 'GZ'),
91 }
91 }
92
92
93 # hgweb uses this list to communicate its preferred type
93 # hgweb uses this list to communicate its preferred type
94 bundlepriority = ['HG10GZ', 'HG10BZ', 'HG10UN']
94 bundlepriority = ['HG10GZ', 'HG10BZ', 'HG10UN']
95
95
def writechunks(ui, chunks, filename, vfs=None):
    """Write chunks to a file and return its filename.

    The stream is assumed to be a bundle file.
    Existing files will not be overwritten.
    If no filename is specified, a temporary file is created.

    On any error while writing, the partially written file is removed
    (through ``vfs`` when one was supplied for an explicit filename).
    """
    fh = None
    cleanup = None
    try:
        if not filename:
            # no name given: write to a fresh temporary bundle file
            fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
            fh = os.fdopen(fd, "wb")
        elif vfs:
            fh = vfs.open(filename, "wb")
        else:
            fh = open(filename, "wb")
        # from here on, a failure must unlink the partial file
        cleanup = filename
        for chunk in chunks:
            fh.write(chunk)
        # everything written: disarm the cleanup
        cleanup = None
        return filename
    finally:
        if fh is not None:
            fh.close()
        if cleanup is not None:
            unlink = vfs.unlink if (filename and vfs) else os.unlink
            unlink(cleanup)
127
127
def writebundle(ui, cg, filename, bundletype, vfs=None, compression=None):
    """Write a bundle file and return its filename.

    Existing files will not be overwritten.
    If no filename is specified, a temporary file is created.
    bz2 compression can be turned off.
    The bundle file will be deleted in case of errors.
    """
    if bundletype == "HG20":
        # bundle2 wraps the changegroup in a 'changegroup' part and
        # takes care of compression itself.
        from . import bundle2
        b2 = bundle2.bundle20(ui)
        b2.setcompression(compression)
        part = b2.newpart('changegroup', data=cg.getchunks())
        part.addparam('version', cg.version)
        chunks = b2.getchunks()
    else:
        # compression argument is only for the bundle2 case
        assert compression is None
        if cg.version != '01':
            raise error.Abort(_('old bundle types only supports v1 '
                                'changegroups'))
        header, comp = bundletypes[bundletype]
        if comp not in util.compressors:
            raise error.Abort(_('unknown stream compression type: %s')
                              % comp)
        compressor = util.compressors[comp]()
        subiter = cg.getchunks()

        def gen():
            # legacy format: magic header, then the compressed stream
            yield header
            for chunk in subiter:
                yield compressor.compress(chunk)
            yield compressor.flush()
        chunks = gen()

    # parse the changegroup data, otherwise we will block
    # in case of sshrepo because we don't know the end of the stream

    # an empty chunkgroup is the end of the changegroup
    # a changegroup has at least 2 chunkgroups (changelog and manifest).
    # after that, an empty chunkgroup is the end of the changegroup
    return writechunks(ui, chunks, filename, vfs=vfs)
170
170
class cg1unpacker(object):
    """Unpacker for cg1 changegroup streams.

    A changegroup unpacker handles the framing of the revision data in
    the wire format. Most consumers will want to use the apply()
    method to add the changes from the changegroup to a repository.

    If you're forwarding a changegroup unmodified to another consumer,
    use getchunks(), which returns an iterator of changegroup
    chunks. This is mostly useful for cases where you need to know the
    data stream has ended by observing the end of the changegroup.

    deltachunk() is useful only if you're applying delta data. Most
    consumers should prefer apply() instead.

    A few other public methods exist. Those are used only for
    bundlerepo and some debug commands - their use is discouraged.
    """
    # struct format string describing one delta header in the stream
    deltaheader = _CHANGEGROUPV1_DELTA_HEADER
    deltaheadersize = struct.calcsize(deltaheader)
    version = '01'
    _grouplistcount = 1 # One list of files after the manifests

    def __init__(self, fh, alg):
        # fh: file-like object to read the raw stream from
        # alg: two-letter compression id ('UN', 'BZ', 'GZ', ...) as found
        # in the bundle header; mapped to a util.decompressors entry
        if alg == 'UN':
            alg = None # get more modern without breaking too much
        if not alg in util.decompressors:
            raise error.Abort(_('unknown stream compression type: %s')
                              % alg)
        if alg == 'BZ':
            # the 'BZ' header bytes were already consumed by the caller,
            # so use the decompressor variant for a truncated bz2 stream
            alg = '_truncatedBZ'
        self._stream = util.decompressors[alg](fh)
        self._type = alg
        # optional per-chunk progress callback, set by apply()
        self.callback = None

    # These methods (compressed, read, seek, tell) all appear to only
    # be used by bundlerepo, but it's a little hard to tell.
    def compressed(self):
        # True when the underlying stream goes through a decompressor
        return self._type is not None
    def read(self, l):
        return self._stream.read(l)
    def seek(self, pos):
        return self._stream.seek(pos)
    def tell(self):
        return self._stream.tell()
    def close(self):
        return self._stream.close()

    def _chunklength(self):
        """Read a 4-byte big-endian length prefix and return the payload
        size (the length value minus its own 4 bytes).

        Returns 0 for the empty chunk that terminates a group; aborts on
        a corrupt (1..4 or negative) length. Fires self.callback once per
        non-empty chunk for progress reporting.
        """
        d = readexactly(self._stream, 4)
        l = struct.unpack(">l", d)[0]
        if l <= 4:
            if l:
                raise error.Abort(_("invalid chunk length %d") % l)
            return 0
        if self.callback:
            self.callback()
        return l - 4

    def changelogheader(self):
        """v10 does not have a changelog header chunk"""
        return {}

    def manifestheader(self):
        """v10 does not have a manifest header chunk"""
        return {}

    def filelogheader(self):
        """return the header of the filelogs chunk, v10 only has the filename"""
        l = self._chunklength()
        if not l:
            # empty chunk: end of the filelog series
            return {}
        fname = readexactly(self._stream, l)
        return {'filename': fname}

    def _deltaheader(self, headertuple, prevnode):
        """Split an unpacked cg1 delta header.

        cg1 does not transmit a delta base: it is implicitly the first
        parent for the first delta of a group, and the previous delta's
        node afterwards. cg1 has no revlog flags either, so flags is 0.
        """
        node, p1, p2, cs = headertuple
        if prevnode is None:
            deltabase = p1
        else:
            deltabase = prevnode
        flags = 0
        return node, p1, p2, deltabase, cs, flags

    def deltachunk(self, prevnode):
        """Read one delta chunk and return it as a dict.

        Returns {} when the end-of-group (empty) chunk is reached.
        prevnode is only used by cg1's implicit delta-base rule above.
        """
        l = self._chunklength()
        if not l:
            return {}
        headerdata = readexactly(self._stream, self.deltaheadersize)
        header = struct.unpack(self.deltaheader, headerdata)
        # payload after the fixed-size header is the delta itself
        delta = readexactly(self._stream, l - self.deltaheadersize)
        node, p1, p2, deltabase, cs, flags = self._deltaheader(header, prevnode)
        return {'node': node, 'p1': p1, 'p2': p2, 'cs': cs,
                'deltabase': deltabase, 'delta': delta, 'flags': flags}

    def getchunks(self):
        """returns all the chunks contains in the bundle

        Used when you need to forward the binary stream to a file or another
        network API. To do so, it parse the changegroup data, otherwise it will
        block in case of sshrepo because it don't know the end of the stream.
        """
        # an empty chunkgroup is the end of the changegroup
        # a changegroup has at least 2 chunkgroups (changelog and manifest).
        # after that, changegroup versions 1 and 2 have a series of groups
        # with one group per file. changegroup 3 has a series of directory
        # manifests before the files.
        count = 0
        emptycount = 0
        while emptycount < self._grouplistcount:
            empty = True
            count += 1
            while True:
                chunk = getchunk(self)
                if not chunk:
                    # empty groups only count as terminators after the
                    # changelog and manifest groups (the first two)
                    if empty and count > 2:
                        emptycount += 1
                    break
                empty = False
                # re-frame the chunk, slicing the payload into <= 1MiB
                # pieces so consumers never see one huge string
                yield chunkheader(len(chunk))
                pos = 0
                while pos < len(chunk):
                    next = pos + 2**20
                    yield chunk[pos:next]
                    pos = next
            yield closechunk()

    def _unpackmanifests(self, repo, revmap, trp, prog, numchanges):
        """Consume the manifest group and add it to repo.manifest.

        prog is the progress-callback factory defined in apply();
        trp is a weakref proxy of the active transaction.
        """
        # We know that we'll never have more manifests than we had
        # changesets.
        self.callback = prog(_('manifests'), numchanges)
        # no need to check for empty manifest group here:
        # if the result of the merge of 1 and 2 is the same in 3 and 4,
        # no new manifest will be created and the manifest group will
        # be empty during the pull
        self.manifestheader()
        repo.manifest.addgroup(self, revmap, trp)
        repo.ui.progress(_('manifests'), None)

    def apply(self, repo, srctype, url, emptyok=False,
              targetphase=phases.draft, expectedtotal=None):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        repo = repo.unfiltered()
        def csmap(x):
            repo.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        changesets = files = revisions = 0

        try:
            with repo.transaction("\n".join([srctype,
                                             util.hidepassword(url)])) as tr:
                # The transaction could have been created before and already
                # carries source information. In this case we use the top
                # level data. We overwrite the argument because we need to use
                # the top level value (if they exist) in this function.
                srctype = tr.hookargs.setdefault('source', srctype)
                url = tr.hookargs.setdefault('url', url)
                repo.hook('prechangegroup', throw=True, **tr.hookargs)

                # write changelog data to temp files so concurrent readers
                # will not see an inconsistent view
                cl = repo.changelog
                cl.delayupdate(tr)
                oldheads = cl.heads()

                trp = weakref.proxy(tr)
                # pull off the changeset group
                repo.ui.status(_("adding changesets\n"))
                clstart = len(cl)
                # small stateful progress callback: one tick per chunk
                class prog(object):
                    def __init__(self, step, total):
                        self._step = step
                        self._total = total
                        self._count = 1
                    def __call__(self):
                        repo.ui.progress(self._step, self._count,
                                         unit=_('chunks'), total=self._total)
                        self._count += 1
                self.callback = prog(_('changesets'), expectedtotal)

                # efiles: union of files touched by the incoming
                # changesets (field 3 of each changelog entry); its size
                # becomes the total for the file progress bar below
                efiles = set()
                def onchangelog(cl, node):
                    efiles.update(cl.read(node)[3])

                self.changelogheader()
                srccontent = cl.addgroup(self, csmap, trp,
                                         addrevisioncb=onchangelog)
                efiles = len(efiles)

                if not (srccontent or emptyok):
                    raise error.Abort(_("received changelog group is empty"))
                clend = len(cl)
                changesets = clend - clstart
                repo.ui.progress(_('changesets'), None)

                # pull off the manifest group
                repo.ui.status(_("adding manifests\n"))
                self._unpackmanifests(repo, revmap, trp, prog, changesets)

                needfiles = {}
                if repo.ui.configbool('server', 'validate', default=False):
                    # validate incoming csets have their manifests
                    for cset in xrange(clstart, clend):
                        mfnode = repo.changelog.read(
                            repo.changelog.node(cset))[0]
                        mfest = repo.manifest.readdelta(mfnode)
                        # store file nodes we must see
                        for f, n in mfest.iteritems():
                            needfiles.setdefault(f, set()).add(n)

                # process the files
                repo.ui.status(_("adding file changes\n"))
                self.callback = None
                pr = prog(_('files'), efiles)
                newrevs, newfiles = _addchangegroupfiles(
                    repo, self, revmap, trp, pr, needfiles)
                revisions += newrevs
                files += newfiles

                # dh: change in head count, not counting new heads that
                # close a branch
                dh = 0
                if oldheads:
                    heads = cl.heads()
                    dh = len(heads) - len(oldheads)
                    for h in heads:
                        if h not in oldheads and repo[h].closesbranch():
                            dh -= 1
                htext = ""
                if dh:
                    htext = _(" (%+d heads)") % dh

                repo.ui.status(_("added %d changesets"
                                 " with %d changes to %d files%s\n")
                               % (changesets, revisions, files, htext))
                repo.invalidatevolatilesets()

                if changesets > 0:
                    # expose the first/last new node to hooks; only seed
                    # tr.hookargs if no earlier changegroup did so already
                    if 'node' not in tr.hookargs:
                        tr.hookargs['node'] = hex(cl.node(clstart))
                        tr.hookargs['node_last'] = hex(cl.node(clend - 1))
                        hookargs = dict(tr.hookargs)
                    else:
                        hookargs = dict(tr.hookargs)
                        hookargs['node'] = hex(cl.node(clstart))
                        hookargs['node_last'] = hex(cl.node(clend - 1))
                    repo.hook('pretxnchangegroup', throw=True, **hookargs)

                added = [cl.node(r) for r in xrange(clstart, clend)]
                publishing = repo.publishing()
                if srctype in ('push', 'serve'):
                    # Old servers can not push the boundary themselves.
                    # New servers won't push the boundary if changeset already
                    # exists locally as secret
                    #
                    # We should not use added here but the list of all change in
                    # the bundle
                    if publishing:
                        phases.advanceboundary(repo, tr, phases.public,
                                               srccontent)
                    else:
                        # Those changesets have been pushed from the
                        # outside, their phases are going to be pushed
                        # alongside. Therefor `targetphase` is
                        # ignored.
                        phases.advanceboundary(repo, tr, phases.draft,
                                               srccontent)
                        phases.retractboundary(repo, tr, phases.draft, added)
                elif srctype != 'strip':
                    # publishing only alter behavior during push
                    #
                    # strip should not touch boundary at all
                    phases.retractboundary(repo, tr, targetphase, added)

                if changesets > 0:
                    if srctype != 'strip':
                        # During strip, branchcache is invalid but
                        # coming call to `destroyed` will repair it.
                        # In other case we can safely update cache on
                        # disk.
                        branchmap.updatecache(repo.filtered('served'))

                    def runhooks():
                        # These hooks run when the lock releases, not when the
                        # transaction closes. So it's possible for the changelog
                        # to have changed since we last saw it.
                        if clstart >= len(repo):
                            return

                        # forcefully update the on-disk branch cache
                        repo.ui.debug("updating the branch cache\n")
                        repo.hook("changegroup", **hookargs)

                        # one 'incoming' hook invocation per added node
                        for n in added:
                            args = hookargs.copy()
                            args['node'] = hex(n)
                            del args['node_last']
                            repo.hook("incoming", **args)

                        newheads = [h for h in repo.heads()
                                    if h not in oldheads]
                        repo.ui.log("incoming",
                                    "%s incoming changes - new heads: %s\n",
                                    len(added),
                                    ', '.join([hex(c[:6]) for c in newheads]))

                    tr.addpostclose('changegroup-runhooks-%020i' % clstart,
                                    lambda tr: repo._afterlock(runhooks))
        finally:
            repo.ui.flush()
        # never return 0 here:
        if dh < 0:
            return dh - 1
        else:
            return dh + 1
497
497
class cg2unpacker(cg1unpacker):
    """Unpacker for cg2 changegroup streams.

    cg2 adds generaldelta support: each delta header names its delta
    base explicitly instead of leaving it implicit, so the header
    format differs slightly from cg1. Everything else about the data
    is unchanged.
    """
    deltaheader = _CHANGEGROUPV2_DELTA_HEADER
    deltaheadersize = struct.calcsize(deltaheader)
    version = '02'

    def _deltaheader(self, headertuple, prevnode):
        # prevnode is ignored: cg2 carries the delta base in the header.
        # cg2 still has no revlog flags, hence the trailing 0.
        node, p1, p2, deltabase, cs = headertuple
        return node, p1, p2, deltabase, cs, 0
513
513
class cg3unpacker(cg2unpacker):
    """Unpacker for cg3 changegroup streams.

    cg3 adds treemanifest exchange and revlog flags: the flags travel
    in the delta header, and an empty chunk separates the manifest
    groups from the file groups.
    """
    deltaheader = _CHANGEGROUPV3_DELTA_HEADER
    deltaheadersize = struct.calcsize(deltaheader)
    version = '03'
    _grouplistcount = 2 # One list of manifests and one list of files

    def _deltaheader(self, headertuple, prevnode):
        # The cg3 header already carries everything, flags included,
        # in the exact order callers expect: nothing to compute.
        return headertuple

    def _unpackmanifests(self, repo, revmap, trp, prog, numchanges):
        # Root manifests are handled exactly as in cg1/cg2.
        super(cg3unpacker, self)._unpackmanifests(repo, revmap, trp, prog,
                                                  numchanges)
        # cg3 may then send one group per changed directory; an empty
        # filelog header ends the series.
        chunkdata = self.filelogheader()
        while chunkdata:
            dirname = chunkdata["filename"]
            repo.ui.debug("adding %s revisions\n" % dirname)
            dirlog = repo.manifest.dirlog(dirname)
            if not dirlog.addgroup(self, revmap, trp):
                raise error.Abort(_("received dir revlog group is empty"))
            chunkdata = self.filelogheader()
543
543
class headerlessfixup(object):
    """File-like wrapper that puts back already-consumed header bytes.

    A caller that sniffed the first bytes of a stream can wrap the
    stream together with those bytes; reads then behave as if nothing
    had been consumed.
    """
    def __init__(self, fh, h):
        self._fh = fh
        self._h = h

    def read(self, n):
        # Serve buffered header bytes first; fall through to the real
        # stream once they are exhausted.
        if not self._h:
            return readexactly(self._fh, n)
        buffered = self._h[:n]
        self._h = self._h[n:]
        if len(buffered) < n:
            buffered += readexactly(self._fh, n - len(buffered))
        return buffered
555
555
556 def _moddirs(files):
556 def _moddirs(files):
557 """Given a set of modified files, find the list of modified directories.
557 """Given a set of modified files, find the list of modified directories.
558
558
559 This returns a list of (path to changed dir, changed dir) tuples,
559 This returns a list of (path to changed dir, changed dir) tuples,
560 as that's what the one client needs anyway.
560 as that's what the one client needs anyway.
561
561
562 >>> _moddirs(['a/b/c.py', 'a/b/c.txt', 'a/d/e/f/g.txt', 'i.txt', ])
562 >>> _moddirs(['a/b/c.py', 'a/b/c.txt', 'a/d/e/f/g.txt', 'i.txt', ])
563 [('/', 'a/'), ('a/', 'b/'), ('a/', 'd/'), ('a/d/', 'e/'), ('a/d/e/', 'f/')]
563 [('/', 'a/'), ('a/', 'b/'), ('a/', 'd/'), ('a/d/', 'e/'), ('a/d/e/', 'f/')]
564
564
565 """
565 """
566 alldirs = set()
566 alldirs = set()
567 for f in files:
567 for f in files:
568 path = f.split('/')[:-1]
568 path = f.split('/')[:-1]
569 for i in xrange(len(path) - 1, -1, -1):
569 for i in xrange(len(path) - 1, -1, -1):
570 dn = '/'.join(path[:i])
570 dn = '/'.join(path[:i])
571 current = dn + '/', path[i] + '/'
571 current = dn + '/', path[i] + '/'
572 if current in alldirs:
572 if current in alldirs:
573 break
573 break
574 alldirs.add(current)
574 alldirs.add(current)
575 return sorted(alldirs)
575 return sorted(alldirs)
576
576
class cg1packer(object):
    """Changegroup version '01' packer.

    Walks the changelog, manifest log and filelogs for a set of outgoing
    changesets and yields the corresponding stream of changegroup chunks
    (strings). Subclasses override the delta header format and delta-parent
    selection to implement versions '02' and '03'.
    """
    deltaheader = _CHANGEGROUPV1_DELTA_HEADER
    version = '01'
    def __init__(self, repo, bundlecaps=None):
        """Given a source repo, construct a bundler.

        bundlecaps is optional and can be used to specify the set of
        capabilities which can be used to build the bundle.
        """
        # Set of capabilities we can use to build the bundle.
        if bundlecaps is None:
            bundlecaps = set()
        self._bundlecaps = bundlecaps
        # experimental config: bundle.reorder
        # 'auto' means "let the packer decide per-revlog" (None below);
        # anything else is parsed as a hard True/False override.
        reorder = repo.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)
        self._repo = repo
        self._reorder = reorder
        self._progress = repo.ui.progress
        if self._repo.ui.verbose and not self._repo.ui.debugflag:
            self._verbosenote = self._repo.ui.note
        else:
            self._verbosenote = lambda s: None

    def close(self):
        # An empty chunk acts as the end-of-stream marker.
        return closechunk()

    def fileheader(self, fname):
        # A file header is simply the length-prefixed file name.
        return chunkheader(len(fname)) + fname

    def group(self, nodelist, revlog, lookup, units=None):
        """Calculate a delta group, yielding a sequence of changegroup chunks
        (strings).

        Given a list of changeset revs, return a set of deltas and
        metadata corresponding to nodes. The first delta is
        first parent(nodelist[0]) -> nodelist[0], the receiver is
        guaranteed to have this parent as it has all history before
        these changesets. In the case firstparent is nullrev the
        changegroup starts with a full revision.

        If units is not None, progress detail will be generated, units
        specifies the type of revlog that is touched (changelog, manifest,
        etc.).
        """
        # if we don't have any revisions touched by these changesets, bail
        if len(nodelist) == 0:
            yield self.close()
            return

        # for generaldelta revlogs, we linearize the revs; this will both be
        # much quicker and generate a much smaller bundle
        if (revlog._generaldelta and self._reorder is None) or self._reorder:
            dag = dagutil.revlogdag(revlog)
            revs = set(revlog.rev(n) for n in nodelist)
            revs = dag.linearize(revs)
        else:
            revs = sorted([revlog.rev(n) for n in nodelist])

        # add the parent of the first rev; the receiver already has it, so
        # the first emitted delta can be based against it
        p = revlog.parentrevs(revs[0])[0]
        revs.insert(0, p)

        # build deltas
        total = len(revs) - 1
        msgbundling = _('bundling')
        for r in xrange(len(revs) - 1):
            if units is not None:
                self._progress(msgbundling, r + 1, unit=units, total=total)
            prev, curr = revs[r], revs[r + 1]
            linknode = lookup(revlog.node(curr))
            for c in self.revchunk(revlog, curr, prev, linknode):
                yield c

        if units is not None:
            self._progress(msgbundling, None)
        yield self.close()

    # filter any nodes that claim to be part of the known set
    def prune(self, revlog, missing, commonrevs):
        """Return the nodes in missing whose linkrev is not in commonrevs."""
        rr, rl = revlog.rev, revlog.linkrev
        return [n for n in missing if rl(rr(n)) not in commonrevs]

    def _packmanifests(self, mfnodes, tmfnodes, lookuplinknode):
        """Pack flat manifests into a changegroup stream."""
        ml = self._repo.manifest
        size = 0
        for chunk in self.group(
                mfnodes, ml, lookuplinknode, units=_('manifests')):
            size += len(chunk)
            yield chunk
        self._verbosenote(_('%8.i (manifests)\n') % size)
        # It looks odd to assert this here, but tmfnodes doesn't get
        # filled in until after we've called lookuplinknode for
        # sending root manifests, so the only way to tell the streams
        # got crossed is to check after we've done all the work.
        assert not tmfnodes

    def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
        '''yield a sequence of changegroup chunks (strings)

        commonrevs: revs the receiver already has (used for pruning)
        clnodes: changelog nodes to send
        fastpathlinkrev: trust revlog linkrevs instead of recomputing them
        source: opaque operation tag, passed to hooks and generatefiles
        '''
        repo = self._repo
        cl = repo.changelog
        ml = repo.manifest

        clrevorder = {}
        mfs = {} # needed manifests
        tmfnodes = {}
        fnodes = {} # needed file nodes
        # maps manifest node id -> set(changed files)
        mfchangedfiles = {}

        # Callback for the changelog, used to collect changed files and
        # manifest nodes.
        # Returns the linkrev node (identity in the changelog case).
        def lookupcl(x):
            c = cl.read(x)
            clrevorder[x] = len(clrevorder)
            n = c[0]
            # record the first changeset introducing this manifest version
            mfs.setdefault(n, x)
            # Record a complete list of potentially-changed files in
            # this manifest.
            mfchangedfiles.setdefault(n, set()).update(c[3])
            return x

        self._verbosenote(_('uncompressed size of bundle content:\n'))
        size = 0
        for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets')):
            size += len(chunk)
            yield chunk
        self._verbosenote(_('%8.i (changelog)\n') % size)

        # We need to make sure that the linkrev in the changegroup refers to
        # the first changeset that introduced the manifest or file revision.
        # The fastpath is usually safer than the slowpath, because the
        # filelogs are walked in revlog order.
        #
        # When taking the slowpath with reorder=None and the manifest revlog
        # uses generaldelta, the manifest may be walked in the "wrong" order.
        # Without 'clrevorder', we would get an incorrect linkrev (see fix in
        # cc0ff93d0c0c).
        #
        # When taking the fastpath, we are only vulnerable to reordering
        # of the changelog itself. The changelog never uses generaldelta, so
        # it is only reordered when reorder=True. To handle this case, we
        # simply take the slowpath, which already has the 'clrevorder' logic.
        # This was also fixed in cc0ff93d0c0c.
        fastpathlinkrev = fastpathlinkrev and not self._reorder
        # Treemanifests don't work correctly with fastpathlinkrev
        # either, because we don't discover which directory nodes to
        # send along with files. This could probably be fixed.
        fastpathlinkrev = fastpathlinkrev and (
            'treemanifest' not in repo.requirements)
        # Callback for the manifest, used to collect linkrevs for filelog
        # revisions.
        # Returns the linkrev node (collected in lookupcl).
        if fastpathlinkrev:
            lookupmflinknode = mfs.__getitem__
        else:
            def lookupmflinknode(x):
                """Callback for looking up the linknode for manifests.

                Returns the linkrev node for the specified manifest.

                SIDE EFFECT:

                1) fclnodes gets populated with the list of relevant
                   file nodes if we're not using fastpathlinkrev
                2) When treemanifests are in use, collects treemanifest nodes
                   to send

                Note that this means manifests must be completely sent to
                the client before you can trust the list of files and
                treemanifests to send.
                """
                clnode = mfs[x]
                # We no longer actually care about reading deltas of
                # the manifest here, because we already know the list
                # of changed files, so for treemanifests (which
                # lazily-load anyway to *generate* a readdelta) we can
                # just load them with read() and then we'll actually
                # be able to correctly load node IDs from the
                # submanifest entries.
                if 'treemanifest' in repo.requirements:
                    mdata = ml.read(x)
                else:
                    mdata = ml.readfast(x)
                for f in mfchangedfiles[x]:
                    try:
                        n = mdata[f]
                    except KeyError:
                        continue
                    # record the first changeset introducing this filelog
                    # version
                    fclnodes = fnodes.setdefault(f, {})
                    fclnode = fclnodes.setdefault(n, clnode)
                    if clrevorder[clnode] < clrevorder[fclnode]:
                        fclnodes[n] = clnode
                # gather list of changed treemanifest nodes
                if 'treemanifest' in repo.requirements:
                    submfs = {'/': mdata}
                    for dn, bn in _moddirs(mfchangedfiles[x]):
                        submf = submfs[dn]
                        submf = submf._dirs[bn]
                        submfs[submf.dir()] = submf
                        tmfclnodes = tmfnodes.setdefault(submf.dir(), {})
                        # BUGFIX: this block previously compared against
                        # 'fclnode' and assigned under key 'n' -- stale
                        # leftovers from the file loop above (wrong linkrev
                        # for the directory node, and a NameError when every
                        # file lookup hit KeyError). Track the earliest
                        # changeset per *treemanifest* node instead.
                        tmfclnode = tmfclnodes.setdefault(submf._node, clnode)
                        if clrevorder[clnode] < clrevorder[tmfclnode]:
                            tmfclnodes[submf._node] = clnode
                return clnode

        mfnodes = self.prune(ml, mfs, commonrevs)
        for x in self._packmanifests(
            mfnodes, tmfnodes, lookupmflinknode):
            yield x

        mfs.clear()
        clrevs = set(cl.rev(x) for x in clnodes)

        if not fastpathlinkrev:
            def linknodes(unused, fname):
                return fnodes.get(fname, {})
        else:
            cln = cl.node
            def linknodes(filerevlog, fname):
                llr = filerevlog.linkrev
                fln = filerevlog.node
                revs = ((r, llr(r)) for r in filerevlog)
                return dict((fln(r), cln(lr)) for r, lr in revs if lr in clrevs)

        changedfiles = set()
        for x in mfchangedfiles.itervalues():
            changedfiles.update(x)
        for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
                                        source):
            yield chunk

        yield self.close()

        if clnodes:
            repo.hook('outgoing', node=hex(clnodes[0]), source=source)

    # The 'source' parameter is useful for extensions
    def generatefiles(self, changedfiles, linknodes, commonrevs, source):
        """Yield file headers and delta groups for every changed file."""
        repo = self._repo
        progress = self._progress
        msgbundling = _('bundling')

        total = len(changedfiles)
        # for progress output
        msgfiles = _('files')
        for i, fname in enumerate(sorted(changedfiles)):
            filerevlog = repo.file(fname)
            if not filerevlog:
                raise error.Abort(_("empty or missing revlog for %s") % fname)

            linkrevnodes = linknodes(filerevlog, fname)
            # Lookup for filenodes, we collected the linkrev nodes above in
            # the fastpath case and with lookupmf in the slowpath case.
            def lookupfilelog(x):
                return linkrevnodes[x]

            filenodes = self.prune(filerevlog, linkrevnodes, commonrevs)
            if filenodes:
                progress(msgbundling, i + 1, item=fname, unit=msgfiles,
                         total=total)
                h = self.fileheader(fname)
                size = len(h)
                yield h
                for chunk in self.group(filenodes, filerevlog, lookupfilelog):
                    size += len(chunk)
                    yield chunk
                self._verbosenote(_('%8.i %s\n') % (size, fname))
        progress(msgbundling, None)

    def deltaparent(self, revlog, rev, p1, p2, prev):
        # cg1 always deltas against the previously sent revision.
        return prev

    def revchunk(self, revlog, rev, prev, linknode):
        """Yield the chunks (header, meta, delta) for one revision."""
        node = revlog.node(rev)
        p1, p2 = revlog.parentrevs(rev)
        base = self.deltaparent(revlog, rev, p1, p2, prev)

        prefix = ''
        if revlog.iscensored(base) or revlog.iscensored(rev):
            # Censored revisions can't be diffed; ship the tombstone (or the
            # full text) wrapped in a synthetic diff header.
            try:
                delta = revlog.revision(node)
            except error.CensoredNodeError as e:
                delta = e.tombstone
            if base == nullrev:
                prefix = mdiff.trivialdiffheader(len(delta))
            else:
                baselen = revlog.rawsize(base)
                prefix = mdiff.replacediffheader(baselen, len(delta))
        elif base == nullrev:
            # No usable base: send the full revision as a trivial diff.
            delta = revlog.revision(node)
            prefix = mdiff.trivialdiffheader(len(delta))
        else:
            delta = revlog.revdiff(base, rev)
        p1n, p2n = revlog.parents(node)
        basenode = revlog.node(base)
        flags = revlog.flags(rev)
        meta = self.builddeltaheader(node, p1n, p2n, basenode, linknode, flags)
        meta += prefix
        l = len(meta) + len(delta)
        yield chunkheader(l)
        yield meta
        yield delta
    def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
        # do nothing with basenode, it is implicitly the previous one in HG10
        # do nothing with flags, it is implicitly 0 for cg1 and cg2
        return struct.pack(self.deltaheader, node, p1n, p2n, linknode)
891
891
class cg2packer(cg1packer):
    """Changegroup version '02' packer: adds generaldelta support."""
    version = '02'
    deltaheader = _CHANGEGROUPV2_DELTA_HEADER

    def __init__(self, repo, bundlecaps=None):
        super(cg2packer, self).__init__(repo, bundlecaps)
        if self._reorder is None:
            # Since generaldelta is directly supported by cg2, reordering
            # generally doesn't help, so we disable it by default (treating
            # bundle.reorder=auto just like bundle.reorder=False).
            self._reorder = False

    def deltaparent(self, revlog, rev, p1, p2, prev):
        # Prefer the stored delta base, but only when the receiver is
        # guaranteed to have it: a parent, or the previously sent rev.
        # Full snapshots (base == nullrev) and unknown bases fall back
        # to a delta against prev to avoid shipping full revisions.
        storedbase = revlog.deltaparent(rev)
        if storedbase != nullrev and storedbase in (p1, p2, prev):
            return storedbase
        return prev

    def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
        # Do nothing with flags, it is implicitly 0 in cg1 and cg2
        return struct.pack(self.deltaheader, node, p1n, p2n, basenode, linknode)
915
915
class cg3packer(cg2packer):
    """Changegroup version '03' packer: revlog flags and treemanifests."""
    version = '03'
    deltaheader = _CHANGEGROUPV3_DELTA_HEADER

    def _packmanifests(self, mfnodes, tmfnodes, lookuplinknode):
        # Note that debug prints are super confusing in this code, as
        # tmfnodes gets populated by the calls to lookuplinknode in
        # the superclass's manifest packer. In the future we should
        # probably see if we can refactor this somehow to be less
        # confusing.
        # Pass an empty dict to the superclass so its "streams crossed"
        # assertion holds while tmfnodes fills up as a side effect.
        rootstream = super(cg3packer, self)._packmanifests(
            mfnodes, {}, lookuplinknode)
        for chunk in rootstream:
            yield chunk
        dirlog = self._repo.manifest.dirlog
        for name, nodes in tmfnodes.iteritems():
            # For now, directory headers are simply file headers with
            # a trailing '/' on the path (already in the name).
            yield self.fileheader(name)
            for chunk in self.group(nodes, dirlog(name), nodes.get):
                yield chunk
        yield self.close()

    def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
        # cg3 carries the revlog flags explicitly in the delta header.
        return struct.pack(
            self.deltaheader, node, p1n, p2n, basenode, linknode, flags)
941
941
# Map changegroup version string -> (packer class, unpacker class).
_packermap = {'01': (cg1packer, cg1unpacker),
             # cg2 adds support for exchanging generaldelta
             '02': (cg2packer, cg2unpacker),
             # cg3 adds support for exchanging revlog flags and treemanifests
             '03': (cg3packer, cg3unpacker),
}
948
948
def supportedversions(repo):
    """Return the set of changegroup versions usable with this repo."""
    versions = set(_packermap.keys())
    usetreemanifest = ('treemanifest' in repo.requirements or
                       repo.ui.configbool('experimental', 'treemanifest'))
    if usetreemanifest:
        # Versions 01 and 02 support only flat manifests and it's just too
        # expensive to convert between the flat manifest and tree manifest on
        # the fly. Since tree manifests are hashed differently, all of history
        # would have to be converted. Instead, we simply don't even pretend to
        # support versions 01 and 02.
        versions -= set(['01', '02'])
    elif not repo.ui.configbool('experimental', 'changegroup3'):
        versions.discard('03')
    return versions
963
963
def safeversion(repo):
    """Return the smallest changegroup version it's safe to assume all
    clients of this repo will support.

    For example, all hg versions that support generaldelta also support
    changegroup 02.
    """
    versions = supportedversions(repo)
    if 'generaldelta' in repo.requirements:
        versions.discard('01')
    assert versions
    return min(versions)
972
973
def getbundler(version, repo, bundlecaps=None):
    """Instantiate the packer class registered for *version*."""
    assert version in supportedversions(repo)
    packercls = _packermap[version][0]
    return packercls(repo, bundlecaps)
976
977
def getunbundler(version, fh, alg):
    """Instantiate the unpacker for *version* reading from *fh*.

    alg names the compression in use (passed through to the unpacker).
    """
    unpackercls = _packermap[version][1]
    return unpackercls(fh, alg)
979
980
def _changegroupinfo(repo, nodes, source):
    """Report the number of changesets found (and, in debug mode, list them)."""
    ui = repo.ui
    if ui.verbose or source == 'bundle':
        ui.status(_("%d changesets found\n") % len(nodes))
    if ui.debugflag:
        ui.debug("list of changesets:\n")
        for n in nodes:
            ui.debug("%s\n" % hex(n))
987
988
def getsubsetraw(repo, outgoing, bundler, source, fastpath=False):
    """Run hooks and return a raw changegroup chunk generator for outgoing."""
    repo = repo.unfiltered()
    commonrevs = outgoing.common
    csets = outgoing.missing
    heads = outgoing.missingheads
    # We go through the fast path if we get told to, or if all (unfiltered
    # heads have been requested (since we then know there all linkrevs will
    # be pulled by the client).
    heads.sort()  # in-place: callers may observe the sorted missingheads
    fastpathlinkrev = fastpath or (
        repo.filtername is None and heads == sorted(repo.heads()))

    repo.hook('preoutgoing', throw=True, source=source)
    _changegroupinfo(repo, csets, source)
    return bundler.generate(commonrevs, csets, fastpathlinkrev, source)
1003
1004
def getsubset(repo, outgoing, bundler, source, fastpath=False):
    """Like getsubsetraw, but wrap the chunk stream in an unbundler."""
    rawchunks = getsubsetraw(repo, outgoing, bundler, source, fastpath)
    return getunbundler(bundler.version, util.chunkbuffer(rawchunks), None)
1007
1008
def changegroupsubset(repo, roots, heads, source, version='01'):
    """Compute a changegroup consisting of all the nodes that are
    descendants of any of the roots and ancestors of any of the heads.
    Return a chunkbuffer object whose read() method will return
    successive changegroup chunks.

    It is fairly complex as determining which filenodes and which
    manifest nodes need to be included for the changeset to be complete
    is non-trivial.

    Another wrinkle is doing the reverse, figuring out which changeset in
    the changegroup a particular filenode or manifestnode belongs to.
    """
    cl = repo.changelog
    if not roots:
        roots = [nullid]
    # Discovery bases: the non-null parents of every requested root.
    discbases = [p for n in roots
                 for p in cl.parents(n) if p != nullid]
    # TODO: remove call to nodesbetween.
    csets, roots, heads = cl.nodesbetween(roots, heads)
    included = set(csets)
    discbases = [n for n in discbases if n not in included]
    outgoing = discovery.outgoing(cl, discbases, heads)
    bundler = getbundler(version, repo)
    return getsubset(repo, outgoing, bundler, source)
1034
1035
1035 def getlocalchangegroupraw(repo, source, outgoing, bundlecaps=None,
1036 def getlocalchangegroupraw(repo, source, outgoing, bundlecaps=None,
1036 version='01'):
1037 version='01'):
1037 """Like getbundle, but taking a discovery.outgoing as an argument.
1038 """Like getbundle, but taking a discovery.outgoing as an argument.
1038
1039
1039 This is only implemented for local repos and reuses potentially
1040 This is only implemented for local repos and reuses potentially
1040 precomputed sets in outgoing. Returns a raw changegroup generator."""
1041 precomputed sets in outgoing. Returns a raw changegroup generator."""
1041 if not outgoing.missing:
1042 if not outgoing.missing:
1042 return None
1043 return None
1043 bundler = getbundler(version, repo, bundlecaps)
1044 bundler = getbundler(version, repo, bundlecaps)
1044 return getsubsetraw(repo, outgoing, bundler, source)
1045 return getsubsetraw(repo, outgoing, bundler, source)
1045
1046
1046 def getlocalchangegroup(repo, source, outgoing, bundlecaps=None,
1047 def getlocalchangegroup(repo, source, outgoing, bundlecaps=None,
1047 version='01'):
1048 version='01'):
1048 """Like getbundle, but taking a discovery.outgoing as an argument.
1049 """Like getbundle, but taking a discovery.outgoing as an argument.
1049
1050
1050 This is only implemented for local repos and reuses potentially
1051 This is only implemented for local repos and reuses potentially
1051 precomputed sets in outgoing."""
1052 precomputed sets in outgoing."""
1052 if not outgoing.missing:
1053 if not outgoing.missing:
1053 return None
1054 return None
1054 bundler = getbundler(version, repo, bundlecaps)
1055 bundler = getbundler(version, repo, bundlecaps)
1055 return getsubset(repo, outgoing, bundler, source)
1056 return getsubset(repo, outgoing, bundler, source)
1056
1057
1057 def computeoutgoing(repo, heads, common):
1058 def computeoutgoing(repo, heads, common):
1058 """Computes which revs are outgoing given a set of common
1059 """Computes which revs are outgoing given a set of common
1059 and a set of heads.
1060 and a set of heads.
1060
1061
1061 This is a separate function so extensions can have access to
1062 This is a separate function so extensions can have access to
1062 the logic.
1063 the logic.
1063
1064
1064 Returns a discovery.outgoing object.
1065 Returns a discovery.outgoing object.
1065 """
1066 """
1066 cl = repo.changelog
1067 cl = repo.changelog
1067 if common:
1068 if common:
1068 hasnode = cl.hasnode
1069 hasnode = cl.hasnode
1069 common = [n for n in common if hasnode(n)]
1070 common = [n for n in common if hasnode(n)]
1070 else:
1071 else:
1071 common = [nullid]
1072 common = [nullid]
1072 if not heads:
1073 if not heads:
1073 heads = cl.heads()
1074 heads = cl.heads()
1074 return discovery.outgoing(cl, common, heads)
1075 return discovery.outgoing(cl, common, heads)
1075
1076
1076 def getchangegroup(repo, source, heads=None, common=None, bundlecaps=None,
1077 def getchangegroup(repo, source, heads=None, common=None, bundlecaps=None,
1077 version='01'):
1078 version='01'):
1078 """Like changegroupsubset, but returns the set difference between the
1079 """Like changegroupsubset, but returns the set difference between the
1079 ancestors of heads and the ancestors common.
1080 ancestors of heads and the ancestors common.
1080
1081
1081 If heads is None, use the local heads. If common is None, use [nullid].
1082 If heads is None, use the local heads. If common is None, use [nullid].
1082
1083
1083 The nodes in common might not all be known locally due to the way the
1084 The nodes in common might not all be known locally due to the way the
1084 current discovery protocol works.
1085 current discovery protocol works.
1085 """
1086 """
1086 outgoing = computeoutgoing(repo, heads, common)
1087 outgoing = computeoutgoing(repo, heads, common)
1087 return getlocalchangegroup(repo, source, outgoing, bundlecaps=bundlecaps,
1088 return getlocalchangegroup(repo, source, outgoing, bundlecaps=bundlecaps,
1088 version=version)
1089 version=version)
1089
1090
1090 def changegroup(repo, basenodes, source):
1091 def changegroup(repo, basenodes, source):
1091 # to avoid a race we use changegroupsubset() (issue1320)
1092 # to avoid a race we use changegroupsubset() (issue1320)
1092 return changegroupsubset(repo, basenodes, repo.heads(), source)
1093 return changegroupsubset(repo, basenodes, repo.heads(), source)
1093
1094
1094 def _addchangegroupfiles(repo, source, revmap, trp, pr, needfiles):
1095 def _addchangegroupfiles(repo, source, revmap, trp, pr, needfiles):
1095 revisions = 0
1096 revisions = 0
1096 files = 0
1097 files = 0
1097 while True:
1098 while True:
1098 chunkdata = source.filelogheader()
1099 chunkdata = source.filelogheader()
1099 if not chunkdata:
1100 if not chunkdata:
1100 break
1101 break
1101 f = chunkdata["filename"]
1102 f = chunkdata["filename"]
1102 repo.ui.debug("adding %s revisions\n" % f)
1103 repo.ui.debug("adding %s revisions\n" % f)
1103 pr()
1104 pr()
1104 fl = repo.file(f)
1105 fl = repo.file(f)
1105 o = len(fl)
1106 o = len(fl)
1106 try:
1107 try:
1107 if not fl.addgroup(source, revmap, trp):
1108 if not fl.addgroup(source, revmap, trp):
1108 raise error.Abort(_("received file revlog group is empty"))
1109 raise error.Abort(_("received file revlog group is empty"))
1109 except error.CensoredBaseError as e:
1110 except error.CensoredBaseError as e:
1110 raise error.Abort(_("received delta base is censored: %s") % e)
1111 raise error.Abort(_("received delta base is censored: %s") % e)
1111 revisions += len(fl) - o
1112 revisions += len(fl) - o
1112 files += 1
1113 files += 1
1113 if f in needfiles:
1114 if f in needfiles:
1114 needs = needfiles[f]
1115 needs = needfiles[f]
1115 for new in xrange(o, len(fl)):
1116 for new in xrange(o, len(fl)):
1116 n = fl.node(new)
1117 n = fl.node(new)
1117 if n in needs:
1118 if n in needs:
1118 needs.remove(n)
1119 needs.remove(n)
1119 else:
1120 else:
1120 raise error.Abort(
1121 raise error.Abort(
1121 _("received spurious file revlog entry"))
1122 _("received spurious file revlog entry"))
1122 if not needs:
1123 if not needs:
1123 del needfiles[f]
1124 del needfiles[f]
1124 repo.ui.progress(_('files'), None)
1125 repo.ui.progress(_('files'), None)
1125
1126
1126 for f, needs in needfiles.iteritems():
1127 for f, needs in needfiles.iteritems():
1127 fl = repo.file(f)
1128 fl = repo.file(f)
1128 for n in needs:
1129 for n in needs:
1129 try:
1130 try:
1130 fl.rev(n)
1131 fl.rev(n)
1131 except error.LookupError:
1132 except error.LookupError:
1132 raise error.Abort(
1133 raise error.Abort(
1133 _('missing file data for %s:%s - run hg verify') %
1134 _('missing file data for %s:%s - run hg verify') %
1134 (f, hex(n)))
1135 (f, hex(n)))
1135
1136
1136 return revisions, files
1137 return revisions, files
@@ -1,455 +1,471 b''
1 $ cat << EOF >> $HGRCPATH
1 $ cat << EOF >> $HGRCPATH
2 > [format]
2 > [format]
3 > usegeneraldelta=yes
3 > usegeneraldelta=yes
4 > EOF
4 > EOF
5
5
6 Set up repo
6 Set up repo
7
7
8 $ hg --config experimental.treemanifest=True init repo
8 $ hg --config experimental.treemanifest=True init repo
9 $ cd repo
9 $ cd repo
10
10
11 Requirements get set on init
11 Requirements get set on init
12
12
13 $ grep treemanifest .hg/requires
13 $ grep treemanifest .hg/requires
14 treemanifest
14 treemanifest
15
15
16 Without directories, looks like any other repo
16 Without directories, looks like any other repo
17
17
18 $ echo 0 > a
18 $ echo 0 > a
19 $ echo 0 > b
19 $ echo 0 > b
20 $ hg ci -Aqm initial
20 $ hg ci -Aqm initial
21 $ hg debugdata -m 0
21 $ hg debugdata -m 0
22 a\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe (esc)
22 a\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe (esc)
23 b\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe (esc)
23 b\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe (esc)
24
24
25 Submanifest is stored in separate revlog
25 Submanifest is stored in separate revlog
26
26
27 $ mkdir dir1
27 $ mkdir dir1
28 $ echo 1 > dir1/a
28 $ echo 1 > dir1/a
29 $ echo 1 > dir1/b
29 $ echo 1 > dir1/b
30 $ echo 1 > e
30 $ echo 1 > e
31 $ hg ci -Aqm 'add dir1'
31 $ hg ci -Aqm 'add dir1'
32 $ hg debugdata -m 1
32 $ hg debugdata -m 1
33 a\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe (esc)
33 a\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe (esc)
34 b\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe (esc)
34 b\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe (esc)
35 dir1\x008b3ffd73f901e83304c83d33132c8e774ceac44et (esc)
35 dir1\x008b3ffd73f901e83304c83d33132c8e774ceac44et (esc)
36 e\x00b8e02f6433738021a065f94175c7cd23db5f05be (esc)
36 e\x00b8e02f6433738021a065f94175c7cd23db5f05be (esc)
37 $ hg debugdata --dir dir1 0
37 $ hg debugdata --dir dir1 0
38 a\x00b8e02f6433738021a065f94175c7cd23db5f05be (esc)
38 a\x00b8e02f6433738021a065f94175c7cd23db5f05be (esc)
39 b\x00b8e02f6433738021a065f94175c7cd23db5f05be (esc)
39 b\x00b8e02f6433738021a065f94175c7cd23db5f05be (esc)
40
40
41 Can add nested directories
41 Can add nested directories
42
42
43 $ mkdir dir1/dir1
43 $ mkdir dir1/dir1
44 $ echo 2 > dir1/dir1/a
44 $ echo 2 > dir1/dir1/a
45 $ echo 2 > dir1/dir1/b
45 $ echo 2 > dir1/dir1/b
46 $ mkdir dir1/dir2
46 $ mkdir dir1/dir2
47 $ echo 2 > dir1/dir2/a
47 $ echo 2 > dir1/dir2/a
48 $ echo 2 > dir1/dir2/b
48 $ echo 2 > dir1/dir2/b
49 $ hg ci -Aqm 'add dir1/dir1'
49 $ hg ci -Aqm 'add dir1/dir1'
50 $ hg files -r .
50 $ hg files -r .
51 a
51 a
52 b
52 b
53 dir1/a (glob)
53 dir1/a (glob)
54 dir1/b (glob)
54 dir1/b (glob)
55 dir1/dir1/a (glob)
55 dir1/dir1/a (glob)
56 dir1/dir1/b (glob)
56 dir1/dir1/b (glob)
57 dir1/dir2/a (glob)
57 dir1/dir2/a (glob)
58 dir1/dir2/b (glob)
58 dir1/dir2/b (glob)
59 e
59 e
60
60
61 Revision is not created for unchanged directory
61 Revision is not created for unchanged directory
62
62
63 $ mkdir dir2
63 $ mkdir dir2
64 $ echo 3 > dir2/a
64 $ echo 3 > dir2/a
65 $ hg add dir2
65 $ hg add dir2
66 adding dir2/a (glob)
66 adding dir2/a (glob)
67 $ hg debugindex --dir dir1 > before
67 $ hg debugindex --dir dir1 > before
68 $ hg ci -qm 'add dir2'
68 $ hg ci -qm 'add dir2'
69 $ hg debugindex --dir dir1 > after
69 $ hg debugindex --dir dir1 > after
70 $ diff before after
70 $ diff before after
71 $ rm before after
71 $ rm before after
72
72
73 Removing directory does not create an revlog entry
73 Removing directory does not create an revlog entry
74
74
75 $ hg rm dir1/dir1
75 $ hg rm dir1/dir1
76 removing dir1/dir1/a (glob)
76 removing dir1/dir1/a (glob)
77 removing dir1/dir1/b (glob)
77 removing dir1/dir1/b (glob)
78 $ hg debugindex --dir dir1/dir1 > before
78 $ hg debugindex --dir dir1/dir1 > before
79 $ hg ci -qm 'remove dir1/dir1'
79 $ hg ci -qm 'remove dir1/dir1'
80 $ hg debugindex --dir dir1/dir1 > after
80 $ hg debugindex --dir dir1/dir1 > after
81 $ diff before after
81 $ diff before after
82 $ rm before after
82 $ rm before after
83
83
84 Check that hg files (calls treemanifest.walk()) works
84 Check that hg files (calls treemanifest.walk()) works
85 without loading all directory revlogs
85 without loading all directory revlogs
86
86
87 $ hg co 'desc("add dir2")'
87 $ hg co 'desc("add dir2")'
88 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
88 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
89 $ mv .hg/store/meta/dir2 .hg/store/meta/dir2-backup
89 $ mv .hg/store/meta/dir2 .hg/store/meta/dir2-backup
90 $ hg files -r . dir1
90 $ hg files -r . dir1
91 dir1/a (glob)
91 dir1/a (glob)
92 dir1/b (glob)
92 dir1/b (glob)
93 dir1/dir1/a (glob)
93 dir1/dir1/a (glob)
94 dir1/dir1/b (glob)
94 dir1/dir1/b (glob)
95 dir1/dir2/a (glob)
95 dir1/dir2/a (glob)
96 dir1/dir2/b (glob)
96 dir1/dir2/b (glob)
97
97
98 Check that status between revisions works (calls treemanifest.matches())
98 Check that status between revisions works (calls treemanifest.matches())
99 without loading all directory revlogs
99 without loading all directory revlogs
100
100
101 $ hg status --rev 'desc("add dir1")' --rev . dir1
101 $ hg status --rev 'desc("add dir1")' --rev . dir1
102 A dir1/dir1/a
102 A dir1/dir1/a
103 A dir1/dir1/b
103 A dir1/dir1/b
104 A dir1/dir2/a
104 A dir1/dir2/a
105 A dir1/dir2/b
105 A dir1/dir2/b
106 $ mv .hg/store/meta/dir2-backup .hg/store/meta/dir2
106 $ mv .hg/store/meta/dir2-backup .hg/store/meta/dir2
107
107
108 Merge creates 2-parent revision of directory revlog
108 Merge creates 2-parent revision of directory revlog
109
109
110 $ echo 5 > dir1/a
110 $ echo 5 > dir1/a
111 $ hg ci -Aqm 'modify dir1/a'
111 $ hg ci -Aqm 'modify dir1/a'
112 $ hg co '.^'
112 $ hg co '.^'
113 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
113 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
114 $ echo 6 > dir1/b
114 $ echo 6 > dir1/b
115 $ hg ci -Aqm 'modify dir1/b'
115 $ hg ci -Aqm 'modify dir1/b'
116 $ hg merge 'desc("modify dir1/a")'
116 $ hg merge 'desc("modify dir1/a")'
117 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
117 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
118 (branch merge, don't forget to commit)
118 (branch merge, don't forget to commit)
119 $ hg ci -m 'conflict-free merge involving dir1/'
119 $ hg ci -m 'conflict-free merge involving dir1/'
120 $ cat dir1/a
120 $ cat dir1/a
121 5
121 5
122 $ cat dir1/b
122 $ cat dir1/b
123 6
123 6
124 $ hg debugindex --dir dir1
124 $ hg debugindex --dir dir1
125 rev offset length delta linkrev nodeid p1 p2
125 rev offset length delta linkrev nodeid p1 p2
126 0 0 54 -1 1 8b3ffd73f901 000000000000 000000000000
126 0 0 54 -1 1 8b3ffd73f901 000000000000 000000000000
127 1 54 68 0 2 68e9d057c5a8 8b3ffd73f901 000000000000
127 1 54 68 0 2 68e9d057c5a8 8b3ffd73f901 000000000000
128 2 122 12 1 4 4698198d2624 68e9d057c5a8 000000000000
128 2 122 12 1 4 4698198d2624 68e9d057c5a8 000000000000
129 3 134 55 1 5 44844058ccce 68e9d057c5a8 000000000000
129 3 134 55 1 5 44844058ccce 68e9d057c5a8 000000000000
130 4 189 55 1 6 bf3d9b744927 68e9d057c5a8 000000000000
130 4 189 55 1 6 bf3d9b744927 68e9d057c5a8 000000000000
131 5 244 55 4 7 dde7c0af2a03 bf3d9b744927 44844058ccce
131 5 244 55 4 7 dde7c0af2a03 bf3d9b744927 44844058ccce
132
132
133 Merge keeping directory from parent 1 does not create revlog entry. (Note that
133 Merge keeping directory from parent 1 does not create revlog entry. (Note that
134 dir1's manifest does change, but only because dir1/a's filelog changes.)
134 dir1's manifest does change, but only because dir1/a's filelog changes.)
135
135
136 $ hg co 'desc("add dir2")'
136 $ hg co 'desc("add dir2")'
137 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
137 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
138 $ echo 8 > dir2/a
138 $ echo 8 > dir2/a
139 $ hg ci -m 'modify dir2/a'
139 $ hg ci -m 'modify dir2/a'
140 created new head
140 created new head
141
141
142 $ hg debugindex --dir dir2 > before
142 $ hg debugindex --dir dir2 > before
143 $ hg merge 'desc("modify dir1/a")'
143 $ hg merge 'desc("modify dir1/a")'
144 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
144 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
145 (branch merge, don't forget to commit)
145 (branch merge, don't forget to commit)
146 $ hg revert -r 'desc("modify dir2/a")' .
146 $ hg revert -r 'desc("modify dir2/a")' .
147 reverting dir1/a (glob)
147 reverting dir1/a (glob)
148 $ hg ci -m 'merge, keeping parent 1'
148 $ hg ci -m 'merge, keeping parent 1'
149 $ hg debugindex --dir dir2 > after
149 $ hg debugindex --dir dir2 > after
150 $ diff before after
150 $ diff before after
151 $ rm before after
151 $ rm before after
152
152
153 Merge keeping directory from parent 2 does not create revlog entry. (Note that
153 Merge keeping directory from parent 2 does not create revlog entry. (Note that
154 dir2's manifest does change, but only because dir2/a's filelog changes.)
154 dir2's manifest does change, but only because dir2/a's filelog changes.)
155
155
156 $ hg co 'desc("modify dir2/a")'
156 $ hg co 'desc("modify dir2/a")'
157 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
157 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
158 $ hg debugindex --dir dir1 > before
158 $ hg debugindex --dir dir1 > before
159 $ hg merge 'desc("modify dir1/a")'
159 $ hg merge 'desc("modify dir1/a")'
160 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
160 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
161 (branch merge, don't forget to commit)
161 (branch merge, don't forget to commit)
162 $ hg revert -r 'desc("modify dir1/a")' .
162 $ hg revert -r 'desc("modify dir1/a")' .
163 reverting dir2/a (glob)
163 reverting dir2/a (glob)
164 $ hg ci -m 'merge, keeping parent 2'
164 $ hg ci -m 'merge, keeping parent 2'
165 created new head
165 created new head
166 $ hg debugindex --dir dir1 > after
166 $ hg debugindex --dir dir1 > after
167 $ diff before after
167 $ diff before after
168 $ rm before after
168 $ rm before after
169
169
170 Create flat source repo for tests with mixed flat/tree manifests
170 Create flat source repo for tests with mixed flat/tree manifests
171
171
172 $ cd ..
172 $ cd ..
173 $ hg init repo-flat
173 $ hg init repo-flat
174 $ cd repo-flat
174 $ cd repo-flat
175
175
176 Create a few commits with flat manifest
176 Create a few commits with flat manifest
177
177
178 $ echo 0 > a
178 $ echo 0 > a
179 $ echo 0 > b
179 $ echo 0 > b
180 $ echo 0 > e
180 $ echo 0 > e
181 $ for d in dir1 dir1/dir1 dir1/dir2 dir2
181 $ for d in dir1 dir1/dir1 dir1/dir2 dir2
182 > do
182 > do
183 > mkdir $d
183 > mkdir $d
184 > echo 0 > $d/a
184 > echo 0 > $d/a
185 > echo 0 > $d/b
185 > echo 0 > $d/b
186 > done
186 > done
187 $ hg ci -Aqm initial
187 $ hg ci -Aqm initial
188
188
189 $ echo 1 > a
189 $ echo 1 > a
190 $ echo 1 > dir1/a
190 $ echo 1 > dir1/a
191 $ echo 1 > dir1/dir1/a
191 $ echo 1 > dir1/dir1/a
192 $ hg ci -Aqm 'modify on branch 1'
192 $ hg ci -Aqm 'modify on branch 1'
193
193
194 $ hg co 0
194 $ hg co 0
195 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
195 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
196 $ echo 2 > b
196 $ echo 2 > b
197 $ echo 2 > dir1/b
197 $ echo 2 > dir1/b
198 $ echo 2 > dir1/dir1/b
198 $ echo 2 > dir1/dir1/b
199 $ hg ci -Aqm 'modify on branch 2'
199 $ hg ci -Aqm 'modify on branch 2'
200
200
201 $ hg merge 1
201 $ hg merge 1
202 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
202 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
203 (branch merge, don't forget to commit)
203 (branch merge, don't forget to commit)
204 $ hg ci -m 'merge of flat manifests to new flat manifest'
204 $ hg ci -m 'merge of flat manifests to new flat manifest'
205
205
206 Create clone with tree manifests enabled
206 Create clone with tree manifests enabled
207
207
208 $ cd ..
208 $ cd ..
209 $ hg clone --pull --config experimental.treemanifest=1 repo-flat repo-mixed
209 $ hg clone --pull --config experimental.treemanifest=1 repo-flat repo-mixed
210 requesting all changes
210 requesting all changes
211 adding changesets
211 adding changesets
212 adding manifests
212 adding manifests
213 adding file changes
213 adding file changes
214 added 4 changesets with 17 changes to 11 files
214 added 4 changesets with 17 changes to 11 files
215 updating to branch default
215 updating to branch default
216 11 files updated, 0 files merged, 0 files removed, 0 files unresolved
216 11 files updated, 0 files merged, 0 files removed, 0 files unresolved
217 $ cd repo-mixed
217 $ cd repo-mixed
218 $ test -f .hg/store/meta
218 $ test -f .hg/store/meta
219 [1]
219 [1]
220 $ grep treemanifest .hg/requires
220 $ grep treemanifest .hg/requires
221 treemanifest
221 treemanifest
222
222
223 Commit should store revlog per directory
223 Commit should store revlog per directory
224
224
225 $ hg co 1
225 $ hg co 1
226 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
226 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
227 $ echo 3 > a
227 $ echo 3 > a
228 $ echo 3 > dir1/a
228 $ echo 3 > dir1/a
229 $ echo 3 > dir1/dir1/a
229 $ echo 3 > dir1/dir1/a
230 $ hg ci -m 'first tree'
230 $ hg ci -m 'first tree'
231 created new head
231 created new head
232 $ find .hg/store/meta | sort
232 $ find .hg/store/meta | sort
233 .hg/store/meta
233 .hg/store/meta
234 .hg/store/meta/dir1
234 .hg/store/meta/dir1
235 .hg/store/meta/dir1/00manifest.i
235 .hg/store/meta/dir1/00manifest.i
236 .hg/store/meta/dir1/dir1
236 .hg/store/meta/dir1/dir1
237 .hg/store/meta/dir1/dir1/00manifest.i
237 .hg/store/meta/dir1/dir1/00manifest.i
238 .hg/store/meta/dir1/dir2
238 .hg/store/meta/dir1/dir2
239 .hg/store/meta/dir1/dir2/00manifest.i
239 .hg/store/meta/dir1/dir2/00manifest.i
240 .hg/store/meta/dir2
240 .hg/store/meta/dir2
241 .hg/store/meta/dir2/00manifest.i
241 .hg/store/meta/dir2/00manifest.i
242
242
243 Merge of two trees
243 Merge of two trees
244
244
245 $ hg co 2
245 $ hg co 2
246 6 files updated, 0 files merged, 0 files removed, 0 files unresolved
246 6 files updated, 0 files merged, 0 files removed, 0 files unresolved
247 $ hg merge 1
247 $ hg merge 1
248 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
248 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
249 (branch merge, don't forget to commit)
249 (branch merge, don't forget to commit)
250 $ hg ci -m 'merge of flat manifests to new tree manifest'
250 $ hg ci -m 'merge of flat manifests to new tree manifest'
251 created new head
251 created new head
252 $ hg diff -r 3
252 $ hg diff -r 3
253
253
254 Parent of tree root manifest should be flat manifest, and two for merge
254 Parent of tree root manifest should be flat manifest, and two for merge
255
255
256 $ hg debugindex -m
256 $ hg debugindex -m
257 rev offset length delta linkrev nodeid p1 p2
257 rev offset length delta linkrev nodeid p1 p2
258 0 0 80 -1 0 40536115ed9e 000000000000 000000000000
258 0 0 80 -1 0 40536115ed9e 000000000000 000000000000
259 1 80 83 0 1 f3376063c255 40536115ed9e 000000000000
259 1 80 83 0 1 f3376063c255 40536115ed9e 000000000000
260 2 163 89 0 2 5d9b9da231a2 40536115ed9e 000000000000
260 2 163 89 0 2 5d9b9da231a2 40536115ed9e 000000000000
261 3 252 83 2 3 d17d663cbd8a 5d9b9da231a2 f3376063c255
261 3 252 83 2 3 d17d663cbd8a 5d9b9da231a2 f3376063c255
262 4 335 124 1 4 51e32a8c60ee f3376063c255 000000000000
262 4 335 124 1 4 51e32a8c60ee f3376063c255 000000000000
263 5 459 126 2 5 cc5baa78b230 5d9b9da231a2 f3376063c255
263 5 459 126 2 5 cc5baa78b230 5d9b9da231a2 f3376063c255
264
264
265
265
266 Status across flat/tree boundary should work
266 Status across flat/tree boundary should work
267
267
268 $ hg status --rev '.^' --rev .
268 $ hg status --rev '.^' --rev .
269 M a
269 M a
270 M dir1/a
270 M dir1/a
271 M dir1/dir1/a
271 M dir1/dir1/a
272
272
273
273
274 Turning off treemanifest config has no effect
274 Turning off treemanifest config has no effect
275
275
276 $ hg debugindex .hg/store/meta/dir1/00manifest.i
276 $ hg debugindex .hg/store/meta/dir1/00manifest.i
277 rev offset length delta linkrev nodeid p1 p2
277 rev offset length delta linkrev nodeid p1 p2
278 0 0 127 -1 4 064927a0648a 000000000000 000000000000
278 0 0 127 -1 4 064927a0648a 000000000000 000000000000
279 1 127 111 0 5 25ecb8cb8618 000000000000 000000000000
279 1 127 111 0 5 25ecb8cb8618 000000000000 000000000000
280 $ echo 2 > dir1/a
280 $ echo 2 > dir1/a
281 $ hg --config experimental.treemanifest=False ci -qm 'modify dir1/a'
281 $ hg --config experimental.treemanifest=False ci -qm 'modify dir1/a'
282 $ hg debugindex .hg/store/meta/dir1/00manifest.i
282 $ hg debugindex .hg/store/meta/dir1/00manifest.i
283 rev offset length delta linkrev nodeid p1 p2
283 rev offset length delta linkrev nodeid p1 p2
284 0 0 127 -1 4 064927a0648a 000000000000 000000000000
284 0 0 127 -1 4 064927a0648a 000000000000 000000000000
285 1 127 111 0 5 25ecb8cb8618 000000000000 000000000000
285 1 127 111 0 5 25ecb8cb8618 000000000000 000000000000
286 2 238 55 1 6 5b16163a30c6 25ecb8cb8618 000000000000
286 2 238 55 1 6 5b16163a30c6 25ecb8cb8618 000000000000
287
287
288 Stripping and recovering changes should work
288 Stripping and recovering changes should work
289
289
290 $ hg st --change tip
290 $ hg st --change tip
291 M dir1/a
291 M dir1/a
292 $ hg --config extensions.strip= strip tip
292 $ hg --config extensions.strip= strip tip
293 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
293 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
294 saved backup bundle to $TESTTMP/repo-mixed/.hg/strip-backup/51cfd7b1e13b-78a2f3ed-backup.hg (glob)
294 saved backup bundle to $TESTTMP/repo-mixed/.hg/strip-backup/51cfd7b1e13b-78a2f3ed-backup.hg (glob)
295 $ hg unbundle -q .hg/strip-backup/*
295 $ hg unbundle -q .hg/strip-backup/*
296 $ hg st --change tip
296 $ hg st --change tip
297 M dir1/a
297 M dir1/a
298
298
299 Shelving and unshelving should work
300
301 $ echo foo >> dir1/a
302 $ hg --config extensions.shelve= shelve
303 shelved as default
304 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
305 $ hg --config extensions.shelve= unshelve
306 unshelving change 'default'
307 $ hg diff --nodates
308 diff -r 708a273da119 dir1/a
309 --- a/dir1/a
310 +++ b/dir1/a
311 @@ -1,1 +1,2 @@
312 1
313 +foo
314
299 Create deeper repo with tree manifests.
315 Create deeper repo with tree manifests.
300
316
301 $ cd ..
317 $ cd ..
302 $ hg --config experimental.treemanifest=True init deeprepo
318 $ hg --config experimental.treemanifest=True init deeprepo
303 $ cd deeprepo
319 $ cd deeprepo
304
320
305 $ mkdir a
321 $ mkdir a
306 $ mkdir b
322 $ mkdir b
307 $ mkdir b/bar
323 $ mkdir b/bar
308 $ mkdir b/bar/orange
324 $ mkdir b/bar/orange
309 $ mkdir b/bar/orange/fly
325 $ mkdir b/bar/orange/fly
310 $ mkdir b/foo
326 $ mkdir b/foo
311 $ mkdir b/foo/apple
327 $ mkdir b/foo/apple
312 $ mkdir b/foo/apple/bees
328 $ mkdir b/foo/apple/bees
313
329
314 $ touch a/one.txt
330 $ touch a/one.txt
315 $ touch a/two.txt
331 $ touch a/two.txt
316 $ touch b/bar/fruits.txt
332 $ touch b/bar/fruits.txt
317 $ touch b/bar/orange/fly/gnat.py
333 $ touch b/bar/orange/fly/gnat.py
318 $ touch b/bar/orange/fly/housefly.txt
334 $ touch b/bar/orange/fly/housefly.txt
319 $ touch b/foo/apple/bees/flower.py
335 $ touch b/foo/apple/bees/flower.py
320 $ touch c.txt
336 $ touch c.txt
321 $ touch d.py
337 $ touch d.py
322
338
323 $ hg ci -Aqm 'initial'
339 $ hg ci -Aqm 'initial'
324
340
325 We'll see that visitdir works by removing some treemanifest revlogs and running
341 We'll see that visitdir works by removing some treemanifest revlogs and running
326 the files command with various parameters.
342 the files command with various parameters.
327
343
328 Test files from the root.
344 Test files from the root.
329
345
330 $ hg files -r .
346 $ hg files -r .
331 a/one.txt (glob)
347 a/one.txt (glob)
332 a/two.txt (glob)
348 a/two.txt (glob)
333 b/bar/fruits.txt (glob)
349 b/bar/fruits.txt (glob)
334 b/bar/orange/fly/gnat.py (glob)
350 b/bar/orange/fly/gnat.py (glob)
335 b/bar/orange/fly/housefly.txt (glob)
351 b/bar/orange/fly/housefly.txt (glob)
336 b/foo/apple/bees/flower.py (glob)
352 b/foo/apple/bees/flower.py (glob)
337 c.txt
353 c.txt
338 d.py
354 d.py
339
355
340 Excludes with a glob should not exclude everything from the glob's root
356 Excludes with a glob should not exclude everything from the glob's root
341
357
342 $ hg files -r . -X 'b/fo?' b
358 $ hg files -r . -X 'b/fo?' b
343 b/bar/fruits.txt (glob)
359 b/bar/fruits.txt (glob)
344 b/bar/orange/fly/gnat.py (glob)
360 b/bar/orange/fly/gnat.py (glob)
345 b/bar/orange/fly/housefly.txt (glob)
361 b/bar/orange/fly/housefly.txt (glob)
346
362
347 Test files for a subdirectory.
363 Test files for a subdirectory.
348
364
349 $ mv .hg/store/meta/a oldmf
365 $ mv .hg/store/meta/a oldmf
350 $ hg files -r . b
366 $ hg files -r . b
351 b/bar/fruits.txt (glob)
367 b/bar/fruits.txt (glob)
352 b/bar/orange/fly/gnat.py (glob)
368 b/bar/orange/fly/gnat.py (glob)
353 b/bar/orange/fly/housefly.txt (glob)
369 b/bar/orange/fly/housefly.txt (glob)
354 b/foo/apple/bees/flower.py (glob)
370 b/foo/apple/bees/flower.py (glob)
355 $ mv oldmf .hg/store/meta/a
371 $ mv oldmf .hg/store/meta/a
356
372
357 Test files with just includes and excludes.
373 Test files with just includes and excludes.
358
374
359 $ mv .hg/store/meta/a oldmf
375 $ mv .hg/store/meta/a oldmf
360 $ mv .hg/store/meta/b/bar/orange/fly oldmf2
376 $ mv .hg/store/meta/b/bar/orange/fly oldmf2
361 $ mv .hg/store/meta/b/foo/apple/bees oldmf3
377 $ mv .hg/store/meta/b/foo/apple/bees oldmf3
362 $ hg files -r . -I path:b/bar -X path:b/bar/orange/fly -I path:b/foo -X path:b/foo/apple/bees
378 $ hg files -r . -I path:b/bar -X path:b/bar/orange/fly -I path:b/foo -X path:b/foo/apple/bees
363 b/bar/fruits.txt (glob)
379 b/bar/fruits.txt (glob)
364 $ mv oldmf .hg/store/meta/a
380 $ mv oldmf .hg/store/meta/a
365 $ mv oldmf2 .hg/store/meta/b/bar/orange/fly
381 $ mv oldmf2 .hg/store/meta/b/bar/orange/fly
366 $ mv oldmf3 .hg/store/meta/b/foo/apple/bees
382 $ mv oldmf3 .hg/store/meta/b/foo/apple/bees
367
383
368 Test files for a subdirectory, excluding a directory within it.
384 Test files for a subdirectory, excluding a directory within it.
369
385
370 $ mv .hg/store/meta/a oldmf
386 $ mv .hg/store/meta/a oldmf
371 $ mv .hg/store/meta/b/foo oldmf2
387 $ mv .hg/store/meta/b/foo oldmf2
372 $ hg files -r . -X path:b/foo b
388 $ hg files -r . -X path:b/foo b
373 b/bar/fruits.txt (glob)
389 b/bar/fruits.txt (glob)
374 b/bar/orange/fly/gnat.py (glob)
390 b/bar/orange/fly/gnat.py (glob)
375 b/bar/orange/fly/housefly.txt (glob)
391 b/bar/orange/fly/housefly.txt (glob)
376 $ mv oldmf .hg/store/meta/a
392 $ mv oldmf .hg/store/meta/a
377 $ mv oldmf2 .hg/store/meta/b/foo
393 $ mv oldmf2 .hg/store/meta/b/foo
378
394
379 Test files for a sub directory, including only a directory within it, and
395 Test files for a sub directory, including only a directory within it, and
380 including an unrelated directory.
396 including an unrelated directory.
381
397
382 $ mv .hg/store/meta/a oldmf
398 $ mv .hg/store/meta/a oldmf
383 $ mv .hg/store/meta/b/foo oldmf2
399 $ mv .hg/store/meta/b/foo oldmf2
384 $ hg files -r . -I path:b/bar/orange -I path:a b
400 $ hg files -r . -I path:b/bar/orange -I path:a b
385 b/bar/orange/fly/gnat.py (glob)
401 b/bar/orange/fly/gnat.py (glob)
386 b/bar/orange/fly/housefly.txt (glob)
402 b/bar/orange/fly/housefly.txt (glob)
387 $ mv oldmf .hg/store/meta/a
403 $ mv oldmf .hg/store/meta/a
388 $ mv oldmf2 .hg/store/meta/b/foo
404 $ mv oldmf2 .hg/store/meta/b/foo
389
405
390 Test files for a pattern, including a directory, and excluding a directory
406 Test files for a pattern, including a directory, and excluding a directory
391 within that.
407 within that.
392
408
393 $ mv .hg/store/meta/a oldmf
409 $ mv .hg/store/meta/a oldmf
394 $ mv .hg/store/meta/b/foo oldmf2
410 $ mv .hg/store/meta/b/foo oldmf2
395 $ mv .hg/store/meta/b/bar/orange oldmf3
411 $ mv .hg/store/meta/b/bar/orange oldmf3
396 $ hg files -r . glob:**.txt -I path:b/bar -X path:b/bar/orange
412 $ hg files -r . glob:**.txt -I path:b/bar -X path:b/bar/orange
397 b/bar/fruits.txt (glob)
413 b/bar/fruits.txt (glob)
398 $ mv oldmf .hg/store/meta/a
414 $ mv oldmf .hg/store/meta/a
399 $ mv oldmf2 .hg/store/meta/b/foo
415 $ mv oldmf2 .hg/store/meta/b/foo
400 $ mv oldmf3 .hg/store/meta/b/bar/orange
416 $ mv oldmf3 .hg/store/meta/b/bar/orange
401
417
402 Add some more changes to the deep repo
418 Add some more changes to the deep repo
403 $ echo narf >> b/bar/fruits.txt
419 $ echo narf >> b/bar/fruits.txt
404 $ hg ci -m narf
420 $ hg ci -m narf
405 $ echo troz >> b/bar/orange/fly/gnat.py
421 $ echo troz >> b/bar/orange/fly/gnat.py
406 $ hg ci -m troz
422 $ hg ci -m troz
407
423
408 Test cloning a treemanifest repo over http.
424 Test cloning a treemanifest repo over http.
409 $ hg serve -p $HGPORT -d --pid-file=hg.pid --errorlog=errors.log
425 $ hg serve -p $HGPORT -d --pid-file=hg.pid --errorlog=errors.log
410 $ cat hg.pid >> $DAEMON_PIDS
426 $ cat hg.pid >> $DAEMON_PIDS
411 $ cd ..
427 $ cd ..
412 We can clone even with the knob turned off and we'll get a treemanifest repo.
428 We can clone even with the knob turned off and we'll get a treemanifest repo.
413 $ hg clone --config experimental.treemanifest=False \
429 $ hg clone --config experimental.treemanifest=False \
414 > --config experimental.changegroup3=True \
430 > --config experimental.changegroup3=True \
415 > http://localhost:$HGPORT deepclone
431 > http://localhost:$HGPORT deepclone
416 requesting all changes
432 requesting all changes
417 adding changesets
433 adding changesets
418 adding manifests
434 adding manifests
419 adding file changes
435 adding file changes
420 added 3 changesets with 10 changes to 8 files
436 added 3 changesets with 10 changes to 8 files
421 updating to branch default
437 updating to branch default
422 8 files updated, 0 files merged, 0 files removed, 0 files unresolved
438 8 files updated, 0 files merged, 0 files removed, 0 files unresolved
423 No server errors.
439 No server errors.
424 $ cat deeprepo/errors.log
440 $ cat deeprepo/errors.log
425 requires got updated to include treemanifest
441 requires got updated to include treemanifest
426 $ cat deepclone/.hg/requires | grep treemanifest
442 $ cat deepclone/.hg/requires | grep treemanifest
427 treemanifest
443 treemanifest
428 Tree manifest revlogs exist.
444 Tree manifest revlogs exist.
429 $ find deepclone/.hg/store/meta | sort
445 $ find deepclone/.hg/store/meta | sort
430 deepclone/.hg/store/meta
446 deepclone/.hg/store/meta
431 deepclone/.hg/store/meta/a
447 deepclone/.hg/store/meta/a
432 deepclone/.hg/store/meta/a/00manifest.i
448 deepclone/.hg/store/meta/a/00manifest.i
433 deepclone/.hg/store/meta/b
449 deepclone/.hg/store/meta/b
434 deepclone/.hg/store/meta/b/00manifest.i
450 deepclone/.hg/store/meta/b/00manifest.i
435 deepclone/.hg/store/meta/b/bar
451 deepclone/.hg/store/meta/b/bar
436 deepclone/.hg/store/meta/b/bar/00manifest.i
452 deepclone/.hg/store/meta/b/bar/00manifest.i
437 deepclone/.hg/store/meta/b/bar/orange
453 deepclone/.hg/store/meta/b/bar/orange
438 deepclone/.hg/store/meta/b/bar/orange/00manifest.i
454 deepclone/.hg/store/meta/b/bar/orange/00manifest.i
439 deepclone/.hg/store/meta/b/bar/orange/fly
455 deepclone/.hg/store/meta/b/bar/orange/fly
440 deepclone/.hg/store/meta/b/bar/orange/fly/00manifest.i
456 deepclone/.hg/store/meta/b/bar/orange/fly/00manifest.i
441 deepclone/.hg/store/meta/b/foo
457 deepclone/.hg/store/meta/b/foo
442 deepclone/.hg/store/meta/b/foo/00manifest.i
458 deepclone/.hg/store/meta/b/foo/00manifest.i
443 deepclone/.hg/store/meta/b/foo/apple
459 deepclone/.hg/store/meta/b/foo/apple
444 deepclone/.hg/store/meta/b/foo/apple/00manifest.i
460 deepclone/.hg/store/meta/b/foo/apple/00manifest.i
445 deepclone/.hg/store/meta/b/foo/apple/bees
461 deepclone/.hg/store/meta/b/foo/apple/bees
446 deepclone/.hg/store/meta/b/foo/apple/bees/00manifest.i
462 deepclone/.hg/store/meta/b/foo/apple/bees/00manifest.i
447 Verify passes.
463 Verify passes.
448 $ cd deepclone
464 $ cd deepclone
449 $ hg verify
465 $ hg verify
450 checking changesets
466 checking changesets
451 checking manifests
467 checking manifests
452 crosschecking files in changesets and manifests
468 crosschecking files in changesets and manifests
453 checking files
469 checking files
454 8 files, 3 changesets, 10 total revisions
470 8 files, 3 changesets, 10 total revisions
455 $ cd ..
471 $ cd ..
General Comments 0
You need to be logged in to leave comments. Login now