localrepo: use the path relative to "self.vfs" instead of "path" argument...
FUJIWARA Katsunori
r17159:36a30168 default
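
In short, a minimal sketch abridged from the localrepo.py hunk below (surrounding setup omitted): localrepository.__init__ derives self.root from the working-directory vfs (self.wvfs.base), so repository creation now checks and creates that normalized path instead of the raw "path" argument, keeping the directory operations consistent with the paths the repository object actually uses.

    # abridged from localrepo.__init__ below; "path" is the caller-supplied
    # argument, while self.root comes from the vfs (self.wvfs.base)
    if not os.path.isdir(self.path):
        if create:
            if not os.path.exists(self.root):   # was: os.path.exists(path)
                util.makedirs(self.root)        # was: util.makedirs(path)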
--- a/mercurial/hg.py
+++ b/mercurial/hg.py
@@ -1,586 +1,588 @@
 # hg.py - repository classes for mercurial
 #
 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 from i18n import _
 from lock import release
 from node import hex, nullid
 import localrepo, bundlerepo, httprepo, sshrepo, statichttprepo, bookmarks
 import lock, util, extensions, error, node, scmutil
 import cmdutil, discovery
 import merge as mergemod
 import verify as verifymod
 import errno, os, shutil

 def _local(path):
     path = util.expandpath(util.urllocalpath(path))
     return (os.path.isfile(path) and bundlerepo or localrepo)

 def addbranchrevs(lrepo, repo, branches, revs):
     hashbranch, branches = branches
     if not hashbranch and not branches:
         return revs or None, revs and revs[0] or None
     revs = revs and list(revs) or []
     if not repo.capable('branchmap'):
         if branches:
             raise util.Abort(_("remote branch lookup not supported"))
         revs.append(hashbranch)
         return revs, revs[0]
     branchmap = repo.branchmap()

     def primary(branch):
         if branch == '.':
             if not lrepo or not lrepo.local():
                 raise util.Abort(_("dirstate branch not accessible"))
             branch = lrepo.dirstate.branch()
         if branch in branchmap:
             revs.extend(node.hex(r) for r in reversed(branchmap[branch]))
             return True
         else:
             return False

     for branch in branches:
         if not primary(branch):
             raise error.RepoLookupError(_("unknown branch '%s'") % branch)
     if hashbranch:
         if not primary(hashbranch):
             revs.append(hashbranch)
     return revs, revs[0]

 def parseurl(path, branches=None):
     '''parse url#branch, returning (url, (branch, branches))'''

     u = util.url(path)
     branch = None
     if u.fragment:
         branch = u.fragment
         u.fragment = None
     return str(u), (branch, branches or [])

 schemes = {
     'bundle': bundlerepo,
     'file': _local,
     'http': httprepo,
     'https': httprepo,
     'ssh': sshrepo,
     'static-http': statichttprepo,
 }

 def _peerlookup(path):
     u = util.url(path)
     scheme = u.scheme or 'file'
     thing = schemes.get(scheme) or schemes['file']
     try:
         return thing(path)
     except TypeError:
         return thing

 def islocal(repo):
     '''return true if repo or path is local'''
     if isinstance(repo, str):
         try:
             return _peerlookup(repo).islocal(repo)
         except AttributeError:
             return False
     return repo.local()

 def repository(ui, path='', create=False):
     """return a repository object for the specified path"""
     repo = _peerlookup(path).instance(ui, path, create)
     ui = getattr(repo, "ui", ui)
     for name, module in extensions.extensions():
         hook = getattr(module, 'reposetup', None)
         if hook:
             hook(ui, repo)
     return repo

 def peer(uiorrepo, opts, path, create=False):
     '''return a repository peer for the specified path'''
     rui = remoteui(uiorrepo, opts)
     return repository(rui, path, create)

 def defaultdest(source):
     '''return default destination of clone if none is given'''
     return os.path.basename(os.path.normpath(source))

 def share(ui, source, dest=None, update=True):
     '''create a shared repository'''

     if not islocal(source):
         raise util.Abort(_('can only share local repositories'))

     if not dest:
         dest = defaultdest(source)
     else:
         dest = ui.expandpath(dest)

     if isinstance(source, str):
         origsource = ui.expandpath(source)
         source, branches = parseurl(origsource)
         srcrepo = repository(ui, source)
         rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
     else:
         srcrepo = source
         origsource = source = srcrepo.url()
         checkout = None

     sharedpath = srcrepo.sharedpath # if our source is already sharing

     root = os.path.realpath(dest)
     roothg = os.path.join(root, '.hg')

     if os.path.exists(roothg):
         raise util.Abort(_('destination already exists'))

     if not os.path.isdir(root):
         os.mkdir(root)
     util.makedir(roothg, notindexed=True)

     requirements = ''
     try:
         requirements = srcrepo.opener.read('requires')
     except IOError, inst:
         if inst.errno != errno.ENOENT:
             raise

     requirements += 'shared\n'
     util.writefile(os.path.join(roothg, 'requires'), requirements)
     util.writefile(os.path.join(roothg, 'sharedpath'), sharedpath)

     r = repository(ui, root)

     default = srcrepo.ui.config('paths', 'default')
     if default:
         fp = r.opener("hgrc", "w", text=True)
         fp.write("[paths]\n")
         fp.write("default = %s\n" % default)
         fp.close()

     if update:
         r.ui.status(_("updating working directory\n"))
         if update is not True:
             checkout = update
         for test in (checkout, 'default', 'tip'):
             if test is None:
                 continue
             try:
                 uprev = r.lookup(test)
                 break
             except error.RepoLookupError:
                 continue
         _update(r, uprev)

 def copystore(ui, srcrepo, destpath):
     '''copy files from store of srcrepo in destpath

     returns destlock
     '''
     destlock = None
     try:
         hardlink = None
         num = 0
         srcpublishing = srcrepo.ui.configbool('phases', 'publish', True)
         for f in srcrepo.store.copylist():
             if srcpublishing and f.endswith('phaseroots'):
                 continue
             src = os.path.join(srcrepo.sharedpath, f)
             dst = os.path.join(destpath, f)
             dstbase = os.path.dirname(dst)
             if dstbase and not os.path.exists(dstbase):
                 os.mkdir(dstbase)
             if os.path.exists(src):
                 if dst.endswith('data'):
                     # lock to avoid premature writing to the target
                     destlock = lock.lock(os.path.join(dstbase, "lock"))
                 hardlink, n = util.copyfiles(src, dst, hardlink)
                 num += n
         if hardlink:
             ui.debug("linked %d files\n" % num)
         else:
             ui.debug("copied %d files\n" % num)
         return destlock
     except: # re-raises
         release(destlock)
         raise

 def clone(ui, peeropts, source, dest=None, pull=False, rev=None,
           update=True, stream=False, branch=None):
     """Make a copy of an existing repository.

     Create a copy of an existing repository in a new directory. The
     source and destination are URLs, as passed to the repository
     function. Returns a pair of repository objects, the source and
     newly created destination.

     The location of the source is added to the new repository's
     .hg/hgrc file, as the default to be used for future pulls and
     pushes.

     If an exception is raised, the partly cloned/updated destination
     repository will be deleted.

     Arguments:

     source: repository object or URL

     dest: URL of destination repository to create (defaults to base
     name of source repository)

     pull: always pull from source repository, even in local case

     stream: stream raw data uncompressed from repository (fast over
     LAN, slow over WAN)

     rev: revision to clone up to (implies pull=True)

     update: update working directory after clone completes, if
     destination is local repository (True means update to default rev,
     anything else is treated as a revision)

     branch: branches to clone
     """

     if isinstance(source, str):
         origsource = ui.expandpath(source)
         source, branch = parseurl(origsource, branch)
         srcrepo = repository(remoteui(ui, peeropts), source)
     else:
         srcrepo = source
         branch = (None, branch or [])
         origsource = source = srcrepo.url()
     rev, checkout = addbranchrevs(srcrepo, srcrepo, branch, rev)

     if dest is None:
         dest = defaultdest(source)
         ui.status(_("destination directory: %s\n") % dest)
     else:
         dest = ui.expandpath(dest)

     dest = util.urllocalpath(dest)
     source = util.urllocalpath(source)

+    if not dest:
+        raise util.Abort(_("empty destination path is not valid"))
     if os.path.exists(dest):
         if not os.path.isdir(dest):
             raise util.Abort(_("destination '%s' already exists") % dest)
         elif os.listdir(dest):
             raise util.Abort(_("destination '%s' is not empty") % dest)

     class DirCleanup(object):
         def __init__(self, dir_):
             self.rmtree = shutil.rmtree
             self.dir_ = dir_
         def close(self):
             self.dir_ = None
         def cleanup(self):
             if self.dir_:
                 self.rmtree(self.dir_, True)

     srclock = destlock = dircleanup = None
     try:
         abspath = origsource
         if islocal(origsource):
             abspath = os.path.abspath(util.urllocalpath(origsource))

         if islocal(dest):
             dircleanup = DirCleanup(dest)

         copy = False
         if srcrepo.cancopy() and islocal(dest) and not srcrepo.revs("secret()"):
             copy = not pull and not rev

         if copy:
             try:
                 # we use a lock here because if we race with commit, we
                 # can end up with extra data in the cloned revlogs that's
                 # not pointed to by changesets, thus causing verify to
                 # fail
                 srclock = srcrepo.lock(wait=False)
             except error.LockError:
                 copy = False

         if copy:
             srcrepo.hook('preoutgoing', throw=True, source='clone')
             hgdir = os.path.realpath(os.path.join(dest, ".hg"))
             if not os.path.exists(dest):
                 os.mkdir(dest)
             else:
                 # only clean up directories we create ourselves
                 dircleanup.dir_ = hgdir
             try:
                 destpath = hgdir
                 util.makedir(destpath, notindexed=True)
             except OSError, inst:
                 if inst.errno == errno.EEXIST:
                     dircleanup.close()
                     raise util.Abort(_("destination '%s' already exists")
                                      % dest)
                 raise

             destlock = copystore(ui, srcrepo, destpath)

             # we need to re-init the repo after manually copying the data
             # into it
             destrepo = repository(remoteui(ui, peeropts), dest)
             srcrepo.hook('outgoing', source='clone',
                          node=node.hex(node.nullid))
         else:
             try:
                 destrepo = repository(remoteui(ui, peeropts), dest,
                                       create=True)
             except OSError, inst:
                 if inst.errno == errno.EEXIST:
                     dircleanup.close()
                     raise util.Abort(_("destination '%s' already exists")
                                      % dest)
                 raise

         revs = None
         if rev:
             if not srcrepo.capable('lookup'):
                 raise util.Abort(_("src repository does not support "
                                    "revision lookup and so doesn't "
                                    "support clone by revision"))
             revs = [srcrepo.lookup(r) for r in rev]
             checkout = revs[0]
         if destrepo.local():
             destrepo.clone(srcrepo, heads=revs, stream=stream)
         elif srcrepo.local():
             srcrepo.push(destrepo, revs=revs)
         else:
             raise util.Abort(_("clone from remote to remote not supported"))

         if dircleanup:
             dircleanup.close()

         # clone all bookmarks except divergent ones
         if destrepo.local() and srcrepo.capable("pushkey"):
             rb = srcrepo.listkeys('bookmarks')
             for k, n in rb.iteritems():
                 try:
                     m = destrepo.lookup(n)
                     destrepo._bookmarks[k] = m
                 except error.RepoLookupError:
                     pass
             if rb:
                 bookmarks.write(destrepo)
         elif srcrepo.local() and destrepo.capable("pushkey"):
             for k, n in srcrepo._bookmarks.iteritems():
                 destrepo.pushkey('bookmarks', k, '', hex(n))

         if destrepo.local():
             fp = destrepo.opener("hgrc", "w", text=True)
             fp.write("[paths]\n")
             u = util.url(abspath)
             u.passwd = None
             defaulturl = str(u)
             fp.write("default = %s\n" % defaulturl)
             fp.close()

             destrepo.ui.setconfig('paths', 'default', defaulturl)

         if update:
             if update is not True:
                 checkout = update
                 if srcrepo.local():
                     checkout = srcrepo.lookup(update)
             for test in (checkout, 'default', 'tip'):
                 if test is None:
                     continue
                 try:
                     uprev = destrepo.lookup(test)
                     break
                 except error.RepoLookupError:
                     continue
             bn = destrepo[uprev].branch()
             destrepo.ui.status(_("updating to branch %s\n") % bn)
             _update(destrepo, uprev)

         return srcrepo, destrepo
     finally:
         release(srclock, destlock)
         if dircleanup is not None:
             dircleanup.cleanup()
         if srcrepo is not None:
             srcrepo.close()

 def _showstats(repo, stats):
     repo.ui.status(_("%d files updated, %d files merged, "
                      "%d files removed, %d files unresolved\n") % stats)

 def update(repo, node):
     """update the working directory to node, merging linear changes"""
     stats = mergemod.update(repo, node, False, False, None)
     _showstats(repo, stats)
     if stats[3]:
         repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n"))
     return stats[3] > 0

 # naming conflict in clone()
 _update = update

 def clean(repo, node, show_stats=True):
     """forcibly switch the working directory to node, clobbering changes"""
     stats = mergemod.update(repo, node, False, True, None)
     if show_stats:
         _showstats(repo, stats)
     return stats[3] > 0

 def merge(repo, node, force=None, remind=True):
     """Branch merge with node, resolving changes. Return true if any
     unresolved conflicts."""
     stats = mergemod.update(repo, node, True, force, False)
     _showstats(repo, stats)
     if stats[3]:
         repo.ui.status(_("use 'hg resolve' to retry unresolved file merges "
                          "or 'hg update -C .' to abandon\n"))
     elif remind:
         repo.ui.status(_("(branch merge, don't forget to commit)\n"))
     return stats[3] > 0

 def _incoming(displaychlist, subreporecurse, ui, repo, source,
         opts, buffered=False):
     """
     Helper for incoming / gincoming.
     displaychlist gets called with
         (remoterepo, incomingchangesetlist, displayer) parameters,
     and is supposed to contain only code that can't be unified.
     """
     source, branches = parseurl(ui.expandpath(source), opts.get('branch'))
     other = peer(repo, opts, source)
     ui.status(_('comparing with %s\n') % util.hidepassword(source))
     revs, checkout = addbranchrevs(repo, other, branches, opts.get('rev'))

     if revs:
         revs = [other.lookup(rev) for rev in revs]
     other, chlist, cleanupfn = bundlerepo.getremotechanges(ui, repo, other,
                                 revs, opts["bundle"], opts["force"])
     try:
         if not chlist:
             ui.status(_("no changes found\n"))
             return subreporecurse()

         displayer = cmdutil.show_changeset(ui, other, opts, buffered)

         # XXX once graphlog extension makes it into core,
         # should be replaced by a if graph/else
         displaychlist(other, chlist, displayer)

         displayer.close()
     finally:
         cleanupfn()
     subreporecurse()
     return 0 # exit code is zero since we found incoming changes

 def incoming(ui, repo, source, opts):
     def subreporecurse():
         ret = 1
         if opts.get('subrepos'):
             ctx = repo[None]
             for subpath in sorted(ctx.substate):
                 sub = ctx.sub(subpath)
                 ret = min(ret, sub.incoming(ui, source, opts))
         return ret

     def display(other, chlist, displayer):
         limit = cmdutil.loglimit(opts)
         if opts.get('newest_first'):
             chlist.reverse()
         count = 0
         for n in chlist:
             if limit is not None and count >= limit:
                 break
             parents = [p for p in other.changelog.parents(n) if p != nullid]
             if opts.get('no_merges') and len(parents) == 2:
                 continue
             count += 1
             displayer.show(other[n])
     return _incoming(display, subreporecurse, ui, repo, source, opts)

 def _outgoing(ui, repo, dest, opts):
     dest = ui.expandpath(dest or 'default-push', dest or 'default')
     dest, branches = parseurl(dest, opts.get('branch'))
     ui.status(_('comparing with %s\n') % util.hidepassword(dest))
     revs, checkout = addbranchrevs(repo, repo, branches, opts.get('rev'))
     if revs:
         revs = [repo.lookup(rev) for rev in revs]

     other = peer(repo, opts, dest)
     outgoing = discovery.findcommonoutgoing(repo, other, revs,
                                             force=opts.get('force'))
     o = outgoing.missing
     if not o:
         scmutil.nochangesfound(repo.ui, outgoing.excluded)
         return None
     return o

 def outgoing(ui, repo, dest, opts):
     def recurse():
         ret = 1
         if opts.get('subrepos'):
             ctx = repo[None]
             for subpath in sorted(ctx.substate):
                 sub = ctx.sub(subpath)
                 ret = min(ret, sub.outgoing(ui, dest, opts))
         return ret

     limit = cmdutil.loglimit(opts)
     o = _outgoing(ui, repo, dest, opts)
     if o is None:
         return recurse()

     if opts.get('newest_first'):
         o.reverse()
     displayer = cmdutil.show_changeset(ui, repo, opts)
     count = 0
     for n in o:
         if limit is not None and count >= limit:
             break
         parents = [p for p in repo.changelog.parents(n) if p != nullid]
         if opts.get('no_merges') and len(parents) == 2:
             continue
         count += 1
         displayer.show(repo[n])
     displayer.close()
     recurse()
     return 0 # exit code is zero since we found outgoing changes

 def revert(repo, node, choose):
     """revert changes to revision in node without updating dirstate"""
     return mergemod.update(repo, node, False, True, choose)[3] > 0

 def verify(repo):
     """verify the consistency of a repository"""
     return verifymod.verify(repo)

 def remoteui(src, opts):
     'build a remote ui from ui or repo and opts'
     if util.safehasattr(src, 'baseui'): # looks like a repository
         dst = src.baseui.copy() # drop repo-specific config
         src = src.ui # copy target options from repo
     else: # assume it's a global ui object
         dst = src.copy() # keep all global options

     # copy ssh-specific options
     for o in 'ssh', 'remotecmd':
         v = opts.get(o) or src.config('ui', o)
         if v:
             dst.setconfig("ui", o, v)

     # copy bundle-specific options
     r = src.config('bundle', 'mainreporoot')
     if r:
         dst.setconfig('bundle', 'mainreporoot', r)

     # copy selected local settings to the remote ui
     for sect in ('auth', 'hostfingerprints', 'http_proxy'):
         for key, val in src.configitems(sect):
             dst.setconfig(sect, key, val)
     v = src.config('web', 'cacerts')
     if v:
         dst.setconfig('web', 'cacerts', util.expandpath(v))

     return dst
--- a/mercurial/localrepo.py
+++ b/mercurial/localrepo.py
@@ -1,2457 +1,2457 @@
 # localrepo.py - read/write repository class for mercurial
 #
 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 from node import bin, hex, nullid, nullrev, short
 from i18n import _
 import repo, changegroup, subrepo, discovery, pushkey, obsolete
 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
 import lock, transaction, store, encoding, base85
 import scmutil, util, extensions, hook, error, revset
 import match as matchmod
 import merge as mergemod
 import tags as tagsmod
 from lock import release
 import weakref, errno, os, time, inspect
 propertycache = util.propertycache
 filecache = scmutil.filecache

 class storecache(filecache):
     """filecache for files in the store"""
     def join(self, obj, fname):
         return obj.sjoin(fname)

 class localrepository(repo.repository):
     capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
                         'known', 'getbundle'))
     supportedformats = set(('revlogv1', 'generaldelta'))
     supported = supportedformats | set(('store', 'fncache', 'shared',
                                         'dotencode'))
     openerreqs = set(('revlogv1', 'generaldelta'))
     requirements = ['revlogv1']

     def _baserequirements(self, create):
         return self.requirements[:]

     def __init__(self, baseui, path=None, create=False):
         repo.repository.__init__(self)
         self.wopener = scmutil.opener(path, expand=True)
         self.wvfs = self.wopener
         self.root = self.wvfs.base
         self.path = self.wvfs.join(".hg")
         self.origroot = path
         self.auditor = scmutil.pathauditor(self.root, self._checknested)
         self.opener = scmutil.opener(self.path)
         self.vfs = self.opener
         self.baseui = baseui
         self.ui = baseui.copy()
         # A list of callback to shape the phase if no data were found.
         # Callback are in the form: func(repo, roots) --> processed root.
         # This list it to be filled by extension during repo setup
         self._phasedefaults = []

         try:
             self.ui.readconfig(self.join("hgrc"), self.root)
             extensions.loadall(self.ui)
         except IOError:
             pass

         if not os.path.isdir(self.path):
             if create:
-                if not os.path.exists(path):
-                    util.makedirs(path)
+                if not os.path.exists(self.root):
+                    util.makedirs(self.root)
65 util.makedir(self.path, notindexed=True)
65 util.makedir(self.path, notindexed=True)
66 requirements = self._baserequirements(create)
66 requirements = self._baserequirements(create)
67 if self.ui.configbool('format', 'usestore', True):
67 if self.ui.configbool('format', 'usestore', True):
68 os.mkdir(os.path.join(self.path, "store"))
68 os.mkdir(os.path.join(self.path, "store"))
69 requirements.append("store")
69 requirements.append("store")
70 if self.ui.configbool('format', 'usefncache', True):
70 if self.ui.configbool('format', 'usefncache', True):
71 requirements.append("fncache")
71 requirements.append("fncache")
72 if self.ui.configbool('format', 'dotencode', True):
72 if self.ui.configbool('format', 'dotencode', True):
73 requirements.append('dotencode')
73 requirements.append('dotencode')
74 # create an invalid changelog
74 # create an invalid changelog
75 self.opener.append(
75 self.opener.append(
76 "00changelog.i",
76 "00changelog.i",
77 '\0\0\0\2' # represents revlogv2
77 '\0\0\0\2' # represents revlogv2
78 ' dummy changelog to prevent using the old repo layout'
78 ' dummy changelog to prevent using the old repo layout'
79 )
79 )
80 if self.ui.configbool('format', 'generaldelta', False):
80 if self.ui.configbool('format', 'generaldelta', False):
81 requirements.append("generaldelta")
81 requirements.append("generaldelta")
82 requirements = set(requirements)
82 requirements = set(requirements)
83 else:
83 else:
84 raise error.RepoError(_("repository %s not found") % path)
84 raise error.RepoError(_("repository %s not found") % path)
85 elif create:
85 elif create:
86 raise error.RepoError(_("repository %s already exists") % path)
86 raise error.RepoError(_("repository %s already exists") % path)
87 else:
87 else:
88 try:
88 try:
89 requirements = scmutil.readrequires(self.opener, self.supported)
89 requirements = scmutil.readrequires(self.opener, self.supported)
90 except IOError, inst:
90 except IOError, inst:
91 if inst.errno != errno.ENOENT:
91 if inst.errno != errno.ENOENT:
92 raise
92 raise
93 requirements = set()
93 requirements = set()
94
94
95 self.sharedpath = self.path
95 self.sharedpath = self.path
96 try:
96 try:
97 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
97 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
98 if not os.path.exists(s):
98 if not os.path.exists(s):
99 raise error.RepoError(
99 raise error.RepoError(
100 _('.hg/sharedpath points to nonexistent directory %s') % s)
100 _('.hg/sharedpath points to nonexistent directory %s') % s)
101 self.sharedpath = s
101 self.sharedpath = s
102 except IOError, inst:
102 except IOError, inst:
103 if inst.errno != errno.ENOENT:
103 if inst.errno != errno.ENOENT:
104 raise
104 raise
105
105
106 self.store = store.store(requirements, self.sharedpath, scmutil.opener)
106 self.store = store.store(requirements, self.sharedpath, scmutil.opener)
107 self.spath = self.store.path
107 self.spath = self.store.path
108 self.sopener = self.store.opener
108 self.sopener = self.store.opener
109 self.svfs = self.sopener
109 self.svfs = self.sopener
110 self.sjoin = self.store.join
110 self.sjoin = self.store.join
111 self.opener.createmode = self.store.createmode
111 self.opener.createmode = self.store.createmode
112 self._applyrequirements(requirements)
112 self._applyrequirements(requirements)
113 if create:
113 if create:
114 self._writerequirements()
114 self._writerequirements()
115
115
116
116
117 self._branchcache = None
117 self._branchcache = None
118 self._branchcachetip = None
118 self._branchcachetip = None
119 self.filterpats = {}
119 self.filterpats = {}
120 self._datafilters = {}
120 self._datafilters = {}
121 self._transref = self._lockref = self._wlockref = None
121 self._transref = self._lockref = self._wlockref = None
122
122
123 # A cache for various files under .hg/ that tracks file changes,
123 # A cache for various files under .hg/ that tracks file changes,
124 # (used by the filecache decorator)
124 # (used by the filecache decorator)
125 #
125 #
126 # Maps a property name to its util.filecacheentry
126 # Maps a property name to its util.filecacheentry
127 self._filecache = {}
127 self._filecache = {}
128
128
129 def _applyrequirements(self, requirements):
129 def _applyrequirements(self, requirements):
130 self.requirements = requirements
130 self.requirements = requirements
131 self.sopener.options = dict((r, 1) for r in requirements
131 self.sopener.options = dict((r, 1) for r in requirements
132 if r in self.openerreqs)
132 if r in self.openerreqs)
133
133
134 def _writerequirements(self):
134 def _writerequirements(self):
135 reqfile = self.opener("requires", "w")
135 reqfile = self.opener("requires", "w")
136 for r in self.requirements:
136 for r in self.requirements:
137 reqfile.write("%s\n" % r)
137 reqfile.write("%s\n" % r)
138 reqfile.close()
138 reqfile.close()
139
139
140 def _checknested(self, path):
140 def _checknested(self, path):
141 """Determine if path is a legal nested repository."""
141 """Determine if path is a legal nested repository."""
142 if not path.startswith(self.root):
142 if not path.startswith(self.root):
143 return False
143 return False
144 subpath = path[len(self.root) + 1:]
144 subpath = path[len(self.root) + 1:]
145 normsubpath = util.pconvert(subpath)
145 normsubpath = util.pconvert(subpath)
146
146
147 # XXX: Checking against the current working copy is wrong in
147 # XXX: Checking against the current working copy is wrong in
148 # the sense that it can reject things like
148 # the sense that it can reject things like
149 #
149 #
150 # $ hg cat -r 10 sub/x.txt
150 # $ hg cat -r 10 sub/x.txt
151 #
151 #
152 # if sub/ is no longer a subrepository in the working copy
152 # if sub/ is no longer a subrepository in the working copy
153 # parent revision.
153 # parent revision.
154 #
154 #
155 # However, it can of course also allow things that would have
155 # However, it can of course also allow things that would have
156 # been rejected before, such as the above cat command if sub/
156 # been rejected before, such as the above cat command if sub/
157 # is a subrepository now, but was a normal directory before.
157 # is a subrepository now, but was a normal directory before.
158 # The old path auditor would have rejected by mistake since it
158 # The old path auditor would have rejected by mistake since it
159 # panics when it sees sub/.hg/.
159 # panics when it sees sub/.hg/.
160 #
160 #
161 # All in all, checking against the working copy seems sensible
161 # All in all, checking against the working copy seems sensible
162 # since we want to prevent access to nested repositories on
162 # since we want to prevent access to nested repositories on
163 # the filesystem *now*.
163 # the filesystem *now*.
164 ctx = self[None]
164 ctx = self[None]
165 parts = util.splitpath(subpath)
165 parts = util.splitpath(subpath)
166 while parts:
166 while parts:
167 prefix = '/'.join(parts)
167 prefix = '/'.join(parts)
168 if prefix in ctx.substate:
168 if prefix in ctx.substate:
169 if prefix == normsubpath:
169 if prefix == normsubpath:
170 return True
170 return True
171 else:
171 else:
172 sub = ctx.sub(prefix)
172 sub = ctx.sub(prefix)
173 return sub.checknested(subpath[len(prefix) + 1:])
173 return sub.checknested(subpath[len(prefix) + 1:])
174 else:
174 else:
175 parts.pop()
175 parts.pop()
176 return False
176 return False
177
177
178 @filecache('bookmarks')
178 @filecache('bookmarks')
179 def _bookmarks(self):
179 def _bookmarks(self):
180 return bookmarks.read(self)
180 return bookmarks.read(self)
181
181
182 @filecache('bookmarks.current')
182 @filecache('bookmarks.current')
183 def _bookmarkcurrent(self):
183 def _bookmarkcurrent(self):
184 return bookmarks.readcurrent(self)
184 return bookmarks.readcurrent(self)
185
185
186 def _writebookmarks(self, marks):
186 def _writebookmarks(self, marks):
187 bookmarks.write(self)
187 bookmarks.write(self)
188
188
189 def bookmarkheads(self, bookmark):
189 def bookmarkheads(self, bookmark):
190 name = bookmark.split('@', 1)[0]
190 name = bookmark.split('@', 1)[0]
191 heads = []
191 heads = []
192 for mark, n in self._bookmarks.iteritems():
192 for mark, n in self._bookmarks.iteritems():
193 if mark.split('@', 1)[0] == name:
193 if mark.split('@', 1)[0] == name:
194 heads.append(n)
194 heads.append(n)
195 return heads
195 return heads
196
196
197 @storecache('phaseroots')
197 @storecache('phaseroots')
198 def _phasecache(self):
198 def _phasecache(self):
199 return phases.phasecache(self, self._phasedefaults)
199 return phases.phasecache(self, self._phasedefaults)
200
200
201 @storecache('obsstore')
201 @storecache('obsstore')
202 def obsstore(self):
202 def obsstore(self):
203 store = obsolete.obsstore(self.sopener)
203 store = obsolete.obsstore(self.sopener)
204 return store
204 return store
205
205
206 @storecache('00changelog.i')
206 @storecache('00changelog.i')
207 def changelog(self):
207 def changelog(self):
208 c = changelog.changelog(self.sopener)
208 c = changelog.changelog(self.sopener)
209 if 'HG_PENDING' in os.environ:
209 if 'HG_PENDING' in os.environ:
210 p = os.environ['HG_PENDING']
210 p = os.environ['HG_PENDING']
211 if p.startswith(self.root):
211 if p.startswith(self.root):
212 c.readpending('00changelog.i.a')
212 c.readpending('00changelog.i.a')
213 return c
213 return c
214
214
215 @storecache('00manifest.i')
215 @storecache('00manifest.i')
216 def manifest(self):
216 def manifest(self):
217 return manifest.manifest(self.sopener)
217 return manifest.manifest(self.sopener)
218
218
219 @filecache('dirstate')
219 @filecache('dirstate')
220 def dirstate(self):
220 def dirstate(self):
221 warned = [0]
221 warned = [0]
222 def validate(node):
222 def validate(node):
223 try:
223 try:
224 self.changelog.rev(node)
224 self.changelog.rev(node)
225 return node
225 return node
226 except error.LookupError:
226 except error.LookupError:
227 if not warned[0]:
227 if not warned[0]:
228 warned[0] = True
228 warned[0] = True
229 self.ui.warn(_("warning: ignoring unknown"
229 self.ui.warn(_("warning: ignoring unknown"
230 " working parent %s!\n") % short(node))
230 " working parent %s!\n") % short(node))
231 return nullid
231 return nullid
232
232
233 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
233 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
234
234
235 def __getitem__(self, changeid):
235 def __getitem__(self, changeid):
236 if changeid is None:
236 if changeid is None:
237 return context.workingctx(self)
237 return context.workingctx(self)
238 return context.changectx(self, changeid)
238 return context.changectx(self, changeid)
239
239
240 def __contains__(self, changeid):
240 def __contains__(self, changeid):
241 try:
241 try:
242 return bool(self.lookup(changeid))
242 return bool(self.lookup(changeid))
243 except error.RepoLookupError:
243 except error.RepoLookupError:
244 return False
244 return False
245
245
246 def __nonzero__(self):
246 def __nonzero__(self):
247 return True
247 return True
248
248
249 def __len__(self):
249 def __len__(self):
250 return len(self.changelog)
250 return len(self.changelog)
251
251
252 def __iter__(self):
252 def __iter__(self):
253 for i in xrange(len(self)):
253 for i in xrange(len(self)):
254 yield i
254 yield i
255
255
256 def revs(self, expr, *args):
256 def revs(self, expr, *args):
257 '''Return a list of revisions matching the given revset'''
257 '''Return a list of revisions matching the given revset'''
258 expr = revset.formatspec(expr, *args)
258 expr = revset.formatspec(expr, *args)
259 m = revset.match(None, expr)
259 m = revset.match(None, expr)
260 return [r for r in m(self, range(len(self)))]
260 return [r for r in m(self, range(len(self)))]
261
261
262 def set(self, expr, *args):
262 def set(self, expr, *args):
263 '''
263 '''
264 Yield a context for each matching revision, after doing arg
264 Yield a context for each matching revision, after doing arg
265 replacement via revset.formatspec
265 replacement via revset.formatspec
266 '''
266 '''
267 for r in self.revs(expr, *args):
267 for r in self.revs(expr, *args):
268 yield self[r]
268 yield self[r]
269
269
270 def url(self):
270 def url(self):
271 return 'file:' + self.root
271 return 'file:' + self.root
272
272
273 def hook(self, name, throw=False, **args):
273 def hook(self, name, throw=False, **args):
274 return hook.hook(self.ui, self, name, throw, **args)
274 return hook.hook(self.ui, self, name, throw, **args)
275
275
276 tag_disallowed = ':\r\n'
276 tag_disallowed = ':\r\n'
277
277
278 def _tag(self, names, node, message, local, user, date, extra={}):
278 def _tag(self, names, node, message, local, user, date, extra={}):
279 if isinstance(names, str):
279 if isinstance(names, str):
280 allchars = names
            allchars = names
            names = (names,)
        else:
            allchars = ''.join(names)
        for c in self.tag_disallowed:
            if c in allchars:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if (self._tagscache.tagtypes and
                    name in self._tagscache.tagtypes):
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError, e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        self.invalidatecaches()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            for x in self.status()[:5]:
                if '.hgtags' in x:
                    raise util.Abort(_('working copy of .hgtags is changed '
                                       '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)
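
    # Usage sketch for tag() (illustrative values only, not part of the
    # shipped module): tag the working directory's parent, once globally
    # and once locally:
    #
    #   node = repo['.'].node()
    #   repo.tag('v1.0', node, 'Added tag v1.0', local=False,
    #            user='alice', date=None)   # commits a .hgtags change
    #   repo.tag(['wip'], node, '', local=True,
    #            user='alice', date=None)   # writes .hg/localtags only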

    @propertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        for k, v in self._tagscache.tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {}    # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)
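
    # Sketch of the extension point the docstring above describes
    # (hypothetical subclass, not shipped code): an override can inject
    # extra "virtual" tags before returning:
    #
    #   class myrepo(localrepository):               # hypothetical name
    #       def _findtags(self):
    #           tags, tagtypes = localrepository._findtags(self)
    #           tags['snapshot'] = self.changelog.tip()   # made-up tag
    #           tagtypes['snapshot'] = 'local'
    #           return tags, tagtypes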

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                r = self.changelog.rev(n)
                l.append((r, t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])
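
    # Usage sketch for the tag query helpers above (illustrative only,
    # assuming 'repo' is a localrepository instance):
    #
    #   repo.tags()           # {'tip': <node>, 'v1.0': <node>, ...}
    #   repo.tagtype('v1.0')  # 'global', 'local', or None
    #   repo.tagslist()       # [(tag, node), ...] ordered by revision
    #   repo.nodetags(repo['.'].node())   # sorted tags on that node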

    def nodebookmarks(self, node):
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def _branchtags(self, partial, lrev):
        # TODO: rename this function?
        tiprev = len(self) - 1
        if lrev != tiprev:
            ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
            self._updatebranchcache(partial, ctxgen)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

    def updatebranchcache(self):
        tip = self.changelog.tip()
        if self._branchcache is not None and self._branchcachetip == tip:
            return

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if oldtip is None or oldtip not in self.changelog.nodemap:
            partial, last, lrev = self._readbranchcache()
        else:
            lrev = self.changelog.rev(oldtip)
            partial = self._branchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just the branch tips)
        self._branchcache = partial

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]}'''
        self.updatebranchcache()
        return self._branchcache

    def _branchtip(self, heads):
        '''return the tipmost branch head in heads'''
        tip = heads[-1]
        for h in reversed(heads):
            if not self[h].closesbranch():
                tip = h
                break
        return tip

    def branchtip(self, branch):
        '''return the tip node for a given branch'''
        if branch not in self.branchmap():
            raise error.RepoLookupError(_("unknown branch '%s'") % branch)
        return self._branchtip(self.branchmap()[branch])

    def branchtags(self):
        '''return a dict where branch names map to the tipmost head of
        the branch, open heads come before closed'''
        bt = {}
        for bn, heads in self.branchmap().iteritems():
            bt[bn] = self._branchtip(heads)
        return bt
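
    # Usage sketch for the branch helpers above (illustrative only):
    #
    #   heads = repo.branchmap()['default']   # all heads of that branch
    #   tip = repo.branchtip('default')       # tipmost non-closing head
    #   repo.branchtags()                     # {branch: tipmost head node}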

    def _readbranchcache(self):
        partial = {}
        try:
            f = self.opener("cache/branchheads")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l:
                    continue
                node, label = l.split(" ", 1)
                label = encoding.tolocal(label.strip())
                if not node in self:
                    raise ValueError('invalidating branch cache because node '+
                                     '%s does not exist' % node)
                partial.setdefault(label, []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev

    def _writebranchcache(self, branches, tip, tiprev):
        try:
            f = self.opener("cache/branchheads", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
            f.close()
        except (IOError, OSError):
            pass
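
    # The branchheads cache written above is a small text file; a sketch of
    # its layout (hypothetical, abbreviated hashes):
    #
    #   1f0dee641bb7... 4200       <- tip node and tip revision number
    #   1f0dee641bb7... default    <- one "<head node> <branch>" line
    #   8d2b63bcbf9e... stable        per branch head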

    def _updatebranchcache(self, partial, ctxgen):
        """Given a branchhead cache, partial, that may have extra nodes or be
        missing heads, and a generator of nodes that are at least a superset
        of the missing heads, this function updates partial to be correct.
        """
        # collect new branch entries
        newbranches = {}
        for c in ctxgen:
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = partial.setdefault(branch, [])
            # Remove candidate heads that no longer are in the repo (e.g., as
            # the result of a strip that just happened). Avoid using 'node in
            # self' here because that dives down into branchcache code somewhat
            # recursively.
            bheadrevs = [self.changelog.rev(node) for node in bheads
                         if self.changelog.hasnode(node)]
            newheadrevs = [self.changelog.rev(node) for node in newnodes
                           if self.changelog.hasnode(node)]
            ctxisnew = bheadrevs and min(newheadrevs) > max(bheadrevs)
            # Remove duplicates - nodes that are in newheadrevs and are already
            # in bheadrevs. This can happen if you strip a node whose parent
            # was already a head (because they're on different branches).
            bheadrevs = sorted(set(bheadrevs).union(newheadrevs))

            # Starting from tip means fewer passes over reachable. If we know
            # the new candidates are not ancestors of existing heads, we don't
            # have to examine ancestors of existing heads
            if ctxisnew:
                iterrevs = sorted(newheadrevs)
            else:
                iterrevs = list(bheadrevs)

            # This loop prunes out two kinds of heads - heads that are
            # superseded by a head in newheadrevs, and newheadrevs that are not
            # heads because an existing head is their descendant.
            while iterrevs:
                latest = iterrevs.pop()
                if latest not in bheadrevs:
                    continue
                ancestors = set(self.changelog.ancestors([latest],
                                                         bheadrevs[0]))
                if ancestors:
                    bheadrevs = [b for b in bheadrevs if b not in ancestors]
            partial[branch] = [self.changelog.node(rev) for rev in bheadrevs]

        # There may be branches that cease to exist when the last commit in the
        # branch was stripped. This code filters them out. Note that the
        # branch that ceased to exist may not be in newbranches because
        # newbranches is the set of candidate heads, which when you strip the
        # last commit in a branch will be the parent branch.
        for branch in partial:
            nodes = [head for head in partial[branch]
                     if self.changelog.hasnode(head)]
            if not nodes:
                del partial[branch]

    def lookup(self, key):
        return self[key].node()

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        nm = self.changelog.nodemap
        pc = self._phasecache
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or pc.phase(self, r) >= phases.secret)
            result.append(resp)
        return result

    def local(self):
        return self

    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def setparents(self, p1, p2=nullid):
        copies = self.dirstate.setparents(p1, p2)
        if copies:
            # Adjust copy records, the dirstate cannot do it, it
            # requires access to parents manifests. Preserve them
            # only for entries added to first parent.
            pctx = self[p1]
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return os.path.islink(self.wjoin(f))

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data
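
    # The filter patterns loaded above come from hgrc sections named after
    # the filter: 'encode' filters run on wread(), 'decode' filters on
    # wwrite(). A configuration sketch (example patterns and commands, not
    # defaults):
    #
    #   [encode]
    #   **.txt = tempfile: unix2dos -n INFILE OUTFILE
    #
    #   [decode]
    #   **.txt = tempfile: dos2unix -n INFILE OUTFILE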

    @propertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @propertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener.write(filename, data)
            if 'x' in flags:
                util.setflags(self.wjoin(filename), False, True)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def transaction(self, desc):
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        self._writejournal(desc)
        renames = [(x, undoname(x)) for x in self._journalfiles()]

        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr
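
    # Typical calling pattern for transaction() (sketch, error handling
    # elided; the caller is assumed to hold the store lock already):
    #
    #   tr = repo.transaction('my-operation')
    #   try:
    #       ...append to revlogs...
    #       tr.close()      # commit the transaction
    #   finally:
    #       tr.release()    # abort it if close() was never reached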

    def _journalfiles(self):
        return (self.sjoin('journal'), self.join('journal.dirstate'),
                self.join('journal.branch'), self.join('journal.desc'),
                self.join('journal.bookmarks'),
                self.sjoin('journal.phaseroots'))

    def undofiles(self):
        return [undoname(x) for x in self._journalfiles()]

    def _writejournal(self, desc):
        self.opener.write("journal.dirstate",
                          self.opener.tryread("dirstate"))
        self.opener.write("journal.branch",
                          encoding.fromlocal(self.dirstate.branch()))
        self.opener.write("journal.desc",
                          "%d\n%s\n" % (len(self), desc))
        self.opener.write("journal.bookmarks",
                          self.opener.tryread("bookmarks"))
        self.sopener.write("journal.phaseroots",
                           self.sopener.tryread("phaseroots"))

    def recover(self):
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"),
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self, dryrun=False, force=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)
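
    # Note: these two methods back the CLI commands of the same names:
    # 'hg recover' undoes an interrupted transaction recorded in the
    # journal, while 'hg rollback' undoes the last completed transaction
    # using the saved undo files.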

    def _rollback(self, dryrun, force):
        ui = self.ui
        try:
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
        if os.path.exists(self.join('undo.bookmarks')):
            util.rename(self.join('undo.bookmarks'),
                        self.join('bookmarks'))
        if os.path.exists(self.sjoin('undo.phaseroots')):
            util.rename(self.sjoin('undo.phaseroots'),
                        self.sjoin('phaseroots'))
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            util.rename(self.join('undo.dirstate'), self.join('dirstate'))
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(branch)
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def invalidatecaches(self):
        def delcache(name):
            try:
                delattr(self, name)
            except AttributeError:
                pass

        delcache('_tagscache')

        self._branchcache = None # in UTF-8
        self._branchcachetip = None

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if 'dirstate' in self.__dict__:
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self, 'dirstate')

    def invalidate(self):
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(self, k)
            except AttributeError:
                pass
        self.invalidatecaches()

        # Discard all cache entries to force reloading everything.
        self._filecache.clear()

    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def _afterlock(self, callback):
        """add a callback to the current repository lock.

        The callback will be executed on lock release."""
        l = self._lockref and self._lockref()
        if l:
            l.postrelease.append(callback)
        else:
            callback()
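
    # Usage sketch for _afterlock (hypothetical callback): defer work until
    # the current lock is released, or run it immediately if no lock is
    # held:
    #
    #   def runhook():
    #       self.hook('myhook')     # hypothetical hook name
    #   self._afterlock(runhook)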

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.store.write()
            if '_phasecache' in vars(self):
                self._phasecache.write()
            for k, ce in self._filecache.items():
                if k == 'dirstate':
                    continue
                ce.refresh()

        l = self._lock(self.sjoin("lock"), wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.dirstate.write()
            ce = self._filecache.get('dirstate')
            if ce:
                ce.refresh()

        l = self._lock(self.join("wlock"), wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l
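
    # Sketch of the conventional acquisition order (wlock before lock, and
    # a transaction only while the store lock is held); 'repo' is assumed:
    #
    #   wlock = repo.wlock()
    #   lock = repo.lock()
    #   try:
    #       tr = repo.transaction('example')
    #       ...
    #   finally:
    #       release(lock, wlock)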

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                if '.hgsubstate' in changes[0]:
                    changes[0].remove('.hgsubstate')
                if '.hgsubstate' in changes[2]:
                    changes[2].remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise util.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    if wctx.sub(s).dirty(True):
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise util.Abort(
                                _("uncommitted changes in subrepo %s") % s,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    changes[0].insert(0, '.hgsubstate')

            elif '.hgsub' in changes[2]:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
                    changes[2].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            if merge and changes[3]:
                raise util.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.drop(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            self.hook("commit", node=node, parent1=parent1, parent2=parent2)
        self._afterlock(commithook)
        return ret
1276
1276
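    # Usage sketch (an illustration, not part of the original file): callers
    # such as commands.commit() reach the code above via something like
    #
    #   node = repo.commit(text='fix bug', user='alice <a@example.com>')
    #
    # where a None return means there was nothing to commit. The keyword
    # names are assumptions about commit()'s signature earlier in this file.
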
    def commitctx(self, ctx, error=False):
        """Add a new revision to the current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            # set the new commit in its proper phase
            targetphase = phases.newcommitphase(self.ui)
            if targetphase:
                # retracting the boundary does not alter the parent
                # changesets; if a parent has a higher phase, the
                # resulting phase will be compliant anyway
                #
                # if the minimal phase was 0, we don't need to retract
                # anything
                phases.retractboundary(self, targetphase, [n])
            tr.close()
            self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

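    # Illustrative sketch (not part of the original file): besides the
    # workingctx built by commit() above, commitctx() can be fed an
    # in-memory context; the names below assume the context.memctx API
    # of this era:
    #
    #   def getfilectx(repo, memctx, path):
    #       return context.memfilectx(path, 'contents of %s\n' % path)
    #   mctx = context.memctx(repo, (repo['.'].node(), None), 'message',
    #                         ['a.txt'], getfilectx, 'alice', None, {})
    #   node = repo.commitctx(mctx)
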
    def destroyed(self, newheadnodes=None):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.

        If you know the branchheadcache was up to date before nodes were
        removed and you also know the set of candidate new heads that may
        have resulted from the destruction, you can set newheadnodes. This
        will enable the code to update the branchheads cache, rather than
        having future code decide it's invalid and regenerating it from
        scratch.
        '''
        # If we have info, newheadnodes, on how to update the branch cache,
        # do it. Otherwise, since nodes were destroyed, the cache is stale
        # and this will be caught the next time it is read.
        if newheadnodes:
            tiprev = len(self) - 1
            ctxgen = (self[node] for node in newheadnodes
                      if self.changelog.hasnode(node))
            self._updatebranchcache(self._branchcache, ctxgen)
            self._writebranchcache(self._branchcache, self.changelog.tip(),
                                   tiprev)

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidatecaches()

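    # Sketch of the intended calling convention (an assumption drawn from
    # the docstring above, not code from the original file): a
    # history-rewriting caller such as strip would do
    #
    #   repo.destroyed(newheadnodes=candidatenewheads)
    #
    # so the branch cache can be updated incrementally rather than being
    # invalidated and rebuilt from scratch.
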
    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

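    # For example (a sketch, not part of the original file; scmutil.match
    # is assumed to be available as elsewhere in this module):
    #
    #   m = scmutil.match(repo[None], ['glob:**.py'])
    #   for f in repo.walk(m):
    #       print f
    #
    # walks the working directory; passing node='tip' walks that
    # changeset's manifest instead.
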
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            if match.always():
                return mf
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in ctx1 and f not in ctx1.dirs():
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx2.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            withflags = mf1.withflags() | mf2.withflags()
            for fn in mf2:
                if fn in mf1:
                    if (fn not in deleted and
                        ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
                         (mf1[fn] != mf2[fn] and
                          (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            removed = mf1.keys()

        if working and modified and not self.dirstate._checklink:
            # Symlink placeholders may get non-symlink-like contents
            # via user error or dereferencing by NFS or Samba servers,
            # so we filter out any placeholders that don't look like a
            # symlink
            sane = []
            for f in modified:
                if ctx2.flags(f) == 'l':
                    d = ctx2[f].data()
                    if len(d) >= 1024 or '\n' in d or util.binary(d):
                        self.ui.debug('ignoring suspect symlink placeholder'
                                      ' "%s"\n' % f)
                        continue
                sane.append(f)
            modified = sane

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r

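    # Usage sketch (not part of the original file): the seven lists come
    # back in the fixed order used throughout this method, e.g.
    #
    #   modified, added, removed, deleted, unknown, ignored, clean = \
    #       repo.status(unknown=True, ignored=True, clean=True)
    #
    # where the unknown, ignored and clean lists stay empty unless they
    # are explicitly requested.
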
    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if not self[h].closesbranch()]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

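    # between() backs the old discovery wire protocol: for each
    # (top, bottom) pair it walks first parents from top and records the
    # nodes at distances 1, 2, 4, 8, ... so a client can binary-search
    # towards the common ancestor. Illustrative call (not in the original):
    #
    #   repo.between([(tipnode, rootnode)])
    #   # -> [[node 1 step below tip, node 2 steps below, 4 steps, ...]]
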
    def pull(self, remote, heads=None, force=False):
        # don't open a transaction for nothing or you break future useful
        # rollback calls
        tr = None
        trname = 'pull\n' + util.hidepassword(remote.url())
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                added = []
                result = 0
            else:
                tr = self.transaction(trname)
                if heads is None and list(common) == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                if remote.capable('getbundle'):
                    cg = remote.getbundle('pull', common=common,
                                          heads=heads or rheads)
                elif heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                clstart = len(self.changelog)
                result = self.addchangegroup(cg, 'pull', remote.url())
                clend = len(self.changelog)
                added = [self.changelog.node(r) for r in xrange(clstart, clend)]

            # compute target subset
            if heads is None:
                # We pulled everything possible
                # sync on everything common
                subset = common + added
            else:
                # We pulled a specific subset
                # sync on this subset
                subset = heads

            # Get remote phases data from remote
            remotephases = remote.listkeys('phases')
            publishing = bool(remotephases.get('publishing', False))
            if remotephases and not publishing:
                # remote is new and non-publishing
                pheads, _dr = phases.analyzeremotephases(self, subset,
                                                         remotephases)
                phases.advanceboundary(self, phases.public, pheads)
                phases.advanceboundary(self, phases.draft, subset)
            else:
                # Remote is old or publishing; all common changesets
                # should be seen as public
                phases.advanceboundary(self, phases.public, subset)

            remoteobs = remote.listkeys('obsolete')
            if 'dump' in remoteobs:
                if tr is None:
                    tr = self.transaction(trname)
                data = base85.b85decode(remoteobs['dump'])
                self.obsstore.mergemarkers(tr, data)
            if tr is not None:
                tr.close()
        finally:
            if tr is not None:
                tr.release()
            lock.release()

        return result

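    # Usage sketch (not part of the original file; hg.peer() is assumed to
    # exist as in this era's hg module):
    #
    #   other = hg.peer(repo.ui, {}, 'http://example.com/repo')
    #   result = repo.pull(other)
    #
    # The return value follows addchangegroup()'s head-count convention
    # documented further below.
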
    def checkpush(self, force, revs):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the
        push command.
        """
        pass

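    # Sketch of an extension override (illustrative only; the class and
    # branch name below are made up):
    #
    #   def reposetup(ui, repo):
    #       class vetopushrepo(repo.__class__):
    #           def checkpush(self, force, revs):
    #               super(vetopushrepo, self).checkpush(force, revs)
    #               if not force and self[None].branch() == 'frozen':
    #                   raise util.Abort(_('cannot push from branch frozen'))
    #       repo.__class__ = vetopushrepo
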
    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - None means nothing to push
          - 0 means HTTP error
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()
        '''
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        # get local lock as we might write phase data
        locallock = self.lock()
        try:
            self.checkpush(force, revs)
            lock = None
            unbundle = remote.capable('unbundle')
            if not unbundle:
                lock = remote.lock()
            try:
                # discovery
                fci = discovery.findcommonincoming
                commoninc = fci(self, remote, force=force)
                common, inc, remoteheads = commoninc
                fco = discovery.findcommonoutgoing
                outgoing = fco(self, remote, onlyheads=revs,
                               commoninc=commoninc, force=force)

                if not outgoing.missing:
                    # nothing to push
                    scmutil.nochangesfound(self.ui, outgoing.excluded)
                    ret = None
                else:
                    # something to push
                    if not force:
                        discovery.checkheads(self, remote, outgoing,
                                             remoteheads, newbranch,
                                             bool(inc))

                    # create a changegroup from local
                    if revs is None and not outgoing.excluded:
                        # push everything,
                        # use the fast path, no race possible on push
                        cg = self._changegroup(outgoing.missing, 'push')
                    else:
                        cg = self.getlocalbundle('push', outgoing)

                    # apply changegroup to remote
                    if unbundle:
                        # local repo finds heads on server, finds out what
                        # revs it must push. once revs transferred, if server
                        # finds it has different heads (someone else won
                        # commit/push race), server aborts.
                        if force:
                            remoteheads = ['force']
                        # ssh: return remote's addchangegroup()
                        # http: return remote's addchangegroup() or 0 for error
                        ret = remote.unbundle(cg, remoteheads, 'push')
                    else:
                        # we return an integer indicating remote head count
                        # change
                        ret = remote.addchangegroup(cg, 'push', self.url())

                if ret:
                    # push succeeded, synchronize the target of the push
                    cheads = outgoing.missingheads
                elif revs is None:
                    # All-out push failed; synchronize all common
                    cheads = outgoing.commonheads
                else:
                    # I want cheads = heads(::missingheads and ::commonheads)
                    # (missingheads is revs with secret changesets filtered
                    # out)
                    #
                    # This can be expressed as:
                    #     cheads = ( (missingheads and ::commonheads)
                    #              + (commonheads and ::missingheads))
                    #
                    # while trying to push we already computed the following:
                    #     common = (::commonheads)
                    #     missing = ((commonheads::missingheads) - commonheads)
                    #
                    # We can pick:
                    # * missingheads part of common (::commonheads)
                    common = set(outgoing.common)
                    cheads = [node for node in revs if node in common]
                    # and
                    # * commonheads parents on missing
                    revset = self.set('%ln and parents(roots(%ln))',
                                      outgoing.commonheads,
                                      outgoing.missing)
                    cheads.extend(c.node() for c in revset)
                # even when we don't push, exchanging phase data is useful
                remotephases = remote.listkeys('phases')
                if not remotephases: # old server or public only repo
                    phases.advanceboundary(self, phases.public, cheads)
                    # don't push any phase data as there is nothing to push
                else:
                    ana = phases.analyzeremotephases(self, cheads, remotephases)
                    pheads, droots = ana
                    ### Apply remote phase on local
                    if remotephases.get('publishing', False):
                        phases.advanceboundary(self, phases.public, cheads)
                    else: # publish = False
                        phases.advanceboundary(self, phases.public, pheads)
                        phases.advanceboundary(self, phases.draft, cheads)
                    ### Apply local phase on remote

                    # Get the list of all revs draft on remote but public
                    # here. XXX Beware that the revset breaks if droots is
                    # not strictly made of roots; we may want to ensure that
                    # it is, but that would be costly.
                    outdated = self.set('heads((%ln::%ln) and public())',
                                        droots, cheads)
                    for newremotehead in outdated:
                        r = remote.pushkey('phases',
                                           newremotehead.hex(),
                                           str(phases.draft),
                                           str(phases.public))
                        if not r:
                            self.ui.warn(_('updating %s to public failed!\n')
                                         % newremotehead)
                if 'obsolete' in self.listkeys('namespaces') and self.obsstore:
                    data = self.listkeys('obsolete')['dump']
                    r = remote.pushkey('obsolete', 'dump', '', data)
                    if not r:
                        self.ui.warn(_('failed to push obsolete markers!\n'))
            finally:
                if lock is not None:
                    lock.release()
        finally:
            locallock.release()

        self.ui.debug("checking for updated bookmarks\n")
        rb = remote.listkeys('bookmarks')
        for k in rb.keys():
            if k in self._bookmarks:
                nr, nl = rb[k], hex(self._bookmarks[k])
                if nr in self:
                    cr = self[nr]
                    cl = self[nl]
                    if cl in cr.descendants():
                        r = remote.pushkey('bookmarks', k, nr, nl)
                        if r:
                            self.ui.status(_("updating bookmark %s\n") % k)
                        else:
                            self.ui.warn(_('updating bookmark %s'
                                           ' failed!\n') % k)

        return ret

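    # Return-value sketch (not part of the original file): callers mostly
    # distinguish "nothing to push" from everything else, e.g.
    #
    #   ret = repo.push(other)
    #   if ret is None:
    #       repo.ui.status('nothing to push\n')     # no outgoing changesets
    #   elif ret == 0:
    #       repo.ui.warn('push failed over HTTP\n') # remote reported error
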
    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.
        """
        cl = self.changelog
        if not bases:
            bases = [nullid]
        csets, bases, heads = cl.nodesbetween(bases, heads)
        # We assume that all ancestors of bases are known
        common = set(cl.ancestors([cl.rev(n) for n in bases]))
        return self._changegroupsubset(common, csets, heads, source)

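    # Usage sketch (not part of the original file): bundling everything
    # between a base and the current heads, then draining the resulting
    # chunkbuffer via its read() method:
    #
    #   cg = repo.changegroupsubset([basenode], repo.heads(), 'bundle')
    #   while True:
    #       chunk = cg.read(4096)
    #       if not chunk:
    #           break
    #       # ... write chunk to a bundle file ...
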
    def getlocalbundle(self, source, outgoing):
        """Like getbundle, but taking a discovery.outgoing as an argument.

        This is only implemented for local repos and reuses potentially
        precomputed sets in outgoing."""
        if not outgoing.missing:
            return None
        return self._changegroupsubset(outgoing.common,
                                       outgoing.missing,
                                       outgoing.missingheads,
                                       source)

    def getbundle(self, source, heads=None, common=None):
        """Like changegroupsubset, but returns the set difference between the
        ancestors of heads and the ancestors of common.

        If heads is None, use the local heads. If common is None, use [nullid].

        The nodes in common might not all be known locally due to the way the
        current discovery protocol works.
        """
        cl = self.changelog
        if common:
            nm = cl.nodemap
            common = [n for n in common if n in nm]
        else:
            common = [nullid]
        if not heads:
            heads = cl.heads()
        return self.getlocalbundle(source,
                                   discovery.outgoing(cl, common, heads))

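    # In revset terms (an informal sketch, not part of the original file),
    # getbundle sends roughly '::heads - ::common':
    #
    #   cg = repo.getbundle('pull', heads=[tipnode], common=[basenode])
    #
    # bundles the changesets reachable from tipnode but not from basenode.
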
    def _changegroupsubset(self, commonrevs, csets, heads, source):

        cl = self.changelog
        mf = self.manifest
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()
        fstate = ['', {}]
        count = [0, 0]

        # can we go through the fast path?
        heads.sort()
        if heads == sorted(self.heads()):
            return self._changegroup(csets, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(csets, source)

        # filter any nodes that claim to be part of the known set
        def prune(revlog, missing):
            rr, rl = revlog.rev, revlog.linkrev
            return [n for n in missing
                    if rl(rr(n)) not in commonrevs]

        progress = self.ui.progress
        _bundling = _('bundling')
        _changesets = _('changesets')
        _manifests = _('manifests')
        _files = _('files')

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_changesets, total=count[1])
                return x
            elif revlog == mf:
                clnode = mfs[x]
                mdata = mf.readfast(x)
                for f, n in mdata.iteritems():
                    if f in changedfiles:
                        fnodes[f].setdefault(n, clnode)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_manifests, total=count[1])
                return clnode
            else:
                progress(_bundling, count[0], item=fstate[0],
                         unit=_files, total=count[1])
                return fstate[1][x]

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            count[:] = [0, len(csets)]
            for chunk in cl.group(csets, bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            for f in changedfiles:
                fnodes[f] = {}
            count[:] = [0, len(mfs)]
            for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            mfs.clear()

            # Go through all our files in order sorted by name.
            count[:] = [0, len(changedfiles)]
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                fstate[0] = fname
                fstate[1] = fnodes.pop(fname, {})

                nodelist = prune(filerevlog, fstate[1])
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk

            # Signal that no more groups are left.
            yield bundler.close()
            progress(_bundling, None)

        if csets:
            self.hook('outgoing', node=hex(csets[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        cl = self.changelog
        mf = self.manifest
        mfs = {}
        changedfiles = set()
        fstate = ['']
        count = [0, 0]

        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(nodes, source)

        revset = set([cl.rev(n) for n in nodes])

        def gennodelst(log):
            ln, llr = log.node, log.linkrev
            return [ln(r) for r in log if llr(r) in revset]

        progress = self.ui.progress
        _bundling = _('bundling')
        _changesets = _('changesets')
        _manifests = _('manifests')
        _files = _('files')

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_changesets, total=count[1])
                return x
            elif revlog == mf:
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_manifests, total=count[1])
                return cl.node(revlog.linkrev(revlog.rev(x)))
            else:
                progress(_bundling, count[0], item=fstate[0],
                         total=count[1], unit=_files)
                return cl.node(revlog.linkrev(revlog.rev(x)))

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files

            count[:] = [0, len(nodes)]
            for chunk in cl.group(nodes, bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            count[:] = [0, len(mfs)]
            for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            count[:] = [0, len(changedfiles)]
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                fstate[0] = fname
                nodelist = gennodelst(filerevlog)
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk
            yield bundler.close()
            progress(_bundling, None)

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

2120 def addchangegroup(self, source, srctype, url, emptyok=False):
2120 def addchangegroup(self, source, srctype, url, emptyok=False):
2121 """Add the changegroup returned by source.read() to this repo.
2121 """Add the changegroup returned by source.read() to this repo.
2122 srctype is a string like 'push', 'pull', or 'unbundle'. url is
2122 srctype is a string like 'push', 'pull', or 'unbundle'. url is
2123 the URL of the repo where this changegroup is coming from.
2123 the URL of the repo where this changegroup is coming from.
2124
2124
2125 Return an integer summarizing the change to this repo:
2125 Return an integer summarizing the change to this repo:
2126 - nothing changed or no source: 0
2126 - nothing changed or no source: 0
2127 - more heads than before: 1+added heads (2..n)
2127 - more heads than before: 1+added heads (2..n)
2128 - fewer heads than before: -1-removed heads (-2..-n)
2128 - fewer heads than before: -1-removed heads (-2..-n)
2129 - number of heads stays the same: 1
2129 - number of heads stays the same: 1
2130 """
2130 """
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0
        efiles = set()

        # write changelog data to temp files so concurrent readers will not
        # see an inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = cl.heads()

        tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            class prog(object):
                step = _('changesets')
                count = 1
                ui = self.ui
                total = None
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'),
                                     total=self.total)
                    self.count += 1
            pr = prog()
            source.callback = pr

            source.changelogheader()
            srccontent = cl.addgroup(source, csmap, trp)
            if not (srccontent or emptyok):
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            source.manifestheader()
            self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = _('files')
            pr.count = 1
            pr.total = efiles
            source.callback = None

            while True:
                chunkdata = source.filelogheader()
                if not chunkdata:
                    break
                f = chunkdata["filename"]
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                if not fl.addgroup(source, revmap, trp):
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                if f in needfiles:
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            dh = 0
            if oldheads:
                heads = cl.heads()
                dh = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and self[h].closesbranch():
                        dh -= 1
            htext = ""
            if dh:
                htext = _(" (%+d heads)") % dh

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            added = [cl.node(r) for r in xrange(clstart, clend)]
            publishing = self.ui.configbool('phases', 'publish', True)
            if srctype == 'push':
                # Old servers cannot push the boundary themselves.
                # New servers won't push the boundary if the changeset
                # already existed locally as secret.
                #
                # We should not use 'added' here but the list of all changes
                # in the bundle
                if publishing:
                    phases.advanceboundary(self, phases.public, srccontent)
                else:
                    phases.advanceboundary(self, phases.draft, srccontent)
                    phases.retractboundary(self, phases.draft, added)
            elif srctype != 'strip':
                # publishing only alters behavior during push
                #
                # strip should not touch the boundary at all
                phases.retractboundary(self, phases.draft, added)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()

            if changesets > 0:
                def runhooks():
                    # forcefully update the on-disk branch cache
                    self.ui.debug("updating the branch cache\n")
                    self.updatebranchcache()
                    self.hook("changegroup", node=hex(cl.node(clstart)),
                              source=srctype, url=url)

                    for n in added:
                        self.hook("incoming", node=hex(n), source=srctype,
                                  url=url)
                self._afterlock(runhooks)

        finally:
            tr.release()
        # never return 0 here:
        if dh < 0:
            return dh - 1
        else:
            return dh + 1

    def stream_in(self, remote, requirements):
        lock = self.lock()
        try:
            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
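            # Reader's note (added comment): from here on the stream body
            # is parsed as a '<total_files> <total_bytes>' header line,
            # then, for each file, a '<name>\0<size>' line followed by
            # exactly <size> bytes of raw store data.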
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            handled_bytes = 0
            self.ui.progress(_('clone'), 0, total=total_bytes)
            start = time.time()
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('unexpected response from remote server:'), l)
                if self.ui.debugflag:
                    self.ui.debug('adding %s (%s)\n' %
                                  (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = self.sopener(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    handled_bytes += len(chunk)
                    self.ui.progress(_('clone'), handled_bytes,
                                     total=total_bytes)
                    ofp.write(chunk)
                ofp.close()
            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.progress(_('clone'), None)
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements +
            #                    new format-related requirements
            #                    from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()

    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

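        # Rough decision table for what follows (descriptive comment,
        # added for readability):
        #   stream wanted, no heads, remote has 'stream'         -> stream_in
        #   stream wanted, no heads, 'streamreqs' all supported  -> stream_in
        #   anything else (including explicit heads)             -> pull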
        if not stream:
            # if the server explicitly prefers to stream (for fast LANs)
            stream = remote.capable('stream-preferred')

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)

    def pushkey(self, namespace, key, old, new):
        self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                  old=old, new=new)
        ret = pushkey.push(self, namespace, key, old, new)
        self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                  ret=ret)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.opener('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root)+1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            try:
                util.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True
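# Reader's note (added, not in the original module): 'instance' and
# 'islocal' are the hooks that mercurial.hg uses when resolving a path to
# a repository class, so a minimal sketch of how this module is reached
# looks like:
#
#   from mercurial import hg, ui as uimod
#   repo = hg.repository(uimod.ui(), '.')   # dispatches to instance()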
@@ -1,603 +1,603 b''
Setting up test

  $ hg init test
  $ cd test
  $ echo 0 > afile
  $ hg add afile
  $ hg commit -m "0.0"
  $ echo 1 >> afile
  $ hg commit -m "0.1"
  $ echo 2 >> afile
  $ hg commit -m "0.2"
  $ echo 3 >> afile
  $ hg commit -m "0.3"
  $ hg update -C 0
  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
  $ echo 1 >> afile
  $ hg commit -m "1.1"
  created new head
  $ echo 2 >> afile
  $ hg commit -m "1.2"
  $ echo "a line" > fred
  $ echo 3 >> afile
  $ hg add fred
  $ hg commit -m "1.3"
  $ hg mv afile adifferentfile
  $ hg commit -m "1.3m"
  $ hg update -C 3
  1 files updated, 0 files merged, 2 files removed, 0 files unresolved
  $ hg mv afile anotherfile
  $ hg commit -m "0.3m"
  $ hg verify
  checking changesets
  checking manifests
  crosschecking files in changesets and manifests
  checking files
  4 files, 9 changesets, 7 total revisions
  $ cd ..
  $ hg init empty

Bundle and phase

  $ hg -R test phase --force --secret 0
  $ hg -R test bundle phase.hg empty
  searching for changes
  no changes found (ignored 9 secret changesets)
  [1]
  $ hg -R test phase --draft -r 'head()'

Bundle --all

  $ hg -R test bundle --all all.hg
  9 changesets found

Bundle test to full.hg

  $ hg -R test bundle full.hg empty
  searching for changes
  9 changesets found

Unbundle full.hg in test

  $ hg -R test unbundle full.hg
  adding changesets
  adding manifests
  adding file changes
  added 0 changesets with 0 changes to 4 files
  (run 'hg update' to get a working copy)

Verify empty

  $ hg -R empty heads
  [1]
  $ hg -R empty verify
  checking changesets
  checking manifests
  crosschecking files in changesets and manifests
  checking files
  0 files, 0 changesets, 0 total revisions

Pull full.hg into test (using --cwd)

  $ hg --cwd test pull ../full.hg
  pulling from ../full.hg
  searching for changes
  no changes found

Pull full.hg into empty (using --cwd)

  $ hg --cwd empty pull ../full.hg
  pulling from ../full.hg
  requesting all changes
  adding changesets
  adding manifests
  adding file changes
  added 9 changesets with 7 changes to 4 files (+1 heads)
  (run 'hg heads' to see heads, 'hg merge' to merge)

Rollback empty

  $ hg -R empty rollback
  repository tip rolled back to revision -1 (undo pull)

Pull full.hg into empty again (using --cwd)

  $ hg --cwd empty pull ../full.hg
  pulling from ../full.hg
  requesting all changes
  adding changesets
  adding manifests
  adding file changes
  added 9 changesets with 7 changes to 4 files (+1 heads)
  (run 'hg heads' to see heads, 'hg merge' to merge)

Pull full.hg into test (using -R)

  $ hg -R test pull full.hg
  pulling from full.hg
  searching for changes
  no changes found

Pull full.hg into empty (using -R)

  $ hg -R empty pull full.hg
  pulling from full.hg
  searching for changes
  no changes found

Rollback empty

  $ hg -R empty rollback
  repository tip rolled back to revision -1 (undo pull)

Pull full.hg into empty again (using -R)

  $ hg -R empty pull full.hg
  pulling from full.hg
  requesting all changes
  adding changesets
  adding manifests
  adding file changes
  added 9 changesets with 7 changes to 4 files (+1 heads)
  (run 'hg heads' to see heads, 'hg merge' to merge)

Log -R full.hg in fresh empty

  $ rm -r empty
  $ hg init empty
  $ cd empty
  $ hg -R bundle://../full.hg log
  changeset:   8:aa35859c02ea
  tag:         tip
  parent:      3:eebf5a27f8ca
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     0.3m

  changeset:   7:a6a34bfa0076
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     1.3m

  changeset:   6:7373c1169842
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     1.3

  changeset:   5:1bb50a9436a7
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     1.2

  changeset:   4:095197eb4973
  parent:      0:f9ee2f85a263
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     1.1

  changeset:   3:eebf5a27f8ca
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     0.3

  changeset:   2:e38ba6f5b7e0
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     0.2

  changeset:   1:34c2bf6b0626
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     0.1

  changeset:   0:f9ee2f85a263
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     0.0

Make sure bundlerepo doesn't leak tempfiles (issue2491)

  $ ls .hg
  00changelog.i
  cache
  requires
  store

Pull ../full.hg into empty (with hook)

  $ echo "[hooks]" >> .hg/hgrc
  $ echo "changegroup = python \"$TESTDIR/printenv.py\" changegroup" >> .hg/hgrc

doesn't work (yet ?)

hg -R bundle://../full.hg verify

  $ hg pull bundle://../full.hg
  pulling from bundle:../full.hg
  requesting all changes
  adding changesets
  adding manifests
  adding file changes
  added 9 changesets with 7 changes to 4 files (+1 heads)
  changegroup hook: HG_NODE=f9ee2f85a263049e9ae6d37a0e67e96194ffb735 HG_SOURCE=pull HG_URL=bundle:../full.hg
  (run 'hg heads' to see heads, 'hg merge' to merge)

Rollback empty

  $ hg rollback
  repository tip rolled back to revision -1 (undo pull)
  $ cd ..

Log -R bundle:empty+full.hg

  $ hg -R bundle:empty+full.hg log --template="{rev} "; echo ""
  8 7 6 5 4 3 2 1 0

Pull full.hg into empty again (using -R; with hook)

  $ hg -R empty pull full.hg
  pulling from full.hg
  requesting all changes
  adding changesets
  adding manifests
  adding file changes
  added 9 changesets with 7 changes to 4 files (+1 heads)
  changegroup hook: HG_NODE=f9ee2f85a263049e9ae6d37a0e67e96194ffb735 HG_SOURCE=pull HG_URL=bundle:empty+full.hg
  (run 'hg heads' to see heads, 'hg merge' to merge)

Create partial clones

  $ rm -r empty
  $ hg init empty
  $ hg clone -r 3 test partial
  adding changesets
  adding manifests
  adding file changes
  added 4 changesets with 4 changes to 1 files
  updating to branch default
  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
  $ hg clone partial partial2
  updating to branch default
  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
  $ cd partial

Log -R full.hg in partial

  $ hg -R bundle://../full.hg log
  changeset:   8:aa35859c02ea
  tag:         tip
  parent:      3:eebf5a27f8ca
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     0.3m

  changeset:   7:a6a34bfa0076
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     1.3m

  changeset:   6:7373c1169842
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     1.3

  changeset:   5:1bb50a9436a7
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     1.2

  changeset:   4:095197eb4973
  parent:      0:f9ee2f85a263
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     1.1

  changeset:   3:eebf5a27f8ca
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     0.3

  changeset:   2:e38ba6f5b7e0
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     0.2

  changeset:   1:34c2bf6b0626
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     0.1

  changeset:   0:f9ee2f85a263
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     0.0


Incoming full.hg in partial

  $ hg incoming bundle://../full.hg
  comparing with bundle:../full.hg
  searching for changes
  changeset:   4:095197eb4973
  parent:      0:f9ee2f85a263
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     1.1

  changeset:   5:1bb50a9436a7
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     1.2

  changeset:   6:7373c1169842
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     1.3

  changeset:   7:a6a34bfa0076
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     1.3m

  changeset:   8:aa35859c02ea
  tag:         tip
  parent:      3:eebf5a27f8ca
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     0.3m


Outgoing -R full.hg vs partial2 in partial

  $ hg -R bundle://../full.hg outgoing ../partial2
  comparing with ../partial2
  searching for changes
  changeset:   4:095197eb4973
  parent:      0:f9ee2f85a263
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     1.1

  changeset:   5:1bb50a9436a7
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     1.2

  changeset:   6:7373c1169842
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     1.3

  changeset:   7:a6a34bfa0076
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     1.3m

  changeset:   8:aa35859c02ea
  tag:         tip
  parent:      3:eebf5a27f8ca
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     0.3m


Outgoing -R does-not-exist.hg vs partial2 in partial

  $ hg -R bundle://../does-not-exist.hg outgoing ../partial2
  abort: *../does-not-exist.hg* (glob)
  [255]
  $ cd ..

hide outer repo
  $ hg init

Direct clone from bundle (all-history)

  $ hg clone full.hg full-clone
  requesting all changes
  adding changesets
  adding manifests
  adding file changes
  added 9 changesets with 7 changes to 4 files (+1 heads)
  updating to branch default
  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
  $ hg -R full-clone heads
  changeset:   8:aa35859c02ea
  tag:         tip
  parent:      3:eebf5a27f8ca
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     0.3m

  changeset:   7:a6a34bfa0076
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     1.3m

  $ rm -r full-clone

When cloning from a non-copiable repository into '', do not
recurse infinitely (issue 2528)

  $ hg clone full.hg ''
  abort: empty destination path is not valid
  [255]

test for http://mercurial.selenic.com/bts/issue216

Unbundle incremental bundles into fresh empty in one go

  $ rm -r empty
  $ hg init empty
  $ hg -R test bundle --base null -r 0 ../0.hg
  1 changesets found
  $ hg -R test bundle --base 0 -r 1 ../1.hg
  1 changesets found
  $ hg -R empty unbundle -u ../0.hg ../1.hg
  adding changesets
  adding manifests
  adding file changes
  added 1 changesets with 1 changes to 1 files
  adding changesets
  adding manifests
  adding file changes
  added 1 changesets with 1 changes to 1 files
  1 files updated, 0 files merged, 0 files removed, 0 files unresolved

test for 540d1059c802

  $ hg init orig
  $ cd orig
  $ echo foo > foo
  $ hg add foo
  $ hg ci -m 'add foo'

  $ hg clone . ../copy
  updating to branch default
  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
  $ hg tag foo

  $ cd ../copy
  $ echo >> foo
  $ hg ci -m 'change foo'
  $ hg bundle ../bundle.hg ../orig
  searching for changes
  1 changesets found

  $ cd ../orig
  $ hg incoming ../bundle.hg
  comparing with ../bundle.hg
  searching for changes
  changeset:   2:ed1b79f46b9a
  tag:         tip
  parent:      0:bbd179dfa0a7
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     change foo

  $ cd ..

test bundle with # in the filename (issue2154):

  $ cp bundle.hg 'test#bundle.hg'
  $ cd orig
  $ hg incoming '../test#bundle.hg'
  comparing with ../test
  abort: unknown revision 'bundle.hg'!
  [255]

note that percent encoding is not handled:

  $ hg incoming ../test%23bundle.hg
  abort: repository ../test%23bundle.hg not found!
  [255]
  $ cd ..

test for http://mercurial.selenic.com/bts/issue1144

test that verify bundle does not traceback

partial history bundle, fails with unknown parent

  $ hg -R bundle.hg verify
  abort: 00changelog.i@bbd179dfa0a7: unknown parent!
  [255]

full history bundle, refuses to verify non-local repo

  $ hg -R all.hg verify
  abort: cannot verify bundle or remote repos
  [255]

but, regular verify must continue to work

  $ hg -R orig verify
  checking changesets
  checking manifests
  crosschecking files in changesets and manifests
  checking files
  2 files, 2 changesets, 2 total revisions

diff against bundle

  $ hg init b
  $ cd b
  $ hg -R ../all.hg diff -r tip
  diff -r aa35859c02ea anotherfile
  --- a/anotherfile Thu Jan 01 00:00:00 1970 +0000
  +++ /dev/null Thu Jan 01 00:00:00 1970 +0000
  @@ -1,4 +0,0 @@
  -0
  -1
  -2
  -3
  $ cd ..

bundle single branch

  $ hg init branchy
  $ cd branchy
  $ echo a >a
  $ echo x >x
  $ hg ci -Ama
  adding a
  adding x
  $ echo c >c
  $ echo xx >x
  $ hg ci -Amc
  adding c
  $ echo c1 >c1
  $ hg ci -Amc1
  adding c1
  $ hg up 0
  1 files updated, 0 files merged, 2 files removed, 0 files unresolved
  $ echo b >b
  $ hg ci -Amb
  adding b
  created new head
  $ echo b1 >b1
  $ echo xx >x
  $ hg ci -Amb1
  adding b1
  $ hg clone -q -r2 . part

== bundling via incoming

  $ hg in -R part --bundle incoming.hg --template "{node}\n" .
  comparing with .
  searching for changes
  1a38c1b849e8b70c756d2d80b0b9a3ac0b7ea11a
  057f4db07f61970e1c11e83be79e9d08adc4dc31

== bundling

  $ hg bundle bundle.hg part --debug
  query 1; heads
  searching for changes
  all remote heads known locally
  2 changesets found
  list of changesets:
  1a38c1b849e8b70c756d2d80b0b9a3ac0b7ea11a
  057f4db07f61970e1c11e83be79e9d08adc4dc31
  bundling: 1/2 changesets (50.00%)
  bundling: 2/2 changesets (100.00%)
  bundling: 1/2 manifests (50.00%)
  bundling: 2/2 manifests (100.00%)
  bundling: b 1/3 files (33.33%)
  bundling: b1 2/3 files (66.67%)
  bundling: x 3/3 files (100.00%)

== Test for issue3441

  $ hg clone -q -r0 . part2
  $ hg -q -R part2 pull bundle.hg
  $ hg -R part2 verify
  checking changesets
  checking manifests
  crosschecking files in changesets and manifests
  checking files
  4 files, 3 changesets, 5 total revisions

  $ cd ..
@@ -1,565 +1,565 b''
1 Prepare repo a:
1 Prepare repo a:
2
2
3 $ hg init a
3 $ hg init a
4 $ cd a
4 $ cd a
5 $ echo a > a
5 $ echo a > a
6 $ hg add a
6 $ hg add a
7 $ hg commit -m test
7 $ hg commit -m test
8 $ echo first line > b
8 $ echo first line > b
9 $ hg add b
9 $ hg add b
10
10
11 Create a non-inlined filelog:
11 Create a non-inlined filelog:
12
12
13 $ python -c 'file("data1", "wb").write("".join("%s\n" % x for x in range(10000)))'
13 $ python -c 'file("data1", "wb").write("".join("%s\n" % x for x in range(10000)))'
14 $ for j in 0 1 2 3 4 5 6 7 8 9; do
14 $ for j in 0 1 2 3 4 5 6 7 8 9; do
15 > cat data1 >> b
15 > cat data1 >> b
16 > hg commit -m test
16 > hg commit -m test
17 > done
17 > done
18
18
19 List files in store/data (should show a 'b.d'):
19 List files in store/data (should show a 'b.d'):
20
20
21 $ for i in .hg/store/data/*; do
21 $ for i in .hg/store/data/*; do
22 > echo $i
22 > echo $i
23 > done
23 > done
24 .hg/store/data/a.i
24 .hg/store/data/a.i
25 .hg/store/data/b.d
25 .hg/store/data/b.d
26 .hg/store/data/b.i
26 .hg/store/data/b.i
27
27
28 Default operation:
28 Default operation:
29
29
30 $ hg clone . ../b
30 $ hg clone . ../b
31 updating to branch default
31 updating to branch default
32 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
32 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
33 $ cd ../b
33 $ cd ../b
34 $ cat a
34 $ cat a
35 a
35 a
36 $ hg verify
36 $ hg verify
37 checking changesets
37 checking changesets
38 checking manifests
38 checking manifests
39 crosschecking files in changesets and manifests
39 crosschecking files in changesets and manifests
40 checking files
40 checking files
41 2 files, 11 changesets, 11 total revisions
41 2 files, 11 changesets, 11 total revisions
42
42
43 Invalid dest '' must abort:
43 Invalid dest '' must abort:
44
44
45 $ hg clone . ''
45 $ hg clone . ''
46 abort: * (glob)
46 abort: empty destination path is not valid
47 [255]
47 [255]
48
48
49 No update, with debug option:
49 No update, with debug option:
50
50
51 #if hardlink
51 #if hardlink
52 $ hg --debug clone -U . ../c
52 $ hg --debug clone -U . ../c
53 linked 8 files
53 linked 8 files
54 #else
54 #else
55 $ hg --debug clone -U . ../c
55 $ hg --debug clone -U . ../c
56 copied 8 files
56 copied 8 files
57 #endif
57 #endif
58 $ cd ../c
58 $ cd ../c
59 $ cat a 2>/dev/null || echo "a not present"
59 $ cat a 2>/dev/null || echo "a not present"
60 a not present
60 a not present
61 $ hg verify
61 $ hg verify
62 checking changesets
62 checking changesets
63 checking manifests
63 checking manifests
64 crosschecking files in changesets and manifests
64 crosschecking files in changesets and manifests
65 checking files
65 checking files
66 2 files, 11 changesets, 11 total revisions
66 2 files, 11 changesets, 11 total revisions
67
67
68 Default destination:
68 Default destination:
69
69
70 $ mkdir ../d
70 $ mkdir ../d
71 $ cd ../d
71 $ cd ../d
72 $ hg clone ../a
72 $ hg clone ../a
73 destination directory: a
73 destination directory: a
74 updating to branch default
74 updating to branch default
75 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
75 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
76 $ cd a
76 $ cd a
77 $ hg cat a
77 $ hg cat a
78 a
78 a
79 $ cd ../..
79 $ cd ../..
80
80
81 Check that we drop the 'file:' from the path before writing the .hgrc:
81 Check that we drop the 'file:' from the path before writing the .hgrc:
82
82
83 $ hg clone file:a e
83 $ hg clone file:a e
84 updating to branch default
84 updating to branch default
85 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
85 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
86 $ grep 'file:' e/.hg/hgrc
86 $ grep 'file:' e/.hg/hgrc
87 [1]
87 [1]
88
88
89 Check that path aliases are expanded:
89 Check that path aliases are expanded:
90
90
91 $ hg clone -q -U --config 'paths.foobar=a#0' foobar f
91 $ hg clone -q -U --config 'paths.foobar=a#0' foobar f
92 $ hg -R f showconfig paths.default
92 $ hg -R f showconfig paths.default
93 $TESTTMP/a#0 (glob)
93 $TESTTMP/a#0 (glob)

Use --pull:

  $ hg clone --pull a g
  requesting all changes
  adding changesets
  adding manifests
  adding file changes
  added 11 changesets with 11 changes to 2 files
  updating to branch default
  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
  $ hg -R g verify
  checking changesets
  checking manifests
  crosschecking files in changesets and manifests
  checking files
  2 files, 11 changesets, 11 total revisions

Invalid dest '' with --pull must abort (issue2528):

  $ hg clone --pull a ''
  abort: empty destination path is not valid
  [255]

Clone to '.':

  $ mkdir h
  $ cd h
  $ hg clone ../a .
  updating to branch default
  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
  $ cd ..


*** Tests for option -u ***

Adding some more history to repo a:

  $ cd a
  $ hg tag ref1
  $ echo the quick brown fox >a
  $ hg ci -m "hacked default"
  $ hg up ref1
  1 files updated, 0 files merged, 1 files removed, 0 files unresolved
  $ hg branch stable
  marked working directory as branch stable
  (branches are permanent and global, did you want a bookmark?)
  $ echo some text >a
  $ hg ci -m "starting branch stable"
  $ hg tag ref2
  $ echo some more text >a
  $ hg ci -m "another change for branch stable"
  $ hg up ref2
  1 files updated, 0 files merged, 1 files removed, 0 files unresolved
  $ hg parents
  changeset:   13:e8ece76546a6
  branch:      stable
  tag:         ref2
  parent:      10:a7949464abda
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     starting branch stable


Repo a has two heads:

  $ hg heads
  changeset:   15:0aae7cf88f0d
  branch:      stable
  tag:         tip
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     another change for branch stable

  changeset:   12:f21241060d6a
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     hacked default


  $ cd ..


Testing --noupdate with --updaterev (must abort):

  $ hg clone --noupdate --updaterev 1 a ua
  abort: cannot specify both --noupdate and --updaterev
  [255]


Testing clone -u:

  $ hg clone -u . a ua
  updating to branch stable
  2 files updated, 0 files merged, 0 files removed, 0 files unresolved

Repo ua has both heads:

  $ hg -R ua heads
  changeset:   15:0aae7cf88f0d
  branch:      stable
  tag:         tip
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     another change for branch stable

  changeset:   12:f21241060d6a
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     hacked default


Same revision checked out in repo a and ua:

  $ hg -R a parents --template "{node|short}\n"
  e8ece76546a6
  $ hg -R ua parents --template "{node|short}\n"
  e8ece76546a6

  $ rm -r ua


Testing clone --pull -u:

  $ hg clone --pull -u . a ua
  requesting all changes
  adding changesets
  adding manifests
  adding file changes
  added 16 changesets with 16 changes to 3 files (+1 heads)
  updating to branch stable
  2 files updated, 0 files merged, 0 files removed, 0 files unresolved

Repo ua has both heads:

  $ hg -R ua heads
  changeset:   15:0aae7cf88f0d
  branch:      stable
  tag:         tip
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     another change for branch stable

  changeset:   12:f21241060d6a
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     hacked default


Same revision checked out in repo a and ua:

  $ hg -R a parents --template "{node|short}\n"
  e8ece76546a6
  $ hg -R ua parents --template "{node|short}\n"
  e8ece76546a6

  $ rm -r ua


Testing clone -u <branch>:

  $ hg clone -u stable a ua
  updating to branch stable
  3 files updated, 0 files merged, 0 files removed, 0 files unresolved

Repo ua has both heads:

  $ hg -R ua heads
  changeset:   15:0aae7cf88f0d
  branch:      stable
  tag:         tip
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     another change for branch stable

  changeset:   12:f21241060d6a
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     hacked default


Branch 'stable' is checked out:

  $ hg -R ua parents
  changeset:   15:0aae7cf88f0d
  branch:      stable
  tag:         tip
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     another change for branch stable


  $ rm -r ua


Testing default checkout:

  $ hg clone a ua
  updating to branch default
  3 files updated, 0 files merged, 0 files removed, 0 files unresolved

Repo ua has both heads:

  $ hg -R ua heads
  changeset:   15:0aae7cf88f0d
  branch:      stable
  tag:         tip
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     another change for branch stable

  changeset:   12:f21241060d6a
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     hacked default


Branch 'default' is checked out:

  $ hg -R ua parents
  changeset:   12:f21241060d6a
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     hacked default


  $ rm -r ua


Testing #<branch>:

  $ hg clone -u . a#stable ua
  adding changesets
  adding manifests
  adding file changes
  added 14 changesets with 14 changes to 3 files
  updating to branch stable
  2 files updated, 0 files merged, 0 files removed, 0 files unresolved

Repo ua has branches 'stable' and 'default' (was changed in fd511e9eeea6):

  $ hg -R ua heads
  changeset:   13:0aae7cf88f0d
  branch:      stable
  tag:         tip
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     another change for branch stable

  changeset:   10:a7949464abda
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     test


Same revision checked out in repo a and ua:

  $ hg -R a parents --template "{node|short}\n"
  e8ece76546a6
  $ hg -R ua parents --template "{node|short}\n"
  e8ece76546a6

  $ rm -r ua


Testing -u -r <branch>:

  $ hg clone -u . -r stable a ua
  adding changesets
  adding manifests
  adding file changes
  added 14 changesets with 14 changes to 3 files
  updating to branch stable
  2 files updated, 0 files merged, 0 files removed, 0 files unresolved

Repo ua has branches 'stable' and 'default' (was changed in fd511e9eeea6):

  $ hg -R ua heads
  changeset:   13:0aae7cf88f0d
  branch:      stable
  tag:         tip
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     another change for branch stable

  changeset:   10:a7949464abda
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     test


Same revision checked out in repo a and ua:

  $ hg -R a parents --template "{node|short}\n"
  e8ece76546a6
  $ hg -R ua parents --template "{node|short}\n"
  e8ece76546a6

  $ rm -r ua


Testing -r <branch>:

  $ hg clone -r stable a ua
  adding changesets
  adding manifests
  adding file changes
  added 14 changesets with 14 changes to 3 files
  updating to branch stable
  3 files updated, 0 files merged, 0 files removed, 0 files unresolved

Repo ua has branches 'stable' and 'default' (was changed in fd511e9eeea6):

  $ hg -R ua heads
  changeset:   13:0aae7cf88f0d
  branch:      stable
  tag:         tip
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     another change for branch stable

  changeset:   10:a7949464abda
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     test


Branch 'stable' is checked out:

  $ hg -R ua parents
  changeset:   13:0aae7cf88f0d
  branch:      stable
  tag:         tip
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     another change for branch stable


  $ rm -r ua


Issue2267: Error in 1.6 hg.py: TypeError: 'NoneType' object is not
iterable in addbranchrevs()

  $ cat <<EOF > simpleclone.py
  > from mercurial import ui, hg
  > myui = ui.ui()
  > repo = hg.repository(myui, 'a')
  > hg.clone(myui, {}, repo, dest="ua")
  > EOF

  $ python simpleclone.py
  updating to branch default
  3 files updated, 0 files merged, 0 files removed, 0 files unresolved

  $ rm -r ua

  $ cat <<EOF > branchclone.py
  > from mercurial import ui, hg, extensions
  > myui = ui.ui()
  > extensions.loadall(myui)
  > repo = hg.repository(myui, 'a')
  > hg.clone(myui, {}, repo, dest="ua", branch=["stable",])
  > EOF

  $ python branchclone.py
  adding changesets
  adding manifests
  adding file changes
  added 14 changesets with 14 changes to 3 files
  updating to branch stable
  3 files updated, 0 files merged, 0 files removed, 0 files unresolved
  $ rm -r ua
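
For reference, not exercised by this test: the same API can also pin the clone
to particular revisions. A minimal sketch, assuming hg.clone() accepts a rev
keyword listing revision identifiers (the identifiers used here are
illustrative):

  from mercurial import ui, hg
  myui = ui.ui()
  repo = hg.repository(myui, 'a')
  # assumption: rev= limits the clone to the given revisions, like -r
  hg.clone(myui, {}, repo, dest="ua", rev=["stable"])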


Testing failures:

  $ mkdir fail
  $ cd fail

No local source

  $ hg clone a b
  abort: repository a not found!
  [255]

No remote source

  $ hg clone http://127.0.0.1:3121/a b
  abort: error: *refused* (glob)
  [255]
  $ rm -rf b # work around bug with http clone


#if unix-permissions

Inaccessible source

  $ mkdir a
  $ chmod 000 a
  $ hg clone a b
  abort: repository a not found!
  [255]

Inaccessible destination

  $ hg init b
  $ cd b
  $ hg clone . ../a
  abort: Permission denied: ../a
  [255]
  $ cd ..
  $ chmod 700 a
  $ rm -r a b

#endif


#if fifo

Source of wrong type

  $ mkfifo a
  $ hg clone a b
  abort: repository a not found!
  [255]
  $ rm a

#endif

Default destination, same directory

  $ hg init q
  $ hg clone q
  destination directory: q
  abort: destination 'q' is not empty
  [255]

Destination directory not empty

  $ mkdir a
  $ echo stuff > a/a
  $ hg clone q a
  abort: destination 'a' is not empty
  [255]


#if unix-permissions

Leave existing directory in place after clone failure

  $ hg init c
  $ cd c
  $ echo c > c
  $ hg commit -A -m test
  adding c
  $ chmod -rx .hg/store/data
  $ cd ..
  $ mkdir d
  $ hg clone c d 2> err
  [255]
  $ test -d d
  $ test -d d/.hg
  [1]

Re-enable permissions to allow deletion

  $ chmod +rx c/.hg/store/data

#endif

  $ cd ..