##// END OF EJS Templates
bookmarks: delegate writing to the repo just like reading...
Augie Fackler -
r15237:7196ed7a default
parent child Browse files
Show More
@@ -1,213 +1,213 b''
1 # Mercurial bookmark support code
1 # Mercurial bookmark support code
2 #
2 #
3 # Copyright 2008 David Soria Parra <dsp@php.net>
3 # Copyright 2008 David Soria Parra <dsp@php.net>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from mercurial.i18n import _
8 from mercurial.i18n import _
9 from mercurial.node import hex
9 from mercurial.node import hex
10 from mercurial import encoding, error, util
10 from mercurial import encoding, error, util
11 import errno, os
11 import errno, os
12
12
def valid(mark):
    '''Report whether *mark* is a legal bookmark name.

    A name is rejected if it contains any of the characters that would
    corrupt the bookmarks file format or tag-like parsing: ':', NUL,
    newline or carriage return.
    '''
    illegal = (':', '\0', '\n', '\r')
    return not any(ch in mark for ch in illegal)
18
18
def read(repo):
    '''Parse .hg/bookmarks file and return a dictionary

    Bookmarks are stored as {HASH}\\s{NAME}\\n (localtags format) values
    in the .hg/bookmarks file.
    Read the file and return a (name=>nodeid) dictionary
    '''
    bookmarks = {}
    try:
        for line in repo.opener('bookmarks'):
            line = line.strip()
            # skip blank lines silently
            if not line:
                continue
            # a valid entry is "<hash> <name>"; warn and skip anything else
            if ' ' not in line:
                repo.ui.warn(_('malformed line in .hg/bookmarks: %r\n') % line)
                continue
            sha, refspec = line.split(' ', 1)
            # bookmark names are stored in UTF-8; convert to local encoding
            refspec = encoding.tolocal(refspec)
            try:
                bookmarks[refspec] = repo.changelog.lookup(sha)
            except error.RepoLookupError:
                # bookmark points at an unknown revision (e.g. stripped);
                # drop it rather than aborting
                pass
    except IOError, inst:
        # a missing bookmarks file simply means "no bookmarks"
        if inst.errno != errno.ENOENT:
            raise
    return bookmarks
45
45
def readcurrent(repo):
    '''Get the current bookmark

    If we use gittishsh branches we have a current bookmark that
    we are on. This function returns the name of the bookmark. It
    is stored in .hg/bookmarks.current
    '''
    mark = None
    try:
        file = repo.opener('bookmarks.current')
    except IOError, inst:
        if inst.errno != errno.ENOENT:
            raise
        # no bookmarks.current file: no bookmark is currently active
        return None
    try:
        # No readline() in posixfile_nt, reading everything is cheap
        mark = encoding.tolocal((file.readlines() or [''])[0])
        # an empty file or a name no longer in the bookmarks dict is
        # treated as "no current bookmark"
        if mark == '' or mark not in repo._bookmarks:
            mark = None
    finally:
        file.close()
    return mark
68
68
def write(repo):
    '''Write bookmarks

    Write the given bookmark => hash dictionary to the .hg/bookmarks file
    in a format equal to those of localtags.

    We also store a backup of the previous state in undo.bookmarks that
    can be copied back on rollback.
    '''
    refs = repo._bookmarks

    # the active bookmark may just have been deleted; deactivate it
    if repo._bookmarkcurrent not in refs:
        setcurrent(repo, None)
    for mark in refs.keys():
        if not valid(mark):
            # i18n fix: interpolate *after* translation so the catalog is
            # looked up with the plain format string (was '%' inside _())
            raise util.Abort(_("bookmark '%s' contains illegal "
                               "character") % mark)

    wlock = repo.wlock()
    try:

        # atomictemp makes the update all-or-nothing for readers
        file = repo.opener('bookmarks', 'w', atomictemp=True)
        for refspec, node in refs.iteritems():
            file.write("%s %s\n" % (hex(node), encoding.fromlocal(refspec)))
        file.close()

        # touch 00changelog.i so hgweb reloads bookmarks (no lock needed)
        try:
            os.utime(repo.sjoin('00changelog.i'), None)
        except OSError:
            pass

    finally:
        wlock.release()
103
103
def setcurrent(repo, mark):
    '''Set the name of the bookmark that we are currently on

    Set the name of the bookmark that we are on (hg update <bookmark>).
    The name is recorded in .hg/bookmarks.current
    '''
    current = repo._bookmarkcurrent
    if current == mark:
        return

    # an unknown bookmark maps to "no current bookmark" (empty marker)
    if mark not in repo._bookmarks:
        mark = ''
    if not valid(mark):
        # i18n fix: interpolate *after* translation so the catalog is
        # looked up with the plain format string (was '%' inside _())
        raise util.Abort(_("bookmark '%s' contains illegal "
                           "character") % mark)

    wlock = repo.wlock()
    try:
        file = repo.opener('bookmarks.current', 'w', atomictemp=True)
        file.write(encoding.fromlocal(mark))
        file.close()
    finally:
        wlock.release()
    # update the in-memory cache only after the file write succeeded
    repo._bookmarkcurrent = mark
128
128
def updatecurrentbookmark(repo, oldnode, curbranch):
    # Move the current bookmark from *oldnode* to the tip of *curbranch*.
    try:
        update(repo, oldnode, repo.branchtags()[curbranch])
    except KeyError:
        if curbranch == "default": # no default branch!
            # fall back to the repository-wide tip
            update(repo, oldnode, repo.lookup("tip"))
        else:
            raise util.Abort(_("branch %s not found") % curbranch)
137
137
def update(repo, parents, node):
    '''Advance the current bookmark to *node*.

    The bookmark only moves when it currently points at one of *parents*
    and *node* is a descendant of that position (a fast-forward); anything
    else leaves the bookmarks untouched.
    '''
    marks = repo._bookmarks
    # renamed from 'update': the old local shadowed this function's name
    moved = False
    mark = repo._bookmarkcurrent
    if mark and marks[mark] in parents:
        old = repo[marks[mark]]
        new = repo[node]
        if new in old.descendants():
            marks[mark] = new.node()
            moved = True
    if moved:
        # delegate persistence to the repo so wrapper classes can intercept
        repo._writebookmarks(marks)
150
150
def listbookmarks(repo):
    '''Return a {name: hexnode} dict of all bookmarks.

    Works on repo types that carry no bookmarks at all (e.g.
    statichttprepository), in which case the result is empty.
    '''
    # We may try to list bookmarks on a repo type that does not
    # support it (e.g., statichttprepository).
    marks = getattr(repo, '_bookmarks', {})
    return dict((name, hex(node)) for name, node in marks.iteritems())
160
160
def pushbookmark(repo, key, old, new):
    '''Move bookmark *key* from hex node *old* to hex node *new*
    (pushkey protocol handler).

    Returns False without touching anything when the bookmark does not
    currently sit at *old* or when *new* is unknown; an empty *new*
    deletes the bookmark. Returns True on success.
    '''
    w = repo.wlock()
    try:
        marks = repo._bookmarks
        # compare-and-swap: refuse if someone moved the bookmark first
        if hex(marks.get(key, '')) != old:
            return False
        if new == '':
            del marks[key]
        else:
            if new not in repo:
                return False
            marks[key] = repo[new].node()
        write(repo)
        return True
    finally:
        w.release()
177
177
def updatefromremote(ui, repo, remote):
    # Fast-forward local bookmarks to the remote's positions; bookmarks
    # whose remote head does not descend from the local one are reported
    # as divergent and left alone.
    ui.debug("checking for updated bookmarks\n")
    rb = remote.listkeys('bookmarks')
    changed = False
    for k in rb.keys():
        if k in repo._bookmarks:
            nr, nl = rb[k], repo._bookmarks[k]
            # only consider remote positions we actually have locally
            if nr in repo:
                cr = repo[nr]
                cl = repo[nl]
                if cl.rev() >= cr.rev():
                    # local bookmark is already at or beyond the remote
                    continue
                if cr in cl.descendants():
                    repo._bookmarks[k] = cr.node()
                    changed = True
                    ui.status(_("updating bookmark %s\n") % k)
                else:
                    ui.warn(_("not updating divergent"
                              " bookmark %s\n") % k)
    if changed:
        write(repo)
199
199
def diff(ui, repo, remote):
    '''Print bookmarks that exist on *remote* but not locally.

    Returns 0 when at least one differing bookmark was printed,
    1 when none were found.
    '''
    ui.status(_("searching for changed bookmarks\n"))

    lmarks = repo.listkeys('bookmarks')
    rmarks = remote.listkeys('bookmarks')

    # renamed from 'diff': the old local shadowed this function's name
    changedmarks = sorted(set(rmarks) - set(lmarks))
    for k in changedmarks:
        ui.write(" %-25s %s\n" % (k, rmarks[k][:12]))

    if not changedmarks:
        ui.status(_("no changed bookmarks found\n"))
        return 1
    return 0
@@ -1,2081 +1,2084 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import bin, hex, nullid, nullrev, short
8 from node import bin, hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import repo, changegroup, subrepo, discovery, pushkey
10 import repo, changegroup, subrepo, discovery, pushkey
11 import changelog, dirstate, filelog, manifest, context, bookmarks
11 import changelog, dirstate, filelog, manifest, context, bookmarks
12 import lock, transaction, store, encoding
12 import lock, transaction, store, encoding
13 import scmutil, util, extensions, hook, error, revset
13 import scmutil, util, extensions, hook, error, revset
14 import match as matchmod
14 import match as matchmod
15 import merge as mergemod
15 import merge as mergemod
16 import tags as tagsmod
16 import tags as tagsmod
17 from lock import release
17 from lock import release
18 import weakref, errno, os, time, inspect
18 import weakref, errno, os, time, inspect
19 propertycache = util.propertycache
19 propertycache = util.propertycache
20 filecache = scmutil.filecache
20 filecache = scmutil.filecache
21
21
class localrepository(repo.repository):
    # wire-protocol capabilities advertised by this repository class
    capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
                        'known', 'getbundle'))
    # requirements that change how revlog data is stored on disk
    supportedformats = set(('revlogv1', 'generaldelta'))
    # full set of requirements this class knows how to open
    supported = supportedformats | set(('store', 'fncache', 'shared',
                                        'dotencode'))
28
    def __init__(self, baseui, path=None, create=False):
        '''Open (or, with create=True, initialize) the repository at *path*.

        Raises error.RepoError when the repository is missing, already
        exists (with create=True), or is otherwise unusable.
        '''
        repo.repository.__init__(self)
        self.root = os.path.realpath(util.expandpath(path))
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.auditor = scmutil.pathauditor(self.root, self._checknested)
        # opener for .hg/, wopener for the working directory
        self.opener = scmutil.opener(self.path)
        self.wopener = scmutil.opener(self.root)
        self.baseui = baseui
        self.ui = baseui.copy()

        try:
            # layer the per-repository hgrc on top of the base config
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    util.makedirs(path)
                util.makedir(self.path, notindexed=True)
                # decide on-disk format from the 'format' config section
                requirements = ["revlogv1"]
                if self.ui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                # create an invalid changelog
                self.opener.append(
                    "00changelog.i",
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                    )
                if self.ui.configbool('format', 'generaldelta', False):
                    requirements.append("generaldelta")
                requirements = set(requirements)
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                # existing repo: read its requirements (rejects unsupported)
                requirements = scmutil.readrequires(self.opener, self.supported)
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                # pre-requirements-era repository
                requirements = set()

        # resolve share extension indirection, if any
        self.sharedpath = self.path
        try:
            s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
            if not os.path.exists(s):
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        # set up store access (possibly via the shared path)
        self.store = store.store(requirements, self.sharedpath, scmutil.opener)
        self.spath = self.store.path
        self.sopener = self.store.opener
        self.sjoin = self.store.join
        self.opener.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()


        # lazily-populated caches and lock/transaction references
        self._branchcache = None
        self._branchcachetip = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}
112
113 def _applyrequirements(self, requirements):
113 def _applyrequirements(self, requirements):
114 self.requirements = requirements
114 self.requirements = requirements
115 openerreqs = set(('revlogv1', 'generaldelta'))
115 openerreqs = set(('revlogv1', 'generaldelta'))
116 self.sopener.options = dict((r, 1) for r in requirements
116 self.sopener.options = dict((r, 1) for r in requirements
117 if r in openerreqs)
117 if r in openerreqs)
118
118
119 def _writerequirements(self):
119 def _writerequirements(self):
120 reqfile = self.opener("requires", "w")
120 reqfile = self.opener("requires", "w")
121 for r in self.requirements:
121 for r in self.requirements:
122 reqfile.write("%s\n" % r)
122 reqfile.write("%s\n" % r)
123 reqfile.close()
123 reqfile.close()
124
124
    def _checknested(self, path):
        """Determine if path is a legal nested repository.

        *path* is an absolute filesystem path; it is legal when it names
        a subrepository (possibly nested) of the working copy.
        """
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #  $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            # walk prefixes from longest to shortest looking for a subrepo
            prefix = os.sep.join(parts)
            if prefix in ctx.substate:
                if prefix == subpath:
                    return True
                else:
                    # delegate the remainder of the path to the subrepo
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False
161
161
    @filecache('bookmarks')
    def _bookmarks(self):
        # bookmark name -> node map, re-read when .hg/bookmarks changes
        return bookmarks.read(self)
165
165
    @filecache('bookmarks.current')
    def _bookmarkcurrent(self):
        # name of the active bookmark (or None), tracks .hg/bookmarks.current
        return bookmarks.readcurrent(self)
169
169
    def _writebookmarks(self, marks):
        # Delegation point so repo wrapper classes can intercept bookmark
        # writes; the *marks* argument is currently unused — bookmarks.write
        # reads self._bookmarks itself.
        bookmarks.write(self)
172
    @filecache('00changelog.i', True)
    def changelog(self):
        # the changelog revlog, re-opened when 00changelog.i changes
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            # a pretxn hook is running: also read not-yet-committed data
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c
178
181
    @filecache('00manifest.i', True)
    def manifest(self):
        # the manifest revlog, re-opened when 00manifest.i changes
        return manifest.manifest(self.sopener)
182
185
    @filecache('dirstate')
    def dirstate(self):
        # warned is a one-element list so the nested function can mutate it
        # (py2 has no 'nonlocal')
        warned = [0]
        def validate(node):
            # map a working-directory parent that is unknown to the
            # changelog (e.g. after strip) to nullid, warning once
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)
198
201
199 def __getitem__(self, changeid):
202 def __getitem__(self, changeid):
200 if changeid is None:
203 if changeid is None:
201 return context.workingctx(self)
204 return context.workingctx(self)
202 return context.changectx(self, changeid)
205 return context.changectx(self, changeid)
203
206
    def __contains__(self, changeid):
        # membership test: does *changeid* resolve to a revision here?
        try:
            return bool(self.lookup(changeid))
        except error.RepoLookupError:
            return False
209
212
    def __nonzero__(self):
        # a repository object is always truthy, even when empty
        return True
212
215
    def __len__(self):
        # number of revisions, as recorded by the changelog
        return len(self.changelog)
215
218
    def __iter__(self):
        # iterate revision numbers from 0 to tip
        for i in xrange(len(self)):
            yield i
219
222
    def set(self, expr, *args):
        '''
        Yield a context for each matching revision, after doing arg
        replacement via revset.formatspec
        '''

        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        # evaluate the revset over every revision and yield contexts
        for r in m(self, range(len(self))):
            yield self[r]
230
233
    def url(self):
        # the canonical URL for a local repository is a file: path
        return 'file:' + self.root
233
236
    def hook(self, name, throw=False, **args):
        # run hook *name*; with throw=True a failing hook raises instead
        # of just reporting
        return hook.hook(self.ui, self, name, throw, **args)
236
239
    # characters that may never appear in a tag name (enforced by _tag)
    tag_disallowed = ':\r\n'
238
241
    def _tag(self, names, node, message, local, user, date, extra={}):
        '''Record tag(s) *names* for *node*; worker behind tag().

        *names* may be a single string or a sequence of names. With
        local=True the tags go to .hg/localtags; otherwise .hgtags is
        updated and committed, and the new changeset node is returned.

        NOTE(review): *extra* uses a mutable default dict; it is only
        passed through to commit() here — verify commit() never mutates it.
        '''
        if isinstance(names, str):
            allchars = names
            names = (names,)
        else:
            allchars = ''.join(names)
        # reject names containing any disallowed character
        for c in self.tag_disallowed:
            if c in allchars:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            # append tag entries to fp; munge converts the name's encoding
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if self._tagscache.tagtypes and name in self._tagscache.tagtypes:
                    # re-record the old node first so history shows the move
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError, e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        # commit only the .hgtags change
        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode
308
311
    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            # committing modifies .hgtags, so refuse if the working copy
            # already has uncommitted .hgtags changes
            for x in self.status()[:5]:
                if '.hgtags' in x:
                    raise util.Abort(_('working copy of .hgtags is changed '
                                       '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)
338
341
    @propertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                # derived caches, filled in lazily by other accessors
                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache
360
363
361 def tags(self):
364 def tags(self):
362 '''return a mapping of tag to node'''
365 '''return a mapping of tag to node'''
363 return self._tagscache.tags
366 return self._tagscache.tags
364
367
365 def _findtags(self):
368 def _findtags(self):
366 '''Do the hard work of finding tags. Return a pair of dicts
369 '''Do the hard work of finding tags. Return a pair of dicts
367 (tags, tagtypes) where tags maps tag name to node, and tagtypes
370 (tags, tagtypes) where tags maps tag name to node, and tagtypes
368 maps tag name to a string like \'global\' or \'local\'.
371 maps tag name to a string like \'global\' or \'local\'.
369 Subclasses or extensions are free to add their own tags, but
372 Subclasses or extensions are free to add their own tags, but
370 should be aware that the returned dicts will be retained for the
373 should be aware that the returned dicts will be retained for the
371 duration of the localrepo object.'''
374 duration of the localrepo object.'''
372
375
373 # XXX what tagtype should subclasses/extensions use? Currently
376 # XXX what tagtype should subclasses/extensions use? Currently
374 # mq and bookmarks add tags, but do not set the tagtype at all.
377 # mq and bookmarks add tags, but do not set the tagtype at all.
375 # Should each extension invent its own tag type? Should there
378 # Should each extension invent its own tag type? Should there
376 # be one tagtype for all such "virtual" tags? Or is the status
379 # be one tagtype for all such "virtual" tags? Or is the status
377 # quo fine?
380 # quo fine?
378
381
379 alltags = {} # map tag name to (node, hist)
382 alltags = {} # map tag name to (node, hist)
380 tagtypes = {}
383 tagtypes = {}
381
384
382 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
385 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
383 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
386 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
384
387
385 # Build the return dicts. Have to re-encode tag names because
388 # Build the return dicts. Have to re-encode tag names because
386 # the tags module always uses UTF-8 (in order not to lose info
389 # the tags module always uses UTF-8 (in order not to lose info
387 # writing to the cache), but the rest of Mercurial wants them in
390 # writing to the cache), but the rest of Mercurial wants them in
388 # local encoding.
391 # local encoding.
389 tags = {}
392 tags = {}
390 for (name, (node, hist)) in alltags.iteritems():
393 for (name, (node, hist)) in alltags.iteritems():
391 if node != nullid:
394 if node != nullid:
392 try:
395 try:
393 # ignore tags to unknown nodes
396 # ignore tags to unknown nodes
394 self.changelog.lookup(node)
397 self.changelog.lookup(node)
395 tags[encoding.tolocal(name)] = node
398 tags[encoding.tolocal(name)] = node
396 except error.LookupError:
399 except error.LookupError:
397 pass
400 pass
398 tags['tip'] = self.changelog.tip()
401 tags['tip'] = self.changelog.tip()
399 tagtypes = dict([(encoding.tolocal(name), value)
402 tagtypes = dict([(encoding.tolocal(name), value)
400 for (name, value) in tagtypes.iteritems()])
403 for (name, value) in tagtypes.iteritems()])
401 return (tags, tagtypes)
404 return (tags, tagtypes)
402
405
403 def tagtype(self, tagname):
406 def tagtype(self, tagname):
404 '''
407 '''
405 return the type of the given tag. result can be:
408 return the type of the given tag. result can be:
406
409
407 'local' : a local tag
410 'local' : a local tag
408 'global' : a global tag
411 'global' : a global tag
409 None : tag does not exist
412 None : tag does not exist
410 '''
413 '''
411
414
412 return self._tagscache.tagtypes.get(tagname)
415 return self._tagscache.tagtypes.get(tagname)
413
416
414 def tagslist(self):
417 def tagslist(self):
415 '''return a list of tags ordered by revision'''
418 '''return a list of tags ordered by revision'''
416 if not self._tagscache.tagslist:
419 if not self._tagscache.tagslist:
417 l = []
420 l = []
418 for t, n in self.tags().iteritems():
421 for t, n in self.tags().iteritems():
419 r = self.changelog.rev(n)
422 r = self.changelog.rev(n)
420 l.append((r, t, n))
423 l.append((r, t, n))
421 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
424 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
422
425
423 return self._tagscache.tagslist
426 return self._tagscache.tagslist
424
427
425 def nodetags(self, node):
428 def nodetags(self, node):
426 '''return the tags associated with a node'''
429 '''return the tags associated with a node'''
427 if not self._tagscache.nodetagscache:
430 if not self._tagscache.nodetagscache:
428 nodetagscache = {}
431 nodetagscache = {}
429 for t, n in self.tags().iteritems():
432 for t, n in self.tags().iteritems():
430 nodetagscache.setdefault(n, []).append(t)
433 nodetagscache.setdefault(n, []).append(t)
431 for tags in nodetagscache.itervalues():
434 for tags in nodetagscache.itervalues():
432 tags.sort()
435 tags.sort()
433 self._tagscache.nodetagscache = nodetagscache
436 self._tagscache.nodetagscache = nodetagscache
434 return self._tagscache.nodetagscache.get(node, [])
437 return self._tagscache.nodetagscache.get(node, [])
435
438
436 def nodebookmarks(self, node):
439 def nodebookmarks(self, node):
437 marks = []
440 marks = []
438 for bookmark, n in self._bookmarks.iteritems():
441 for bookmark, n in self._bookmarks.iteritems():
439 if n == node:
442 if n == node:
440 marks.append(bookmark)
443 marks.append(bookmark)
441 return sorted(marks)
444 return sorted(marks)
442
445
443 def _branchtags(self, partial, lrev):
446 def _branchtags(self, partial, lrev):
444 # TODO: rename this function?
447 # TODO: rename this function?
445 tiprev = len(self) - 1
448 tiprev = len(self) - 1
446 if lrev != tiprev:
449 if lrev != tiprev:
447 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
450 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
448 self._updatebranchcache(partial, ctxgen)
451 self._updatebranchcache(partial, ctxgen)
449 self._writebranchcache(partial, self.changelog.tip(), tiprev)
452 self._writebranchcache(partial, self.changelog.tip(), tiprev)
450
453
451 return partial
454 return partial
452
455
453 def updatebranchcache(self):
456 def updatebranchcache(self):
454 tip = self.changelog.tip()
457 tip = self.changelog.tip()
455 if self._branchcache is not None and self._branchcachetip == tip:
458 if self._branchcache is not None and self._branchcachetip == tip:
456 return self._branchcache
459 return self._branchcache
457
460
458 oldtip = self._branchcachetip
461 oldtip = self._branchcachetip
459 self._branchcachetip = tip
462 self._branchcachetip = tip
460 if oldtip is None or oldtip not in self.changelog.nodemap:
463 if oldtip is None or oldtip not in self.changelog.nodemap:
461 partial, last, lrev = self._readbranchcache()
464 partial, last, lrev = self._readbranchcache()
462 else:
465 else:
463 lrev = self.changelog.rev(oldtip)
466 lrev = self.changelog.rev(oldtip)
464 partial = self._branchcache
467 partial = self._branchcache
465
468
466 self._branchtags(partial, lrev)
469 self._branchtags(partial, lrev)
467 # this private cache holds all heads (not just tips)
470 # this private cache holds all heads (not just tips)
468 self._branchcache = partial
471 self._branchcache = partial
469
472
470 def branchmap(self):
473 def branchmap(self):
471 '''returns a dictionary {branch: [branchheads]}'''
474 '''returns a dictionary {branch: [branchheads]}'''
472 self.updatebranchcache()
475 self.updatebranchcache()
473 return self._branchcache
476 return self._branchcache
474
477
475 def branchtags(self):
478 def branchtags(self):
476 '''return a dict where branch names map to the tipmost head of
479 '''return a dict where branch names map to the tipmost head of
477 the branch, open heads come before closed'''
480 the branch, open heads come before closed'''
478 bt = {}
481 bt = {}
479 for bn, heads in self.branchmap().iteritems():
482 for bn, heads in self.branchmap().iteritems():
480 tip = heads[-1]
483 tip = heads[-1]
481 for h in reversed(heads):
484 for h in reversed(heads):
482 if 'close' not in self.changelog.read(h)[5]:
485 if 'close' not in self.changelog.read(h)[5]:
483 tip = h
486 tip = h
484 break
487 break
485 bt[bn] = tip
488 bt[bn] = tip
486 return bt
489 return bt
487
490
488 def _readbranchcache(self):
491 def _readbranchcache(self):
489 partial = {}
492 partial = {}
490 try:
493 try:
491 f = self.opener("cache/branchheads")
494 f = self.opener("cache/branchheads")
492 lines = f.read().split('\n')
495 lines = f.read().split('\n')
493 f.close()
496 f.close()
494 except (IOError, OSError):
497 except (IOError, OSError):
495 return {}, nullid, nullrev
498 return {}, nullid, nullrev
496
499
497 try:
500 try:
498 last, lrev = lines.pop(0).split(" ", 1)
501 last, lrev = lines.pop(0).split(" ", 1)
499 last, lrev = bin(last), int(lrev)
502 last, lrev = bin(last), int(lrev)
500 if lrev >= len(self) or self[lrev].node() != last:
503 if lrev >= len(self) or self[lrev].node() != last:
501 # invalidate the cache
504 # invalidate the cache
502 raise ValueError('invalidating branch cache (tip differs)')
505 raise ValueError('invalidating branch cache (tip differs)')
503 for l in lines:
506 for l in lines:
504 if not l:
507 if not l:
505 continue
508 continue
506 node, label = l.split(" ", 1)
509 node, label = l.split(" ", 1)
507 label = encoding.tolocal(label.strip())
510 label = encoding.tolocal(label.strip())
508 partial.setdefault(label, []).append(bin(node))
511 partial.setdefault(label, []).append(bin(node))
509 except KeyboardInterrupt:
512 except KeyboardInterrupt:
510 raise
513 raise
511 except Exception, inst:
514 except Exception, inst:
512 if self.ui.debugflag:
515 if self.ui.debugflag:
513 self.ui.warn(str(inst), '\n')
516 self.ui.warn(str(inst), '\n')
514 partial, last, lrev = {}, nullid, nullrev
517 partial, last, lrev = {}, nullid, nullrev
515 return partial, last, lrev
518 return partial, last, lrev
516
519
517 def _writebranchcache(self, branches, tip, tiprev):
520 def _writebranchcache(self, branches, tip, tiprev):
518 try:
521 try:
519 f = self.opener("cache/branchheads", "w", atomictemp=True)
522 f = self.opener("cache/branchheads", "w", atomictemp=True)
520 f.write("%s %s\n" % (hex(tip), tiprev))
523 f.write("%s %s\n" % (hex(tip), tiprev))
521 for label, nodes in branches.iteritems():
524 for label, nodes in branches.iteritems():
522 for node in nodes:
525 for node in nodes:
523 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
526 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
524 f.close()
527 f.close()
525 except (IOError, OSError):
528 except (IOError, OSError):
526 pass
529 pass
527
530
528 def _updatebranchcache(self, partial, ctxgen):
531 def _updatebranchcache(self, partial, ctxgen):
529 # collect new branch entries
532 # collect new branch entries
530 newbranches = {}
533 newbranches = {}
531 for c in ctxgen:
534 for c in ctxgen:
532 newbranches.setdefault(c.branch(), []).append(c.node())
535 newbranches.setdefault(c.branch(), []).append(c.node())
533 # if older branchheads are reachable from new ones, they aren't
536 # if older branchheads are reachable from new ones, they aren't
534 # really branchheads. Note checking parents is insufficient:
537 # really branchheads. Note checking parents is insufficient:
535 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
538 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
536 for branch, newnodes in newbranches.iteritems():
539 for branch, newnodes in newbranches.iteritems():
537 bheads = partial.setdefault(branch, [])
540 bheads = partial.setdefault(branch, [])
538 bheads.extend(newnodes)
541 bheads.extend(newnodes)
539 if len(bheads) <= 1:
542 if len(bheads) <= 1:
540 continue
543 continue
541 bheads = sorted(bheads, key=lambda x: self[x].rev())
544 bheads = sorted(bheads, key=lambda x: self[x].rev())
542 # starting from tip means fewer passes over reachable
545 # starting from tip means fewer passes over reachable
543 while newnodes:
546 while newnodes:
544 latest = newnodes.pop()
547 latest = newnodes.pop()
545 if latest not in bheads:
548 if latest not in bheads:
546 continue
549 continue
547 minbhrev = self[bheads[0]].node()
550 minbhrev = self[bheads[0]].node()
548 reachable = self.changelog.reachable(latest, minbhrev)
551 reachable = self.changelog.reachable(latest, minbhrev)
549 reachable.remove(latest)
552 reachable.remove(latest)
550 if reachable:
553 if reachable:
551 bheads = [b for b in bheads if b not in reachable]
554 bheads = [b for b in bheads if b not in reachable]
552 partial[branch] = bheads
555 partial[branch] = bheads
553
556
554 def lookup(self, key):
557 def lookup(self, key):
555 if isinstance(key, int):
558 if isinstance(key, int):
556 return self.changelog.node(key)
559 return self.changelog.node(key)
557 elif key == '.':
560 elif key == '.':
558 return self.dirstate.p1()
561 return self.dirstate.p1()
559 elif key == 'null':
562 elif key == 'null':
560 return nullid
563 return nullid
561 elif key == 'tip':
564 elif key == 'tip':
562 return self.changelog.tip()
565 return self.changelog.tip()
563 n = self.changelog._match(key)
566 n = self.changelog._match(key)
564 if n:
567 if n:
565 return n
568 return n
566 if key in self._bookmarks:
569 if key in self._bookmarks:
567 return self._bookmarks[key]
570 return self._bookmarks[key]
568 if key in self.tags():
571 if key in self.tags():
569 return self.tags()[key]
572 return self.tags()[key]
570 if key in self.branchtags():
573 if key in self.branchtags():
571 return self.branchtags()[key]
574 return self.branchtags()[key]
572 n = self.changelog._partialmatch(key)
575 n = self.changelog._partialmatch(key)
573 if n:
576 if n:
574 return n
577 return n
575
578
576 # can't find key, check if it might have come from damaged dirstate
579 # can't find key, check if it might have come from damaged dirstate
577 if key in self.dirstate.parents():
580 if key in self.dirstate.parents():
578 raise error.Abort(_("working directory has unknown parent '%s'!")
581 raise error.Abort(_("working directory has unknown parent '%s'!")
579 % short(key))
582 % short(key))
580 try:
583 try:
581 if len(key) == 20:
584 if len(key) == 20:
582 key = hex(key)
585 key = hex(key)
583 except TypeError:
586 except TypeError:
584 pass
587 pass
585 raise error.RepoLookupError(_("unknown revision '%s'") % key)
588 raise error.RepoLookupError(_("unknown revision '%s'") % key)
586
589
587 def lookupbranch(self, key, remote=None):
590 def lookupbranch(self, key, remote=None):
588 repo = remote or self
591 repo = remote or self
589 if key in repo.branchmap():
592 if key in repo.branchmap():
590 return key
593 return key
591
594
592 repo = (remote and remote.local()) and remote or self
595 repo = (remote and remote.local()) and remote or self
593 return repo[key].branch()
596 return repo[key].branch()
594
597
595 def known(self, nodes):
598 def known(self, nodes):
596 nm = self.changelog.nodemap
599 nm = self.changelog.nodemap
597 return [(n in nm) for n in nodes]
600 return [(n in nm) for n in nodes]
598
601
599 def local(self):
602 def local(self):
600 return self
603 return self
601
604
602 def join(self, f):
605 def join(self, f):
603 return os.path.join(self.path, f)
606 return os.path.join(self.path, f)
604
607
605 def wjoin(self, f):
608 def wjoin(self, f):
606 return os.path.join(self.root, f)
609 return os.path.join(self.root, f)
607
610
608 def file(self, f):
611 def file(self, f):
609 if f[0] == '/':
612 if f[0] == '/':
610 f = f[1:]
613 f = f[1:]
611 return filelog.filelog(self.sopener, f)
614 return filelog.filelog(self.sopener, f)
612
615
613 def changectx(self, changeid):
616 def changectx(self, changeid):
614 return self[changeid]
617 return self[changeid]
615
618
616 def parents(self, changeid=None):
619 def parents(self, changeid=None):
617 '''get list of changectxs for parents of changeid'''
620 '''get list of changectxs for parents of changeid'''
618 return self[changeid].parents()
621 return self[changeid].parents()
619
622
620 def filectx(self, path, changeid=None, fileid=None):
623 def filectx(self, path, changeid=None, fileid=None):
621 """changeid can be a changeset revision, node, or tag.
624 """changeid can be a changeset revision, node, or tag.
622 fileid can be a file revision or node."""
625 fileid can be a file revision or node."""
623 return context.filectx(self, path, changeid, fileid)
626 return context.filectx(self, path, changeid, fileid)
624
627
625 def getcwd(self):
628 def getcwd(self):
626 return self.dirstate.getcwd()
629 return self.dirstate.getcwd()
627
630
628 def pathto(self, f, cwd=None):
631 def pathto(self, f, cwd=None):
629 return self.dirstate.pathto(f, cwd)
632 return self.dirstate.pathto(f, cwd)
630
633
631 def wfile(self, f, mode='r'):
634 def wfile(self, f, mode='r'):
632 return self.wopener(f, mode)
635 return self.wopener(f, mode)
633
636
634 def _link(self, f):
637 def _link(self, f):
635 return os.path.islink(self.wjoin(f))
638 return os.path.islink(self.wjoin(f))
636
639
637 def _loadfilter(self, filter):
640 def _loadfilter(self, filter):
638 if filter not in self.filterpats:
641 if filter not in self.filterpats:
639 l = []
642 l = []
640 for pat, cmd in self.ui.configitems(filter):
643 for pat, cmd in self.ui.configitems(filter):
641 if cmd == '!':
644 if cmd == '!':
642 continue
645 continue
643 mf = matchmod.match(self.root, '', [pat])
646 mf = matchmod.match(self.root, '', [pat])
644 fn = None
647 fn = None
645 params = cmd
648 params = cmd
646 for name, filterfn in self._datafilters.iteritems():
649 for name, filterfn in self._datafilters.iteritems():
647 if cmd.startswith(name):
650 if cmd.startswith(name):
648 fn = filterfn
651 fn = filterfn
649 params = cmd[len(name):].lstrip()
652 params = cmd[len(name):].lstrip()
650 break
653 break
651 if not fn:
654 if not fn:
652 fn = lambda s, c, **kwargs: util.filter(s, c)
655 fn = lambda s, c, **kwargs: util.filter(s, c)
653 # Wrap old filters not supporting keyword arguments
656 # Wrap old filters not supporting keyword arguments
654 if not inspect.getargspec(fn)[2]:
657 if not inspect.getargspec(fn)[2]:
655 oldfn = fn
658 oldfn = fn
656 fn = lambda s, c, **kwargs: oldfn(s, c)
659 fn = lambda s, c, **kwargs: oldfn(s, c)
657 l.append((mf, fn, params))
660 l.append((mf, fn, params))
658 self.filterpats[filter] = l
661 self.filterpats[filter] = l
659 return self.filterpats[filter]
662 return self.filterpats[filter]
660
663
661 def _filter(self, filterpats, filename, data):
664 def _filter(self, filterpats, filename, data):
662 for mf, fn, cmd in filterpats:
665 for mf, fn, cmd in filterpats:
663 if mf(filename):
666 if mf(filename):
664 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
667 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
665 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
668 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
666 break
669 break
667
670
668 return data
671 return data
669
672
670 @propertycache
673 @propertycache
671 def _encodefilterpats(self):
674 def _encodefilterpats(self):
672 return self._loadfilter('encode')
675 return self._loadfilter('encode')
673
676
674 @propertycache
677 @propertycache
675 def _decodefilterpats(self):
678 def _decodefilterpats(self):
676 return self._loadfilter('decode')
679 return self._loadfilter('decode')
677
680
678 def adddatafilter(self, name, filter):
681 def adddatafilter(self, name, filter):
679 self._datafilters[name] = filter
682 self._datafilters[name] = filter
680
683
681 def wread(self, filename):
684 def wread(self, filename):
682 if self._link(filename):
685 if self._link(filename):
683 data = os.readlink(self.wjoin(filename))
686 data = os.readlink(self.wjoin(filename))
684 else:
687 else:
685 data = self.wopener.read(filename)
688 data = self.wopener.read(filename)
686 return self._filter(self._encodefilterpats, filename, data)
689 return self._filter(self._encodefilterpats, filename, data)
687
690
688 def wwrite(self, filename, data, flags):
691 def wwrite(self, filename, data, flags):
689 data = self._filter(self._decodefilterpats, filename, data)
692 data = self._filter(self._decodefilterpats, filename, data)
690 if 'l' in flags:
693 if 'l' in flags:
691 self.wopener.symlink(data, filename)
694 self.wopener.symlink(data, filename)
692 else:
695 else:
693 self.wopener.write(filename, data)
696 self.wopener.write(filename, data)
694 if 'x' in flags:
697 if 'x' in flags:
695 util.setflags(self.wjoin(filename), False, True)
698 util.setflags(self.wjoin(filename), False, True)
696
699
697 def wwritedata(self, filename, data):
700 def wwritedata(self, filename, data):
698 return self._filter(self._decodefilterpats, filename, data)
701 return self._filter(self._decodefilterpats, filename, data)
699
702
700 def transaction(self, desc):
703 def transaction(self, desc):
701 tr = self._transref and self._transref() or None
704 tr = self._transref and self._transref() or None
702 if tr and tr.running():
705 if tr and tr.running():
703 return tr.nest()
706 return tr.nest()
704
707
705 # abort here if the journal already exists
708 # abort here if the journal already exists
706 if os.path.exists(self.sjoin("journal")):
709 if os.path.exists(self.sjoin("journal")):
707 raise error.RepoError(
710 raise error.RepoError(
708 _("abandoned transaction found - run hg recover"))
711 _("abandoned transaction found - run hg recover"))
709
712
710 journalfiles = self._writejournal(desc)
713 journalfiles = self._writejournal(desc)
711 renames = [(x, undoname(x)) for x in journalfiles]
714 renames = [(x, undoname(x)) for x in journalfiles]
712
715
713 tr = transaction.transaction(self.ui.warn, self.sopener,
716 tr = transaction.transaction(self.ui.warn, self.sopener,
714 self.sjoin("journal"),
717 self.sjoin("journal"),
715 aftertrans(renames),
718 aftertrans(renames),
716 self.store.createmode)
719 self.store.createmode)
717 self._transref = weakref.ref(tr)
720 self._transref = weakref.ref(tr)
718 return tr
721 return tr
719
722
720 def _writejournal(self, desc):
723 def _writejournal(self, desc):
721 # save dirstate for rollback
724 # save dirstate for rollback
722 try:
725 try:
723 ds = self.opener.read("dirstate")
726 ds = self.opener.read("dirstate")
724 except IOError:
727 except IOError:
725 ds = ""
728 ds = ""
726 self.opener.write("journal.dirstate", ds)
729 self.opener.write("journal.dirstate", ds)
727 self.opener.write("journal.branch",
730 self.opener.write("journal.branch",
728 encoding.fromlocal(self.dirstate.branch()))
731 encoding.fromlocal(self.dirstate.branch()))
729 self.opener.write("journal.desc",
732 self.opener.write("journal.desc",
730 "%d\n%s\n" % (len(self), desc))
733 "%d\n%s\n" % (len(self), desc))
731
734
732 bkname = self.join('bookmarks')
735 bkname = self.join('bookmarks')
733 if os.path.exists(bkname):
736 if os.path.exists(bkname):
734 util.copyfile(bkname, self.join('journal.bookmarks'))
737 util.copyfile(bkname, self.join('journal.bookmarks'))
735 else:
738 else:
736 self.opener.write('journal.bookmarks', '')
739 self.opener.write('journal.bookmarks', '')
737
740
738 return (self.sjoin('journal'), self.join('journal.dirstate'),
741 return (self.sjoin('journal'), self.join('journal.dirstate'),
739 self.join('journal.branch'), self.join('journal.desc'),
742 self.join('journal.branch'), self.join('journal.desc'),
740 self.join('journal.bookmarks'))
743 self.join('journal.bookmarks'))
741
744
742 def recover(self):
745 def recover(self):
743 lock = self.lock()
746 lock = self.lock()
744 try:
747 try:
745 if os.path.exists(self.sjoin("journal")):
748 if os.path.exists(self.sjoin("journal")):
746 self.ui.status(_("rolling back interrupted transaction\n"))
749 self.ui.status(_("rolling back interrupted transaction\n"))
747 transaction.rollback(self.sopener, self.sjoin("journal"),
750 transaction.rollback(self.sopener, self.sjoin("journal"),
748 self.ui.warn)
751 self.ui.warn)
749 self.invalidate()
752 self.invalidate()
750 return True
753 return True
751 else:
754 else:
752 self.ui.warn(_("no interrupted transaction available\n"))
755 self.ui.warn(_("no interrupted transaction available\n"))
753 return False
756 return False
754 finally:
757 finally:
755 lock.release()
758 lock.release()
756
759
757 def rollback(self, dryrun=False, force=False):
760 def rollback(self, dryrun=False, force=False):
758 wlock = lock = None
761 wlock = lock = None
759 try:
762 try:
760 wlock = self.wlock()
763 wlock = self.wlock()
761 lock = self.lock()
764 lock = self.lock()
762 if os.path.exists(self.sjoin("undo")):
765 if os.path.exists(self.sjoin("undo")):
763 return self._rollback(dryrun, force)
766 return self._rollback(dryrun, force)
764 else:
767 else:
765 self.ui.warn(_("no rollback information available\n"))
768 self.ui.warn(_("no rollback information available\n"))
766 return 1
769 return 1
767 finally:
770 finally:
768 release(lock, wlock)
771 release(lock, wlock)
769
772
770 def _rollback(self, dryrun, force):
773 def _rollback(self, dryrun, force):
771 ui = self.ui
774 ui = self.ui
772 try:
775 try:
773 args = self.opener.read('undo.desc').splitlines()
776 args = self.opener.read('undo.desc').splitlines()
774 (oldlen, desc, detail) = (int(args[0]), args[1], None)
777 (oldlen, desc, detail) = (int(args[0]), args[1], None)
775 if len(args) >= 3:
778 if len(args) >= 3:
776 detail = args[2]
779 detail = args[2]
777 oldtip = oldlen - 1
780 oldtip = oldlen - 1
778
781
779 if detail and ui.verbose:
782 if detail and ui.verbose:
780 msg = (_('repository tip rolled back to revision %s'
783 msg = (_('repository tip rolled back to revision %s'
781 ' (undo %s: %s)\n')
784 ' (undo %s: %s)\n')
782 % (oldtip, desc, detail))
785 % (oldtip, desc, detail))
783 else:
786 else:
784 msg = (_('repository tip rolled back to revision %s'
787 msg = (_('repository tip rolled back to revision %s'
785 ' (undo %s)\n')
788 ' (undo %s)\n')
786 % (oldtip, desc))
789 % (oldtip, desc))
787 except IOError:
790 except IOError:
788 msg = _('rolling back unknown transaction\n')
791 msg = _('rolling back unknown transaction\n')
789 desc = None
792 desc = None
790
793
791 if not force and self['.'] != self['tip'] and desc == 'commit':
794 if not force and self['.'] != self['tip'] and desc == 'commit':
792 raise util.Abort(
795 raise util.Abort(
793 _('rollback of last commit while not checked out '
796 _('rollback of last commit while not checked out '
794 'may lose data'), hint=_('use -f to force'))
797 'may lose data'), hint=_('use -f to force'))
795
798
796 ui.status(msg)
799 ui.status(msg)
797 if dryrun:
800 if dryrun:
798 return 0
801 return 0
799
802
800 parents = self.dirstate.parents()
803 parents = self.dirstate.parents()
801 transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
804 transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
802 if os.path.exists(self.join('undo.bookmarks')):
805 if os.path.exists(self.join('undo.bookmarks')):
803 util.rename(self.join('undo.bookmarks'),
806 util.rename(self.join('undo.bookmarks'),
804 self.join('bookmarks'))
807 self.join('bookmarks'))
805 self.invalidate()
808 self.invalidate()
806
809
807 parentgone = (parents[0] not in self.changelog.nodemap or
810 parentgone = (parents[0] not in self.changelog.nodemap or
808 parents[1] not in self.changelog.nodemap)
811 parents[1] not in self.changelog.nodemap)
809 if parentgone:
812 if parentgone:
810 util.rename(self.join('undo.dirstate'), self.join('dirstate'))
813 util.rename(self.join('undo.dirstate'), self.join('dirstate'))
811 try:
814 try:
812 branch = self.opener.read('undo.branch')
815 branch = self.opener.read('undo.branch')
813 self.dirstate.setbranch(branch)
816 self.dirstate.setbranch(branch)
814 except IOError:
817 except IOError:
815 ui.warn(_('named branch could not be reset: '
818 ui.warn(_('named branch could not be reset: '
816 'current branch is still \'%s\'\n')
819 'current branch is still \'%s\'\n')
817 % self.dirstate.branch())
820 % self.dirstate.branch())
818
821
819 self.dirstate.invalidate()
822 self.dirstate.invalidate()
820 self.destroyed()
823 self.destroyed()
821 parents = tuple([p.rev() for p in self.parents()])
824 parents = tuple([p.rev() for p in self.parents()])
822 if len(parents) > 1:
825 if len(parents) > 1:
823 ui.status(_('working directory now based on '
826 ui.status(_('working directory now based on '
824 'revisions %d and %d\n') % parents)
827 'revisions %d and %d\n') % parents)
825 else:
828 else:
826 ui.status(_('working directory now based on '
829 ui.status(_('working directory now based on '
827 'revision %d\n') % parents)
830 'revision %d\n') % parents)
828 return 0
831 return 0
829
832
830 def invalidatecaches(self):
833 def invalidatecaches(self):
831 try:
834 try:
832 delattr(self, '_tagscache')
835 delattr(self, '_tagscache')
833 except AttributeError:
836 except AttributeError:
834 pass
837 pass
835
838
836 self._branchcache = None # in UTF-8
839 self._branchcache = None # in UTF-8
837 self._branchcachetip = None
840 self._branchcachetip = None
838
841
839 def invalidatedirstate(self):
842 def invalidatedirstate(self):
840 '''Invalidates the dirstate, causing the next call to dirstate
843 '''Invalidates the dirstate, causing the next call to dirstate
841 to check if it was modified since the last time it was read,
844 to check if it was modified since the last time it was read,
842 rereading it if it has.
845 rereading it if it has.
843
846
844 This is different to dirstate.invalidate() that it doesn't always
847 This is different to dirstate.invalidate() that it doesn't always
845 rereads the dirstate. Use dirstate.invalidate() if you want to
848 rereads the dirstate. Use dirstate.invalidate() if you want to
846 explicitly read the dirstate again (i.e. restoring it to a previous
849 explicitly read the dirstate again (i.e. restoring it to a previous
847 known good state).'''
850 known good state).'''
848 try:
851 try:
849 delattr(self, 'dirstate')
852 delattr(self, 'dirstate')
850 except AttributeError:
853 except AttributeError:
851 pass
854 pass
852
855
def invalidate(self):
    """Drop every file-backed cached attribute, then the derived caches.

    The dirstate is deliberately left alone here; it is invalidated
    separately in invalidatedirstate().
    """
    for name in self._filecache:
        if name == 'dirstate':
            continue
        try:
            delattr(self, name)
        except AttributeError:
            # attribute was never materialized; nothing to drop
            pass
    self.invalidatecaches()
864
867
865 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
868 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
866 try:
869 try:
867 l = lock.lock(lockname, 0, releasefn, desc=desc)
870 l = lock.lock(lockname, 0, releasefn, desc=desc)
868 except error.LockHeld, inst:
871 except error.LockHeld, inst:
869 if not wait:
872 if not wait:
870 raise
873 raise
871 self.ui.warn(_("waiting for lock on %s held by %r\n") %
874 self.ui.warn(_("waiting for lock on %s held by %r\n") %
872 (desc, inst.locker))
875 (desc, inst.locker))
873 # default to 600 seconds timeout
876 # default to 600 seconds timeout
874 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
877 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
875 releasefn, desc=desc)
878 releasefn, desc=desc)
876 if acquirefn:
879 if acquirefn:
877 acquirefn()
880 acquirefn()
878 return l
881 return l
879
882
def lock(self, wait=True):
    '''Lock the repository store (.hg/store) and return a weak reference
    to the lock. Use this before modifying the store (e.g. committing or
    stripping). If you are opening a transaction, get a lock as well.'''
    existing = self._lockref and self._lockref()
    if existing is not None and existing.held:
        # re-enter the lock we already hold instead of taking a new one
        existing.lock()
        return existing

    def releasefn():
        # flush pending store writes and refresh the file cache entries
        # (dirstate is managed by wlock's release function instead)
        self.store.write()
        for name, entry in self._filecache.items():
            if name == 'dirstate':
                continue
            entry.refresh()

    newlock = self._lock(self.sjoin("lock"), wait, releasefn,
                         self.invalidate, _('repository %s') % self.origroot)
    self._lockref = weakref.ref(newlock)
    return newlock
900
903
def wlock(self, wait=True):
    '''Lock the non-store parts of the repository (everything under
    .hg except .hg/store) and return a weak reference to the lock.
    Use this before modifying files in .hg.'''
    existing = self._wlockref and self._wlockref()
    if existing is not None and existing.held:
        # re-enter the lock we already hold instead of taking a new one
        existing.lock()
        return existing

    def releasefn():
        # write out any pending dirstate changes on release
        self.dirstate.write()
        entry = self._filecache.get('dirstate')
        if entry:
            entry.refresh()

    newlock = self._lock(self.join("wlock"), wait, releasefn,
                         self.invalidatedirstate, _('working directory of %s') %
                         self.origroot)
    self._wlockref = weakref.ref(newlock)
    return newlock
921
924
def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
    """
    commit an individual file as part of a larger transaction

    fctx       - filectx providing the file's path, data and copy info
    manifest1  - manifest of the first parent
    manifest2  - manifest of the second parent (empty unless merging)
    linkrev    - changelog revision the new filelog entry links to
    tr         - the active transaction
    changelist - list the filename is appended to when the file changed

    Returns the filelog node to record in the new manifest.
    """

    fname = fctx.path()
    text = fctx.data()
    flog = self.file(fname)
    # parent filelog nodes; fparent2o remembers the original second
    # parent before any copy/ancestor adjustments below
    fparent1 = manifest1.get(fname, nullid)
    fparent2 = fparent2o = manifest2.get(fname, nullid)

    meta = {}
    copy = fctx.renamed()
    if copy and copy[0] != fname:
        # Mark the new revision of this file as a copy of another
        # file.  This copy data will effectively act as a parent
        # of this new revision.  If this is a merge, the first
        # parent will be the nullid (meaning "look up the copy data")
        # and the second one will be the other parent.  For example:
        #
        # 0 --- 1 --- 3   rev1 changes file foo
        #   \       /     rev2 renames foo to bar and changes it
        #    \- 2 -/      rev3 should have bar with all changes and
        #                      should record that bar descends from
        #                      bar in rev2 and foo in rev1
        #
        # this allows this merge to succeed:
        #
        # 0 --- 1 --- 3   rev4 reverts the content change from rev2
        #   \       /     merging rev3 and rev4 should use bar@rev2
        #    \- 2 --- 4        as the merge base
        #

        cfname = copy[0]
        crev = manifest1.get(cfname)
        newfparent = fparent2

        if manifest2: # branch merge
            if fparent2 == nullid or crev is None: # copied on remote side
                if cfname in manifest2:
                    crev = manifest2[cfname]
                    newfparent = fparent1

        # find source in nearest ancestor if we've lost track
        if not crev:
            self.ui.debug(" %s: searching for copy revision for %s\n" %
                          (fname, cfname))
            for ancestor in self[None].ancestors():
                if cfname in ancestor:
                    crev = ancestor[cfname].filenode()
                    break

        if crev:
            self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
            meta["copy"] = cfname
            meta["copyrev"] = hex(crev)
            # first parent becomes nullid so readers look up the copy data
            fparent1, fparent2 = nullid, newfparent
        else:
            self.ui.warn(_("warning: can't find ancestor for '%s' "
                           "copied from '%s'!\n") % (fname, cfname))

    elif fparent2 != nullid:
        # is one parent an ancestor of the other?
        fparentancestor = flog.ancestor(fparent1, fparent2)
        if fparentancestor == fparent1:
            fparent1, fparent2 = fparent2, nullid
        elif fparentancestor == fparent2:
            fparent2 = nullid

    # is the file changed?
    if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
        changelist.append(fname)
        return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

    # are just the flags changed during merge?
    if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
        changelist.append(fname)

    return fparent1
1001
1004
# NOTE(review): `extra={}` is a mutable default argument; it is only read
# here (extra.get), but converting to `extra=None` + normalization would be
# safer against accidental sharing — confirm no caller relies on identity.
def commit(self, text="", user=None, date=None, match=None, force=False,
           editor=False, extra={}):
    """Add a new revision to current repository.

    Revision information is gathered from the working directory,
    match can be used to filter the committed files. If editor is
    supplied, it is called to get a commit message.

    Returns the node of the new changeset, or None when there is
    nothing to commit.
    """

    def fail(f, msg):
        raise util.Abort('%s: %s' % (f, msg))

    if not match:
        match = matchmod.always(self.root, '')

    if not force:
        # record visited directories so explicit dir patterns can be
        # validated below, and abort on bad files
        vdirs = []
        match.dir = vdirs.append
        match.bad = fail

    wlock = self.wlock()
    try:
        wctx = self[None]
        merge = len(wctx.parents()) > 1

        if (not force and merge and match and
            (match.files() or match.anypats())):
            raise util.Abort(_('cannot partially commit a merge '
                               '(do not specify files or patterns)'))

        # changes is the status tuple: 0=modified 1=added 2=removed
        # 3=deleted 4=unknown 5=ignored 6=clean
        changes = self.status(match=match, clean=force)
        if force:
            changes[0].extend(changes[6]) # mq may commit unchanged files

        # check subrepos
        subs = []
        removedsubs = set()
        if '.hgsub' in wctx:
            # only manage subrepos and .hgsubstate if .hgsub is present
            for p in wctx.parents():
                removedsubs.update(s for s in p.substate if match(s))
            for s in wctx.substate:
                removedsubs.discard(s)
                if match(s) and wctx.sub(s).dirty():
                    subs.append(s)
            if (subs or removedsubs):
                if (not match('.hgsub') and
                    '.hgsub' in (wctx.modified() + wctx.added())):
                    raise util.Abort(
                        _("can't commit subrepos without .hgsub"))
                # ensure .hgsubstate is committed alongside the subrepos
                if '.hgsubstate' not in changes[0]:
                    changes[0].insert(0, '.hgsubstate')
                if '.hgsubstate' in changes[2]:
                    changes[2].remove('.hgsubstate')
        elif '.hgsub' in changes[2]:
            # clean up .hgsubstate when .hgsub is removed
            if ('.hgsubstate' in wctx and
                '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
                changes[2].insert(0, '.hgsubstate')

        if subs and not self.ui.configbool('ui', 'commitsubrepos', True):
            changedsubs = [s for s in subs if wctx.sub(s).dirty(True)]
            if changedsubs:
                raise util.Abort(_("uncommitted changes in subrepo %s")
                                 % changedsubs[0])

        # make sure all explicit patterns are matched
        if not force and match.files():
            matched = set(changes[0] + changes[1] + changes[2])

            for f in match.files():
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in changes[3]: # missing
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

        # nothing to commit: no file changes, no branch change, no close
        if (not force and not extra.get("close") and not merge
            and not (changes[0] or changes[1] or changes[2])
            and wctx.branch() == wctx.p1().branch()):
            return None

        ms = mergemod.mergestate(self)
        for f in changes[0]:
            if f in ms and ms[f] == 'u':
                raise util.Abort(_("unresolved merge conflicts "
                                   "(see hg help resolve)"))

        cctx = context.workingctx(self, text, user, date, extra, changes)
        if editor:
            cctx._text = editor(self, cctx, subs)
        edited = (text != cctx._text)

        # commit subs
        if subs or removedsubs:
            state = wctx.substate.copy()
            for s in sorted(subs):
                sub = wctx.sub(s)
                self.ui.status(_('committing subrepository %s\n') %
                               subrepo.subrelpath(sub))
                sr = sub.commit(cctx._text, user, date)
                state[s] = (state[s][0], sr)
            subrepo.writestate(self, state)

        # Save commit message in case this transaction gets rolled back
        # (e.g. by a pretxncommit hook). Leave the content alone on
        # the assumption that the user will use the same editor again.
        msgfn = self.savecommitmessage(cctx._text)

        p1, p2 = self.dirstate.parents()
        hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
        try:
            self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
            ret = self.commitctx(cctx, True)
        # NOTE(review): bare except is deliberate here so the "message
        # saved" note is printed even on KeyboardInterrupt; it re-raises.
        except:
            if edited:
                self.ui.write(
                    _('note: commit message saved in %s\n') % msgfn)
            raise

        # update bookmarks, dirstate and mergestate
        bookmarks.update(self, p1, ret)
        for f in changes[0] + changes[1]:
            self.dirstate.normal(f)
        for f in changes[2]:
            self.dirstate.drop(f)
        self.dirstate.setparents(ret)
        ms.reset()
    finally:
        wlock.release()

    self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
    return ret
1143
1146
def commitctx(self, ctx, error=False):
    """Add a new revision to current repository.
    Revision information is passed via the context argument.

    When *error* is true, IOErrors while reading files are fatal
    instead of silently treating the file as removed.
    Returns the new changelog node.
    """

    tr = lock = None
    removed = list(ctx.removed())
    p1, p2 = ctx.p1(), ctx.p2()
    user = ctx.user()

    lock = self.lock()
    try:
        tr = self.transaction("commit")
        # use a weak proxy so the transaction is not kept alive by
        # the filelog/manifest references created below
        trp = weakref.proxy(tr)

        if ctx.files():
            m1 = p1.manifest().copy()
            m2 = p2.manifest()

            # check in files
            new = {}          # fname -> new filelog node
            changed = []      # files actually changed, filled by _filecommit
            linkrev = len(self)
            for f in sorted(ctx.modified() + ctx.added()):
                self.ui.note(f + "\n")
                try:
                    fctx = ctx[f]
                    new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                              changed)
                    m1.set(f, fctx.flags())
                except OSError, inst:
                    self.ui.warn(_("trouble committing %s!\n") % f)
                    raise
                except IOError, inst:
                    errcode = getattr(inst, 'errno', errno.ENOENT)
                    if error or errcode and errcode != errno.ENOENT:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    else:
                        # file vanished from the working dir: record as removed
                        removed.append(f)

            # update manifest
            m1.update(new)
            # only report files that actually existed in a parent
            removed = [f for f in sorted(removed) if f in m1 or f in m2]
            drop = [f for f in removed if f in m1]
            for f in drop:
                del m1[f]
            mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                   p2.manifestnode(), (new, drop))
            files = changed + removed
        else:
            # no file changes: reuse the first parent's manifest
            mn = p1.manifestnode()
            files = []

        # update changelog
        self.changelog.delayupdate()
        n = self.changelog.add(mn, files, ctx.description(),
                               trp, p1.node(), p2.node(),
                               user, ctx.date(), ctx.extra().copy())
        # lets pretxncommit hooks see the pending changelog writes
        p = lambda: self.changelog.writepending() and self.root or ""
        xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
        self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                  parent2=xp2, pending=p)
        self.changelog.finalize(trp)
        tr.close()

        if self._branchcache:
            self.updatebranchcache()
        return n
    finally:
        if tr:
            tr.release()
        lock.release()
1217
1220
def destroyed(self):
    '''Inform the repository that nodes have been destroyed.
    Intended for use by strip and rollback, so there's a common
    place for anything that has to be done after destroying history.'''
    # XXX it would be nice to receive the list of destroyed nodes,
    # but there is no easy way for rollback() to produce one.

    # Refresh the persistent tag cache now: right after a strip or
    # rollback it only has to deal with destroyed heads, which in
    # turn guarantees that "cachetip == currenttip" (matching both
    # rev and node) always means no nodes were added or destroyed.

    # XXX this is suboptimal when qrefresh'ing: the current head is
    # stripped, the tag cache refreshed, and then a new head added
    # immediately.  It does seem necessary for the "instant tag
    # cache retrieval" case to work, though.
    self.invalidatecaches()
1236
1239
def walk(self, match, node=None):
    """Walk the given changeset (or the working directory when node is
    None) and return all files matched by the match function."""
    ctx = self[node]
    return ctx.walk(match)
1244
1247
def status(self, node1='.', node2=None, match=None,
           ignored=False, clean=False, unknown=False,
           listsubrepos=False):
    """return status of files between two nodes or node and working directory

    If node1 is None, use the first dirstate parent instead.
    If node2 is None, compare node1 with working directory.

    Returns a 7-tuple of sorted lists:
    (modified, added, removed, deleted, unknown, ignored, clean).
    The ignored/clean/unknown lists are only populated when the
    corresponding flag is set.
    """

    def mfmatches(ctx):
        # manifest of ctx restricted to files accepted by `match`
        mf = ctx.manifest().copy()
        for fn in mf.keys():
            if not match(fn):
                del mf[fn]
        return mf

    if isinstance(node1, context.changectx):
        ctx1 = node1
    else:
        ctx1 = self[node1]
    if isinstance(node2, context.changectx):
        ctx2 = node2
    else:
        ctx2 = self[node2]

    working = ctx2.rev() is None
    parentworking = working and ctx1 == self['.']
    match = match or matchmod.always(self.root, self.getcwd())
    listignored, listclean, listunknown = ignored, clean, unknown

    # load earliest manifest first for caching reasons
    if not working and ctx2.rev() < ctx1.rev():
        ctx2.manifest()

    if not parentworking:
        def bad(f, msg):
            # only warn about files absent from the comparison base
            if f not in ctx1:
                self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
        match.bad = bad

    if working: # we need to scan the working dir
        subrepos = []
        if '.hgsub' in self.dirstate:
            subrepos = ctx2.substate.keys()
        s = self.dirstate.status(match, subrepos, listignored,
                                 listclean, listunknown)
        # NOTE: `cmp` (files needing a content compare) shadows the
        # builtin within this method; also rebinds unknown/ignored/clean
        cmp, modified, added, removed, deleted, unknown, ignored, clean = s

        # check for any possibly clean files
        if parentworking and cmp:
            fixup = []
            # do a full compare of any files that might have changed
            for f in sorted(cmp):
                if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                    or ctx1[f].cmp(ctx2[f])):
                    modified.append(f)
                else:
                    fixup.append(f)

            # update dirstate for files that are actually clean
            if fixup:
                if listclean:
                    clean += fixup

                try:
                    # updating the dirstate is optional
                    # so we don't wait on the lock
                    wlock = self.wlock(False)
                    try:
                        for f in fixup:
                            self.dirstate.normal(f)
                    finally:
                        wlock.release()
                except error.LockError:
                    pass

    if not parentworking:
        mf1 = mfmatches(ctx1)
        if working:
            # we are comparing working dir against non-parent
            # generate a pseudo-manifest for the working dir
            mf2 = mfmatches(self['.'])
            for f in cmp + modified + added:
                mf2[f] = None
                mf2.set(f, ctx2.flags(f))
            for f in removed:
                if f in mf2:
                    del mf2[f]
        else:
            # we are comparing two revisions
            deleted, unknown, ignored = [], [], []
            mf2 = mfmatches(ctx2)

        # classify every file of the second manifest; whatever is
        # left in mf1 afterwards must have been removed
        modified, added, clean = [], [], []
        for fn in mf2:
            if fn in mf1:
                if (fn not in deleted and
                    (mf1.flags(fn) != mf2.flags(fn) or
                     (mf1[fn] != mf2[fn] and
                      (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
                    modified.append(fn)
                elif listclean:
                    clean.append(fn)
                del mf1[fn]
            elif fn not in deleted:
                added.append(fn)
        removed = mf1.keys()

    r = modified, added, removed, deleted, unknown, ignored, clean

    if listsubrepos:
        for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
            if working:
                rev2 = None
            else:
                rev2 = ctx2.substate[subpath][1]
            try:
                submatch = matchmod.narrowmatcher(subpath, match)
                s = sub.status(rev2, match=submatch, ignored=listignored,
                               clean=listclean, unknown=listunknown,
                               listsubrepos=True)
                # merge subrepo results into ours, prefixed with its path
                for rfiles, sfiles in zip(r, s):
                    rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
            except error.LookupError:
                self.ui.status(_("skipping missing subrepository: %s\n")
                               % subpath)

    for l in r:
        l.sort()
    return r
1375
1378
def heads(self, start=None):
    """Return all repository heads, newest first.

    If start is not None, only heads reachable from start are
    returned (the filtering is done by changelog.heads itself).
    """
    found = self.changelog.heads(start)
    # present the heads in descending revision order
    return sorted(found, key=self.changelog.rev, reverse=True)
1380
1383
def branchheads(self, branch=None, start=None, closed=False):
    '''return a (possibly filtered) list of heads for the given branch

    Heads are returned in topological order, from newest to oldest.
    If branch is None, use the dirstate branch.
    If start is not None, return only heads reachable from start.
    If closed is True, return heads that are marked as closed as well.
    '''
    if branch is None:
        # self[None] is the working directory context; its branch is
        # the dirstate branch mentioned in the docstring above
        branch = self[None].branch()
    branches = self.branchmap()
    if branch not in branches:
        return []
    # the cache returns heads ordered lowest to highest
    bheads = list(reversed(branches[branch]))
    if start is not None:
        # filter out the heads that cannot be reached from startrev
        fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
        bheads = [h for h in bheads if h in fbheads]
    if not closed:
        # NOTE(review): changelog.read(h)[5] appears to be the per-cset
        # extra metadata; a 'close' key there marks a closed head — confirm
        bheads = [h for h in bheads if
                  ('close' not in self.changelog.read(h)[5])]
    return bheads
1404
1407
def branches(self, nodes):
    # For each starting node, walk first parents until a merge (second
    # parent set) or the repository root (first parent is nullid) is
    # reached, and record a (branchtip, branchbase, p1, p2) tuple for
    # that linear segment. With no nodes given, start at the tip.
    if not nodes:
        nodes = [self.changelog.tip()]
    b = []
    for n in nodes:
        t = n  # top of the current linear segment
        while True:
            p = self.changelog.parents(n)
            if p[1] != nullid or p[0] == nullid:
                # stop at a merge or at the root
                b.append((t, n, p[0], p[1]))
                break
            n = p[0]
    return b
1418
1421
def between(self, pairs):
    """For each (top, bottom) pair, walk first parents from top
    toward bottom and collect the nodes encountered at power-of-two
    distances (1, 2, 4, ...) from top.

    Returns one list of sampled nodes per input pair. The walk stops
    when bottom or the null revision is reached.
    """
    result = []
    for top, bottom in pairs:
        node, sampled = top, []
        distance, threshold = 0, 1
        while node != bottom and node != nullid:
            parent = self.changelog.parents(node)[0]
            if distance == threshold:
                sampled.append(node)
                threshold *= 2
            node = parent
            distance += 1
        result.append(sampled)
    return result
1437
1440
def pull(self, remote, heads=None, force=False):
    """Pull changes from remote into the local repository.

    Runs discovery to find what remote has that we do not, fetches it
    as a changegroup — preferring the getbundle protocol when the
    remote supports it — and adds it locally via addchangegroup().
    Returns 0 when there is nothing to fetch, otherwise the value
    returned by addchangegroup().
    """
    lock = self.lock()
    try:
        tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                           force=force)
        common, fetch, rheads = tmp
        if not fetch:
            self.ui.status(_("no changes found\n"))
            result = 0
        else:
            if heads is None and list(common) == [nullid]:
                self.ui.status(_("requesting all changes\n"))
            elif heads is None and remote.capable('changegroupsubset'):
                # issue1320, avoid a race if remote changed after discovery
                heads = rheads

            if remote.capable('getbundle'):
                # fall back to the discovered remote heads when the
                # caller did not pin a specific set
                cg = remote.getbundle('pull', common=common,
                                      heads=heads or rheads)
            elif heads is None:
                cg = remote.changegroup(fetch, 'pull')
            elif not remote.capable('changegroupsubset'):
                raise util.Abort(_("partial pull cannot be done because "
                                   "other repository doesn't support "
                                   "changegroupsubset."))
            else:
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            result = self.addchangegroup(cg, 'pull', remote.url(),
                                         lock=lock)
    finally:
        lock.release()

    return result
1471
1474
def checkpush(self, force, revs):
    """Hook point invoked before any changesets are pushed.

    The base implementation performs no checks at all. Extensions that
    need additional validation before pushing — or that override the
    push command and want to reuse the check — override or call this.
    """
1478
1481
def push(self, remote, force=False, revs=None, newbranch=False):
    '''Push outgoing changesets (limited by revs) from the current
    repository to remote. Return an integer:
      - 0 means HTTP error *or* nothing to push
      - 1 means we pushed and remote head count is unchanged *or*
        we have outgoing changesets but refused to push
      - other values as described by addchangegroup()
    '''
    # there are two ways to push to remote repo:
    #
    # addchangegroup assumes local user can lock remote
    # repo (local filesystem, old ssh servers).
    #
    # unbundle assumes local user cannot lock remote repo (new ssh
    # servers, http servers).

    self.checkpush(force, revs)
    lock = None
    unbundle = remote.capable('unbundle')
    if not unbundle:
        lock = remote.lock()
    try:
        # prepush computes the changegroup to send (None when there is
        # nothing to push) and the remote's current heads
        cg, remote_heads = discovery.prepush(self, remote, force, revs,
                                             newbranch)
        ret = remote_heads
        if cg is not None:
            if unbundle:
                # local repo finds heads on server, finds out what
                # revs it must push. once revs transferred, if server
                # finds it has different heads (someone else won
                # commit/push race), server aborts.
                if force:
                    remote_heads = ['force']
                # ssh: return remote's addchangegroup()
                # http: return remote's addchangegroup() or 0 for error
                ret = remote.unbundle(cg, remote_heads, 'push')
            else:
                # we return an integer indicating remote head count change
                ret = remote.addchangegroup(cg, 'push', self.url(),
                                            lock=lock)
    finally:
        if lock is not None:
            lock.release()

    # After the changesets are transferred, advance any remote bookmark
    # that the local repository has moved forward: only fast-forward
    # moves (local target descends from the remote one) are pushed.
    self.ui.debug("checking for updated bookmarks\n")
    rb = remote.listkeys('bookmarks')
    for k in rb.keys():
        if k in self._bookmarks:
            # nr: remote node (hex), nl: local node (hex) for bookmark k
            nr, nl = rb[k], hex(self._bookmarks[k])
            if nr in self:
                cr = self[nr]
                cl = self[nl]
                if cl in cr.descendants():
                    r = remote.pushkey('bookmarks', k, nr, nl)
                    if r:
                        self.ui.status(_("updating bookmark %s\n") % k)
                    else:
                        self.ui.warn(_('updating bookmark %s'
                                       ' failed!\n') % k)

    return ret
1540
1543
def changegroupinfo(self, nodes, source):
    """Report how many changesets are about to be bundled.

    The count is printed when the ui is verbose or when bundling to a
    file; with debugging enabled every changeset hash is listed too.
    """
    ui = self.ui
    if source == 'bundle' or ui.verbose:
        ui.status(_("%d changesets found\n") % len(nodes))
    if ui.debugflag:
        ui.debug("list of changesets:\n")
        for n in nodes:
            ui.debug("%s\n" % hex(n))
1548
1551
def changegroupsubset(self, bases, heads, source):
    """Compute a changegroup of all nodes that are descendants of any
    of the bases and ancestors of any of the heads.

    Returns a chunkbuffer object whose read() method will return
    successive changegroup chunks. Determining which filenodes and
    manifest nodes must be included is delegated to
    _changegroupsubset().
    """
    cl = self.changelog
    if not bases:
        bases = [nullid]
    csets, bases, heads = cl.nodesbetween(bases, heads)
    # every ancestor of a (rewritten) base is assumed to be known
    baserevs = [cl.rev(n) for n in bases]
    common = set(cl.ancestors(*baserevs))
    return self._changegroupsubset(common, csets, heads, source)
1569
1572
def getbundle(self, source, heads=None, common=None):
    """Like changegroupsubset, but return the set difference between
    the ancestors of heads and the ancestors of common.

    heads defaults to the local heads; common defaults to [nullid].
    Nodes in common that are unknown locally are silently dropped,
    since the current discovery protocol may report such nodes.
    """
    cl = self.changelog
    if not common:
        common = [nullid]
    else:
        # keep only the common nodes we actually know about
        known = cl.nodemap
        common = [n for n in common if n in known]
    if not heads:
        heads = cl.heads()
    common, missing = cl.findcommonmissing(common, heads)
    if missing:
        return self._changegroupsubset(common, missing, heads, source)
    return None
1591
1594
def _changegroupsubset(self, commonrevs, csets, heads, source):
    """Build a changegroup for the changesets in csets, pruning any
    revlog entry whose linkrev is in commonrevs.

    Falls back to the cheaper _changegroup() when heads covers all the
    local heads. Returns a changegroup.unbundle10 reading lazily from
    a generator of chunks (changelog, then manifests, then filelogs).
    """

    cl = self.changelog
    mf = self.manifest
    mfs = {} # needed manifests
    fnodes = {} # needed file nodes
    changedfiles = set()
    # fstate and count are single-element containers so the lookup()
    # closure below can mutate them (pre-'nonlocal' idiom)
    fstate = ['', {}]
    count = [0]

    # can we go through the fast path ?
    heads.sort()
    if heads == sorted(self.heads()):
        return self._changegroup(csets, source)

    # slow path
    self.hook('preoutgoing', throw=True, source=source)
    self.changegroupinfo(csets, source)

    # filter any nodes that claim to be part of the known set
    def prune(revlog, missing):
        return [n for n in missing
                if revlog.linkrev(revlog.rev(n)) not in commonrevs]

    def lookup(revlog, x):
        # callback used by the bundler: map a node to its owning
        # changeset node while collecting the manifests/filenodes we
        # will need for the later phases
        if revlog == cl:
            c = cl.read(x)
            changedfiles.update(c[3])
            mfs.setdefault(c[0], x)
            count[0] += 1
            self.ui.progress(_('bundling'), count[0],
                             unit=_('changesets'), total=len(csets))
            return x
        elif revlog == mf:
            clnode = mfs[x]
            mdata = mf.readfast(x)
            for f in changedfiles:
                if f in mdata:
                    fnodes.setdefault(f, {}).setdefault(mdata[f], clnode)
            count[0] += 1
            self.ui.progress(_('bundling'), count[0],
                             unit=_('manifests'), total=len(mfs))
            return mfs[x]
        else:
            self.ui.progress(
                _('bundling'), count[0], item=fstate[0],
                unit=_('files'), total=len(changedfiles))
            return fstate[1][x]

    bundler = changegroup.bundle10(lookup)
    reorder = self.ui.config('bundle', 'reorder', 'auto')
    if reorder == 'auto':
        reorder = None
    else:
        reorder = util.parsebool(reorder)

    def gengroup():
        # Create a changenode group generator that will call our functions
        # back to lookup the owning changenode and collect information.
        for chunk in cl.group(csets, bundler, reorder=reorder):
            yield chunk
        self.ui.progress(_('bundling'), None)

        # Create a generator for the manifestnodes that calls our lookup
        # and data collection functions back.
        count[0] = 0
        for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
            yield chunk
        self.ui.progress(_('bundling'), None)

        mfs.clear()

        # Go through all our files in order sorted by name.
        count[0] = 0
        for fname in sorted(changedfiles):
            filerevlog = self.file(fname)
            if not len(filerevlog):
                raise util.Abort(_("empty or missing revlog for %s") % fname)
            fstate[0] = fname
            fstate[1] = fnodes.pop(fname, {})

            nodelist = prune(filerevlog, fstate[1])
            if nodelist:
                count[0] += 1
                yield bundler.fileheader(fname)
                for chunk in filerevlog.group(nodelist, bundler, reorder):
                    yield chunk

        # Signal that no more groups are left.
        yield bundler.close()
        self.ui.progress(_('bundling'), None)

    if csets:
        self.hook('outgoing', node=hex(csets[0]), source=source)

    return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1688
1691
def changegroup(self, basenodes, source):
    """Return a changegroup from basenodes up to all local heads.

    Delegates to changegroupsubset() rather than computing the group
    directly, which avoids a race with concurrent pushes (issue1320).
    """
    allheads = self.heads()
    return self.changegroupsubset(basenodes, allheads, source)
1692
1695
def _changegroup(self, nodes, source):
    """Compute the changegroup of all nodes that we have that a recipient
    doesn't. Return a chunkbuffer object whose read() method will return
    successive changegroup chunks.

    This is much easier than the previous function as we can assume that
    the recipient has any changenode we aren't sending them.

    nodes is the set of nodes to send"""

    cl = self.changelog
    mf = self.manifest
    mfs = {}
    changedfiles = set()
    # fstate and count are single-element containers so the lookup()
    # closure below can mutate them (pre-'nonlocal' idiom)
    fstate = ['']
    count = [0]

    self.hook('preoutgoing', throw=True, source=source)
    self.changegroupinfo(nodes, source)

    # revision numbers of the changesets being sent
    revset = set([cl.rev(n) for n in nodes])

    def gennodelst(log):
        # nodes of this revlog whose introducing changeset is outgoing
        return [log.node(r) for r in log if log.linkrev(r) in revset]

    def lookup(revlog, x):
        # callback used by the bundler: map a node to its owning
        # changeset node, updating progress as a side effect
        if revlog == cl:
            c = cl.read(x)
            changedfiles.update(c[3])
            mfs.setdefault(c[0], x)
            count[0] += 1
            self.ui.progress(_('bundling'), count[0],
                             unit=_('changesets'), total=len(nodes))
            return x
        elif revlog == mf:
            count[0] += 1
            self.ui.progress(_('bundling'), count[0],
                             unit=_('manifests'), total=len(mfs))
            return cl.node(revlog.linkrev(revlog.rev(x)))
        else:
            self.ui.progress(
                _('bundling'), count[0], item=fstate[0],
                total=len(changedfiles), unit=_('files'))
            return cl.node(revlog.linkrev(revlog.rev(x)))

    bundler = changegroup.bundle10(lookup)
    reorder = self.ui.config('bundle', 'reorder', 'auto')
    if reorder == 'auto':
        reorder = None
    else:
        reorder = util.parsebool(reorder)

    def gengroup():
        '''yield a sequence of changegroup chunks (strings)'''
        # construct a list of all changed files

        for chunk in cl.group(nodes, bundler, reorder=reorder):
            yield chunk
        self.ui.progress(_('bundling'), None)

        count[0] = 0
        for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
            yield chunk
        self.ui.progress(_('bundling'), None)

        count[0] = 0
        for fname in sorted(changedfiles):
            filerevlog = self.file(fname)
            if not len(filerevlog):
                raise util.Abort(_("empty or missing revlog for %s") % fname)
            fstate[0] = fname
            nodelist = gennodelst(filerevlog)
            if nodelist:
                count[0] += 1
                yield bundler.fileheader(fname)
                for chunk in filerevlog.group(nodelist, bundler, reorder):
                    yield chunk
        # signal that no more groups are left
        yield bundler.close()
        self.ui.progress(_('bundling'), None)

    if nodes:
        self.hook('outgoing', node=hex(nodes[0]), source=source)

    return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1777
1780
1778 def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
1781 def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
1779 """Add the changegroup returned by source.read() to this repo.
1782 """Add the changegroup returned by source.read() to this repo.
1780 srctype is a string like 'push', 'pull', or 'unbundle'. url is
1783 srctype is a string like 'push', 'pull', or 'unbundle'. url is
1781 the URL of the repo where this changegroup is coming from.
1784 the URL of the repo where this changegroup is coming from.
1782 If lock is not None, the function takes ownership of the lock
1785 If lock is not None, the function takes ownership of the lock
1783 and releases it after the changegroup is added.
1786 and releases it after the changegroup is added.
1784
1787
1785 Return an integer summarizing the change to this repo:
1788 Return an integer summarizing the change to this repo:
1786 - nothing changed or no source: 0
1789 - nothing changed or no source: 0
1787 - more heads than before: 1+added heads (2..n)
1790 - more heads than before: 1+added heads (2..n)
1788 - fewer heads than before: -1-removed heads (-2..-n)
1791 - fewer heads than before: -1-removed heads (-2..-n)
1789 - number of heads stays the same: 1
1792 - number of heads stays the same: 1
1790 """
1793 """
1791 def csmap(x):
1794 def csmap(x):
1792 self.ui.debug("add changeset %s\n" % short(x))
1795 self.ui.debug("add changeset %s\n" % short(x))
1793 return len(cl)
1796 return len(cl)
1794
1797
1795 def revmap(x):
1798 def revmap(x):
1796 return cl.rev(x)
1799 return cl.rev(x)
1797
1800
1798 if not source:
1801 if not source:
1799 return 0
1802 return 0
1800
1803
1801 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1804 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1802
1805
1803 changesets = files = revisions = 0
1806 changesets = files = revisions = 0
1804 efiles = set()
1807 efiles = set()
1805
1808
1806 # write changelog data to temp files so concurrent readers will not see
1809 # write changelog data to temp files so concurrent readers will not see
1807 # inconsistent view
1810 # inconsistent view
1808 cl = self.changelog
1811 cl = self.changelog
1809 cl.delayupdate()
1812 cl.delayupdate()
1810 oldheads = cl.heads()
1813 oldheads = cl.heads()
1811
1814
1812 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
1815 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
1813 try:
1816 try:
1814 trp = weakref.proxy(tr)
1817 trp = weakref.proxy(tr)
1815 # pull off the changeset group
1818 # pull off the changeset group
1816 self.ui.status(_("adding changesets\n"))
1819 self.ui.status(_("adding changesets\n"))
1817 clstart = len(cl)
1820 clstart = len(cl)
1818 class prog(object):
1821 class prog(object):
1819 step = _('changesets')
1822 step = _('changesets')
1820 count = 1
1823 count = 1
1821 ui = self.ui
1824 ui = self.ui
1822 total = None
1825 total = None
1823 def __call__(self):
1826 def __call__(self):
1824 self.ui.progress(self.step, self.count, unit=_('chunks'),
1827 self.ui.progress(self.step, self.count, unit=_('chunks'),
1825 total=self.total)
1828 total=self.total)
1826 self.count += 1
1829 self.count += 1
1827 pr = prog()
1830 pr = prog()
1828 source.callback = pr
1831 source.callback = pr
1829
1832
1830 source.changelogheader()
1833 source.changelogheader()
1831 if (cl.addgroup(source, csmap, trp) is None
1834 if (cl.addgroup(source, csmap, trp) is None
1832 and not emptyok):
1835 and not emptyok):
1833 raise util.Abort(_("received changelog group is empty"))
1836 raise util.Abort(_("received changelog group is empty"))
1834 clend = len(cl)
1837 clend = len(cl)
1835 changesets = clend - clstart
1838 changesets = clend - clstart
1836 for c in xrange(clstart, clend):
1839 for c in xrange(clstart, clend):
1837 efiles.update(self[c].files())
1840 efiles.update(self[c].files())
1838 efiles = len(efiles)
1841 efiles = len(efiles)
1839 self.ui.progress(_('changesets'), None)
1842 self.ui.progress(_('changesets'), None)
1840
1843
1841 # pull off the manifest group
1844 # pull off the manifest group
1842 self.ui.status(_("adding manifests\n"))
1845 self.ui.status(_("adding manifests\n"))
1843 pr.step = _('manifests')
1846 pr.step = _('manifests')
1844 pr.count = 1
1847 pr.count = 1
1845 pr.total = changesets # manifests <= changesets
1848 pr.total = changesets # manifests <= changesets
1846 # no need to check for empty manifest group here:
1849 # no need to check for empty manifest group here:
1847 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1850 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1848 # no new manifest will be created and the manifest group will
1851 # no new manifest will be created and the manifest group will
1849 # be empty during the pull
1852 # be empty during the pull
1850 source.manifestheader()
1853 source.manifestheader()
1851 self.manifest.addgroup(source, revmap, trp)
1854 self.manifest.addgroup(source, revmap, trp)
1852 self.ui.progress(_('manifests'), None)
1855 self.ui.progress(_('manifests'), None)
1853
1856
1854 needfiles = {}
1857 needfiles = {}
1855 if self.ui.configbool('server', 'validate', default=False):
1858 if self.ui.configbool('server', 'validate', default=False):
1856 # validate incoming csets have their manifests
1859 # validate incoming csets have their manifests
1857 for cset in xrange(clstart, clend):
1860 for cset in xrange(clstart, clend):
1858 mfest = self.changelog.read(self.changelog.node(cset))[0]
1861 mfest = self.changelog.read(self.changelog.node(cset))[0]
1859 mfest = self.manifest.readdelta(mfest)
1862 mfest = self.manifest.readdelta(mfest)
1860 # store file nodes we must see
1863 # store file nodes we must see
1861 for f, n in mfest.iteritems():
1864 for f, n in mfest.iteritems():
1862 needfiles.setdefault(f, set()).add(n)
1865 needfiles.setdefault(f, set()).add(n)
1863
1866
1864 # process the files
1867 # process the files
1865 self.ui.status(_("adding file changes\n"))
1868 self.ui.status(_("adding file changes\n"))
1866 pr.step = _('files')
1869 pr.step = _('files')
1867 pr.count = 1
1870 pr.count = 1
1868 pr.total = efiles
1871 pr.total = efiles
1869 source.callback = None
1872 source.callback = None
1870
1873
1871 while True:
1874 while True:
1872 chunkdata = source.filelogheader()
1875 chunkdata = source.filelogheader()
1873 if not chunkdata:
1876 if not chunkdata:
1874 break
1877 break
1875 f = chunkdata["filename"]
1878 f = chunkdata["filename"]
1876 self.ui.debug("adding %s revisions\n" % f)
1879 self.ui.debug("adding %s revisions\n" % f)
1877 pr()
1880 pr()
1878 fl = self.file(f)
1881 fl = self.file(f)
1879 o = len(fl)
1882 o = len(fl)
1880 if fl.addgroup(source, revmap, trp) is None:
1883 if fl.addgroup(source, revmap, trp) is None:
1881 raise util.Abort(_("received file revlog group is empty"))
1884 raise util.Abort(_("received file revlog group is empty"))
1882 revisions += len(fl) - o
1885 revisions += len(fl) - o
1883 files += 1
1886 files += 1
1884 if f in needfiles:
1887 if f in needfiles:
1885 needs = needfiles[f]
1888 needs = needfiles[f]
1886 for new in xrange(o, len(fl)):
1889 for new in xrange(o, len(fl)):
1887 n = fl.node(new)
1890 n = fl.node(new)
1888 if n in needs:
1891 if n in needs:
1889 needs.remove(n)
1892 needs.remove(n)
1890 if not needs:
1893 if not needs:
1891 del needfiles[f]
1894 del needfiles[f]
1892 self.ui.progress(_('files'), None)
1895 self.ui.progress(_('files'), None)
1893
1896
1894 for f, needs in needfiles.iteritems():
1897 for f, needs in needfiles.iteritems():
1895 fl = self.file(f)
1898 fl = self.file(f)
1896 for n in needs:
1899 for n in needs:
1897 try:
1900 try:
1898 fl.rev(n)
1901 fl.rev(n)
1899 except error.LookupError:
1902 except error.LookupError:
1900 raise util.Abort(
1903 raise util.Abort(
1901 _('missing file data for %s:%s - run hg verify') %
1904 _('missing file data for %s:%s - run hg verify') %
1902 (f, hex(n)))
1905 (f, hex(n)))
1903
1906
1904 dh = 0
1907 dh = 0
1905 if oldheads:
1908 if oldheads:
1906 heads = cl.heads()
1909 heads = cl.heads()
1907 dh = len(heads) - len(oldheads)
1910 dh = len(heads) - len(oldheads)
1908 for h in heads:
1911 for h in heads:
1909 if h not in oldheads and 'close' in self[h].extra():
1912 if h not in oldheads and 'close' in self[h].extra():
1910 dh -= 1
1913 dh -= 1
1911 htext = ""
1914 htext = ""
1912 if dh:
1915 if dh:
1913 htext = _(" (%+d heads)") % dh
1916 htext = _(" (%+d heads)") % dh
1914
1917
1915 self.ui.status(_("added %d changesets"
1918 self.ui.status(_("added %d changesets"
1916 " with %d changes to %d files%s\n")
1919 " with %d changes to %d files%s\n")
1917 % (changesets, revisions, files, htext))
1920 % (changesets, revisions, files, htext))
1918
1921
1919 if changesets > 0:
1922 if changesets > 0:
1920 p = lambda: cl.writepending() and self.root or ""
1923 p = lambda: cl.writepending() and self.root or ""
1921 self.hook('pretxnchangegroup', throw=True,
1924 self.hook('pretxnchangegroup', throw=True,
1922 node=hex(cl.node(clstart)), source=srctype,
1925 node=hex(cl.node(clstart)), source=srctype,
1923 url=url, pending=p)
1926 url=url, pending=p)
1924
1927
1925 # make changelog see real files again
1928 # make changelog see real files again
1926 cl.finalize(trp)
1929 cl.finalize(trp)
1927
1930
1928 tr.close()
1931 tr.close()
1929 finally:
1932 finally:
1930 tr.release()
1933 tr.release()
1931 if lock:
1934 if lock:
1932 lock.release()
1935 lock.release()
1933
1936
1934 if changesets > 0:
1937 if changesets > 0:
1935 # forcefully update the on-disk branch cache
1938 # forcefully update the on-disk branch cache
1936 self.ui.debug("updating the branch cache\n")
1939 self.ui.debug("updating the branch cache\n")
1937 self.updatebranchcache()
1940 self.updatebranchcache()
1938 self.hook("changegroup", node=hex(cl.node(clstart)),
1941 self.hook("changegroup", node=hex(cl.node(clstart)),
1939 source=srctype, url=url)
1942 source=srctype, url=url)
1940
1943
1941 for i in xrange(clstart, clend):
1944 for i in xrange(clstart, clend):
1942 self.hook("incoming", node=hex(cl.node(i)),
1945 self.hook("incoming", node=hex(cl.node(i)),
1943 source=srctype, url=url)
1946 source=srctype, url=url)
1944
1947
1945 # never return 0 here:
1948 # never return 0 here:
1946 if dh < 0:
1949 if dh < 0:
1947 return dh - 1
1950 return dh - 1
1948 else:
1951 else:
1949 return dh + 1
1952 return dh + 1
1950
1953
1951 def stream_in(self, remote, requirements):
1954 def stream_in(self, remote, requirements):
1952 lock = self.lock()
1955 lock = self.lock()
1953 try:
1956 try:
1954 fp = remote.stream_out()
1957 fp = remote.stream_out()
1955 l = fp.readline()
1958 l = fp.readline()
1956 try:
1959 try:
1957 resp = int(l)
1960 resp = int(l)
1958 except ValueError:
1961 except ValueError:
1959 raise error.ResponseError(
1962 raise error.ResponseError(
1960 _('Unexpected response from remote server:'), l)
1963 _('Unexpected response from remote server:'), l)
1961 if resp == 1:
1964 if resp == 1:
1962 raise util.Abort(_('operation forbidden by server'))
1965 raise util.Abort(_('operation forbidden by server'))
1963 elif resp == 2:
1966 elif resp == 2:
1964 raise util.Abort(_('locking the remote repository failed'))
1967 raise util.Abort(_('locking the remote repository failed'))
1965 elif resp != 0:
1968 elif resp != 0:
1966 raise util.Abort(_('the server sent an unknown error code'))
1969 raise util.Abort(_('the server sent an unknown error code'))
1967 self.ui.status(_('streaming all changes\n'))
1970 self.ui.status(_('streaming all changes\n'))
1968 l = fp.readline()
1971 l = fp.readline()
1969 try:
1972 try:
1970 total_files, total_bytes = map(int, l.split(' ', 1))
1973 total_files, total_bytes = map(int, l.split(' ', 1))
1971 except (ValueError, TypeError):
1974 except (ValueError, TypeError):
1972 raise error.ResponseError(
1975 raise error.ResponseError(
1973 _('Unexpected response from remote server:'), l)
1976 _('Unexpected response from remote server:'), l)
1974 self.ui.status(_('%d files to transfer, %s of data\n') %
1977 self.ui.status(_('%d files to transfer, %s of data\n') %
1975 (total_files, util.bytecount(total_bytes)))
1978 (total_files, util.bytecount(total_bytes)))
1976 start = time.time()
1979 start = time.time()
1977 for i in xrange(total_files):
1980 for i in xrange(total_files):
1978 # XXX doesn't support '\n' or '\r' in filenames
1981 # XXX doesn't support '\n' or '\r' in filenames
1979 l = fp.readline()
1982 l = fp.readline()
1980 try:
1983 try:
1981 name, size = l.split('\0', 1)
1984 name, size = l.split('\0', 1)
1982 size = int(size)
1985 size = int(size)
1983 except (ValueError, TypeError):
1986 except (ValueError, TypeError):
1984 raise error.ResponseError(
1987 raise error.ResponseError(
1985 _('Unexpected response from remote server:'), l)
1988 _('Unexpected response from remote server:'), l)
1986 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1989 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1987 # for backwards compat, name was partially encoded
1990 # for backwards compat, name was partially encoded
1988 ofp = self.sopener(store.decodedir(name), 'w')
1991 ofp = self.sopener(store.decodedir(name), 'w')
1989 for chunk in util.filechunkiter(fp, limit=size):
1992 for chunk in util.filechunkiter(fp, limit=size):
1990 ofp.write(chunk)
1993 ofp.write(chunk)
1991 ofp.close()
1994 ofp.close()
1992 elapsed = time.time() - start
1995 elapsed = time.time() - start
1993 if elapsed <= 0:
1996 if elapsed <= 0:
1994 elapsed = 0.001
1997 elapsed = 0.001
1995 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1998 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1996 (util.bytecount(total_bytes), elapsed,
1999 (util.bytecount(total_bytes), elapsed,
1997 util.bytecount(total_bytes / elapsed)))
2000 util.bytecount(total_bytes / elapsed)))
1998
2001
1999 # new requirements = old non-format requirements + new format-related
2002 # new requirements = old non-format requirements + new format-related
2000 # requirements from the streamed-in repository
2003 # requirements from the streamed-in repository
2001 requirements.update(set(self.requirements) - self.supportedformats)
2004 requirements.update(set(self.requirements) - self.supportedformats)
2002 self._applyrequirements(requirements)
2005 self._applyrequirements(requirements)
2003 self._writerequirements()
2006 self._writerequirements()
2004
2007
2005 self.invalidate()
2008 self.invalidate()
2006 return len(self.heads()) + 1
2009 return len(self.heads()) + 1
2007 finally:
2010 finally:
2008 lock.release()
2011 lock.release()
2009
2012
2010 def clone(self, remote, heads=[], stream=False):
2013 def clone(self, remote, heads=[], stream=False):
2011 '''clone remote repository.
2014 '''clone remote repository.
2012
2015
2013 keyword arguments:
2016 keyword arguments:
2014 heads: list of revs to clone (forces use of pull)
2017 heads: list of revs to clone (forces use of pull)
2015 stream: use streaming clone if possible'''
2018 stream: use streaming clone if possible'''
2016
2019
2017 # now, all clients that can request uncompressed clones can
2020 # now, all clients that can request uncompressed clones can
2018 # read repo formats supported by all servers that can serve
2021 # read repo formats supported by all servers that can serve
2019 # them.
2022 # them.
2020
2023
2021 # if revlog format changes, client will have to check version
2024 # if revlog format changes, client will have to check version
2022 # and format flags on "stream" capability, and use
2025 # and format flags on "stream" capability, and use
2023 # uncompressed only if compatible.
2026 # uncompressed only if compatible.
2024
2027
2025 if stream and not heads:
2028 if stream and not heads:
2026 # 'stream' means remote revlog format is revlogv1 only
2029 # 'stream' means remote revlog format is revlogv1 only
2027 if remote.capable('stream'):
2030 if remote.capable('stream'):
2028 return self.stream_in(remote, set(('revlogv1',)))
2031 return self.stream_in(remote, set(('revlogv1',)))
2029 # otherwise, 'streamreqs' contains the remote revlog format
2032 # otherwise, 'streamreqs' contains the remote revlog format
2030 streamreqs = remote.capable('streamreqs')
2033 streamreqs = remote.capable('streamreqs')
2031 if streamreqs:
2034 if streamreqs:
2032 streamreqs = set(streamreqs.split(','))
2035 streamreqs = set(streamreqs.split(','))
2033 # if we support it, stream in and adjust our requirements
2036 # if we support it, stream in and adjust our requirements
2034 if not streamreqs - self.supportedformats:
2037 if not streamreqs - self.supportedformats:
2035 return self.stream_in(remote, streamreqs)
2038 return self.stream_in(remote, streamreqs)
2036 return self.pull(remote, heads)
2039 return self.pull(remote, heads)
2037
2040
2038 def pushkey(self, namespace, key, old, new):
2041 def pushkey(self, namespace, key, old, new):
2039 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2042 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2040 old=old, new=new)
2043 old=old, new=new)
2041 ret = pushkey.push(self, namespace, key, old, new)
2044 ret = pushkey.push(self, namespace, key, old, new)
2042 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2045 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2043 ret=ret)
2046 ret=ret)
2044 return ret
2047 return ret
2045
2048
2046 def listkeys(self, namespace):
2049 def listkeys(self, namespace):
2047 self.hook('prelistkeys', throw=True, namespace=namespace)
2050 self.hook('prelistkeys', throw=True, namespace=namespace)
2048 values = pushkey.list(self, namespace)
2051 values = pushkey.list(self, namespace)
2049 self.hook('listkeys', namespace=namespace, values=values)
2052 self.hook('listkeys', namespace=namespace, values=values)
2050 return values
2053 return values
2051
2054
2052 def debugwireargs(self, one, two, three=None, four=None, five=None):
2055 def debugwireargs(self, one, two, three=None, four=None, five=None):
2053 '''used to test argument passing over the wire'''
2056 '''used to test argument passing over the wire'''
2054 return "%s %s %s %s %s" % (one, two, three, four, five)
2057 return "%s %s %s %s %s" % (one, two, three, four, five)
2055
2058
2056 def savecommitmessage(self, text):
2059 def savecommitmessage(self, text):
2057 fp = self.opener('last-message.txt', 'wb')
2060 fp = self.opener('last-message.txt', 'wb')
2058 try:
2061 try:
2059 fp.write(text)
2062 fp.write(text)
2060 finally:
2063 finally:
2061 fp.close()
2064 fp.close()
2062 return self.pathto(fp.name[len(self.root)+1:])
2065 return self.pathto(fp.name[len(self.root)+1:])
2063
2066
2064 # used to avoid circular references so destructors work
2067 # used to avoid circular references so destructors work
2065 def aftertrans(files):
2068 def aftertrans(files):
2066 renamefiles = [tuple(t) for t in files]
2069 renamefiles = [tuple(t) for t in files]
2067 def a():
2070 def a():
2068 for src, dest in renamefiles:
2071 for src, dest in renamefiles:
2069 util.rename(src, dest)
2072 util.rename(src, dest)
2070 return a
2073 return a
2071
2074
2072 def undoname(fn):
2075 def undoname(fn):
2073 base, name = os.path.split(fn)
2076 base, name = os.path.split(fn)
2074 assert name.startswith('journal')
2077 assert name.startswith('journal')
2075 return os.path.join(base, name.replace('journal', 'undo', 1))
2078 return os.path.join(base, name.replace('journal', 'undo', 1))
2076
2079
2077 def instance(ui, path, create):
2080 def instance(ui, path, create):
2078 return localrepository(ui, util.urllocalpath(path), create)
2081 return localrepository(ui, util.urllocalpath(path), create)
2079
2082
2080 def islocal(path):
2083 def islocal(path):
2081 return True
2084 return True
General Comments 0
You need to be logged in to leave comments. Login now