bookmarks: delete divergent bookmarks on merge
David Soria Parra
r16706:a270ec97 default
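With this change, a merge that advances the current bookmark also deletes bookmarks that diverged from it. Divergent bookmarks share the current bookmark's base name, the part before an '@' suffix (the test below uses 'c@diverge' for 'c'). A minimal sketch of the base-name matching, assuming a hypothetical basename() helper; the new update() code in bookmarks.py inlines the same split:

    # Sketch only: the '@'-suffix convention for divergent bookmarks.
    # basename() is a hypothetical helper; bookmarks.update() inlines the split.
    def basename(mark):
        # 'c@diverge' and 'c@1' both reduce to 'c'; plain 'c' stays 'c'
        return mark.split('@', 1)[0]

    marks = {'b': 'n1', 'c': 'n2', 'c@diverge': 'n1'}
    cur = 'c'
    # bookmarks considered when the current bookmark 'c' is updated by a merge
    toupdate = [b for b in marks if basename(b) == basename(cur)]
    assert sorted(toupdate) == ['c', 'c@diverge']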
@@ -0,0 +1,31 b''
# init

  $ hg init
  $ echo a > a
  $ hg add a
  $ hg commit -m'a'
  $ echo b > b
  $ hg add b
  $ hg commit -m'b'
  $ hg up -C 0
  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
  $ echo c > c
  $ hg add c
  $ hg commit -m'c'
  created new head

# test merging of diverged bookmarks
  $ hg bookmark -r 1 "c@diverge"
  $ hg bookmark -r 1 b
  $ hg bookmark c
  $ hg bookmarks
     b                         1:d2ae7f538514
   * c                         2:d36c0562f908
     c@diverge                 1:d2ae7f538514
  $ hg merge "c@diverge"
  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
  (branch merge, don't forget to commit)
  $ hg commit -m'merge'
  $ hg bookmarks
     b                         1:d2ae7f538514
   * c                         3:b8f96cf4688b
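The final 'hg bookmarks' listing above no longer shows 'c@diverge': merging it into 'c' removed the divergent mark. A toy re-creation of that cleanup, assuming plain strings in place of changeset nodes (the real update() in bookmarks.py below additionally requires the divergent mark to point at a merge parent and the current bookmark to actually advance):

    # Toy sketch: strings stand in for changeset nodes.
    marks = {'b': 'old', 'c': 'merged', 'c@diverge': 'old'}
    cur = 'c'
    toupdate = [b for b in marks if b.split('@', 1)[0] == cur.split('@', 1)[0]]
    for mark in toupdate:
        if mark != cur:
            # divergent sibling of the current bookmark: drop it after the merge
            del marks[mark]
    assert marks == {'b': 'old', 'c': 'merged'}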
@@ -1,247 +1,254 b''
# Mercurial bookmark support code
#
# Copyright 2008 David Soria Parra <dsp@php.net>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from mercurial.i18n import _
from mercurial.node import hex
from mercurial import encoding, util
import errno, os

def valid(mark):
    for c in (':', '\0', '\n', '\r'):
        if c in mark:
            return False
    return True

def read(repo):
    '''Parse .hg/bookmarks file and return a dictionary

    Bookmarks are stored as {HASH}\\s{NAME}\\n (localtags format) values
    in the .hg/bookmarks file.
    Read the file and return a (name=>nodeid) dictionary
    '''
    bookmarks = {}
    try:
        for line in repo.opener('bookmarks'):
            line = line.strip()
            if not line:
                continue
            if ' ' not in line:
                repo.ui.warn(_('malformed line in .hg/bookmarks: %r\n') % line)
                continue
            sha, refspec = line.split(' ', 1)
            refspec = encoding.tolocal(refspec)
            try:
                bookmarks[refspec] = repo.changelog.lookup(sha)
            except LookupError:
                pass
    except IOError, inst:
        if inst.errno != errno.ENOENT:
            raise
    return bookmarks

def readcurrent(repo):
    '''Get the current bookmark

    If we use gittish branches we have a current bookmark that
    we are on. This function returns the name of the bookmark. It
    is stored in .hg/bookmarks.current
    '''
    mark = None
    try:
        file = repo.opener('bookmarks.current')
    except IOError, inst:
        if inst.errno != errno.ENOENT:
            raise
        return None
    try:
        # No readline() in posixfile_nt, reading everything is cheap
        mark = encoding.tolocal((file.readlines() or [''])[0])
        if mark == '' or mark not in repo._bookmarks:
            mark = None
    finally:
        file.close()
    return mark

def write(repo):
    '''Write bookmarks

    Write the given bookmark => hash dictionary to the .hg/bookmarks file
    in a format equal to those of localtags.

    We also store a backup of the previous state in undo.bookmarks that
    can be copied back on rollback.
    '''
    refs = repo._bookmarks

    if repo._bookmarkcurrent not in refs:
        setcurrent(repo, None)
    for mark in refs.keys():
        if not valid(mark):
            raise util.Abort(_("bookmark '%s' contains illegal "
                               "character") % mark)

    wlock = repo.wlock()
    try:

        file = repo.opener('bookmarks', 'w', atomictemp=True)
        for refspec, node in refs.iteritems():
            file.write("%s %s\n" % (hex(node), encoding.fromlocal(refspec)))
        file.close()

        # touch 00changelog.i so hgweb reloads bookmarks (no lock needed)
        try:
            os.utime(repo.sjoin('00changelog.i'), None)
        except OSError:
            pass

    finally:
        wlock.release()

def setcurrent(repo, mark):
    '''Set the name of the bookmark that we are currently on

    Set the name of the bookmark that we are on (hg update <bookmark>).
    The name is recorded in .hg/bookmarks.current
    '''
    current = repo._bookmarkcurrent
    if current == mark:
        return

    if mark not in repo._bookmarks:
        mark = ''
    if not valid(mark):
        raise util.Abort(_("bookmark '%s' contains illegal "
                           "character") % mark)

    wlock = repo.wlock()
    try:
        file = repo.opener('bookmarks.current', 'w', atomictemp=True)
        file.write(encoding.fromlocal(mark))
        file.close()
    finally:
        wlock.release()
    repo._bookmarkcurrent = mark

def unsetcurrent(repo):
    wlock = repo.wlock()
    try:
        try:
            util.unlink(repo.join('bookmarks.current'))
            repo._bookmarkcurrent = None
        except OSError, inst:
            if inst.errno != errno.ENOENT:
                raise
    finally:
        wlock.release()

def updatecurrentbookmark(repo, oldnode, curbranch):
    try:
        return update(repo, oldnode, repo.branchtags()[curbranch])
    except KeyError:
        if curbranch == "default": # no default branch!
            return update(repo, oldnode, repo.lookup("tip"))
        else:
            raise util.Abort(_("branch %s not found") % curbranch)

def update(repo, parents, node):
    marks = repo._bookmarks
    update = False
-    mark = repo._bookmarkcurrent
-    if mark and marks[mark] in parents:
-        old = repo[marks[mark]]
-        new = repo[node]
-        if new in old.descendants():
-            marks[mark] = new.node()
-            update = True
+    cur = repo._bookmarkcurrent
+    if not cur:
+        return False
+
+    toupdate = [b for b in marks if b.split('@', 1)[0] == cur.split('@', 1)[0]]
+    for mark in toupdate:
+        if mark and marks[mark] in parents:
+            old = repo[marks[mark]]
+            new = repo[node]
+            if new in old.descendants() and mark == cur:
+                marks[cur] = new.node()
+                update = True
+            if mark != cur:
+                del marks[mark]
    if update:
        repo._writebookmarks(marks)
    return update

def listbookmarks(repo):
    # We may try to list bookmarks on a repo type that does not
    # support it (e.g., statichttprepository).
    marks = getattr(repo, '_bookmarks', {})

    d = {}
    for k, v in marks.iteritems():
        # don't expose local divergent bookmarks
        if '@' not in k or k.endswith('@'):
            d[k] = hex(v)
    return d

def pushbookmark(repo, key, old, new):
    w = repo.wlock()
    try:
        marks = repo._bookmarks
        if hex(marks.get(key, '')) != old:
            return False
        if new == '':
            del marks[key]
        else:
            if new not in repo:
                return False
            marks[key] = repo[new].node()
        write(repo)
        return True
    finally:
        w.release()

def updatefromremote(ui, repo, remote, path):
    ui.debug("checking for updated bookmarks\n")
    rb = remote.listkeys('bookmarks')
    changed = False
    for k in rb.keys():
        if k in repo._bookmarks:
            nr, nl = rb[k], repo._bookmarks[k]
            if nr in repo:
                cr = repo[nr]
                cl = repo[nl]
                if cl.rev() >= cr.rev():
                    continue
                if cr in cl.descendants():
                    repo._bookmarks[k] = cr.node()
                    changed = True
                    ui.status(_("updating bookmark %s\n") % k)
                else:
                    # find a unique @ suffix
                    for x in range(1, 100):
                        n = '%s@%d' % (k, x)
                        if n not in repo._bookmarks:
                            break
                    # try to use an @pathalias suffix
                    # if an @pathalias already exists, we overwrite (update) it
                    for p, u in ui.configitems("paths"):
                        if path == u:
                            n = '%s@%s' % (k, p)

                    repo._bookmarks[n] = cr.node()
                    changed = True
                    ui.warn(_("divergent bookmark %s stored as %s\n") % (k, n))
        elif rb[k] in repo:
            # add remote bookmarks for changes we already have
            repo._bookmarks[k] = repo[rb[k]].node()
            changed = True
            ui.status(_("adding remote bookmark %s\n") % k)

    if changed:
        write(repo)

def diff(ui, repo, remote):
    ui.status(_("searching for changed bookmarks\n"))

    lmarks = repo.listkeys('bookmarks')
    rmarks = remote.listkeys('bookmarks')

    diff = sorted(set(rmarks) - set(lmarks))
    for k in diff:
        mark = ui.debugflag and rmarks[k] or rmarks[k][:12]
        ui.write(" %-25s %s\n" % (k, mark))

    if len(diff) <= 0:
        ui.status(_("no changed bookmarks found\n"))
        return 1
    return 0
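Note the visibility rule in listbookmarks() above: a name containing '@' is treated as a local divergent bookmark and kept out of pushkey listings unless the '@' is the final character. An illustration with toy values:

    # Toy illustration of the listbookmarks() filter; values are fake node hashes.
    marks = {'c': 'aa', 'c@diverge': 'bb', 'release@': 'cc'}
    exposed = dict((k, v) for k, v in marks.items()
                   if '@' not in k or k.endswith('@'))
    # 'c@diverge' stays local; 'c' and the literal 'release@' are exposed
    assert sorted(exposed) == ['c', 'release@']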
@@ -1,2347 +1,2347 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import bin, hex, nullid, nullrev, short
8 from node import bin, hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import repo, changegroup, subrepo, discovery, pushkey
10 import repo, changegroup, subrepo, discovery, pushkey
11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
12 import lock, transaction, store, encoding
12 import lock, transaction, store, encoding
13 import scmutil, util, extensions, hook, error, revset
13 import scmutil, util, extensions, hook, error, revset
14 import match as matchmod
14 import match as matchmod
15 import merge as mergemod
15 import merge as mergemod
16 import tags as tagsmod
16 import tags as tagsmod
17 from lock import release
17 from lock import release
18 import weakref, errno, os, time, inspect
18 import weakref, errno, os, time, inspect
19 propertycache = util.propertycache
19 propertycache = util.propertycache
20 filecache = scmutil.filecache
20 filecache = scmutil.filecache
21
21
22 class storecache(filecache):
22 class storecache(filecache):
23 """filecache for files in the store"""
23 """filecache for files in the store"""
24 def join(self, obj, fname):
24 def join(self, obj, fname):
25 return obj.sjoin(fname)
25 return obj.sjoin(fname)
26
26
27 class localrepository(repo.repository):
27 class localrepository(repo.repository):
28 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
28 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
29 'known', 'getbundle'))
29 'known', 'getbundle'))
30 supportedformats = set(('revlogv1', 'generaldelta'))
30 supportedformats = set(('revlogv1', 'generaldelta'))
31 supported = supportedformats | set(('store', 'fncache', 'shared',
31 supported = supportedformats | set(('store', 'fncache', 'shared',
32 'dotencode'))
32 'dotencode'))
33
33
34 def __init__(self, baseui, path=None, create=False):
34 def __init__(self, baseui, path=None, create=False):
35 repo.repository.__init__(self)
35 repo.repository.__init__(self)
36 self.root = os.path.realpath(util.expandpath(path))
36 self.root = os.path.realpath(util.expandpath(path))
37 self.path = os.path.join(self.root, ".hg")
37 self.path = os.path.join(self.root, ".hg")
38 self.origroot = path
38 self.origroot = path
39 self.auditor = scmutil.pathauditor(self.root, self._checknested)
39 self.auditor = scmutil.pathauditor(self.root, self._checknested)
40 self.opener = scmutil.opener(self.path)
40 self.opener = scmutil.opener(self.path)
41 self.wopener = scmutil.opener(self.root)
41 self.wopener = scmutil.opener(self.root)
42 self.baseui = baseui
42 self.baseui = baseui
43 self.ui = baseui.copy()
43 self.ui = baseui.copy()
44 # A list of callback to shape the phase if no data were found.
44 # A list of callback to shape the phase if no data were found.
45 # Callback are in the form: func(repo, roots) --> processed root.
45 # Callback are in the form: func(repo, roots) --> processed root.
46 # This list it to be filled by extension during repo setup
46 # This list it to be filled by extension during repo setup
47 self._phasedefaults = []
47 self._phasedefaults = []
48
48
49 try:
49 try:
50 self.ui.readconfig(self.join("hgrc"), self.root)
50 self.ui.readconfig(self.join("hgrc"), self.root)
51 extensions.loadall(self.ui)
51 extensions.loadall(self.ui)
52 except IOError:
52 except IOError:
53 pass
53 pass
54
54
55 if not os.path.isdir(self.path):
55 if not os.path.isdir(self.path):
56 if create:
56 if create:
57 if not os.path.exists(path):
57 if not os.path.exists(path):
58 util.makedirs(path)
58 util.makedirs(path)
59 util.makedir(self.path, notindexed=True)
59 util.makedir(self.path, notindexed=True)
60 requirements = ["revlogv1"]
60 requirements = ["revlogv1"]
61 if self.ui.configbool('format', 'usestore', True):
61 if self.ui.configbool('format', 'usestore', True):
62 os.mkdir(os.path.join(self.path, "store"))
62 os.mkdir(os.path.join(self.path, "store"))
63 requirements.append("store")
63 requirements.append("store")
64 if self.ui.configbool('format', 'usefncache', True):
64 if self.ui.configbool('format', 'usefncache', True):
65 requirements.append("fncache")
65 requirements.append("fncache")
66 if self.ui.configbool('format', 'dotencode', True):
66 if self.ui.configbool('format', 'dotencode', True):
67 requirements.append('dotencode')
67 requirements.append('dotencode')
68 # create an invalid changelog
68 # create an invalid changelog
69 self.opener.append(
69 self.opener.append(
70 "00changelog.i",
70 "00changelog.i",
71 '\0\0\0\2' # represents revlogv2
71 '\0\0\0\2' # represents revlogv2
72 ' dummy changelog to prevent using the old repo layout'
72 ' dummy changelog to prevent using the old repo layout'
73 )
73 )
74 if self.ui.configbool('format', 'generaldelta', False):
74 if self.ui.configbool('format', 'generaldelta', False):
75 requirements.append("generaldelta")
75 requirements.append("generaldelta")
76 requirements = set(requirements)
76 requirements = set(requirements)
77 else:
77 else:
78 raise error.RepoError(_("repository %s not found") % path)
78 raise error.RepoError(_("repository %s not found") % path)
79 elif create:
79 elif create:
80 raise error.RepoError(_("repository %s already exists") % path)
80 raise error.RepoError(_("repository %s already exists") % path)
81 else:
81 else:
82 try:
82 try:
83 requirements = scmutil.readrequires(self.opener, self.supported)
83 requirements = scmutil.readrequires(self.opener, self.supported)
84 except IOError, inst:
84 except IOError, inst:
85 if inst.errno != errno.ENOENT:
85 if inst.errno != errno.ENOENT:
86 raise
86 raise
87 requirements = set()
87 requirements = set()
88
88
89 self.sharedpath = self.path
89 self.sharedpath = self.path
90 try:
90 try:
91 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
91 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
92 if not os.path.exists(s):
92 if not os.path.exists(s):
93 raise error.RepoError(
93 raise error.RepoError(
94 _('.hg/sharedpath points to nonexistent directory %s') % s)
94 _('.hg/sharedpath points to nonexistent directory %s') % s)
95 self.sharedpath = s
95 self.sharedpath = s
96 except IOError, inst:
96 except IOError, inst:
97 if inst.errno != errno.ENOENT:
97 if inst.errno != errno.ENOENT:
98 raise
98 raise
99
99
100 self.store = store.store(requirements, self.sharedpath, scmutil.opener)
100 self.store = store.store(requirements, self.sharedpath, scmutil.opener)
101 self.spath = self.store.path
101 self.spath = self.store.path
102 self.sopener = self.store.opener
102 self.sopener = self.store.opener
103 self.sjoin = self.store.join
103 self.sjoin = self.store.join
104 self.opener.createmode = self.store.createmode
104 self.opener.createmode = self.store.createmode
105 self._applyrequirements(requirements)
105 self._applyrequirements(requirements)
106 if create:
106 if create:
107 self._writerequirements()
107 self._writerequirements()
108
108
109
109
110 self._branchcache = None
110 self._branchcache = None
111 self._branchcachetip = None
111 self._branchcachetip = None
112 self.filterpats = {}
112 self.filterpats = {}
113 self._datafilters = {}
113 self._datafilters = {}
114 self._transref = self._lockref = self._wlockref = None
114 self._transref = self._lockref = self._wlockref = None
115
115
116 # A cache for various files under .hg/ that tracks file changes,
116 # A cache for various files under .hg/ that tracks file changes,
117 # (used by the filecache decorator)
117 # (used by the filecache decorator)
118 #
118 #
119 # Maps a property name to its util.filecacheentry
119 # Maps a property name to its util.filecacheentry
120 self._filecache = {}
120 self._filecache = {}
121
121
122 def _applyrequirements(self, requirements):
122 def _applyrequirements(self, requirements):
123 self.requirements = requirements
123 self.requirements = requirements
124 openerreqs = set(('revlogv1', 'generaldelta'))
124 openerreqs = set(('revlogv1', 'generaldelta'))
125 self.sopener.options = dict((r, 1) for r in requirements
125 self.sopener.options = dict((r, 1) for r in requirements
126 if r in openerreqs)
126 if r in openerreqs)
127
127
128 def _writerequirements(self):
128 def _writerequirements(self):
129 reqfile = self.opener("requires", "w")
129 reqfile = self.opener("requires", "w")
130 for r in self.requirements:
130 for r in self.requirements:
131 reqfile.write("%s\n" % r)
131 reqfile.write("%s\n" % r)
132 reqfile.close()
132 reqfile.close()
133
133
134 def _checknested(self, path):
134 def _checknested(self, path):
135 """Determine if path is a legal nested repository."""
135 """Determine if path is a legal nested repository."""
136 if not path.startswith(self.root):
136 if not path.startswith(self.root):
137 return False
137 return False
138 subpath = path[len(self.root) + 1:]
138 subpath = path[len(self.root) + 1:]
139 normsubpath = util.pconvert(subpath)
139 normsubpath = util.pconvert(subpath)
140
140
141 # XXX: Checking against the current working copy is wrong in
141 # XXX: Checking against the current working copy is wrong in
142 # the sense that it can reject things like
142 # the sense that it can reject things like
143 #
143 #
144 # $ hg cat -r 10 sub/x.txt
144 # $ hg cat -r 10 sub/x.txt
145 #
145 #
146 # if sub/ is no longer a subrepository in the working copy
146 # if sub/ is no longer a subrepository in the working copy
147 # parent revision.
147 # parent revision.
148 #
148 #
149 # However, it can of course also allow things that would have
149 # However, it can of course also allow things that would have
150 # been rejected before, such as the above cat command if sub/
150 # been rejected before, such as the above cat command if sub/
151 # is a subrepository now, but was a normal directory before.
151 # is a subrepository now, but was a normal directory before.
152 # The old path auditor would have rejected by mistake since it
152 # The old path auditor would have rejected by mistake since it
153 # panics when it sees sub/.hg/.
153 # panics when it sees sub/.hg/.
154 #
154 #
155 # All in all, checking against the working copy seems sensible
155 # All in all, checking against the working copy seems sensible
156 # since we want to prevent access to nested repositories on
156 # since we want to prevent access to nested repositories on
157 # the filesystem *now*.
157 # the filesystem *now*.
158 ctx = self[None]
158 ctx = self[None]
159 parts = util.splitpath(subpath)
159 parts = util.splitpath(subpath)
160 while parts:
160 while parts:
161 prefix = '/'.join(parts)
161 prefix = '/'.join(parts)
162 if prefix in ctx.substate:
162 if prefix in ctx.substate:
163 if prefix == normsubpath:
163 if prefix == normsubpath:
164 return True
164 return True
165 else:
165 else:
166 sub = ctx.sub(prefix)
166 sub = ctx.sub(prefix)
167 return sub.checknested(subpath[len(prefix) + 1:])
167 return sub.checknested(subpath[len(prefix) + 1:])
168 else:
168 else:
169 parts.pop()
169 parts.pop()
170 return False
170 return False
171
171
172 @filecache('bookmarks')
172 @filecache('bookmarks')
173 def _bookmarks(self):
173 def _bookmarks(self):
174 return bookmarks.read(self)
174 return bookmarks.read(self)
175
175
176 @filecache('bookmarks.current')
176 @filecache('bookmarks.current')
177 def _bookmarkcurrent(self):
177 def _bookmarkcurrent(self):
178 return bookmarks.readcurrent(self)
178 return bookmarks.readcurrent(self)
179
179
180 def _writebookmarks(self, marks):
180 def _writebookmarks(self, marks):
181 bookmarks.write(self)
181 bookmarks.write(self)
182
182
183 @storecache('phaseroots')
183 @storecache('phaseroots')
184 def _phasecache(self):
184 def _phasecache(self):
185 return phases.phasecache(self, self._phasedefaults)
185 return phases.phasecache(self, self._phasedefaults)
186
186
187 @storecache('00changelog.i')
187 @storecache('00changelog.i')
188 def changelog(self):
188 def changelog(self):
189 c = changelog.changelog(self.sopener)
189 c = changelog.changelog(self.sopener)
190 if 'HG_PENDING' in os.environ:
190 if 'HG_PENDING' in os.environ:
191 p = os.environ['HG_PENDING']
191 p = os.environ['HG_PENDING']
192 if p.startswith(self.root):
192 if p.startswith(self.root):
193 c.readpending('00changelog.i.a')
193 c.readpending('00changelog.i.a')
194 return c
194 return c
195
195
196 @storecache('00manifest.i')
196 @storecache('00manifest.i')
197 def manifest(self):
197 def manifest(self):
198 return manifest.manifest(self.sopener)
198 return manifest.manifest(self.sopener)
199
199
200 @filecache('dirstate')
200 @filecache('dirstate')
201 def dirstate(self):
201 def dirstate(self):
202 warned = [0]
202 warned = [0]
203 def validate(node):
203 def validate(node):
204 try:
204 try:
205 self.changelog.rev(node)
205 self.changelog.rev(node)
206 return node
206 return node
207 except error.LookupError:
207 except error.LookupError:
208 if not warned[0]:
208 if not warned[0]:
209 warned[0] = True
209 warned[0] = True
210 self.ui.warn(_("warning: ignoring unknown"
210 self.ui.warn(_("warning: ignoring unknown"
211 " working parent %s!\n") % short(node))
211 " working parent %s!\n") % short(node))
212 return nullid
212 return nullid
213
213
214 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
214 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
215
215
216 def __getitem__(self, changeid):
216 def __getitem__(self, changeid):
217 if changeid is None:
217 if changeid is None:
218 return context.workingctx(self)
218 return context.workingctx(self)
219 return context.changectx(self, changeid)
219 return context.changectx(self, changeid)
220
220
221 def __contains__(self, changeid):
221 def __contains__(self, changeid):
222 try:
222 try:
223 return bool(self.lookup(changeid))
223 return bool(self.lookup(changeid))
224 except error.RepoLookupError:
224 except error.RepoLookupError:
225 return False
225 return False
226
226
227 def __nonzero__(self):
227 def __nonzero__(self):
228 return True
228 return True
229
229
230 def __len__(self):
230 def __len__(self):
231 return len(self.changelog)
231 return len(self.changelog)
232
232
233 def __iter__(self):
233 def __iter__(self):
234 for i in xrange(len(self)):
234 for i in xrange(len(self)):
235 yield i
235 yield i
236
236
237 def revs(self, expr, *args):
237 def revs(self, expr, *args):
238 '''Return a list of revisions matching the given revset'''
238 '''Return a list of revisions matching the given revset'''
239 expr = revset.formatspec(expr, *args)
239 expr = revset.formatspec(expr, *args)
240 m = revset.match(None, expr)
240 m = revset.match(None, expr)
241 return [r for r in m(self, range(len(self)))]
241 return [r for r in m(self, range(len(self)))]
242
242
243 def set(self, expr, *args):
243 def set(self, expr, *args):
244 '''
244 '''
245 Yield a context for each matching revision, after doing arg
245 Yield a context for each matching revision, after doing arg
246 replacement via revset.formatspec
246 replacement via revset.formatspec
247 '''
247 '''
248 for r in self.revs(expr, *args):
248 for r in self.revs(expr, *args):
249 yield self[r]
249 yield self[r]
250
250
251 def url(self):
251 def url(self):
252 return 'file:' + self.root
252 return 'file:' + self.root
253
253
254 def hook(self, name, throw=False, **args):
254 def hook(self, name, throw=False, **args):
255 return hook.hook(self.ui, self, name, throw, **args)
255 return hook.hook(self.ui, self, name, throw, **args)
256
256
257 tag_disallowed = ':\r\n'
257 tag_disallowed = ':\r\n'
258
258
259 def _tag(self, names, node, message, local, user, date, extra={}):
259 def _tag(self, names, node, message, local, user, date, extra={}):
260 if isinstance(names, str):
260 if isinstance(names, str):
261 allchars = names
261 allchars = names
262 names = (names,)
262 names = (names,)
263 else:
263 else:
264 allchars = ''.join(names)
264 allchars = ''.join(names)
265 for c in self.tag_disallowed:
265 for c in self.tag_disallowed:
266 if c in allchars:
266 if c in allchars:
267 raise util.Abort(_('%r cannot be used in a tag name') % c)
267 raise util.Abort(_('%r cannot be used in a tag name') % c)
268
268
269 branches = self.branchmap()
269 branches = self.branchmap()
270 for name in names:
270 for name in names:
271 self.hook('pretag', throw=True, node=hex(node), tag=name,
271 self.hook('pretag', throw=True, node=hex(node), tag=name,
272 local=local)
272 local=local)
273 if name in branches:
273 if name in branches:
274 self.ui.warn(_("warning: tag %s conflicts with existing"
274 self.ui.warn(_("warning: tag %s conflicts with existing"
275 " branch name\n") % name)
275 " branch name\n") % name)
276
276
277 def writetags(fp, names, munge, prevtags):
277 def writetags(fp, names, munge, prevtags):
278 fp.seek(0, 2)
278 fp.seek(0, 2)
279 if prevtags and prevtags[-1] != '\n':
279 if prevtags and prevtags[-1] != '\n':
280 fp.write('\n')
280 fp.write('\n')
281 for name in names:
281 for name in names:
282 m = munge and munge(name) or name
282 m = munge and munge(name) or name
283 if (self._tagscache.tagtypes and
283 if (self._tagscache.tagtypes and
284 name in self._tagscache.tagtypes):
284 name in self._tagscache.tagtypes):
285 old = self.tags().get(name, nullid)
285 old = self.tags().get(name, nullid)
286 fp.write('%s %s\n' % (hex(old), m))
286 fp.write('%s %s\n' % (hex(old), m))
287 fp.write('%s %s\n' % (hex(node), m))
287 fp.write('%s %s\n' % (hex(node), m))
288 fp.close()
288 fp.close()
289
289
290 prevtags = ''
290 prevtags = ''
291 if local:
291 if local:
292 try:
292 try:
293 fp = self.opener('localtags', 'r+')
293 fp = self.opener('localtags', 'r+')
294 except IOError:
294 except IOError:
295 fp = self.opener('localtags', 'a')
295 fp = self.opener('localtags', 'a')
296 else:
296 else:
297 prevtags = fp.read()
297 prevtags = fp.read()
298
298
299 # local tags are stored in the current charset
299 # local tags are stored in the current charset
300 writetags(fp, names, None, prevtags)
300 writetags(fp, names, None, prevtags)
301 for name in names:
301 for name in names:
302 self.hook('tag', node=hex(node), tag=name, local=local)
302 self.hook('tag', node=hex(node), tag=name, local=local)
303 return
303 return
304
304
305 try:
305 try:
306 fp = self.wfile('.hgtags', 'rb+')
306 fp = self.wfile('.hgtags', 'rb+')
307 except IOError, e:
307 except IOError, e:
308 if e.errno != errno.ENOENT:
308 if e.errno != errno.ENOENT:
309 raise
309 raise
310 fp = self.wfile('.hgtags', 'ab')
310 fp = self.wfile('.hgtags', 'ab')
311 else:
311 else:
312 prevtags = fp.read()
312 prevtags = fp.read()
313
313
314 # committed tags are stored in UTF-8
314 # committed tags are stored in UTF-8
315 writetags(fp, names, encoding.fromlocal, prevtags)
315 writetags(fp, names, encoding.fromlocal, prevtags)
316
316
317 fp.close()
317 fp.close()
318
318
319 self.invalidatecaches()
319 self.invalidatecaches()
320
320
321 if '.hgtags' not in self.dirstate:
321 if '.hgtags' not in self.dirstate:
322 self[None].add(['.hgtags'])
322 self[None].add(['.hgtags'])
323
323
324 m = matchmod.exact(self.root, '', ['.hgtags'])
324 m = matchmod.exact(self.root, '', ['.hgtags'])
325 tagnode = self.commit(message, user, date, extra=extra, match=m)
325 tagnode = self.commit(message, user, date, extra=extra, match=m)
326
326
327 for name in names:
327 for name in names:
328 self.hook('tag', node=hex(node), tag=name, local=local)
328 self.hook('tag', node=hex(node), tag=name, local=local)
329
329
330 return tagnode
330 return tagnode
331
331
332 def tag(self, names, node, message, local, user, date):
332 def tag(self, names, node, message, local, user, date):
333 '''tag a revision with one or more symbolic names.
333 '''tag a revision with one or more symbolic names.
334
334
335 names is a list of strings or, when adding a single tag, names may be a
335 names is a list of strings or, when adding a single tag, names may be a
336 string.
336 string.
337
337
338 if local is True, the tags are stored in a per-repository file.
338 if local is True, the tags are stored in a per-repository file.
339 otherwise, they are stored in the .hgtags file, and a new
339 otherwise, they are stored in the .hgtags file, and a new
340 changeset is committed with the change.
340 changeset is committed with the change.
341
341
342 keyword arguments:
342 keyword arguments:
343
343
344 local: whether to store tags in non-version-controlled file
344 local: whether to store tags in non-version-controlled file
345 (default False)
345 (default False)
346
346
347 message: commit message to use if committing
347 message: commit message to use if committing
348
348
349 user: name of user to use if committing
349 user: name of user to use if committing
350
350
351 date: date tuple to use if committing'''
351 date: date tuple to use if committing'''
352
352
353 if not local:
353 if not local:
354 for x in self.status()[:5]:
354 for x in self.status()[:5]:
355 if '.hgtags' in x:
355 if '.hgtags' in x:
356 raise util.Abort(_('working copy of .hgtags is changed '
356 raise util.Abort(_('working copy of .hgtags is changed '
357 '(please commit .hgtags manually)'))
357 '(please commit .hgtags manually)'))
358
358
359 self.tags() # instantiate the cache
359 self.tags() # instantiate the cache
360 self._tag(names, node, message, local, user, date)
360 self._tag(names, node, message, local, user, date)
361
361
362 @propertycache
362 @propertycache
363 def _tagscache(self):
363 def _tagscache(self):
364 '''Returns a tagscache object that contains various tags related
364 '''Returns a tagscache object that contains various tags related
365 caches.'''
365 caches.'''
366
366
367 # This simplifies its cache management by having one decorated
367 # This simplifies its cache management by having one decorated
368 # function (this one) and the rest simply fetch things from it.
368 # function (this one) and the rest simply fetch things from it.
369 class tagscache(object):
369 class tagscache(object):
370 def __init__(self):
370 def __init__(self):
371 # These two define the set of tags for this repository. tags
371 # These two define the set of tags for this repository. tags
372 # maps tag name to node; tagtypes maps tag name to 'global' or
372 # maps tag name to node; tagtypes maps tag name to 'global' or
373 # 'local'. (Global tags are defined by .hgtags across all
373 # 'local'. (Global tags are defined by .hgtags across all
374 # heads, and local tags are defined in .hg/localtags.)
374 # heads, and local tags are defined in .hg/localtags.)
375 # They constitute the in-memory cache of tags.
375 # They constitute the in-memory cache of tags.
376 self.tags = self.tagtypes = None
376 self.tags = self.tagtypes = None
377
377
378 self.nodetagscache = self.tagslist = None
378 self.nodetagscache = self.tagslist = None
379
379
380 cache = tagscache()
380 cache = tagscache()
381 cache.tags, cache.tagtypes = self._findtags()
381 cache.tags, cache.tagtypes = self._findtags()
382
382
383 return cache
383 return cache
384
384
385 def tags(self):
385 def tags(self):
386 '''return a mapping of tag to node'''
386 '''return a mapping of tag to node'''
387 t = {}
387 t = {}
388 for k, v in self._tagscache.tags.iteritems():
388 for k, v in self._tagscache.tags.iteritems():
389 try:
389 try:
390 # ignore tags to unknown nodes
390 # ignore tags to unknown nodes
391 self.changelog.rev(v)
391 self.changelog.rev(v)
392 t[k] = v
392 t[k] = v
393 except (error.LookupError, ValueError):
393 except (error.LookupError, ValueError):
394 pass
394 pass
395 return t
395 return t
396
396
397 def _findtags(self):
397 def _findtags(self):
398 '''Do the hard work of finding tags. Return a pair of dicts
398 '''Do the hard work of finding tags. Return a pair of dicts
399 (tags, tagtypes) where tags maps tag name to node, and tagtypes
399 (tags, tagtypes) where tags maps tag name to node, and tagtypes
400 maps tag name to a string like \'global\' or \'local\'.
400 maps tag name to a string like \'global\' or \'local\'.
401 Subclasses or extensions are free to add their own tags, but
401 Subclasses or extensions are free to add their own tags, but
402 should be aware that the returned dicts will be retained for the
402 should be aware that the returned dicts will be retained for the
403 duration of the localrepo object.'''
403 duration of the localrepo object.'''
404
404
405 # XXX what tagtype should subclasses/extensions use? Currently
405 # XXX what tagtype should subclasses/extensions use? Currently
406 # mq and bookmarks add tags, but do not set the tagtype at all.
406 # mq and bookmarks add tags, but do not set the tagtype at all.
407 # Should each extension invent its own tag type? Should there
407 # Should each extension invent its own tag type? Should there
408 # be one tagtype for all such "virtual" tags? Or is the status
408 # be one tagtype for all such "virtual" tags? Or is the status
409 # quo fine?
409 # quo fine?
410
410
411 alltags = {} # map tag name to (node, hist)
411 alltags = {} # map tag name to (node, hist)
412 tagtypes = {}
412 tagtypes = {}
413
413
414 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
414 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
415 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
415 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
416
416
417 # Build the return dicts. Have to re-encode tag names because
417 # Build the return dicts. Have to re-encode tag names because
418 # the tags module always uses UTF-8 (in order not to lose info
418 # the tags module always uses UTF-8 (in order not to lose info
419 # writing to the cache), but the rest of Mercurial wants them in
419 # writing to the cache), but the rest of Mercurial wants them in
420 # local encoding.
420 # local encoding.
421 tags = {}
421 tags = {}
422 for (name, (node, hist)) in alltags.iteritems():
422 for (name, (node, hist)) in alltags.iteritems():
423 if node != nullid:
423 if node != nullid:
424 tags[encoding.tolocal(name)] = node
424 tags[encoding.tolocal(name)] = node
425 tags['tip'] = self.changelog.tip()
425 tags['tip'] = self.changelog.tip()
426 tagtypes = dict([(encoding.tolocal(name), value)
426 tagtypes = dict([(encoding.tolocal(name), value)
427 for (name, value) in tagtypes.iteritems()])
427 for (name, value) in tagtypes.iteritems()])
428 return (tags, tagtypes)
428 return (tags, tagtypes)
429
429
430 def tagtype(self, tagname):
430 def tagtype(self, tagname):
431 '''
431 '''
432 return the type of the given tag. result can be:
432 return the type of the given tag. result can be:
433
433
434 'local' : a local tag
434 'local' : a local tag
435 'global' : a global tag
435 'global' : a global tag
436 None : tag does not exist
436 None : tag does not exist
437 '''
437 '''
438
438
439 return self._tagscache.tagtypes.get(tagname)
439 return self._tagscache.tagtypes.get(tagname)
440
440
441 def tagslist(self):
441 def tagslist(self):
442 '''return a list of tags ordered by revision'''
442 '''return a list of tags ordered by revision'''
443 if not self._tagscache.tagslist:
443 if not self._tagscache.tagslist:
444 l = []
444 l = []
445 for t, n in self.tags().iteritems():
445 for t, n in self.tags().iteritems():
446 r = self.changelog.rev(n)
446 r = self.changelog.rev(n)
447 l.append((r, t, n))
447 l.append((r, t, n))
448 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
448 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
449
449
450 return self._tagscache.tagslist
450 return self._tagscache.tagslist
451
451
452 def nodetags(self, node):
452 def nodetags(self, node):
453 '''return the tags associated with a node'''
453 '''return the tags associated with a node'''
454 if not self._tagscache.nodetagscache:
454 if not self._tagscache.nodetagscache:
455 nodetagscache = {}
455 nodetagscache = {}
456 for t, n in self._tagscache.tags.iteritems():
456 for t, n in self._tagscache.tags.iteritems():
457 nodetagscache.setdefault(n, []).append(t)
457 nodetagscache.setdefault(n, []).append(t)
458 for tags in nodetagscache.itervalues():
458 for tags in nodetagscache.itervalues():
459 tags.sort()
459 tags.sort()
460 self._tagscache.nodetagscache = nodetagscache
460 self._tagscache.nodetagscache = nodetagscache
461 return self._tagscache.nodetagscache.get(node, [])
461 return self._tagscache.nodetagscache.get(node, [])
462
462
463 def nodebookmarks(self, node):
463 def nodebookmarks(self, node):
464 marks = []
464 marks = []
465 for bookmark, n in self._bookmarks.iteritems():
465 for bookmark, n in self._bookmarks.iteritems():
466 if n == node:
466 if n == node:
467 marks.append(bookmark)
467 marks.append(bookmark)
468 return sorted(marks)
468 return sorted(marks)
469
469
470 def _branchtags(self, partial, lrev):
470 def _branchtags(self, partial, lrev):
471 # TODO: rename this function?
471 # TODO: rename this function?
472 tiprev = len(self) - 1
472 tiprev = len(self) - 1
473 if lrev != tiprev:
473 if lrev != tiprev:
474 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
474 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
475 self._updatebranchcache(partial, ctxgen)
475 self._updatebranchcache(partial, ctxgen)
476 self._writebranchcache(partial, self.changelog.tip(), tiprev)
476 self._writebranchcache(partial, self.changelog.tip(), tiprev)
477
477
478 return partial
478 return partial
479
479
480 def updatebranchcache(self):
480 def updatebranchcache(self):
481 tip = self.changelog.tip()
481 tip = self.changelog.tip()
482 if self._branchcache is not None and self._branchcachetip == tip:
482 if self._branchcache is not None and self._branchcachetip == tip:
483 return
483 return
484
484
485 oldtip = self._branchcachetip
485 oldtip = self._branchcachetip
486 self._branchcachetip = tip
486 self._branchcachetip = tip
487 if oldtip is None or oldtip not in self.changelog.nodemap:
487 if oldtip is None or oldtip not in self.changelog.nodemap:
488 partial, last, lrev = self._readbranchcache()
488 partial, last, lrev = self._readbranchcache()
489 else:
489 else:
490 lrev = self.changelog.rev(oldtip)
490 lrev = self.changelog.rev(oldtip)
491 partial = self._branchcache
491 partial = self._branchcache
492
492
493 self._branchtags(partial, lrev)
493 self._branchtags(partial, lrev)
494 # this private cache holds all heads (not just the branch tips)
494 # this private cache holds all heads (not just the branch tips)
495 self._branchcache = partial
495 self._branchcache = partial
496
496
497 def branchmap(self):
497 def branchmap(self):
498 '''returns a dictionary {branch: [branchheads]}'''
498 '''returns a dictionary {branch: [branchheads]}'''
499 self.updatebranchcache()
499 self.updatebranchcache()
500 return self._branchcache
500 return self._branchcache
501
501
502 def branchtags(self):
502 def branchtags(self):
503 '''return a dict where branch names map to the tipmost head of
503 '''return a dict where branch names map to the tipmost head of
504 the branch, open heads come before closed'''
504 the branch, open heads come before closed'''
505 bt = {}
505 bt = {}
506 for bn, heads in self.branchmap().iteritems():
506 for bn, heads in self.branchmap().iteritems():
507 tip = heads[-1]
507 tip = heads[-1]
508 for h in reversed(heads):
508 for h in reversed(heads):
509 if 'close' not in self.changelog.read(h)[5]:
509 if 'close' not in self.changelog.read(h)[5]:
510 tip = h
510 tip = h
511 break
511 break
512 bt[bn] = tip
512 bt[bn] = tip
513 return bt
513 return bt
514
514
515 def _readbranchcache(self):
515 def _readbranchcache(self):
516 partial = {}
516 partial = {}
517 try:
517 try:
518 f = self.opener("cache/branchheads")
518 f = self.opener("cache/branchheads")
519 lines = f.read().split('\n')
519 lines = f.read().split('\n')
520 f.close()
520 f.close()
521 except (IOError, OSError):
521 except (IOError, OSError):
522 return {}, nullid, nullrev
522 return {}, nullid, nullrev
523
523
524 try:
524 try:
525 last, lrev = lines.pop(0).split(" ", 1)
525 last, lrev = lines.pop(0).split(" ", 1)
526 last, lrev = bin(last), int(lrev)
526 last, lrev = bin(last), int(lrev)
527 if lrev >= len(self) or self[lrev].node() != last:
527 if lrev >= len(self) or self[lrev].node() != last:
528 # invalidate the cache
528 # invalidate the cache
529 raise ValueError('invalidating branch cache (tip differs)')
529 raise ValueError('invalidating branch cache (tip differs)')
530 for l in lines:
530 for l in lines:
531 if not l:
531 if not l:
532 continue
532 continue
533 node, label = l.split(" ", 1)
533 node, label = l.split(" ", 1)
534 label = encoding.tolocal(label.strip())
534 label = encoding.tolocal(label.strip())
535 partial.setdefault(label, []).append(bin(node))
535 partial.setdefault(label, []).append(bin(node))
536 except KeyboardInterrupt:
536 except KeyboardInterrupt:
537 raise
537 raise
538 except Exception, inst:
538 except Exception, inst:
539 if self.ui.debugflag:
539 if self.ui.debugflag:
540 self.ui.warn(str(inst), '\n')
540 self.ui.warn(str(inst), '\n')
541 partial, last, lrev = {}, nullid, nullrev
541 partial, last, lrev = {}, nullid, nullrev
542 return partial, last, lrev
542 return partial, last, lrev
543
543
544 def _writebranchcache(self, branches, tip, tiprev):
544 def _writebranchcache(self, branches, tip, tiprev):
545 try:
545 try:
546 f = self.opener("cache/branchheads", "w", atomictemp=True)
546 f = self.opener("cache/branchheads", "w", atomictemp=True)
547 f.write("%s %s\n" % (hex(tip), tiprev))
547 f.write("%s %s\n" % (hex(tip), tiprev))
548 for label, nodes in branches.iteritems():
548 for label, nodes in branches.iteritems():
549 for node in nodes:
549 for node in nodes:
550 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
550 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
551 f.close()
551 f.close()
552 except (IOError, OSError):
552 except (IOError, OSError):
553 pass
553 pass
554
554
555 def _updatebranchcache(self, partial, ctxgen):
555 def _updatebranchcache(self, partial, ctxgen):
556 # collect new branch entries
556 # collect new branch entries
557 newbranches = {}
557 newbranches = {}
558 for c in ctxgen:
558 for c in ctxgen:
559 newbranches.setdefault(c.branch(), []).append(c.node())
559 newbranches.setdefault(c.branch(), []).append(c.node())
560 # if older branchheads are reachable from new ones, they aren't
560 # if older branchheads are reachable from new ones, they aren't
561 # really branchheads. Note checking parents is insufficient:
561 # really branchheads. Note checking parents is insufficient:
562 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
562 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
563 for branch, newnodes in newbranches.iteritems():
563 for branch, newnodes in newbranches.iteritems():
564 bheads = partial.setdefault(branch, [])
564 bheads = partial.setdefault(branch, [])
565 bheads.extend(newnodes)
565 bheads.extend(newnodes)
566 if len(bheads) <= 1:
566 if len(bheads) <= 1:
567 continue
567 continue
568 bheads = sorted(bheads, key=lambda x: self[x].rev())
568 bheads = sorted(bheads, key=lambda x: self[x].rev())
569 # starting from tip means fewer passes over reachable
569 # starting from tip means fewer passes over reachable
570 while newnodes:
570 while newnodes:
571 latest = newnodes.pop()
571 latest = newnodes.pop()
572 if latest not in bheads:
572 if latest not in bheads:
573 continue
573 continue
574 minbhnode = self[bheads[0]].node()
574 minbhnode = self[bheads[0]].node()
575 reachable = self.changelog.reachable(latest, minbhnode)
575 reachable = self.changelog.reachable(latest, minbhnode)
576 reachable.remove(latest)
576 reachable.remove(latest)
577 if reachable:
577 if reachable:
578 bheads = [b for b in bheads if b not in reachable]
578 bheads = [b for b in bheads if b not in reachable]
579 partial[branch] = bheads
579 partial[branch] = bheads
580
580
581 def lookup(self, key):
581 def lookup(self, key):
582 return self[key].node()
582 return self[key].node()
583
583
584 def lookupbranch(self, key, remote=None):
584 def lookupbranch(self, key, remote=None):
585 repo = remote or self
585 repo = remote or self
586 if key in repo.branchmap():
586 if key in repo.branchmap():
587 return key
587 return key
588
588
589 repo = (remote and remote.local()) and remote or self
589 repo = (remote and remote.local()) and remote or self
590 return repo[key].branch()
590 return repo[key].branch()
591
591
592 def known(self, nodes):
592 def known(self, nodes):
593 nm = self.changelog.nodemap
593 nm = self.changelog.nodemap
594 pc = self._phasecache
594 pc = self._phasecache
595 result = []
595 result = []
596 for n in nodes:
596 for n in nodes:
597 r = nm.get(n)
597 r = nm.get(n)
598 resp = not (r is None or pc.phase(self, r) >= phases.secret)
598 resp = not (r is None or pc.phase(self, r) >= phases.secret)
599 result.append(resp)
599 result.append(resp)
600 return result
600 return result
601
601
602 def local(self):
602 def local(self):
603 return self
603 return self
604
604
605 def join(self, f):
605 def join(self, f):
606 return os.path.join(self.path, f)
606 return os.path.join(self.path, f)
607
607
608 def wjoin(self, f):
608 def wjoin(self, f):
609 return os.path.join(self.root, f)
609 return os.path.join(self.root, f)
610
610
611 def file(self, f):
611 def file(self, f):
612 if f[0] == '/':
612 if f[0] == '/':
613 f = f[1:]
613 f = f[1:]
614 return filelog.filelog(self.sopener, f)
614 return filelog.filelog(self.sopener, f)
615
615
616 def changectx(self, changeid):
616 def changectx(self, changeid):
617 return self[changeid]
617 return self[changeid]
618
618
619 def parents(self, changeid=None):
619 def parents(self, changeid=None):
620 '''get list of changectxs for parents of changeid'''
620 '''get list of changectxs for parents of changeid'''
621 return self[changeid].parents()
621 return self[changeid].parents()
622
622
623 def setparents(self, p1, p2=nullid):
623 def setparents(self, p1, p2=nullid):
624 copies = self.dirstate.setparents(p1, p2)
624 copies = self.dirstate.setparents(p1, p2)
625 if copies:
625 if copies:
626 # Adjust copy records, the dirstate cannot do it, it
626 # Adjust copy records, the dirstate cannot do it, it
627 # requires access to parents manifests. Preserve them
627 # requires access to parents manifests. Preserve them
628 # only for entries added to first parent.
628 # only for entries added to first parent.
629 pctx = self[p1]
629 pctx = self[p1]
630 for f in copies:
630 for f in copies:
631 if f not in pctx and copies[f] in pctx:
631 if f not in pctx and copies[f] in pctx:
632 self.dirstate.copy(copies[f], f)
632 self.dirstate.copy(copies[f], f)
633
633
634 def filectx(self, path, changeid=None, fileid=None):
634 def filectx(self, path, changeid=None, fileid=None):
635 """changeid can be a changeset revision, node, or tag.
635 """changeid can be a changeset revision, node, or tag.
636 fileid can be a file revision or node."""
636 fileid can be a file revision or node."""
637 return context.filectx(self, path, changeid, fileid)
637 return context.filectx(self, path, changeid, fileid)
638
638
639 def getcwd(self):
639 def getcwd(self):
640 return self.dirstate.getcwd()
640 return self.dirstate.getcwd()
641
641
642 def pathto(self, f, cwd=None):
642 def pathto(self, f, cwd=None):
643 return self.dirstate.pathto(f, cwd)
643 return self.dirstate.pathto(f, cwd)
644
644
645 def wfile(self, f, mode='r'):
645 def wfile(self, f, mode='r'):
646 return self.wopener(f, mode)
646 return self.wopener(f, mode)
647
647
648 def _link(self, f):
648 def _link(self, f):
649 return os.path.islink(self.wjoin(f))
649 return os.path.islink(self.wjoin(f))
650
650
651 def _loadfilter(self, filter):
651 def _loadfilter(self, filter):
652 if filter not in self.filterpats:
652 if filter not in self.filterpats:
653 l = []
653 l = []
654 for pat, cmd in self.ui.configitems(filter):
654 for pat, cmd in self.ui.configitems(filter):
655 if cmd == '!':
655 if cmd == '!':
656 continue
656 continue
657 mf = matchmod.match(self.root, '', [pat])
657 mf = matchmod.match(self.root, '', [pat])
658 fn = None
658 fn = None
659 params = cmd
659 params = cmd
660 for name, filterfn in self._datafilters.iteritems():
660 for name, filterfn in self._datafilters.iteritems():
661 if cmd.startswith(name):
661 if cmd.startswith(name):
662 fn = filterfn
662 fn = filterfn
663 params = cmd[len(name):].lstrip()
663 params = cmd[len(name):].lstrip()
664 break
664 break
665 if not fn:
665 if not fn:
666 fn = lambda s, c, **kwargs: util.filter(s, c)
666 fn = lambda s, c, **kwargs: util.filter(s, c)
667 # Wrap old filters not supporting keyword arguments
667 # Wrap old filters not supporting keyword arguments
668 if not inspect.getargspec(fn)[2]:
668 if not inspect.getargspec(fn)[2]:
669 oldfn = fn
669 oldfn = fn
670 fn = lambda s, c, **kwargs: oldfn(s, c)
670 fn = lambda s, c, **kwargs: oldfn(s, c)
671 l.append((mf, fn, params))
671 l.append((mf, fn, params))
672 self.filterpats[filter] = l
672 self.filterpats[filter] = l
673 return self.filterpats[filter]
673 return self.filterpats[filter]
674
674
675 def _filter(self, filterpats, filename, data):
675 def _filter(self, filterpats, filename, data):
676 for mf, fn, cmd in filterpats:
676 for mf, fn, cmd in filterpats:
677 if mf(filename):
677 if mf(filename):
678 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
678 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
679 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
679 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
680 break
680 break
681
681
682 return data
682 return data
683
683
    @propertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @propertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener.write(filename, data)
            if 'x' in flags:
                util.setflags(self.wjoin(filename), False, True)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def transaction(self, desc):
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        self._writejournal(desc)
        renames = [(x, undoname(x)) for x in self._journalfiles()]

        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr

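    # Illustrative usage sketch (an assumption about callers, not part
    # of this changeset): a typical write path pairs the store lock with
    # a transaction, in the same shape commitctx() below uses:
    #
    #   lock = repo.lock()
    #   try:
    #       tr = repo.transaction('example')
    #       try:
    #           ...                # append to revlogs through tr
    #           tr.close()         # commit the transaction
    #       finally:
    #           tr.release()       # rolls back if close() was not reached
    #   finally:
    #       lock.release()
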
    def _journalfiles(self):
        return (self.sjoin('journal'), self.join('journal.dirstate'),
                self.join('journal.branch'), self.join('journal.desc'),
                self.join('journal.bookmarks'),
                self.sjoin('journal.phaseroots'))

    def undofiles(self):
        return [undoname(x) for x in self._journalfiles()]

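    # Note (sketch): undoname() is expected to map each journal file to
    # its post-transaction counterpart, e.g. 'journal.dirstate' ->
    # 'undo.dirstate'; _rollback() below reads exactly those 'undo.*'
    # names when undoing the last transaction.
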
    def _writejournal(self, desc):
        self.opener.write("journal.dirstate",
                          self.opener.tryread("dirstate"))
        self.opener.write("journal.branch",
                          encoding.fromlocal(self.dirstate.branch()))
        self.opener.write("journal.desc",
                          "%d\n%s\n" % (len(self), desc))
        self.opener.write("journal.bookmarks",
                          self.opener.tryread("bookmarks"))
        self.sopener.write("journal.phaseroots",
                           self.sopener.tryread("phaseroots"))

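    # Worked example of the journal.desc format written above: when a
    # repository of 41 revisions starts a 'commit' transaction, the file
    # holds two lines, the pre-transaction changelog length and the
    # transaction description:
    #
    #   41
    #   commit
    #
    # _rollback() parses these back as (oldlen, desc).
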
    def recover(self):
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"),
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self, dryrun=False, force=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)

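    # In short: recover() undoes an *interrupted* transaction (a
    # leftover 'journal' file), while rollback() undoes the last
    # *completed* transaction (the renamed 'undo' file). Only rollback()
    # takes the wlock, since _rollback() may rewrite the dirstate and
    # the current branch.
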
    def _rollback(self, dryrun, force):
        ui = self.ui
        try:
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
        if os.path.exists(self.join('undo.bookmarks')):
            util.rename(self.join('undo.bookmarks'),
                        self.join('bookmarks'))
        if os.path.exists(self.sjoin('undo.phaseroots')):
            util.rename(self.sjoin('undo.phaseroots'),
                        self.sjoin('phaseroots'))
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            util.rename(self.join('undo.dirstate'), self.join('dirstate'))
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(branch)
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        self.destroyed()
        return 0

    def invalidatecaches(self):
        def delcache(name):
            try:
                delattr(self, name)
            except AttributeError:
                pass

        delcache('_tagscache')

        self._branchcache = None # in UTF-8
        self._branchcachetip = None

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you
        want the dirstate to be read again explicitly (i.e. to restore
        it to a previously known good state).'''
        if 'dirstate' in self.__dict__:
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self, 'dirstate')

    def invalidate(self):
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(self, k)
            except AttributeError:
                pass
        self.invalidatecaches()

    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def _afterlock(self, callback):
        """add a callback to the current repository lock.

        The callback will be executed on lock release."""
        l = self._lockref and self._lockref()
        if l:
            l.postrelease.append(callback)
        else:
            callback()

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.store.write()
            if '_phasecache' in vars(self):
                self._phasecache.write()
            for k, ce in self._filecache.items():
                if k == 'dirstate':
                    continue
                ce.refresh()

        l = self._lock(self.sjoin("lock"), wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.dirstate.write()
            ce = self._filecache.get('dirstate')
            if ce:
                ce.refresh()

        l = self._lock(self.join("wlock"), wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

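    # Lock-ordering sketch: callers that need both locks take the wlock
    # first and the store lock second, as rollback() above does
    # (wlock = self.wlock(); lock = self.lock()). commit() below follows
    # the same pattern indirectly: it holds the wlock while commitctx()
    # takes the store lock. Acquiring them in the opposite order risks
    # deadlock between two such callers.
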
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                 should record that bar descends from
            #                 bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

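    # Worked example of the copy metadata produced above (hypothetical
    # values): committing a file 'bar' recorded as renamed from 'foo'
    # yields a filelog revision whose first parent is nullid and whose
    # metadata carries the copy source, roughly:
    #
    #   meta = {'copy': 'foo', 'copyrev': '1e4e1b8f71e0...'}
    #   fparent1, fparent2 = nullid, newfparent
    #
    # Readers see fparent1 == nullid and know to follow meta['copy'] to
    # find the real ancestry.
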
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                if '.hgsubstate' in changes[0]:
                    changes[0].remove('.hgsubstate')
                if '.hgsubstate' in changes[2]:
                    changes[2].remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise util.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    if wctx.sub(s).dirty(True):
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise util.Abort(
                                _("uncommitted changes in subrepo %s") % s,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    changes[0].insert(0, '.hgsubstate')

            elif '.hgsub' in changes[2]:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
                    changes[2].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            if merge and changes[3]:
                raise util.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
-            bookmarks.update(self, p1, ret)
+            bookmarks.update(self, [p1, p2], ret)
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.drop(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            self.hook("commit", node=node, parent1=parent1, parent2=parent2)
        self._afterlock(commithook)
        return ret

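    # Hook-order sketch for a successful commit(), as wired above and in
    # commitctx() below:
    #
    #   1. 'precommit'    - before anything is written (throw=True)
    #   2. 'pretxncommit' - inside the transaction, may still abort it
    #   3. 'commit'       - queued via _afterlock(), fires once the
    #                       repository lock is released
    #
    # Step 2 lives in commitctx(); steps 1 and 3 live here.
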
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            # set the new commit in its proper phase
            targetphase = phases.newcommitphase(self.ui)
            if targetphase:
                # retracting the boundary does not alter parent changesets;
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, targetphase, [n])
            tr.close()
            self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.'''
        # XXX it might be nice if we could take the list of destroyed
        # nodes, but I don't see an easy way for rollback() to do that

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidatecaches()

        # Discard all cache entries to force reloading everything.
        self._filecache.clear()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            if match.always():
                return mf
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in ctx1 and f not in ctx1.dirs():
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx2.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            withflags = mf1.withflags() | mf2.withflags()
            for fn in mf2:
                if fn in mf1:
                    if (fn not in deleted and
                        ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
                         (mf1[fn] != mf2[fn] and
                          (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            removed = mf1.keys()

        if working and modified and not self.dirstate._checklink:
            # Symlink placeholders may get non-symlink-like contents
            # via user error or dereferencing by NFS or Samba servers,
            # so we filter out any placeholders that don't look like a
            # symlink
            sane = []
            for f in modified:
                if ctx2.flags(f) == 'l':
                    d = ctx2[f].data()
                    if len(d) >= 1024 or '\n' in d or util.binary(d):
                        self.ui.debug('ignoring suspect symlink placeholder'
                                      ' "%s"\n' % f)
                        continue
                sane.append(f)
            modified = sane

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r

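    # Illustrative call (an assumption about the caller, not this file):
    # the seven lists come back in a fixed order, so callers typically
    # unpack them positionally:
    #
    #   (modified, added, removed, deleted,
    #    unknown, ignored, clean) = repo.status(unknown=True, clean=True)
    #
    # unknown/ignored/clean stay empty unless the matching flag is set.
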
    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if
                      ('close' not in self.changelog.read(h)[5])]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

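    # Sketch of what between() samples: walking first parents from 'top'
    # toward 'bottom', it records the nodes at distances 1, 2, 4, 8, ...
    # (appending whenever i == f, then doubling f). The result is a
    # logarithmically spaced chain between the two nodes rather than
    # every node on the path.
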
    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                added = []
                result = 0
            else:
                if heads is None and list(common) == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                if remote.capable('getbundle'):
                    cg = remote.getbundle('pull', common=common,
                                          heads=heads or rheads)
                elif heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                clstart = len(self.changelog)
                result = self.addchangegroup(cg, 'pull', remote.url())
                clend = len(self.changelog)
                added = [self.changelog.node(r) for r in xrange(clstart, clend)]

            # compute target subset
            if heads is None:
                # We pulled everything possible
                # sync on everything common
                subset = common + added
            else:
                # We pulled a specific subset
                # sync on this subset
                subset = heads

            # Get remote phases data from remote
            remotephases = remote.listkeys('phases')
            publishing = bool(remotephases.get('publishing', False))
            if remotephases and not publishing:
                # remote is new and non-publishing
                pheads, _dr = phases.analyzeremotephases(self, subset,
                                                         remotephases)
                phases.advanceboundary(self, phases.public, pheads)
                phases.advanceboundary(self, phases.draft, subset)
            else:
                # Remote is old or publishing; all common changesets
                # should be seen as public
                phases.advanceboundary(self, phases.public, subset)
        finally:
            lock.release()

        return result

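    # Phase-sync summary of the branch above (informal): pulling from a
    # publishing (or pre-phases) remote marks the whole synced subset
    # public locally; pulling from a non-publishing remote advances the
    # public boundary only to the heads the remote itself reports as
    # public, and the rest of the subset becomes at most draft.
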
1592 def checkpush(self, force, revs):
1592 def checkpush(self, force, revs):
1593 """Extensions can override this function if additional checks have
1593 """Extensions can override this function if additional checks have
1594 to be performed before pushing, or call it if they override push
1594 to be performed before pushing, or call it if they override push
1595 command.
1595 command.
1596 """
1596 """
1597 pass
1597 pass
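
    # An illustrative sketch of how an extension might override checkpush()
    # (hypothetical extension code; only the hook contract above is assumed):
    #
    #   def reposetup(ui, repo):
    #       class vetorepo(repo.__class__):
    #           def checkpush(self, force, revs):
    #               super(vetorepo, self).checkpush(force, revs)
    #               if not force and self.ui.configbool('veto', 'push'):
    #                   raise util.Abort(_('pushing is disabled here'))
    #       repo.__class__ = vetorepo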

    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - None means nothing to push
          - 0 means HTTP error
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()
        '''
        # there are two ways to push to a remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        # get local lock as we might write phase data
        locallock = self.lock()
        try:
            self.checkpush(force, revs)
            lock = None
            unbundle = remote.capable('unbundle')
            if not unbundle:
                lock = remote.lock()
            try:
                # discovery
                fci = discovery.findcommonincoming
                commoninc = fci(self, remote, force=force)
                common, inc, remoteheads = commoninc
                fco = discovery.findcommonoutgoing
                outgoing = fco(self, remote, onlyheads=revs,
                               commoninc=commoninc, force=force)

                if not outgoing.missing:
                    # nothing to push
                    scmutil.nochangesfound(self.ui, outgoing.excluded)
                    ret = None
                else:
                    # something to push
                    if not force:
                        discovery.checkheads(self, remote, outgoing,
                                             remoteheads, newbranch,
                                             bool(inc))

                    # create a changegroup from local
                    if revs is None and not outgoing.excluded:
                        # push everything,
                        # use the fast path, no race possible on push
                        cg = self._changegroup(outgoing.missing, 'push')
                    else:
                        cg = self.getlocalbundle('push', outgoing)

                    # apply changegroup to remote
                    if unbundle:
                        # local repo finds heads on server, finds out what
                        # revs it must push. once revs transferred, if server
                        # finds it has different heads (someone else won
                        # commit/push race), server aborts.
                        if force:
                            remoteheads = ['force']
                        # ssh: return remote's addchangegroup()
                        # http: return remote's addchangegroup() or 0 for error
                        ret = remote.unbundle(cg, remoteheads, 'push')
                    else:
                        # we return an integer indicating remote head count
                        # change
                        ret = remote.addchangegroup(cg, 'push', self.url())

                if ret:
                    # push succeeded, synchronize the target of the push
                    cheads = outgoing.missingheads
                elif revs is None:
                    # An all-out push failed; synchronize on all common
                    cheads = outgoing.commonheads
                else:
                    # I want cheads = heads(::missingheads and ::commonheads)
                    # (missingheads is revs with secret changesets filtered out)
                    #
                    # This can be expressed as:
                    #     cheads = ( (missingheads and ::commonheads)
                    #              + (commonheads and ::missingheads))
                    #
                    # while trying to push we already computed the following:
                    #     common = (::commonheads)
                    #     missing = ((commonheads::missingheads) - commonheads)
                    #
                    # We can pick:
                    # * missingheads part of common (::commonheads)
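                    #
                    # An illustrative example (hypothetical history): with
                    # A -> B -> C and A -> B -> D, the remote holding A and B
                    # (commonheads = [B]) and only D being pushed
                    # (missingheads = [D], missing = [D]), no pushed head is
                    # in common, so the first pick below adds nothing; B is a
                    # commonhead and the parent of the missing root D, so the
                    # revset below yields [B], giving
                    # cheads = [B] = heads(::D and ::B).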
                    common = set(outgoing.common)
                    cheads = [node for node in revs if node in common]
                    # and
                    # * commonheads that are parents of roots of missing
                    revset = self.set('%ln and parents(roots(%ln))',
                                      outgoing.commonheads,
                                      outgoing.missing)
                    cheads.extend(c.node() for c in revset)
                # even when we don't push, exchanging phase data is useful
                remotephases = remote.listkeys('phases')
                if not remotephases: # old server or public only repo
                    phases.advanceboundary(self, phases.public, cheads)
                    # don't push any phase data as there is nothing to push
                else:
                    ana = phases.analyzeremotephases(self, cheads, remotephases)
                    pheads, droots = ana
                    ### Apply remote phase on local
                    if remotephases.get('publishing', False):
                        phases.advanceboundary(self, phases.public, cheads)
                    else: # publish = False
                        phases.advanceboundary(self, phases.public, pheads)
                        phases.advanceboundary(self, phases.draft, cheads)
                    ### Apply local phase on remote

                    # Get the list of all revs that are draft on the remote
                    # but public here.
                    # XXX Beware that the revset breaks if droots is not
                    # XXX strictly roots; we may want to ensure it is, but
                    # XXX that is costly
                    outdated = self.set('heads((%ln::%ln) and public())',
                                        droots, cheads)
                    for newremotehead in outdated:
                        r = remote.pushkey('phases',
                                           newremotehead.hex(),
                                           str(phases.draft),
                                           str(phases.public))
                        if not r:
                            self.ui.warn(_('updating %s to public failed!\n')
                                         % newremotehead)
            finally:
                if lock is not None:
                    lock.release()
        finally:
            locallock.release()

        self.ui.debug("checking for updated bookmarks\n")
        rb = remote.listkeys('bookmarks')
        for k in rb.keys():
            if k in self._bookmarks:
                nr, nl = rb[k], hex(self._bookmarks[k])
                if nr in self:
                    cr = self[nr]
                    cl = self[nl]
                    if cl in cr.descendants():
                        r = remote.pushkey('bookmarks', k, nr, nl)
                        if r:
                            self.ui.status(_("updating bookmark %s\n") % k)
                        else:
                            self.ui.warn(_('updating bookmark %s'
                                           ' failed!\n') % k)

        return ret
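
    # A minimal usage sketch for push() (hypothetical repo and peer objects;
    # only the return-value contract documented above is assumed):
    #
    #   ret = repo.push(other)
    #   if ret is None:
    #       pass        # nothing to push
    #   elif ret == 0:
    #       pass        # HTTP error while unbundling on the remote
    #   else:
    #       pass        # pushed; see addchangegroup() for head-count values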

    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.
        """
        cl = self.changelog
        if not bases:
            bases = [nullid]
        csets, bases, heads = cl.nodesbetween(bases, heads)
        # We assume that all ancestors of bases are known
        common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
        return self._changegroupsubset(common, csets, heads, source)
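
    # For instance (illustrative nodes): changegroupsubset([b], [d], 'pull')
    # bundles every changeset that descends from b and is an ancestor of d,
    # assuming the receiver already knows all ancestors of b.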

    def getlocalbundle(self, source, outgoing):
        """Like getbundle, but taking a discovery.outgoing as an argument.

        This is only implemented for local repos and reuses potentially
        precomputed sets in outgoing."""
        if not outgoing.missing:
            return None
        return self._changegroupsubset(outgoing.common,
                                       outgoing.missing,
                                       outgoing.missingheads,
                                       source)

    def getbundle(self, source, heads=None, common=None):
        """Like changegroupsubset, but returns the set difference between the
        ancestors of heads and the ancestors of common.

        If heads is None, use the local heads. If common is None, use [nullid].

        The nodes in common might not all be known locally due to the way the
        current discovery protocol works.
        """
        cl = self.changelog
        if common:
            nm = cl.nodemap
            common = [n for n in common if n in nm]
        else:
            common = [nullid]
        if not heads:
            heads = cl.heads()
        return self.getlocalbundle(source,
                                   discovery.outgoing(cl, common, heads))
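
    # Illustratively, getbundle('pull', heads=[h], common=[c]) bundles
    # (::h) - (::c); with both arguments None it degenerates to all local
    # heads against [nullid], i.e. a bundle of the entire repository.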

    def _changegroupsubset(self, commonrevs, csets, heads, source):

        cl = self.changelog
        mf = self.manifest
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()
        fstate = ['', {}]
        count = [0, 0]

        # can we go through the fast path?
        heads.sort()
        if heads == sorted(self.heads()):
            return self._changegroup(csets, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(csets, source)

        # filter any nodes that claim to be part of the known set
        def prune(revlog, missing):
            rr, rl = revlog.rev, revlog.linkrev
            return [n for n in missing
                    if rl(rr(n)) not in commonrevs]

        progress = self.ui.progress
        _bundling = _('bundling')
        _changesets = _('changesets')
        _manifests = _('manifests')
        _files = _('files')

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_changesets, total=count[1])
                return x
            elif revlog == mf:
                clnode = mfs[x]
                mdata = mf.readfast(x)
                for f, n in mdata.iteritems():
                    if f in changedfiles:
                        fnodes[f].setdefault(n, clnode)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_manifests, total=count[1])
                return clnode
            else:
                progress(_bundling, count[0], item=fstate[0],
                         unit=_files, total=count[1])
                return fstate[1][x]

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            count[:] = [0, len(csets)]
            for chunk in cl.group(csets, bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            for f in changedfiles:
                fnodes[f] = {}
            count[:] = [0, len(mfs)]
            for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            mfs.clear()

            # Go through all our files in order sorted by name.
            count[:] = [0, len(changedfiles)]
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                fstate[0] = fname
                fstate[1] = fnodes.pop(fname, {})

                nodelist = prune(filerevlog, fstate[1])
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk

            # Signal that no more groups are left.
            yield bundler.close()
            progress(_bundling, None)

        if csets:
            self.hook('outgoing', node=hex(csets[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        cl = self.changelog
        mf = self.manifest
        mfs = {}
        changedfiles = set()
        fstate = ['']
        count = [0, 0]

        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(nodes, source)

        revset = set([cl.rev(n) for n in nodes])

        def gennodelst(log):
            ln, llr = log.node, log.linkrev
            return [ln(r) for r in log if llr(r) in revset]

        progress = self.ui.progress
        _bundling = _('bundling')
        _changesets = _('changesets')
        _manifests = _('manifests')
        _files = _('files')

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_changesets, total=count[1])
                return x
            elif revlog == mf:
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_manifests, total=count[1])
                return cl.node(revlog.linkrev(revlog.rev(x)))
            else:
                progress(_bundling, count[0], item=fstate[0],
                         total=count[1], unit=_files)
                return cl.node(revlog.linkrev(revlog.rev(x)))

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files

            count[:] = [0, len(nodes)]
            for chunk in cl.group(nodes, bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            count[:] = [0, len(mfs)]
            for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            count[:] = [0, len(changedfiles)]
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                fstate[0] = fname
                nodelist = gennodelst(filerevlog)
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk
            yield bundler.close()
            progress(_bundling, None)

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

    def addchangegroup(self, source, srctype, url, emptyok=False):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0
        efiles = set()

        # write changelog data to temp files so concurrent readers will not
        # see an inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = cl.heads()

        tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            class prog(object):
                step = _('changesets')
                count = 1
                ui = self.ui
                total = None
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'),
                                     total=self.total)
                    self.count += 1
            pr = prog()
            source.callback = pr

            source.changelogheader()
            srccontent = cl.addgroup(source, csmap, trp)
            if not (srccontent or emptyok):
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            source.manifestheader()
            self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = _('files')
            pr.count = 1
            pr.total = efiles
            source.callback = None

            while True:
                chunkdata = source.filelogheader()
                if not chunkdata:
                    break
                f = chunkdata["filename"]
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                if not fl.addgroup(source, revmap, trp):
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                if f in needfiles:
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            dh = 0
            if oldheads:
                heads = cl.heads()
                dh = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and 'close' in self[h].extra():
                        dh -= 1
            htext = ""
            if dh:
                htext = _(" (%+d heads)") % dh

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            added = [cl.node(r) for r in xrange(clstart, clend)]
            publishing = self.ui.configbool('phases', 'publish', True)
            if srctype == 'push':
                # Old servers can not push the boundary themselves.
                # New servers won't push the boundary if a changeset already
                # existed locally as secret
                #
                # We should not use added here but the list of all changes in
                # the bundle
                if publishing:
                    phases.advanceboundary(self, phases.public, srccontent)
                else:
                    phases.advanceboundary(self, phases.draft, srccontent)
                    phases.retractboundary(self, phases.draft, added)
            elif srctype != 'strip':
                # publishing only alters behavior during push
                #
                # strip should not touch the boundary at all
                phases.retractboundary(self, phases.draft, added)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()

            if changesets > 0:
                def runhooks():
                    # forcefully update the on-disk branch cache
                    self.ui.debug("updating the branch cache\n")
                    self.updatebranchcache()
                    self.hook("changegroup", node=hex(cl.node(clstart)),
                              source=srctype, url=url)

                    for n in added:
                        self.hook("incoming", node=hex(n), source=srctype,
                                  url=url)
                self._afterlock(runhooks)

        finally:
            tr.release()
        # never return 0 here:
        if dh < 0:
            return dh - 1
        else:
            return dh + 1

    def stream_in(self, remote, requirements):
        lock = self.lock()
        try:
            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            start = time.time()
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('Unexpected response from remote server:'), l)
                if self.ui.debugflag:
                    self.ui.debug('adding %s (%s)\n' %
                                  (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = self.sopener(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    ofp.write(chunk)
                ofp.close()
            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements +
            #                    new format-related requirements from the
            #                    streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()
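
    # The wire format consumed above, as parsed by this method (a summary,
    # not a normative protocol spec):
    #   <resp>\n                          0 ok, 1 forbidden, 2 lock failed
    #   <total_files> <total_bytes>\n
    #   then for each file: <name>\0<size>\n followed by <size> raw bytes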

    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if not stream:
            # if the server explicitly prefers to stream (for fast LANs)
            stream = remote.capable('stream-preferred')

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)
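
    # A minimal clone sketch (hypothetical caller): with stream=True and no
    # heads, a server advertising 'stream' is streamed from directly, one
    # advertising compatible 'streamreqs' likewise, and anything else
    # degrades to a regular pull:
    #
    #   dest.clone(srcpeer, stream=True)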

    def pushkey(self, namespace, key, old, new):
        self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                  old=old, new=new)
        ret = pushkey.push(self, namespace, key, old, new)
        self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                  ret=ret)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values
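
    # Illustratively, the bookmark synchronization at the end of push()
    # above is driven entirely through this pushkey machinery on the remote:
    #
    #   remote.listkeys('bookmarks')            # {name: hex node}
    #   remote.pushkey('bookmarks', k, nr, nl)  # truthy on success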

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.opener('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root)+1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            try:
                util.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a
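
# Illustratively (based on how this module uses it), a transaction passes
# aftertrans(renames) as its post-close 'after' callback, so journal files
# are renamed to their undo names once the transaction commits.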

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True