bookmarks: extract valid destination logic in a dedicated function...
Pierre-Yves David
r17550:fc530080 default
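
Both call sites previously open-coded the "may this bookmark move here?" test: the pull path checked changelog descendants directly, while the push path compared ancestors. This changeset extracts that policy into a single bookmarks.validdest predicate. A condensed sketch of the new helper and the two rewritten call sites, taken from the hunks below:

    # New helper added to mercurial/bookmarks.py by this changeset:
    def validdest(repo, old, new):
        """Is the new bookmark destination a valid update from the old one"""
        # a bookmark may only move onto a descendant of its current changeset
        return new in old.descendants()

    # Pull path, bookmarks.updatefromremote():
    #   before: if cr in cl.descendants():
    #   after:  if validdest(repo, cl, cr):
    #
    # Push path, discovery.checkheads():
    #   before: if rctx == lctx.ancestor(rctx):
    #   after:  if bookmarks.validdest(repo, rctx, lctx):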
--- a/mercurial/bookmarks.py
+++ b/mercurial/bookmarks.py
@@ -1,254 +1,258 @@
 # Mercurial bookmark support code
 #
 # Copyright 2008 David Soria Parra <dsp@php.net>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 
 from mercurial.i18n import _
 from mercurial.node import hex
 from mercurial import encoding, error, util
 import errno, os
 
 def valid(mark):
     for c in (':', '\0', '\n', '\r'):
         if c in mark:
             return False
     return True
 
 def read(repo):
     '''Parse .hg/bookmarks file and return a dictionary
 
     Bookmarks are stored as {HASH}\\s{NAME}\\n (localtags format) values
     in the .hg/bookmarks file.
     Read the file and return a (name=>nodeid) dictionary
     '''
     bookmarks = {}
     try:
         for line in repo.opener('bookmarks'):
             line = line.strip()
             if not line:
                 continue
             if ' ' not in line:
                 repo.ui.warn(_('malformed line in .hg/bookmarks: %r\n') % line)
                 continue
             sha, refspec = line.split(' ', 1)
             refspec = encoding.tolocal(refspec)
             try:
                 bookmarks[refspec] = repo.changelog.lookup(sha)
             except LookupError:
                 pass
     except IOError, inst:
         if inst.errno != errno.ENOENT:
             raise
     return bookmarks
 
 def readcurrent(repo):
     '''Get the current bookmark
 
     If we use gittishsh branches we have a current bookmark that
     we are on. This function returns the name of the bookmark. It
     is stored in .hg/bookmarks.current
     '''
     mark = None
     try:
         file = repo.opener('bookmarks.current')
     except IOError, inst:
         if inst.errno != errno.ENOENT:
             raise
         return None
     try:
         # No readline() in osutil.posixfile, reading everything is cheap
         mark = encoding.tolocal((file.readlines() or [''])[0])
         if mark == '' or mark not in repo._bookmarks:
             mark = None
     finally:
         file.close()
     return mark
 
 def write(repo):
     '''Write bookmarks
 
     Write the given bookmark => hash dictionary to the .hg/bookmarks file
     in a format equal to those of localtags.
 
     We also store a backup of the previous state in undo.bookmarks that
     can be copied back on rollback.
     '''
     refs = repo._bookmarks
 
     if repo._bookmarkcurrent not in refs:
         setcurrent(repo, None)
     for mark in refs.keys():
         if not valid(mark):
             raise util.Abort(_("bookmark '%s' contains illegal "
                                "character" % mark))
 
     wlock = repo.wlock()
     try:
 
         file = repo.opener('bookmarks', 'w', atomictemp=True)
         for refspec, node in refs.iteritems():
             file.write("%s %s\n" % (hex(node), encoding.fromlocal(refspec)))
         file.close()
 
         # touch 00changelog.i so hgweb reloads bookmarks (no lock needed)
         try:
             os.utime(repo.sjoin('00changelog.i'), None)
         except OSError:
             pass
 
     finally:
         wlock.release()
 
 def setcurrent(repo, mark):
     '''Set the name of the bookmark that we are currently on
 
     Set the name of the bookmark that we are on (hg update <bookmark>).
     The name is recorded in .hg/bookmarks.current
     '''
     current = repo._bookmarkcurrent
     if current == mark:
         return
 
     if mark not in repo._bookmarks:
         mark = ''
     if not valid(mark):
         raise util.Abort(_("bookmark '%s' contains illegal "
                            "character" % mark))
 
     wlock = repo.wlock()
     try:
         file = repo.opener('bookmarks.current', 'w', atomictemp=True)
         file.write(encoding.fromlocal(mark))
         file.close()
     finally:
         wlock.release()
     repo._bookmarkcurrent = mark
 
 def unsetcurrent(repo):
     wlock = repo.wlock()
     try:
         try:
             util.unlink(repo.join('bookmarks.current'))
             repo._bookmarkcurrent = None
         except OSError, inst:
             if inst.errno != errno.ENOENT:
                 raise
     finally:
         wlock.release()
 
 def updatecurrentbookmark(repo, oldnode, curbranch):
     try:
         return update(repo, oldnode, repo.branchtip(curbranch))
     except error.RepoLookupError:
         if curbranch == "default": # no default branch!
             return update(repo, oldnode, repo.lookup("tip"))
         else:
             raise util.Abort(_("branch %s not found") % curbranch)
 
 def update(repo, parents, node):
     marks = repo._bookmarks
     update = False
     cur = repo._bookmarkcurrent
     if not cur:
         return False
 
     toupdate = [b for b in marks if b.split('@', 1)[0] == cur.split('@', 1)[0]]
     for mark in toupdate:
         if mark and marks[mark] in parents:
             old = repo[marks[mark]]
             new = repo[node]
             if new in old.descendants() and mark == cur:
                 marks[cur] = new.node()
                 update = True
             if mark != cur:
                 del marks[mark]
     if update:
         repo._writebookmarks(marks)
     return update
 
 def listbookmarks(repo):
     # We may try to list bookmarks on a repo type that does not
     # support it (e.g., statichttprepository).
     marks = getattr(repo, '_bookmarks', {})
 
     d = {}
     for k, v in marks.iteritems():
         # don't expose local divergent bookmarks
         if '@' not in k or k.endswith('@'):
             d[k] = hex(v)
     return d
 
 def pushbookmark(repo, key, old, new):
     w = repo.wlock()
     try:
         marks = repo._bookmarks
         if hex(marks.get(key, '')) != old:
             return False
         if new == '':
             del marks[key]
         else:
             if new not in repo:
                 return False
             marks[key] = repo[new].node()
         write(repo)
         return True
     finally:
         w.release()
 
 def updatefromremote(ui, repo, remote, path):
     ui.debug("checking for updated bookmarks\n")
     rb = remote.listkeys('bookmarks')
     changed = False
     for k in rb.keys():
         if k in repo._bookmarks:
             nr, nl = rb[k], repo._bookmarks[k]
             if nr in repo:
                 cr = repo[nr]
                 cl = repo[nl]
                 if cl.rev() >= cr.rev():
                     continue
-                if cr in cl.descendants():
+                if validdest(repo, cl, cr):
                     repo._bookmarks[k] = cr.node()
                     changed = True
                     ui.status(_("updating bookmark %s\n") % k)
                 else:
                     # find a unique @ suffix
                     for x in range(1, 100):
                         n = '%s@%d' % (k, x)
                         if n not in repo._bookmarks:
                             break
                     # try to use an @pathalias suffix
                     # if an @pathalias already exists, we overwrite (update) it
                     for p, u in ui.configitems("paths"):
                         if path == u:
                             n = '%s@%s' % (k, p)
 
                     repo._bookmarks[n] = cr.node()
                     changed = True
                     ui.warn(_("divergent bookmark %s stored as %s\n") % (k, n))
         elif rb[k] in repo:
             # add remote bookmarks for changes we already have
             repo._bookmarks[k] = repo[rb[k]].node()
             changed = True
             ui.status(_("adding remote bookmark %s\n") % k)
 
     if changed:
         write(repo)
 
 def diff(ui, repo, remote):
     ui.status(_("searching for changed bookmarks\n"))
 
     lmarks = repo.listkeys('bookmarks')
     rmarks = remote.listkeys('bookmarks')
 
     diff = sorted(set(rmarks) - set(lmarks))
     for k in diff:
         mark = ui.debugflag and rmarks[k] or rmarks[k][:12]
         ui.write(" %-25s %s\n" % (k, mark))
 
     if len(diff) <= 0:
         ui.status(_("no changed bookmarks found\n"))
         return 1
     return 0
+
+def validdest(repo, old, new):
+    """Is the new bookmark destination a valid update from the old one"""
+    return new in old.descendants()
--- a/mercurial/discovery.py
+++ b/mercurial/discovery.py
@@ -1,377 +1,377 @@
 # discovery.py - protocol changeset discovery functions
 #
 # Copyright 2010 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 
 from node import nullid, short
 from i18n import _
-import util, setdiscovery, treediscovery, phases, obsolete
+import util, setdiscovery, treediscovery, phases, obsolete, bookmarks
 
 def findcommonincoming(repo, remote, heads=None, force=False):
     """Return a tuple (common, anyincoming, heads) used to identify the common
     subset of nodes between repo and remote.
 
     "common" is a list of (at least) the heads of the common subset.
     "anyincoming" is testable as a boolean indicating if any nodes are missing
     locally. If remote does not support getbundle, this actually is a list of
     roots of the nodes that would be incoming, to be supplied to
     changegroupsubset. No code except for pull should be relying on this fact
     any longer.
     "heads" is either the supplied heads, or else the remote's heads.
 
     If you pass heads and they are all known locally, the response lists just
     these heads in "common" and in "heads".
 
     Please use findcommonoutgoing to compute the set of outgoing nodes to give
     extensions a good hook into outgoing.
     """
 
     if not remote.capable('getbundle'):
         return treediscovery.findcommonincoming(repo, remote, heads, force)
 
     if heads:
         allknown = True
         nm = repo.changelog.nodemap
         for h in heads:
             if nm.get(h) is None:
                 allknown = False
                 break
         if allknown:
             return (heads, False, heads)
 
     res = setdiscovery.findcommonheads(repo.ui, repo, remote,
                                        abortwhenunrelated=not force)
     common, anyinc, srvheads = res
     return (list(common), anyinc, heads or list(srvheads))
 
 class outgoing(object):
     '''Represents the set of nodes present in a local repo but not in a
     (possibly) remote one.
 
     Members:
 
     missing is a list of all nodes present in local but not in remote.
     common is a list of all nodes shared between the two repos.
     excluded is the list of missing changeset that shouldn't be sent remotely.
     missingheads is the list of heads of missing.
     commonheads is the list of heads of common.
 
     The sets are computed on demand from the heads, unless provided upfront
     by discovery.'''
 
     def __init__(self, revlog, commonheads, missingheads):
         self.commonheads = commonheads
         self.missingheads = missingheads
         self._revlog = revlog
         self._common = None
         self._missing = None
         self.excluded = []
 
     def _computecommonmissing(self):
         sets = self._revlog.findcommonmissing(self.commonheads,
                                               self.missingheads)
         self._common, self._missing = sets
 
     @util.propertycache
     def common(self):
         if self._common is None:
             self._computecommonmissing()
         return self._common
 
     @util.propertycache
     def missing(self):
         if self._missing is None:
             self._computecommonmissing()
         return self._missing
 
 def findcommonoutgoing(repo, other, onlyheads=None, force=False,
                        commoninc=None, portable=False):
     '''Return an outgoing instance to identify the nodes present in repo but
     not in other.
 
     If onlyheads is given, only nodes ancestral to nodes in onlyheads
     (inclusive) are included. If you already know the local repo's heads,
     passing them in onlyheads is faster than letting them be recomputed here.
 
     If commoninc is given, it must be the result of a prior call to
     findcommonincoming(repo, other, force) to avoid recomputing it here.
 
     If portable is given, compute more conservative common and missingheads,
     to make bundles created from the instance more portable.'''
     # declare an empty outgoing object to be filled later
     og = outgoing(repo.changelog, None, None)
 
     # get common set if not provided
     if commoninc is None:
         commoninc = findcommonincoming(repo, other, force=force)
     og.commonheads, _any, _hds = commoninc
 
     # compute outgoing
     mayexclude = (repo._phasecache.phaseroots[phases.secret] or repo.obsstore)
     if not mayexclude:
         og.missingheads = onlyheads or repo.heads()
     elif onlyheads is None:
         # use visible heads as it should be cached
         og.missingheads = visibleheads(repo)
         og.excluded = [ctx.node() for ctx in repo.set('secret() or extinct()')]
     else:
         # compute common, missing and exclude secret stuff
         sets = repo.changelog.findcommonmissing(og.commonheads, onlyheads)
         og._common, allmissing = sets
         og._missing = missing = []
         og.excluded = excluded = []
         for node in allmissing:
             ctx = repo[node]
             if ctx.phase() >= phases.secret or ctx.extinct():
                 excluded.append(node)
             else:
                 missing.append(node)
         if len(missing) == len(allmissing):
             missingheads = onlyheads
         else: # update missing heads
             missingheads = phases.newheads(repo, onlyheads, excluded)
         og.missingheads = missingheads
     if portable:
         # recompute common and missingheads as if -r<rev> had been given for
         # each head of missing, and --base <rev> for each head of the proper
         # ancestors of missing
         og._computecommonmissing()
         cl = repo.changelog
         missingrevs = set(cl.rev(n) for n in og._missing)
         og._common = set(cl.ancestors(missingrevs)) - missingrevs
         commonheads = set(og.commonheads)
         og.missingheads = [h for h in og.missingheads if h not in commonheads]
 
     return og
 
 def _headssummary(repo, remote, outgoing):
     """compute a summary of branch and heads status before and after push
 
     return {'branch': ([remoteheads], [newheads], [unsyncedheads])} mapping
 
     - branch: the branch name
     - remoteheads: the list of remote heads known locally
                    None is the branch is new
     - newheads: the new remote heads (known locally) with outgoing pushed
     - unsyncedheads: the list of remote heads unknown locally.
     """
     cl = repo.changelog
     headssum = {}
     # A. Create set of branches involved in the push.
     branches = set(repo[n].branch() for n in outgoing.missing)
     remotemap = remote.branchmap()
     newbranches = branches - set(remotemap)
     branches.difference_update(newbranches)
 
     # A. register remote heads
     remotebranches = set()
     for branch, heads in remote.branchmap().iteritems():
         remotebranches.add(branch)
         known = []
         unsynced = []
         for h in heads:
             if h in cl.nodemap:
                 known.append(h)
             else:
                 unsynced.append(h)
         headssum[branch] = (known, list(known), unsynced)
     # B. add new branch data
     missingctx = list(repo[n] for n in outgoing.missing)
     touchedbranches = set()
     for ctx in missingctx:
         branch = ctx.branch()
         touchedbranches.add(branch)
         if branch not in headssum:
             headssum[branch] = (None, [], [])
 
     # C drop data about untouched branches:
     for branch in remotebranches - touchedbranches:
         del headssum[branch]
 
     # D. Update newmap with outgoing changes.
     # This will possibly add new heads and remove existing ones.
     newmap = dict((branch, heads[1]) for branch, heads in headssum.iteritems()
                   if heads[0] is not None)
     repo._updatebranchcache(newmap, missingctx)
     for branch, newheads in newmap.iteritems():
         headssum[branch][1][:] = newheads
     return headssum
 
 def _oldheadssummary(repo, remoteheads, outgoing, inc=False):
     """Compute branchmapsummary for repo without branchmap support"""
 
     cl = repo.changelog
     # 1-4b. old servers: Check for new topological heads.
     # Construct {old,new}map with branch = None (topological branch).
     # (code based on _updatebranchcache)
     oldheads = set(h for h in remoteheads if h in cl.nodemap)
     # all nodes in outgoing.missing are children of either:
     # - an element of oldheads
     # - another element of outgoing.missing
     # - nullrev
     # This explains why the new head are very simple to compute.
     r = repo.set('heads(%ln + %ln)', oldheads, outgoing.missing)
     newheads = list(c.node() for c in r)
     unsynced = inc and set([None]) or set()
     return {None: (oldheads, newheads, unsynced)}
 
 def checkheads(repo, remote, outgoing, remoteheads, newbranch=False, inc=False):
     """Check that a push won't add any outgoing head
 
     raise Abort error and display ui message as needed.
     """
     # Check for each named branch if we're creating new remote heads.
     # To be a remote head after push, node must be either:
     # - unknown locally
     # - a local outgoing head descended from update
     # - a remote head that's known locally and not
     #   ancestral to an outgoing head
     if remoteheads == [nullid]:
         # remote is empty, nothing to check.
         return
 
     if remote.capable('branchmap'):
         headssum = _headssummary(repo, remote, outgoing)
     else:
         headssum = _oldheadssummary(repo, remoteheads, outgoing, inc)
     newbranches = [branch for branch, heads in headssum.iteritems()
                    if heads[0] is None]
     # 1. Check for new branches on the remote.
     if newbranches and not newbranch: # new branch requires --new-branch
         branchnames = ', '.join(sorted(newbranches))
         raise util.Abort(_("push creates new remote branches: %s!")
                          % branchnames,
                          hint=_("use 'hg push --new-branch' to create"
                                 " new remote branches"))
 
     # 2 compute newly pushed bookmarks. We
     # we don't warned about bookmarked heads.
     localbookmarks = repo._bookmarks
     remotebookmarks = remote.listkeys('bookmarks')
     bookmarkedheads = set()
     for bm in localbookmarks:
         rnode = remotebookmarks.get(bm)
         if rnode and rnode in repo:
             lctx, rctx = repo[bm], repo[rnode]
-            if rctx == lctx.ancestor(rctx):
+            if bookmarks.validdest(repo, rctx, lctx):
                 bookmarkedheads.add(lctx.node())
 
     # 3. Check for new heads.
     # If there are more heads after the push than before, a suitable
     # error message, depending on unsynced status, is displayed.
     error = None
     unsynced = False
     allmissing = set(outgoing.missing)
     allfuturecommon = set(c.node() for c in repo.set('%ld', outgoing.common))
     allfuturecommon.update(allmissing)
     for branch, heads in headssum.iteritems():
         if heads[0] is None:
             # Maybe we should abort if we push more that one head
             # for new branches ?
             continue
         candidate_newhs = set(heads[1])
         # add unsynced data
         oldhs = set(heads[0])
         oldhs.update(heads[2])
         candidate_newhs.update(heads[2])
         dhs = None
         discardedheads = set()
         if repo.obsstore:
             # remove future heads which are actually obsolete by another
             # pushed element:
             #
             # XXX as above, There are several cases this case does not handle
             # XXX properly
             #
             # (1) if <nh> is public, it won't be affected by obsolete marker
             #     and a new is created
             #
             # (2) if the new heads have ancestors which are not obsolete and
             #     not ancestors of any other heads we will have a new head too.
             #
             # This two case will be easy to handle for know changeset but much
             # more tricky for unsynced changes.
             newhs = set()
             for nh in candidate_newhs:
                 if nh in repo and repo[nh].phase() <= phases.public:
                     newhs.add(nh)
                 else:
                     for suc in obsolete.anysuccessors(repo.obsstore, nh):
                         if suc != nh and suc in allfuturecommon:
                             discardedheads.add(nh)
                             break
                     else:
                         newhs.add(nh)
         else:
             newhs = candidate_newhs
         if [h for h in heads[2] if h not in discardedheads]:
             unsynced = True
         if len(newhs) > len(oldhs):
             # strip updates to existing remote heads from the new heads list
             dhs = list(newhs - bookmarkedheads - oldhs)
         if dhs:
             if error is None:
                 if branch not in ('default', None):
                     error = _("push creates new remote head %s "
                               "on branch '%s'!") % (short(dhs[0]), branch)
                 else:
                     error = _("push creates new remote head %s!"
                              ) % short(dhs[0])
                 if heads[2]: # unsynced
                     hint = _("you should pull and merge or "
                              "use push -f to force")
                 else:
                     hint = _("did you forget to merge? "
                              "use push -f to force")
             if branch is not None:
                 repo.ui.note(_("new remote heads on branch '%s'\n") % branch)
             for h in dhs:
                 repo.ui.note(_("new remote head %s\n") % short(h))
     if error:
         raise util.Abort(error, hint=hint)
 
     # 6. Check for unsynced changes on involved branches.
     if unsynced:
         repo.ui.warn(_("note: unsynced remote changes!\n"))
 
 def visibleheads(repo):
     """return the set of visible head of this repo"""
     # XXX we want a cache on this
     sroots = repo._phasecache.phaseroots[phases.secret]
     if sroots or repo.obsstore:
         # XXX very slow revset. storing heads or secret "boundary"
         # would help.
         revset = repo.set('heads(not (%ln:: + extinct()))', sroots)
 
         vheads = [ctx.node() for ctx in revset]
         if not vheads:
             vheads.append(nullid)
     else:
         vheads = repo.heads()
     return vheads
 
 
 def visiblebranchmap(repo):
     """return a branchmap for the visible set"""
     # XXX Recomputing this data on the fly is very slow. We should build a
     # XXX cached version while computing the standard branchmap version.
     sroots = repo._phasecache.phaseroots[phases.secret]
     if sroots or repo.obsstore:
         vbranchmap = {}
         for branch, nodes in repo.branchmap().iteritems():
             # search for secret heads.
             for n in nodes:
                 if repo[n].phase() >= phases.secret:
                     nodes = None
                     break
             # if secret heads were found we must compute them again
             if nodes is None:
                 s = repo.set('heads(branch(%s) - secret() - extinct())',
                              branch)
                 nodes = [c.node() for c in s]
             vbranchmap[branch] = nodes
     else:
         vbranchmap = repo.branchmap()
     return vbranchmap
@@ -1,2610 +1,2610 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 from node import bin, hex, nullid, nullrev, short
7 from node import bin, hex, nullid, nullrev, short
8 from i18n import _
8 from i18n import _
9 import peer, changegroup, subrepo, discovery, pushkey, obsolete
9 import peer, changegroup, subrepo, discovery, pushkey, obsolete
10 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
10 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 import lock, transaction, store, encoding, base85
11 import lock, transaction, store, encoding, base85
12 import scmutil, util, extensions, hook, error, revset
12 import scmutil, util, extensions, hook, error, revset
13 import match as matchmod
13 import match as matchmod
14 import merge as mergemod
14 import merge as mergemod
15 import tags as tagsmod
15 import tags as tagsmod
16 from lock import release
16 from lock import release
17 import weakref, errno, os, time, inspect
17 import weakref, errno, os, time, inspect
18 propertycache = util.propertycache
18 propertycache = util.propertycache
19 filecache = scmutil.filecache
19 filecache = scmutil.filecache
20
20
21 class storecache(filecache):
21 class storecache(filecache):
22 """filecache for files in the store"""
22 """filecache for files in the store"""
23 def join(self, obj, fname):
23 def join(self, obj, fname):
24 return obj.sjoin(fname)
24 return obj.sjoin(fname)
25
25
26 MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
26 MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
27 LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))
27 LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))
28
28
29 class localpeer(peer.peerrepository):
29 class localpeer(peer.peerrepository):
30 '''peer for a local repo; reflects only the most recent API'''
30 '''peer for a local repo; reflects only the most recent API'''
31
31
32 def __init__(self, repo, caps=MODERNCAPS):
32 def __init__(self, repo, caps=MODERNCAPS):
33 peer.peerrepository.__init__(self)
33 peer.peerrepository.__init__(self)
34 self._repo = repo
34 self._repo = repo
35 self.ui = repo.ui
35 self.ui = repo.ui
36 self._caps = repo._restrictcapabilities(caps)
36 self._caps = repo._restrictcapabilities(caps)
37 self.requirements = repo.requirements
37 self.requirements = repo.requirements
38 self.supportedformats = repo.supportedformats
38 self.supportedformats = repo.supportedformats
39
39
40 def close(self):
40 def close(self):
41 self._repo.close()
41 self._repo.close()
42
42
43 def _capabilities(self):
43 def _capabilities(self):
44 return self._caps
44 return self._caps
45
45
46 def local(self):
46 def local(self):
47 return self._repo
47 return self._repo
48
48
49 def canpush(self):
49 def canpush(self):
50 return True
50 return True
51
51
52 def url(self):
52 def url(self):
53 return self._repo.url()
53 return self._repo.url()
54
54
55 def lookup(self, key):
55 def lookup(self, key):
56 return self._repo.lookup(key)
56 return self._repo.lookup(key)
57
57
58 def branchmap(self):
58 def branchmap(self):
59 return discovery.visiblebranchmap(self._repo)
59 return discovery.visiblebranchmap(self._repo)
60
60
61 def heads(self):
61 def heads(self):
62 return discovery.visibleheads(self._repo)
62 return discovery.visibleheads(self._repo)
63
63
64 def known(self, nodes):
64 def known(self, nodes):
65 return self._repo.known(nodes)
65 return self._repo.known(nodes)
66
66
67 def getbundle(self, source, heads=None, common=None):
67 def getbundle(self, source, heads=None, common=None):
68 return self._repo.getbundle(source, heads=heads, common=common)
68 return self._repo.getbundle(source, heads=heads, common=common)
69
69
70 # TODO We might want to move the next two calls into legacypeer and add
70 # TODO We might want to move the next two calls into legacypeer and add
71 # unbundle instead.
71 # unbundle instead.
72
72
73 def lock(self):
73 def lock(self):
74 return self._repo.lock()
74 return self._repo.lock()
75
75
76 def addchangegroup(self, cg, source, url):
76 def addchangegroup(self, cg, source, url):
77 return self._repo.addchangegroup(cg, source, url)
77 return self._repo.addchangegroup(cg, source, url)
78
78
79 def pushkey(self, namespace, key, old, new):
79 def pushkey(self, namespace, key, old, new):
80 return self._repo.pushkey(namespace, key, old, new)
80 return self._repo.pushkey(namespace, key, old, new)
81
81
82 def listkeys(self, namespace):
82 def listkeys(self, namespace):
83 return self._repo.listkeys(namespace)
83 return self._repo.listkeys(namespace)
84
84
85 def debugwireargs(self, one, two, three=None, four=None, five=None):
85 def debugwireargs(self, one, two, three=None, four=None, five=None):
86 '''used to test argument passing over the wire'''
86 '''used to test argument passing over the wire'''
87 return "%s %s %s %s %s" % (one, two, three, four, five)
87 return "%s %s %s %s %s" % (one, two, three, four, five)
88
88
89 class locallegacypeer(localpeer):
89 class locallegacypeer(localpeer):
90 '''peer extension which implements legacy methods too; used for tests with
90 '''peer extension which implements legacy methods too; used for tests with
91 restricted capabilities'''
91 restricted capabilities'''
92
92
93 def __init__(self, repo):
93 def __init__(self, repo):
94 localpeer.__init__(self, repo, caps=LEGACYCAPS)
94 localpeer.__init__(self, repo, caps=LEGACYCAPS)
95
95
96 def branches(self, nodes):
96 def branches(self, nodes):
97 return self._repo.branches(nodes)
97 return self._repo.branches(nodes)
98
98
99 def between(self, pairs):
99 def between(self, pairs):
100 return self._repo.between(pairs)
100 return self._repo.between(pairs)
101
101
102 def changegroup(self, basenodes, source):
102 def changegroup(self, basenodes, source):
103 return self._repo.changegroup(basenodes, source)
103 return self._repo.changegroup(basenodes, source)
104
104
105 def changegroupsubset(self, bases, heads, source):
105 def changegroupsubset(self, bases, heads, source):
106 return self._repo.changegroupsubset(bases, heads, source)
106 return self._repo.changegroupsubset(bases, heads, source)
107
107
108 class localrepository(object):
108 class localrepository(object):
109
109
110 supportedformats = set(('revlogv1', 'generaldelta'))
110 supportedformats = set(('revlogv1', 'generaldelta'))
111 supported = supportedformats | set(('store', 'fncache', 'shared',
111 supported = supportedformats | set(('store', 'fncache', 'shared',
112 'dotencode'))
112 'dotencode'))
113 openerreqs = set(('revlogv1', 'generaldelta'))
113 openerreqs = set(('revlogv1', 'generaldelta'))
114 requirements = ['revlogv1']
114 requirements = ['revlogv1']
115
115
116 def _baserequirements(self, create):
116 def _baserequirements(self, create):
117 return self.requirements[:]
117 return self.requirements[:]
118
118
119 def __init__(self, baseui, path=None, create=False):
119 def __init__(self, baseui, path=None, create=False):
120 self.wopener = scmutil.opener(path, expand=True)
120 self.wopener = scmutil.opener(path, expand=True)
121 self.wvfs = self.wopener
121 self.wvfs = self.wopener
122 self.root = self.wvfs.base
122 self.root = self.wvfs.base
123 self.path = self.wvfs.join(".hg")
123 self.path = self.wvfs.join(".hg")
124 self.origroot = path
124 self.origroot = path
125 self.auditor = scmutil.pathauditor(self.root, self._checknested)
125 self.auditor = scmutil.pathauditor(self.root, self._checknested)
126 self.opener = scmutil.opener(self.path)
126 self.opener = scmutil.opener(self.path)
127 self.vfs = self.opener
127 self.vfs = self.opener
128 self.baseui = baseui
128 self.baseui = baseui
129 self.ui = baseui.copy()
129 self.ui = baseui.copy()
130 # A list of callback to shape the phase if no data were found.
130 # A list of callback to shape the phase if no data were found.
131 # Callback are in the form: func(repo, roots) --> processed root.
131 # Callback are in the form: func(repo, roots) --> processed root.
132 # This list it to be filled by extension during repo setup
132 # This list it to be filled by extension during repo setup
133 self._phasedefaults = []
133 self._phasedefaults = []
134 try:
134 try:
135 self.ui.readconfig(self.join("hgrc"), self.root)
135 self.ui.readconfig(self.join("hgrc"), self.root)
136 extensions.loadall(self.ui)
136 extensions.loadall(self.ui)
137 except IOError:
137 except IOError:
138 pass
138 pass
139
139
140 if not self.vfs.isdir():
140 if not self.vfs.isdir():
141 if create:
141 if create:
142 if not self.wvfs.exists():
142 if not self.wvfs.exists():
143 self.wvfs.makedirs()
143 self.wvfs.makedirs()
144 self.vfs.makedir(notindexed=True)
144 self.vfs.makedir(notindexed=True)
145 requirements = self._baserequirements(create)
145 requirements = self._baserequirements(create)
146 if self.ui.configbool('format', 'usestore', True):
146 if self.ui.configbool('format', 'usestore', True):
147 self.vfs.mkdir("store")
147 self.vfs.mkdir("store")
148 requirements.append("store")
148 requirements.append("store")
149 if self.ui.configbool('format', 'usefncache', True):
149 if self.ui.configbool('format', 'usefncache', True):
150 requirements.append("fncache")
150 requirements.append("fncache")
151 if self.ui.configbool('format', 'dotencode', True):
151 if self.ui.configbool('format', 'dotencode', True):
152 requirements.append('dotencode')
152 requirements.append('dotencode')
153 # create an invalid changelog
153 # create an invalid changelog
154 self.vfs.append(
154 self.vfs.append(
155 "00changelog.i",
155 "00changelog.i",
156 '\0\0\0\2' # represents revlogv2
156 '\0\0\0\2' # represents revlogv2
157 ' dummy changelog to prevent using the old repo layout'
157 ' dummy changelog to prevent using the old repo layout'
158 )
158 )
159 if self.ui.configbool('format', 'generaldelta', False):
159 if self.ui.configbool('format', 'generaldelta', False):
160 requirements.append("generaldelta")
160 requirements.append("generaldelta")
161 requirements = set(requirements)
161 requirements = set(requirements)
162 else:
162 else:
163 raise error.RepoError(_("repository %s not found") % path)
163 raise error.RepoError(_("repository %s not found") % path)
164 elif create:
164 elif create:
165 raise error.RepoError(_("repository %s already exists") % path)
165 raise error.RepoError(_("repository %s already exists") % path)
166 else:
166 else:
167 try:
167 try:
168 requirements = scmutil.readrequires(self.vfs, self.supported)
168 requirements = scmutil.readrequires(self.vfs, self.supported)
169 except IOError, inst:
169 except IOError, inst:
170 if inst.errno != errno.ENOENT:
170 if inst.errno != errno.ENOENT:
171 raise
171 raise
172 requirements = set()
172 requirements = set()
173
173
174 self.sharedpath = self.path
174 self.sharedpath = self.path
175 try:
175 try:
176 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
176 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
177 if not os.path.exists(s):
177 if not os.path.exists(s):
178 raise error.RepoError(
178 raise error.RepoError(
179 _('.hg/sharedpath points to nonexistent directory %s') % s)
179 _('.hg/sharedpath points to nonexistent directory %s') % s)
180 self.sharedpath = s
180 self.sharedpath = s
181 except IOError, inst:
181 except IOError, inst:
182 if inst.errno != errno.ENOENT:
182 if inst.errno != errno.ENOENT:
183 raise
183 raise
184
184
185 self.store = store.store(requirements, self.sharedpath, scmutil.opener)
185 self.store = store.store(requirements, self.sharedpath, scmutil.opener)
186 self.spath = self.store.path
186 self.spath = self.store.path
187 self.sopener = self.store.opener
187 self.sopener = self.store.opener
188 self.svfs = self.sopener
188 self.svfs = self.sopener
189 self.sjoin = self.store.join
189 self.sjoin = self.store.join
190 self.opener.createmode = self.store.createmode
190 self.opener.createmode = self.store.createmode
191 self._applyrequirements(requirements)
191 self._applyrequirements(requirements)
192 if create:
192 if create:
193 self._writerequirements()
193 self._writerequirements()
194
194
195
195
196 self._branchcache = None
196 self._branchcache = None
197 self._branchcachetip = None
197 self._branchcachetip = None
198 self.filterpats = {}
198 self.filterpats = {}
199 self._datafilters = {}
199 self._datafilters = {}
200 self._transref = self._lockref = self._wlockref = None
200 self._transref = self._lockref = self._wlockref = None
201
201
202 # A cache for various files under .hg/ that tracks file changes,
202 # A cache for various files under .hg/ that tracks file changes,
203 # (used by the filecache decorator)
203 # (used by the filecache decorator)
204 #
204 #
205 # Maps a property name to its util.filecacheentry
205 # Maps a property name to its util.filecacheentry
206 self._filecache = {}
206 self._filecache = {}

    def close(self):
        pass

    def _restrictcapabilities(self, caps):
        return caps

    def _applyrequirements(self, requirements):
        self.requirements = requirements
        self.sopener.options = dict((r, 1) for r in requirements
                                    if r in self.openerreqs)

    def _writerequirements(self):
        reqfile = self.opener("requires", "w")
        for r in self.requirements:
            reqfile.write("%s\n" % r)
        reqfile.close()

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    @filecache('bookmarks')
    def _bookmarks(self):
        return bookmarks.read(self)

    @filecache('bookmarks.current')
    def _bookmarkcurrent(self):
        return bookmarks.readcurrent(self)

    def _writebookmarks(self, marks):
        bookmarks.write(self)

    def bookmarkheads(self, bookmark):
        name = bookmark.split('@', 1)[0]
        heads = []
        for mark, n in self._bookmarks.iteritems():
            if mark.split('@', 1)[0] == name:
                heads.append(n)
        return heads
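
    # Clarifying note (added; not in the original source): bookmarks
    # sharing the part before '@' are grouped together, e.g. both
    # 'feature@default' and 'feature@stable' count as heads of the
    # logical bookmark 'feature', so bookmarkheads('feature') would
    # return both nodes.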

    @storecache('phaseroots')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        store = obsolete.obsstore(self.sopener)
        if store and not obsolete._enabled:
            # message is rare enough to not be translated
            msg = 'obsolete feature not enabled but %i markers found!\n'
            self.ui.warn(msg % len(list(store)))
        return store

    @propertycache
    def hiddenrevs(self):
        """hiddenrevs: revs that should be hidden by commands and tools

        This set is carried on the repo to ease initialization and lazy
        loading; it'll probably move back to changelog for efficiency and
        consistency reasons.

        Note that hiddenrevs needs invalidation when
        - a new changeset is added (possibly unstable above extinct ones)
        - a new obsolete marker is added (possibly a new extinct changeset)

        Hidden changesets cannot have non-hidden descendants.
        """
        hidden = set()
        if self.obsstore:
            ### hide extinct changesets that are not accessible by any means
            hiddenquery = 'extinct() - ::(. + bookmark() + tagged())'
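            # Reading the revset above (clarifying note, not in the
            # original source): extinct changesets remain visible when
            # they are ancestors of the working directory parent ('.'),
            # of a bookmark, or of a tagged changeset; all other extinct
            # changesets are hidden.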
            hidden.update(self.revs(hiddenquery))
        return hidden

    @storecache('00changelog.i')
    def changelog(self):
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c
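
    # Clarifying note (added; not in the original source): HG_PENDING is
    # set to the repository root while pretxn* hooks run; reading the
    # pending data from 00changelog.i.a lets such hooks see the
    # changesets of the still-open transaction before they land in
    # 00changelog.i.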

    @storecache('00manifest.i')
    def manifest(self):
        return manifest.manifest(self.sopener)

    @filecache('dirstate')
    def dirstate(self):
        warned = [0]
        def validate(node):
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            return bool(self.lookup(changeid))
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        for i in xrange(len(self)):
            yield i

    def revs(self, expr, *args):
        '''Return a list of revisions matching the given revset'''
        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        return [r for r in m(self, range(len(self)))]

    def set(self, expr, *args):
        '''
        Yield a context for each matching revision, after doing arg
        replacement via revset.formatspec
        '''
        for r in self.revs(expr, *args):
            yield self[r]
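
    # Illustrative usage sketch (added; not in the original source).
    # revset.formatspec handles argument quoting, e.g. %d for integers
    # and %s for strings:
    #
    #   for ctx in repo.set('branch(%s) and head()', 'default'):
    #       repo.ui.write('%d:%s\n' % (ctx.rev(), ctx))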

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

    tag_disallowed = ':\r\n'

    def _tag(self, names, node, message, local, user, date, extra={}):
        if isinstance(names, str):
            allchars = names
            names = (names,)
        else:
            allchars = ''.join(names)
        for c in self.tag_disallowed:
            if c in allchars:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if (self._tagscache.tagtypes and
                    name in self._tagscache.tagtypes):
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError, e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        self.invalidatecaches()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            for x in self.status()[:5]:
                if '.hgtags' in x:
                    raise util.Abort(_('working copy of .hgtags is changed '
                                       '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)
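
    # Illustrative usage sketch (added; not in the original source);
    # passing date=None lets commit() pick the current date:
    #
    #   repo.tag(['v1.0'], node, 'Added tag v1.0', False,
    #            'Alice <alice@example.com>', None)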

    @propertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        for k, v in self._tagscache.tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                r = self.changelog.rev(n)
                l.append((r, t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def _branchtags(self, partial, lrev):
        # TODO: rename this function?
        tiprev = len(self) - 1
        if lrev != tiprev:
            ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
            self._updatebranchcache(partial, ctxgen)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

    def updatebranchcache(self):
        tip = self.changelog.tip()
        if self._branchcache is not None and self._branchcachetip == tip:
            return

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if oldtip is None or oldtip not in self.changelog.nodemap:
            partial, last, lrev = self._readbranchcache()
        else:
            lrev = self.changelog.rev(oldtip)
            partial = self._branchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just the branch tips)
        self._branchcache = partial

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]}'''
        self.updatebranchcache()
        return self._branchcache
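
    # Illustrative usage sketch (added; not in the original source):
    #
    #   for branch, heads in repo.branchmap().iteritems():
    #       repo.ui.write('%s: %d head(s)\n' % (branch, len(heads)))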

    def _branchtip(self, heads):
        '''return the tipmost branch head in heads'''
        tip = heads[-1]
        for h in reversed(heads):
            if not self[h].closesbranch():
                tip = h
                break
        return tip

    def branchtip(self, branch):
        '''return the tip node for a given branch'''
        if branch not in self.branchmap():
            raise error.RepoLookupError(_("unknown branch '%s'") % branch)
        return self._branchtip(self.branchmap()[branch])

    def branchtags(self):
        '''return a dict where branch names map to the tipmost head of
        the branch, open heads come before closed'''
        bt = {}
        for bn, heads in self.branchmap().iteritems():
            bt[bn] = self._branchtip(heads)
        return bt

    def _readbranchcache(self):
        partial = {}
        try:
            f = self.opener("cache/branchheads")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l:
                    continue
                node, label = l.split(" ", 1)
                label = encoding.tolocal(label.strip())
                if node not in self:
                    raise ValueError('invalidating branch cache because node '
                                     '%s does not exist' % node)
                partial.setdefault(label, []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev

    def _writebranchcache(self, branches, tip, tiprev):
        try:
            f = self.opener("cache/branchheads", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
            f.close()
        except (IOError, OSError):
            pass
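
    # Format note (added for clarity; not in the original source):
    # .hg/cache/branchheads starts with a '<tip-hex> <tip-rev>' header
    # line followed by one '<node-hex> <branch-name>' line per branch
    # head; any inconsistency found while reading simply invalidates
    # the cache, which is then rebuilt.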

    def _updatebranchcache(self, partial, ctxgen):
        """Given a branchhead cache, partial, that may have extra nodes or be
        missing heads, and a generator of changectxs whose nodes are at least
        a superset of the missing heads, this function updates partial to be
        correct.
        """
        # collect new branch entries
        newbranches = {}
        for c in ctxgen:
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = partial.setdefault(branch, [])
            # Remove candidate heads that no longer are in the repo (e.g., as
            # the result of a strip that just happened). Avoid using 'node in
            # self' here because that dives down into branchcache code somewhat
            # recursively.
            bheadrevs = [self.changelog.rev(node) for node in bheads
                         if self.changelog.hasnode(node)]
            newheadrevs = [self.changelog.rev(node) for node in newnodes
                           if self.changelog.hasnode(node)]
            ctxisnew = bheadrevs and min(newheadrevs) > max(bheadrevs)
            # Remove duplicates - nodes that are in newheadrevs and are already
            # in bheadrevs. This can happen if you strip a node whose parent
            # was already a head (because they're on different branches).
            bheadrevs = sorted(set(bheadrevs).union(newheadrevs))

            # Starting from tip means fewer passes over reachable. If we know
            # the new candidates are not ancestors of existing heads, we don't
            # have to examine ancestors of existing heads
            if ctxisnew:
                iterrevs = sorted(newheadrevs)
            else:
                iterrevs = list(bheadrevs)

            # This loop prunes out two kinds of heads - heads that are
            # superseded by a head in newheadrevs, and newheadrevs that are not
            # heads because an existing head is their descendant.
            while iterrevs:
                latest = iterrevs.pop()
                if latest not in bheadrevs:
                    continue
                ancestors = set(self.changelog.ancestors([latest],
                                                         bheadrevs[0]))
                if ancestors:
                    bheadrevs = [b for b in bheadrevs if b not in ancestors]
            partial[branch] = [self.changelog.node(rev) for rev in bheadrevs]

        # There may be branches that cease to exist when the last commit in the
        # branch was stripped. This code filters them out. Note that the
        # branch that ceased to exist may not be in newbranches because
        # newbranches is the set of candidate heads, which when you strip the
        # last commit in a branch will be the parent branch.
        for branch in partial.keys():
            nodes = [head for head in partial[branch]
                     if self.changelog.hasnode(head)]
            if not nodes:
                del partial[branch]

    def lookup(self, key):
        return self[key].node()

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

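    # Clarifying note (added; not in the original source): known() backs
    # the 'known' wire-protocol command; for each node it reports whether
    # that node exists locally and is not in the secret phase.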
    def known(self, nodes):
        nm = self.changelog.nodemap
        pc = self._phasecache
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or pc.phase(self, r) >= phases.secret)
            result.append(resp)
        return result

    def local(self):
        return self

    def cancopy(self):
        return self.local() # so statichttprepo's override of local() works

    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def setparents(self, p1, p2=nullid):
        copies = self.dirstate.setparents(p1, p2)
        if copies:
            # Adjust copy records, the dirstate cannot do it, it
            # requires access to parents manifests. Preserve them
            # only for entries added to first parent.
            pctx = self[p1]
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return os.path.islink(self.wjoin(f))

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data
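
    # Illustrative configuration sketch (added; not in the original
    # source): filter patterns come from same-named hgrc sections, e.g.
    #
    #   [encode]
    #   *.gz = pipe: gunzip
    #   [decode]
    #   *.gz = pipe: gzip
    #
    # which pipes matching file content through a shell command on
    # commit and on checkout respectively.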

    @propertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @propertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener.write(filename, data)
            if 'x' in flags:
                util.setflags(self.wjoin(filename), False, True)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def transaction(self, desc):
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        self._writejournal(desc)
        renames = [(x, undoname(x)) for x in self._journalfiles()]

        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr
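
    # Illustrative usage sketch (added; not in the original source):
    #
    #   lock = repo.lock()
    #   try:
    #       tr = repo.transaction('my-operation')
    #       try:
    #           # ... write to the store ...
    #           tr.close()    # commit the transaction
    #       finally:
    #           tr.release()  # rolls back unless close() succeeded
    #   finally:
    #       lock.release()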

    def _journalfiles(self):
        return (self.sjoin('journal'), self.join('journal.dirstate'),
                self.join('journal.branch'), self.join('journal.desc'),
                self.join('journal.bookmarks'),
                self.sjoin('journal.phaseroots'))

    def undofiles(self):
        return [undoname(x) for x in self._journalfiles()]

    def _writejournal(self, desc):
        self.opener.write("journal.dirstate",
                          self.opener.tryread("dirstate"))
        self.opener.write("journal.branch",
                          encoding.fromlocal(self.dirstate.branch()))
        self.opener.write("journal.desc",
                          "%d\n%s\n" % (len(self), desc))
        self.opener.write("journal.bookmarks",
                          self.opener.tryread("bookmarks"))
        self.sopener.write("journal.phaseroots",
                           self.sopener.tryread("phaseroots"))

    def recover(self):
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"),
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self, dryrun=False, force=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)

    def _rollback(self, dryrun, force):
        ui = self.ui
        try:
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
        if os.path.exists(self.join('undo.bookmarks')):
            util.rename(self.join('undo.bookmarks'),
                        self.join('bookmarks'))
        if os.path.exists(self.sjoin('undo.phaseroots')):
            util.rename(self.sjoin('undo.phaseroots'),
                        self.sjoin('phaseroots'))
        self.invalidate()

        # Discard all cache entries to force reloading everything.
        self._filecache.clear()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            util.rename(self.join('undo.dirstate'), self.join('dirstate'))
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def invalidatecaches(self):
        def delcache(name):
            try:
                delattr(self, name)
            except AttributeError:
                pass

        delcache('_tagscache')

        self._branchcache = None # in UTF-8
        self._branchcachetip = None
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want
        to explicitly reread the dirstate (i.e. restoring it to a
        previously known good state).'''
        if 'dirstate' in self.__dict__:
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self, 'dirstate')

    def invalidate(self):
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(self, k)
            except AttributeError:
                pass
        self.invalidatecaches()

    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def _afterlock(self, callback):
        """add a callback to the current repository lock.

        The callback will be executed on lock release."""
        l = self._lockref and self._lockref()
        if l:
            l.postrelease.append(callback)
        else:
            callback()

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.store.write()
            if '_phasecache' in vars(self):
                self._phasecache.write()
            for k, ce in self._filecache.items():
                if k == 'dirstate':
                    continue
                ce.refresh()

        l = self._lock(self.sjoin("lock"), wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.dirstate.write()
            ce = self._filecache.get('dirstate')
            if ce:
                ce.refresh()

        l = self._lock(self.join("wlock"), wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l
1144
1144
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

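For a rename, the metadata recorded above travels with the new filelog revision; a sketch of its shape (the path and the 40-hex filenode are illustrative, not real values):

meta = {
    'copy': 'foo',    # path the file was renamed or copied from
    'copyrev': '0123456789abcdef0123456789abcdef01234567',
}
# fparent1 is then set to nullid ("look up the copy data") and
# fparent2 to newfparent, the other parent on a merge.
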
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                if '.hgsubstate' in changes[0]:
                    changes[0].remove('.hgsubstate')
                if '.hgsubstate' in changes[2]:
                    changes[2].remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise util.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    if wctx.sub(s).dirty(True):
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise util.Abort(
                                _("uncommitted changes in subrepo %s") % s,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    changes[0].insert(0, '.hgsubstate')

            elif '.hgsub' in changes[2]:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
                    changes[2].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    f = self.dirstate.normalize(f)
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            if merge and changes[3]:
                raise util.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.drop(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            self.hook("commit", node=node, parent1=parent1, parent2=parent2)
        self._afterlock(commithook)
        return ret

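A hedged usage sketch of this entry point (message and user are illustrative); a None return means nothing changed:

node = repo.commit(text='illustrative commit message',
                   user='Jane Doe <jane@example.com>')
if node is None:
    repo.ui.status('nothing changed\n')
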
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            # set the new commit in its proper phase
            targetphase = phases.newcommitphase(self.ui)
            if targetphase:
                # retracting the boundary does not alter parent changesets;
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, targetphase, [n])
            tr.close()
            self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

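commitctx also accepts in-memory contexts, which is how tools commit without touching the working directory; a sketch using context.memctx (treat the exact signature as an assumption for this era of the API):

from mercurial import context
from mercurial.node import nullid

def filectxfn(repo, memctx, path):
    # return the content for each file named in the files list
    return context.memfilectx(path, 'contents of %s\n' % path)

ctx = context.memctx(repo, (repo['.'].node(), nullid),
                     'illustrative in-memory commit',
                     ['hello.txt'], filectxfn,
                     user='Jane Doe <jane@example.com>')
node = repo.commitctx(ctx)
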
    def destroyed(self, newheadnodes=None):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.

        If you know the branchheadcache was up to date before nodes were
        removed and you also know the set of candidate new heads that may
        have resulted from the destruction, you can set newheadnodes. This
        will enable the code to update the branchheads cache, rather than
        having future code decide it's invalid and regenerating it from
        scratch.
        '''
        # If we have info, newheadnodes, on how to update the branch cache,
        # do it. Otherwise, since nodes were destroyed, the cache is stale
        # and this will be caught the next time it is read.
        if newheadnodes:
            tiprev = len(self) - 1
            ctxgen = (self[node] for node in newheadnodes
                      if self.changelog.hasnode(node))
            self._updatebranchcache(self._branchcache, ctxgen)
            self._writebranchcache(self._branchcache, self.changelog.tip(),
                                   tiprev)

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidatecaches()

        # Discard all cache entries to force reloading everything.
        self._filecache.clear()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

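A sketch of driving walk() with a matcher (the pattern is illustrative; matchmod is the mercurial.match module this file already imports, and repo is the repository handle from the earlier sketches):

m = matchmod.match(repo.root, '', ['glob:**.py'])
for f in repo.walk(m, node='.'):  # files matching in the '.' changeset
    repo.ui.write('%s\n' % f)
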
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            if match.always():
                return mf
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in ctx1 and f not in ctx1.dirs():
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx2.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            withflags = mf1.withflags() | mf2.withflags()
            for fn in mf2:
                if fn in mf1:
                    if (fn not in deleted and
                        ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
                         (mf1[fn] != mf2[fn] and
                          (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            removed = mf1.keys()

        if working and modified and not self.dirstate._checklink:
            # Symlink placeholders may get non-symlink-like contents
            # via user error or dereferencing by NFS or Samba servers,
            # so we filter out any placeholders that don't look like a
            # symlink
            sane = []
            for f in modified:
                if ctx2.flags(f) == 'l':
                    d = ctx2[f].data()
                    if len(d) >= 1024 or '\n' in d or util.binary(d):
                        self.ui.debug('ignoring suspect symlink placeholder'
                                      ' "%s"\n' % f)
                        continue
                sane.append(f)
            modified = sane

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r

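The seven lists always come back in the same order; a usage sketch:

(modified, added, removed, deleted,
 unknown, ignored, clean) = repo.status(unknown=True, clean=True)
for f in modified:
    repo.ui.write('M %s\n' % f)
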
    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if not self[h].closesbranch()]
        return bheads

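A small sketch listing the open heads of the current branch, newest first:

for node in repo.branchheads():
    ctx = repo[node]
    repo.ui.write('%d:%s\n' % (ctx.rev(), ctx))
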
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

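The loop above keeps the nodes whose distance from top is a power of two, giving a logarithmic-size sample of each chain. The same spacing in isolation (a self-contained sketch, not repository code):

def sample(chain):
    # keep the elements at distances 1, 2, 4, 8, ... from the start
    l, f, i = [], 1, 0
    for n in chain:
        if i == f:
            l.append(n)
            f = f * 2
        i += 1
    return l

assert sample(list(range(20))) == [1, 2, 4, 8, 16]
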
    def pull(self, remote, heads=None, force=False):
        # don't open a transaction for nothing or you break future useful
        # rollback calls
        tr = None
        trname = 'pull\n' + util.hidepassword(remote.url())
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                added = []
                result = 0
            else:
                tr = self.transaction(trname)
                if heads is None and list(common) == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                if remote.capable('getbundle'):
                    cg = remote.getbundle('pull', common=common,
                                          heads=heads or rheads)
                elif heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                clstart = len(self.changelog)
                result = self.addchangegroup(cg, 'pull', remote.url())
                clend = len(self.changelog)
                added = [self.changelog.node(r) for r in xrange(clstart, clend)]

            # compute target subset
            if heads is None:
                # We pulled everything possible
                # sync on everything common
                subset = common + added
            else:
                # We pulled a specific subset
                # sync on this subset
                subset = heads

            # Get remote phases data from remote
            remotephases = remote.listkeys('phases')
            publishing = bool(remotephases.get('publishing', False))
            if remotephases and not publishing:
                # remote is new and non-publishing
                pheads, _dr = phases.analyzeremotephases(self, subset,
                                                         remotephases)
                phases.advanceboundary(self, phases.public, pheads)
                phases.advanceboundary(self, phases.draft, subset)
            else:
                # Remote is old or publishing; all common changesets
                # should be seen as public
                phases.advanceboundary(self, phases.public, subset)

            if obsolete._enabled:
                self.ui.debug('fetching remote obsolete markers')
                remoteobs = remote.listkeys('obsolete')
                if 'dump0' in remoteobs:
                    if tr is None:
                        tr = self.transaction(trname)
                    for key in sorted(remoteobs, reverse=True):
                        if key.startswith('dump'):
                            data = base85.b85decode(remoteobs[key])
                            self.obsstore.mergemarkers(tr, data)
            if tr is not None:
                tr.close()
        finally:
            if tr is not None:
                tr.release()
            lock.release()

        return result

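A usage sketch (the URL is hypothetical; hg.peer is the 2.3-era way to obtain a remote handle):

from mercurial import hg

other = hg.peer(repo.ui, {}, 'http://example.com/repo')  # hypothetical URL
result = repo.pull(other)  # the return value follows addchangegroup()
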
    def checkpush(self, force, revs):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """
        pass

1823 def push(self, remote, force=False, revs=None, newbranch=False):
1823 def push(self, remote, force=False, revs=None, newbranch=False):
1824 '''Push outgoing changesets (limited by revs) from the current
1824 '''Push outgoing changesets (limited by revs) from the current
1825 repository to remote. Return an integer:
1825 repository to remote. Return an integer:
1826 - None means nothing to push
1826 - None means nothing to push
1827 - 0 means HTTP error
1827 - 0 means HTTP error
1828 - 1 means we pushed and remote head count is unchanged *or*
1828 - 1 means we pushed and remote head count is unchanged *or*
1829 we have outgoing changesets but refused to push
1829 we have outgoing changesets but refused to push
1830 - other values as described by addchangegroup()
1830 - other values as described by addchangegroup()
1831 '''
1831 '''
1832 # there are two ways to push to remote repo:
1832 # there are two ways to push to remote repo:
1833 #
1833 #
1834 # addchangegroup assumes local user can lock remote
1834 # addchangegroup assumes local user can lock remote
1835 # repo (local filesystem, old ssh servers).
1835 # repo (local filesystem, old ssh servers).
1836 #
1836 #
1837 # unbundle assumes local user cannot lock remote repo (new ssh
1837 # unbundle assumes local user cannot lock remote repo (new ssh
1838 # servers, http servers).
1838 # servers, http servers).
1839
1839
1840 if not remote.canpush():
1840 if not remote.canpush():
1841 raise util.Abort(_("destination does not support push"))
1841 raise util.Abort(_("destination does not support push"))
1842 # get local lock as we might write phase data
1842 # get local lock as we might write phase data
1843 locallock = self.lock()
1843 locallock = self.lock()
1844 try:
1844 try:
1845 self.checkpush(force, revs)
1845 self.checkpush(force, revs)
1846 lock = None
1846 lock = None
1847 unbundle = remote.capable('unbundle')
1847 unbundle = remote.capable('unbundle')
1848 if not unbundle:
1848 if not unbundle:
1849 lock = remote.lock()
1849 lock = remote.lock()
1850 try:
1850 try:
1851 # discovery
1851 # discovery
1852 fci = discovery.findcommonincoming
1852 fci = discovery.findcommonincoming
1853 commoninc = fci(self, remote, force=force)
1853 commoninc = fci(self, remote, force=force)
1854 common, inc, remoteheads = commoninc
1854 common, inc, remoteheads = commoninc
1855 fco = discovery.findcommonoutgoing
1855 fco = discovery.findcommonoutgoing
1856 outgoing = fco(self, remote, onlyheads=revs,
1856 outgoing = fco(self, remote, onlyheads=revs,
1857 commoninc=commoninc, force=force)
1857 commoninc=commoninc, force=force)
1858
1858
1859
1859
1860 if not outgoing.missing:
1860 if not outgoing.missing:
1861 # nothing to push
1861 # nothing to push
1862 scmutil.nochangesfound(self.ui, self, outgoing.excluded)
1862 scmutil.nochangesfound(self.ui, self, outgoing.excluded)
1863 ret = None
1863 ret = None
1864 else:
1864 else:
1865 # something to push
1865 # something to push
1866 if not force:
1866 if not force:
1867 # if self.obsstore == False --> no obsolete
1867 # if self.obsstore == False --> no obsolete
1868 # then, save the iteration
1868 # then, save the iteration
1869 if self.obsstore:
1869 if self.obsstore:
1870 # this message are here for 80 char limit reason
1870 # this message are here for 80 char limit reason
1871 mso = _("push includes an obsolete changeset: %s!")
1871 mso = _("push includes an obsolete changeset: %s!")
1872 msu = _("push includes an unstable changeset: %s!")
1872 msu = _("push includes an unstable changeset: %s!")
1873 # If we are to push if there is at least one
1873 # If we are to push if there is at least one
1874 # obsolete or unstable changeset in missing, at
1874 # obsolete or unstable changeset in missing, at
1875 # least one of the missinghead will be obsolete or
1875 # least one of the missinghead will be obsolete or
1876 # unstable. So checking heads only is ok
1876 # unstable. So checking heads only is ok
1877 for node in outgoing.missingheads:
1877 for node in outgoing.missingheads:
1878 ctx = self[node]
1878 ctx = self[node]
1879 if ctx.obsolete():
1879 if ctx.obsolete():
1880 raise util.Abort(_(mso) % ctx)
1880 raise util.Abort(_(mso) % ctx)
1881 elif ctx.unstable():
1881 elif ctx.unstable():
1882 raise util.Abort(_(msu) % ctx)
1882 raise util.Abort(_(msu) % ctx)
1883 discovery.checkheads(self, remote, outgoing,
1883 discovery.checkheads(self, remote, outgoing,
1884 remoteheads, newbranch,
1884 remoteheads, newbranch,
1885 bool(inc))
1885 bool(inc))
1886
1886
1887 # create a changegroup from local
1887 # create a changegroup from local
1888 if revs is None and not outgoing.excluded:
1888 if revs is None and not outgoing.excluded:
1889 # push everything,
1889 # push everything,
1890 # use the fast path, no race possible on push
1890 # use the fast path, no race possible on push
1891 cg = self._changegroup(outgoing.missing, 'push')
1891 cg = self._changegroup(outgoing.missing, 'push')
1892 else:
1892 else:
1893 cg = self.getlocalbundle('push', outgoing)
1893 cg = self.getlocalbundle('push', outgoing)
1894
1894
1895 # apply changegroup to remote
1895 # apply changegroup to remote
1896 if unbundle:
1896 if unbundle:
1897 # local repo finds heads on server, finds out what
1897 # local repo finds heads on server, finds out what
1898 # revs it must push. once revs transferred, if server
1898 # revs it must push. once revs transferred, if server
1899 # finds it has different heads (someone else won
1899 # finds it has different heads (someone else won
1900 # commit/push race), server aborts.
1900 # commit/push race), server aborts.
1901 if force:
1901 if force:
1902 remoteheads = ['force']
1902 remoteheads = ['force']
1903 # ssh: return remote's addchangegroup()
1903 # ssh: return remote's addchangegroup()
1904 # http: return remote's addchangegroup() or 0 for error
1904 # http: return remote's addchangegroup() or 0 for error
1905 ret = remote.unbundle(cg, remoteheads, 'push')
1905 ret = remote.unbundle(cg, remoteheads, 'push')
1906 else:
1906 else:
1907 # we return an integer indicating remote head count
1907 # we return an integer indicating remote head count
1908 # change
1908 # change
1909 ret = remote.addchangegroup(cg, 'push', self.url())
1909 ret = remote.addchangegroup(cg, 'push', self.url())
1910
1910
1911 if ret:
1911 if ret:
1912 # push succeed, synchronize target of the push
1912 # push succeed, synchronize target of the push
1913 cheads = outgoing.missingheads
1913 cheads = outgoing.missingheads
1914 elif revs is None:
1914 elif revs is None:
1915 # All out push fails. synchronize all common
1915 # All out push fails. synchronize all common
1916 cheads = outgoing.commonheads
1916 cheads = outgoing.commonheads
1917 else:
1917 else:
1918 # I want cheads = heads(::missingheads and ::commonheads)
1918 # I want cheads = heads(::missingheads and ::commonheads)
1919 # (missingheads is revs with secret changeset filtered out)
1919 # (missingheads is revs with secret changeset filtered out)
1920 #
1920 #
1921 # This can be expressed as:
1921 # This can be expressed as:
1922 # cheads = ( (missingheads and ::commonheads)
1922 # cheads = ( (missingheads and ::commonheads)
1923 # + (commonheads and ::missingheads))"
1923 # + (commonheads and ::missingheads))"
1924 # )
1924 # )
1925 #
1925 #
1926 # while trying to push we already computed the following:
1926 # while trying to push we already computed the following:
1927 # common = (::commonheads)
1927 # common = (::commonheads)
1928 # missing = ((commonheads::missingheads) - commonheads)
1928 # missing = ((commonheads::missingheads) - commonheads)
1929 #
1929 #
1930 # We can pick:
1930 # We can pick:
1931 # * missingheads part of common (::commonheads)
1931 # * missingheads part of common (::commonheads)
1932 common = set(outgoing.common)
1932 common = set(outgoing.common)
1933 cheads = [node for node in revs if node in common]
1933 cheads = [node for node in revs if node in common]
1934 # and
1934 # and
1935 # * commonheads parents on missing
1935 # * commonheads parents on missing
1936 revset = self.set('%ln and parents(roots(%ln))',
1936 revset = self.set('%ln and parents(roots(%ln))',
1937 outgoing.commonheads,
1937 outgoing.commonheads,
1938 outgoing.missing)
1938 outgoing.missing)
1939 cheads.extend(c.node() for c in revset)
1939 cheads.extend(c.node() for c in revset)
1940 # even when we don't push, exchanging phase data is useful
1940 # even when we don't push, exchanging phase data is useful
1941 remotephases = remote.listkeys('phases')
1941 remotephases = remote.listkeys('phases')
1942 if not remotephases: # old server or public only repo
1942 if not remotephases: # old server or public only repo
1943 phases.advanceboundary(self, phases.public, cheads)
1943 phases.advanceboundary(self, phases.public, cheads)
1944 # don't push any phase data as there is nothing to push
1944 # don't push any phase data as there is nothing to push
1945 else:
1945 else:
1946 ana = phases.analyzeremotephases(self, cheads, remotephases)
1946 ana = phases.analyzeremotephases(self, cheads, remotephases)
1947 pheads, droots = ana
1947 pheads, droots = ana
1948 ### Apply remote phase on local
1948 ### Apply remote phase on local
1949 if remotephases.get('publishing', False):
1949 if remotephases.get('publishing', False):
1950 phases.advanceboundary(self, phases.public, cheads)
1950 phases.advanceboundary(self, phases.public, cheads)
1951 else: # publish = False
1951 else: # publish = False
1952 phases.advanceboundary(self, phases.public, pheads)
1952 phases.advanceboundary(self, phases.public, pheads)
1953 phases.advanceboundary(self, phases.draft, cheads)
1953 phases.advanceboundary(self, phases.draft, cheads)
1954 ### Apply local phase on remote
1954 ### Apply local phase on remote
1955
1955
1956 # Get the list of all revs draft on remote by public here.
1956 # Get the list of all revs draft on remote by public here.
1957 # XXX Beware that revset break if droots is not strictly
1957 # XXX Beware that revset break if droots is not strictly
1958 # XXX root we may want to ensure it is but it is costly
1958 # XXX root we may want to ensure it is but it is costly
1959 outdated = self.set('heads((%ln::%ln) and public())',
1959 outdated = self.set('heads((%ln::%ln) and public())',
1960 droots, cheads)
1960 droots, cheads)
1961 for newremotehead in outdated:
1961 for newremotehead in outdated:
1962 r = remote.pushkey('phases',
1962 r = remote.pushkey('phases',
1963 newremotehead.hex(),
1963 newremotehead.hex(),
1964 str(phases.draft),
1964 str(phases.draft),
1965 str(phases.public))
1965 str(phases.public))
1966 if not r:
1966 if not r:
1967 self.ui.warn(_('updating %s to public failed!\n')
1967 self.ui.warn(_('updating %s to public failed!\n')
1968 % newremotehead)
1968 % newremotehead)
1969 self.ui.debug('try to push obsolete markers to remote\n')
1969 self.ui.debug('try to push obsolete markers to remote\n')
1970 if (obsolete._enabled and self.obsstore and
1970 if (obsolete._enabled and self.obsstore and
1971 'obsolete' in remote.listkeys('namespaces')):
1971 'obsolete' in remote.listkeys('namespaces')):
1972 rslts = []
1972 rslts = []
1973 remotedata = self.listkeys('obsolete')
1973 remotedata = self.listkeys('obsolete')
1974 for key in sorted(remotedata, reverse=True):
1974 for key in sorted(remotedata, reverse=True):
1975 # reverse sort to ensure we end with dump0
1975 # reverse sort to ensure we end with dump0
1976 data = remotedata[key]
1976 data = remotedata[key]
1977 rslts.append(remote.pushkey('obsolete', key, '', data))
1977 rslts.append(remote.pushkey('obsolete', key, '', data))
1978 if [r for r in rslts if not r]:
1978 if [r for r in rslts if not r]:
1979 msg = _('failed to push some obsolete markers!\n')
1979 msg = _('failed to push some obsolete markers!\n')
1980 self.ui.warn(msg)
1980 self.ui.warn(msg)
1981 finally:
1981 finally:
1982 if lock is not None:
1982 if lock is not None:
1983 lock.release()
1983 lock.release()
1984 finally:
1984 finally:
1985 locallock.release()
1985 locallock.release()
1986
1986
1987 self.ui.debug("checking for updated bookmarks\n")
1987 self.ui.debug("checking for updated bookmarks\n")
1988 rb = remote.listkeys('bookmarks')
1988 rb = remote.listkeys('bookmarks')
1989 for k in rb.keys():
1989 for k in rb.keys():
1990 if k in self._bookmarks:
1990 if k in self._bookmarks:
1991 nr, nl = rb[k], hex(self._bookmarks[k])
1991 nr, nl = rb[k], hex(self._bookmarks[k])
1992 if nr in self:
1992 if nr in self:
1993 cr = self[nr]
1993 cr = self[nr]
1994 cl = self[nl]
1994 cl = self[nl]
1995 if cl in cr.descendants():
1995 if bookmarks.validdest(self, cr, cl):
1996 r = remote.pushkey('bookmarks', k, nr, nl)
1997 if r:
1998 self.ui.status(_("updating bookmark %s\n") % k)
1999 else:
2000 self.ui.warn(_('updating bookmark %s'
2001 ' failed!\n') % k)
2002
2003 return ret
2004
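# Editor's note: the one functional change in this revision is above, at
# source line 1995, where the inline test `cl in cr.descendants()` became
# `bookmarks.validdest(self, cr, cl)`. A minimal sketch of the extracted
# check, based only on the old inline condition (the real helper lives in
# mercurial/bookmarks.py and may accept more cases, e.g. obsolete-aware
# successors; name below is illustrative):

def validdest_sketch(repo, old, new):
    """Would moving a bookmark from changectx `old` to `new` be valid?"""
    # the pre-refactor rule: only fast-forward moves along descendants
    return new in old.descendants()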
2005 def changegroupinfo(self, nodes, source):
2006 if self.ui.verbose or source == 'bundle':
2007 self.ui.status(_("%d changesets found\n") % len(nodes))
2008 if self.ui.debugflag:
2009 self.ui.debug("list of changesets:\n")
2010 for node in nodes:
2011 self.ui.debug("%s\n" % hex(node))
2012
2013 def changegroupsubset(self, bases, heads, source):
2014 """Compute a changegroup consisting of all the nodes that are
2015 descendants of any of the bases and ancestors of any of the heads.
2016 Return a chunkbuffer object whose read() method will return
2017 successive changegroup chunks.
2018
2019 It is fairly complex as determining which filenodes and which
2020 manifest nodes need to be included for the changeset to be complete
2021 is non-trivial.
2022
2023 Another wrinkle is doing the reverse, figuring out which changeset in
2024 the changegroup a particular filenode or manifestnode belongs to.
2025 """
2026 cl = self.changelog
2027 if not bases:
2028 bases = [nullid]
2029 csets, bases, heads = cl.nodesbetween(bases, heads)
2030 # We assume that all ancestors of bases are known
2031 common = set(cl.ancestors([cl.rev(n) for n in bases]))
2032 return self._changegroupsubset(common, csets, heads, source)
2033
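# Editor's note: an illustrative caller (not part of this file). Per the
# docstring above, nullid as the sole base selects the full history up to
# the given heads; the names `repo` and `out` are assumptions:

def full_changegroup_sketch(repo, out):
    cg = repo.changegroupsubset([nullid], repo.heads(), 'bundle')
    while True:
        chunk = cg.read(4096)   # successive changegroup chunks
        if not chunk:
            break
        out.write(chunk)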
2034 def getlocalbundle(self, source, outgoing):
2035 """Like getbundle, but taking a discovery.outgoing as an argument.
2036
2037 This is only implemented for local repos and reuses potentially
2038 precomputed sets in outgoing."""
2039 if not outgoing.missing:
2040 return None
2041 return self._changegroupsubset(outgoing.common,
2042 outgoing.missing,
2043 outgoing.missingheads,
2044 source)
2045
2046 def getbundle(self, source, heads=None, common=None):
2047 """Like changegroupsubset, but returns the set difference between the
2048 ancestors of heads and the ancestors of common.
2049
2050 If heads is None, use the local heads. If common is None, use [nullid].
2051
2052 The nodes in common might not all be known locally due to the way the
2053 current discovery protocol works.
2054 """
2055 cl = self.changelog
2056 if common:
2057 nm = cl.nodemap
2058 common = [n for n in common if n in nm]
2059 else:
2060 common = [nullid]
2061 if not heads:
2062 heads = cl.heads()
2063 return self.getlocalbundle(source,
2064 discovery.outgoing(cl, common, heads))
2065
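# Editor's sketch of getbundle()'s contract: passing the nodes the receiver
# already has as `common` yields an incremental bundle; with common=None it
# degrades to a full bundle ([nullid]). Names are assumptions:

def incremental_bundle_sketch(repo, theirheads):
    # everything we have that is not an ancestor of `theirheads`
    return repo.getbundle('pull', heads=None, common=theirheads)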
2066 def _changegroupsubset(self, commonrevs, csets, heads, source):
2067
2068 cl = self.changelog
2069 mf = self.manifest
2070 mfs = {} # needed manifests
2071 fnodes = {} # needed file nodes
2072 changedfiles = set()
2073 fstate = ['', {}]
2074 count = [0, 0]
2075
2076 # can we go through the fast path?
2077 heads.sort()
2078 if heads == sorted(self.heads()):
2079 return self._changegroup(csets, source)
2080
2081 # slow path
2082 self.hook('preoutgoing', throw=True, source=source)
2083 self.changegroupinfo(csets, source)
2084
2085 # filter any nodes that claim to be part of the known set
2086 def prune(revlog, missing):
2087 rr, rl = revlog.rev, revlog.linkrev
2088 return [n for n in missing
2089 if rl(rr(n)) not in commonrevs]
2090
2091 progress = self.ui.progress
2092 _bundling = _('bundling')
2093 _changesets = _('changesets')
2094 _manifests = _('manifests')
2095 _files = _('files')
2096
2097 def lookup(revlog, x):
2098 if revlog == cl:
2099 c = cl.read(x)
2100 changedfiles.update(c[3])
2101 mfs.setdefault(c[0], x)
2102 count[0] += 1
2103 progress(_bundling, count[0],
2104 unit=_changesets, total=count[1])
2105 return x
2106 elif revlog == mf:
2107 clnode = mfs[x]
2108 mdata = mf.readfast(x)
2109 for f, n in mdata.iteritems():
2110 if f in changedfiles:
2111 fnodes[f].setdefault(n, clnode)
2112 count[0] += 1
2113 progress(_bundling, count[0],
2114 unit=_manifests, total=count[1])
2115 return clnode
2116 else:
2117 progress(_bundling, count[0], item=fstate[0],
2118 unit=_files, total=count[1])
2119 return fstate[1][x]
2120
2121 bundler = changegroup.bundle10(lookup)
2122 reorder = self.ui.config('bundle', 'reorder', 'auto')
2123 if reorder == 'auto':
2124 reorder = None
2125 else:
2126 reorder = util.parsebool(reorder)
2127
2128 def gengroup():
2129 # Create a changenode group generator that will call our functions
2130 # back to lookup the owning changenode and collect information.
2131 count[:] = [0, len(csets)]
2132 for chunk in cl.group(csets, bundler, reorder=reorder):
2133 yield chunk
2134 progress(_bundling, None)
2135
2136 # Create a generator for the manifestnodes that calls our lookup
2137 # and data collection functions back.
2138 for f in changedfiles:
2139 fnodes[f] = {}
2140 count[:] = [0, len(mfs)]
2141 for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
2142 yield chunk
2143 progress(_bundling, None)
2144
2145 mfs.clear()
2146
2147 # Go through all our files in order sorted by name.
2148 count[:] = [0, len(changedfiles)]
2149 for fname in sorted(changedfiles):
2150 filerevlog = self.file(fname)
2151 if not len(filerevlog):
2152 raise util.Abort(_("empty or missing revlog for %s")
2153 % fname)
2154 fstate[0] = fname
2155 fstate[1] = fnodes.pop(fname, {})
2156
2157 nodelist = prune(filerevlog, fstate[1])
2158 if nodelist:
2159 count[0] += 1
2160 yield bundler.fileheader(fname)
2161 for chunk in filerevlog.group(nodelist, bundler, reorder):
2162 yield chunk
2163
2164 # Signal that no more groups are left.
2165 yield bundler.close()
2166 progress(_bundling, None)
2167
2168 if csets:
2169 self.hook('outgoing', node=hex(csets[0]), source=source)
2170
2171 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2172
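# Editor's note: both bundling paths in this file honour the [bundle]
# reorder knob the same way; as a standalone sketch (assumed `ui`):

def bundle_reorder_sketch(ui):
    reorder = ui.config('bundle', 'reorder', 'auto')
    if reorder == 'auto':
        return None                  # let revlog.group() decide
    return util.parsebool(reorder)   # force reordering on or off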
2173 def changegroup(self, basenodes, source):
2174 # to avoid a race we use changegroupsubset() (issue1320)
2175 return self.changegroupsubset(basenodes, self.heads(), source)
2176
2177 def _changegroup(self, nodes, source):
2178 """Compute the changegroup of all nodes that we have that a recipient
2179 doesn't. Return a chunkbuffer object whose read() method will return
2180 successive changegroup chunks.
2181
2182 This is much easier than the previous function as we can assume that
2183 the recipient has any changenode we aren't sending them.
2184
2185 nodes is the set of nodes to send"""
2186
2187 cl = self.changelog
2188 mf = self.manifest
2189 mfs = {}
2190 changedfiles = set()
2191 fstate = ['']
2192 count = [0, 0]
2193
2194 self.hook('preoutgoing', throw=True, source=source)
2195 self.changegroupinfo(nodes, source)
2196
2197 revset = set([cl.rev(n) for n in nodes])
2198
2199 def gennodelst(log):
2200 ln, llr = log.node, log.linkrev
2201 return [ln(r) for r in log if llr(r) in revset]
2202
2203 progress = self.ui.progress
2204 _bundling = _('bundling')
2205 _changesets = _('changesets')
2206 _manifests = _('manifests')
2207 _files = _('files')
2208
2209 def lookup(revlog, x):
2210 if revlog == cl:
2211 c = cl.read(x)
2212 changedfiles.update(c[3])
2213 mfs.setdefault(c[0], x)
2214 count[0] += 1
2215 progress(_bundling, count[0],
2216 unit=_changesets, total=count[1])
2217 return x
2218 elif revlog == mf:
2219 count[0] += 1
2220 progress(_bundling, count[0],
2221 unit=_manifests, total=count[1])
2222 return cl.node(revlog.linkrev(revlog.rev(x)))
2223 else:
2224 progress(_bundling, count[0], item=fstate[0],
2225 total=count[1], unit=_files)
2226 return cl.node(revlog.linkrev(revlog.rev(x)))
2227
2228 bundler = changegroup.bundle10(lookup)
2229 reorder = self.ui.config('bundle', 'reorder', 'auto')
2230 if reorder == 'auto':
2231 reorder = None
2232 else:
2233 reorder = util.parsebool(reorder)
2234
2235 def gengroup():
2236 '''yield a sequence of changegroup chunks (strings)'''
2237 # construct a list of all changed files
2238
2239 count[:] = [0, len(nodes)]
2240 for chunk in cl.group(nodes, bundler, reorder=reorder):
2241 yield chunk
2242 progress(_bundling, None)
2243
2244 count[:] = [0, len(mfs)]
2245 for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
2246 yield chunk
2247 progress(_bundling, None)
2248
2249 count[:] = [0, len(changedfiles)]
2250 for fname in sorted(changedfiles):
2251 filerevlog = self.file(fname)
2252 if not len(filerevlog):
2253 raise util.Abort(_("empty or missing revlog for %s")
2254 % fname)
2255 fstate[0] = fname
2256 nodelist = gennodelst(filerevlog)
2257 if nodelist:
2258 count[0] += 1
2259 yield bundler.fileheader(fname)
2260 for chunk in filerevlog.group(nodelist, bundler, reorder):
2261 yield chunk
2262 yield bundler.close()
2263 progress(_bundling, None)
2264
2265 if nodes:
2266 self.hook('outgoing', node=hex(nodes[0]), source=source)
2267
2268 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2269
2270 def addchangegroup(self, source, srctype, url, emptyok=False):
2271 """Add the changegroup returned by source.read() to this repo.
2272 srctype is a string like 'push', 'pull', or 'unbundle'. url is
2273 the URL of the repo where this changegroup is coming from.
2274
2275 Return an integer summarizing the change to this repo:
2276 - nothing changed or no source: 0
2277 - more heads than before: 1+added heads (2..n)
2278 - fewer heads than before: -1-removed heads (-2..-n)
2279 - number of heads stays the same: 1
2280 """
2281 def csmap(x):
2282 self.ui.debug("add changeset %s\n" % short(x))
2283 return len(cl)
2284
2285 def revmap(x):
2286 return cl.rev(x)
2287
2288 if not source:
2289 return 0
2290
2291 self.hook('prechangegroup', throw=True, source=srctype, url=url)
2292
2293 changesets = files = revisions = 0
2294 efiles = set()
2295
2296 # write changelog data to temp files so concurrent readers will not see
2297 # an inconsistent view
2298 cl = self.changelog
2299 cl.delayupdate()
2300 oldheads = cl.heads()
2301
2302 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
2303 try:
2304 trp = weakref.proxy(tr)
2305 # pull off the changeset group
2306 self.ui.status(_("adding changesets\n"))
2307 clstart = len(cl)
2308 class prog(object):
2309 step = _('changesets')
2310 count = 1
2311 ui = self.ui
2312 total = None
2313 def __call__(self):
2314 self.ui.progress(self.step, self.count, unit=_('chunks'),
2315 total=self.total)
2316 self.count += 1
2317 pr = prog()
2318 source.callback = pr
2319
2320 source.changelogheader()
2321 srccontent = cl.addgroup(source, csmap, trp)
2322 if not (srccontent or emptyok):
2323 raise util.Abort(_("received changelog group is empty"))
2324 clend = len(cl)
2325 changesets = clend - clstart
2326 for c in xrange(clstart, clend):
2327 efiles.update(self[c].files())
2328 efiles = len(efiles)
2329 self.ui.progress(_('changesets'), None)
2330
2331 # pull off the manifest group
2332 self.ui.status(_("adding manifests\n"))
2333 pr.step = _('manifests')
2334 pr.count = 1
2335 pr.total = changesets # manifests <= changesets
2336 # no need to check for empty manifest group here:
2337 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2338 # no new manifest will be created and the manifest group will
2339 # be empty during the pull
2340 source.manifestheader()
2341 self.manifest.addgroup(source, revmap, trp)
2342 self.ui.progress(_('manifests'), None)
2343
2344 needfiles = {}
2345 if self.ui.configbool('server', 'validate', default=False):
2346 # validate incoming csets have their manifests
2347 for cset in xrange(clstart, clend):
2348 mfest = self.changelog.read(self.changelog.node(cset))[0]
2349 mfest = self.manifest.readdelta(mfest)
2350 # store file nodes we must see
2351 for f, n in mfest.iteritems():
2352 needfiles.setdefault(f, set()).add(n)
2353
2354 # process the files
2355 self.ui.status(_("adding file changes\n"))
2356 pr.step = _('files')
2357 pr.count = 1
2358 pr.total = efiles
2359 source.callback = None
2360
2361 while True:
2362 chunkdata = source.filelogheader()
2363 if not chunkdata:
2364 break
2365 f = chunkdata["filename"]
2366 self.ui.debug("adding %s revisions\n" % f)
2367 pr()
2368 fl = self.file(f)
2369 o = len(fl)
2370 if not fl.addgroup(source, revmap, trp):
2371 raise util.Abort(_("received file revlog group is empty"))
2372 revisions += len(fl) - o
2373 files += 1
2374 if f in needfiles:
2375 needs = needfiles[f]
2376 for new in xrange(o, len(fl)):
2377 n = fl.node(new)
2378 if n in needs:
2379 needs.remove(n)
2380 if not needs:
2381 del needfiles[f]
2382 self.ui.progress(_('files'), None)
2383
2384 for f, needs in needfiles.iteritems():
2385 fl = self.file(f)
2386 for n in needs:
2387 try:
2388 fl.rev(n)
2389 except error.LookupError:
2390 raise util.Abort(
2391 _('missing file data for %s:%s - run hg verify') %
2392 (f, hex(n)))
2393
2394 dh = 0
2395 if oldheads:
2396 heads = cl.heads()
2397 dh = len(heads) - len(oldheads)
2398 for h in heads:
2399 if h not in oldheads and self[h].closesbranch():
2400 dh -= 1
2401 htext = ""
2402 if dh:
2403 htext = _(" (%+d heads)") % dh
2404
2405 self.ui.status(_("added %d changesets"
2406 " with %d changes to %d files%s\n")
2407 % (changesets, revisions, files, htext))
2408 obsolete.clearobscaches(self)
2409
2410 if changesets > 0:
2411 p = lambda: cl.writepending() and self.root or ""
2412 self.hook('pretxnchangegroup', throw=True,
2413 node=hex(cl.node(clstart)), source=srctype,
2414 url=url, pending=p)
2415
2416 added = [cl.node(r) for r in xrange(clstart, clend)]
2417 publishing = self.ui.configbool('phases', 'publish', True)
2418 if srctype == 'push':
2419 # Old servers can not push the boundary themselves.
2420 # New servers won't push the boundary if the changeset already
2421 # existed locally as secret.
2422 #
2423 # We should not use added here but the list of all changes in
2424 # the bundle
2425 if publishing:
2426 phases.advanceboundary(self, phases.public, srccontent)
2427 else:
2428 phases.advanceboundary(self, phases.draft, srccontent)
2429 phases.retractboundary(self, phases.draft, added)
2430 elif srctype != 'strip':
2431 # publishing only alters behavior during push
2432 #
2433 # strip should not touch the boundary at all
2434 phases.retractboundary(self, phases.draft, added)
2435
2436 # make changelog see real files again
2437 cl.finalize(trp)
2438
2439 tr.close()
2440
2441 if changesets > 0:
2442 def runhooks():
2443 # forcefully update the on-disk branch cache
2444 self.ui.debug("updating the branch cache\n")
2445 self.updatebranchcache()
2446 self.hook("changegroup", node=hex(cl.node(clstart)),
2447 source=srctype, url=url)
2448
2449 for n in added:
2450 self.hook("incoming", node=hex(n), source=srctype,
2451 url=url)
2452 self._afterlock(runhooks)
2453
2454 finally:
2455 tr.release()
2456 # never return 0 here:
2457 if dh < 0:
2458 return dh - 1
2459 else:
2460 return dh + 1
2461
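# Editor's sketch: decoding addchangegroup()'s return value exactly as its
# docstring documents it (function name is illustrative only):

def describe_addchangegroup_result_sketch(ret):
    if ret == 0:
        return 'nothing changed (or no source)'
    if ret > 1:
        return '%d new heads' % (ret - 1)
    if ret < 0:
        return '%d heads removed' % (-ret - 1)
    return 'head count unchanged'    # ret == 1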
2462 def stream_in(self, remote, requirements):
2463 lock = self.lock()
2464 try:
2465 fp = remote.stream_out()
2466 l = fp.readline()
2467 try:
2468 resp = int(l)
2469 except ValueError:
2470 raise error.ResponseError(
2471 _('unexpected response from remote server:'), l)
2472 if resp == 1:
2473 raise util.Abort(_('operation forbidden by server'))
2474 elif resp == 2:
2475 raise util.Abort(_('locking the remote repository failed'))
2476 elif resp != 0:
2477 raise util.Abort(_('the server sent an unknown error code'))
2478 self.ui.status(_('streaming all changes\n'))
2479 l = fp.readline()
2480 try:
2481 total_files, total_bytes = map(int, l.split(' ', 1))
2482 except (ValueError, TypeError):
2483 raise error.ResponseError(
2484 _('unexpected response from remote server:'), l)
2485 self.ui.status(_('%d files to transfer, %s of data\n') %
2486 (total_files, util.bytecount(total_bytes)))
2487 handled_bytes = 0
2488 self.ui.progress(_('clone'), 0, total=total_bytes)
2489 start = time.time()
2490 for i in xrange(total_files):
2491 # XXX doesn't support '\n' or '\r' in filenames
2492 l = fp.readline()
2493 try:
2494 name, size = l.split('\0', 1)
2495 size = int(size)
2496 except (ValueError, TypeError):
2497 raise error.ResponseError(
2498 _('unexpected response from remote server:'), l)
2499 if self.ui.debugflag:
2500 self.ui.debug('adding %s (%s)\n' %
2501 (name, util.bytecount(size)))
2502 # for backwards compat, name was partially encoded
2503 ofp = self.sopener(store.decodedir(name), 'w')
2504 for chunk in util.filechunkiter(fp, limit=size):
2505 handled_bytes += len(chunk)
2506 self.ui.progress(_('clone'), handled_bytes,
2507 total=total_bytes)
2508 ofp.write(chunk)
2509 ofp.close()
2510 elapsed = time.time() - start
2511 if elapsed <= 0:
2512 elapsed = 0.001
2513 self.ui.progress(_('clone'), None)
2514 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2515 (util.bytecount(total_bytes), elapsed,
2516 util.bytecount(total_bytes / elapsed)))
2517
2518 # new requirements = old non-format requirements +
2519 # new format-related
2520 # requirements from the streamed-in repository
2521 requirements.update(set(self.requirements) - self.supportedformats)
2522 self._applyrequirements(requirements)
2523 self._writerequirements()
2524
2525 self.invalidate()
2526 return len(self.heads()) + 1
2527 finally:
2528 lock.release()
2529
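# Editor's note: the stream_out wire format consumed by stream_in() above,
# summarized as a sketch (simplified; full error handling is in the real
# method):
#   line 1: decimal response code (0 ok, 1 forbidden, 2 remote lock failed)
#   line 2: '<total_files> <total_bytes>'
#   per file: '<name>\0<size>' on one line, then exactly <size> raw bytes

def read_stream_header_sketch(fp):
    resp = int(fp.readline())
    if resp != 0:
        raise util.Abort(_('stream request refused (code %d)') % resp)
    total_files, total_bytes = map(int, fp.readline().split(' ', 1))
    return total_files, total_bytes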
2530 def clone(self, remote, heads=[], stream=False):
2531 '''clone remote repository.
2532
2533 keyword arguments:
2534 heads: list of revs to clone (forces use of pull)
2535 stream: use streaming clone if possible'''
2536
2537 # now, all clients that can request uncompressed clones can
2538 # read repo formats supported by all servers that can serve
2539 # them.
2540
2541 # if revlog format changes, client will have to check version
2542 # and format flags on "stream" capability, and use
2543 # uncompressed only if compatible.
2544
2545 if not stream:
2546 # if the server explicitly prefers to stream (for fast LANs)
2547 stream = remote.capable('stream-preferred')
2548
2549 if stream and not heads:
2550 # 'stream' means remote revlog format is revlogv1 only
2551 if remote.capable('stream'):
2552 return self.stream_in(remote, set(('revlogv1',)))
2553 # otherwise, 'streamreqs' contains the remote revlog format
2554 streamreqs = remote.capable('streamreqs')
2555 if streamreqs:
2556 streamreqs = set(streamreqs.split(','))
2557 # if we support it, stream in and adjust our requirements
2558 if not streamreqs - self.supportedformats:
2559 return self.stream_in(remote, streamreqs)
2560 return self.pull(remote, heads)
2561
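# Editor's sketch of the negotiation in clone() above: stream only when no
# explicit heads were requested and the remote's on-disk format is fully
# supported locally (names are assumptions):

def should_stream_sketch(repo, remote, heads, stream):
    if not stream:
        stream = remote.capable('stream-preferred')
    if not stream or heads:
        return False
    if remote.capable('stream'):
        return True                      # plain revlogv1 stream
    streamreqs = remote.capable('streamreqs')
    if not streamreqs:
        return False
    return not (set(streamreqs.split(',')) - repo.supportedformats)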
2562 def pushkey(self, namespace, key, old, new):
2563 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2564 old=old, new=new)
2565 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2566 ret = pushkey.push(self, namespace, key, old, new)
2567 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2568 ret=ret)
2569 return ret
2570
2571 def listkeys(self, namespace):
2572 self.hook('prelistkeys', throw=True, namespace=namespace)
2573 self.ui.debug('listing keys for "%s"\n' % namespace)
2574 values = pushkey.list(self, namespace)
2575 self.hook('listkeys', namespace=namespace, values=values)
2576 return values
2577
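# Editor's sketch: how the bookmark-push code earlier in this file drives
# this pushkey/listkeys pair (key = bookmark name, values = hex nodes; the
# remote only accepts the update if `old` matches its current value):

def move_remote_bookmark_sketch(remote, name, oldhex, newhex):
    if name not in remote.listkeys('bookmarks'):
        return False
    return remote.pushkey('bookmarks', name, oldhex, newhex)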
2578 def debugwireargs(self, one, two, three=None, four=None, five=None):
2579 '''used to test argument passing over the wire'''
2580 return "%s %s %s %s %s" % (one, two, three, four, five)
2581
2582 def savecommitmessage(self, text):
2583 fp = self.opener('last-message.txt', 'wb')
2584 try:
2585 fp.write(text)
2586 finally:
2587 fp.close()
2588 return self.pathto(fp.name[len(self.root)+1:])
2589
2590 # used to avoid circular references so destructors work
2591 def aftertrans(files):
2592 renamefiles = [tuple(t) for t in files]
2593 def a():
2594 for src, dest in renamefiles:
2595 try:
2596 util.rename(src, dest)
2597 except OSError: # journal file does not yet exist
2598 pass
2599 return a
2600
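# Editor's note: a hedged usage sketch. The transaction machinery passes
# aftertrans() as its post-close callback so the journal becomes the undo
# file once the transaction commits (paths here are illustrative):

def aftertrans_usage_sketch():
    renamer = aftertrans([('.hg/store/journal', '.hg/store/undo')])
    renamer()   # renames journal -> undo; a missing journal is ignored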
2601 def undoname(fn):
2602 base, name = os.path.split(fn)
2603 assert name.startswith('journal')
2604 return os.path.join(base, name.replace('journal', 'undo', 1))
2605
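# Editor's sketch: undoname() simply maps a journal file to its undo
# counterpart, replacing only the first occurrence of 'journal':

def undoname_example():
    return undoname('.hg/store/journal')   # -> '.hg/store/undo'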
2606 def instance(ui, path, create):
2607 return localrepository(ui, util.urllocalpath(path), create)
2608
2609 def islocal(path):
2610 return True