##// END OF EJS Templates
discovery: diet discovery.prepush from non-discovery code...
Pierre-Yves David -
r15932:4154338f default
parent child Browse files
Show More
@@ -1,277 +1,239 b''
1 1 # discovery.py - protocol changeset discovery functions
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import nullid, short
9 9 from i18n import _
10 10 import util, setdiscovery, treediscovery, phases
11 11
12 12 def findcommonincoming(repo, remote, heads=None, force=False):
13 13 """Return a tuple (common, anyincoming, heads) used to identify the common
14 14 subset of nodes between repo and remote.
15 15
16 16 "common" is a list of (at least) the heads of the common subset.
17 17 "anyincoming" is testable as a boolean indicating if any nodes are missing
18 18 locally. If remote does not support getbundle, this actually is a list of
19 19 roots of the nodes that would be incoming, to be supplied to
20 20 changegroupsubset. No code except for pull should be relying on this fact
21 21 any longer.
22 22 "heads" is either the supplied heads, or else the remote's heads.
23 23
24 24 If you pass heads and they are all known locally, the response lists just
25 25 these heads in "common" and in "heads".
26 26
27 27 Please use findcommonoutgoing to compute the set of outgoing nodes to give
28 28 extensions a good hook into outgoing.
29 29 """
30 30
31 31 if not remote.capable('getbundle'):
32 32 return treediscovery.findcommonincoming(repo, remote, heads, force)
33 33
34 34 if heads:
35 35 allknown = True
36 36 nm = repo.changelog.nodemap
37 37 for h in heads:
38 38 if nm.get(h) is None:
39 39 allknown = False
40 40 break
41 41 if allknown:
42 42 return (heads, False, heads)
43 43
44 44 res = setdiscovery.findcommonheads(repo.ui, repo, remote,
45 45 abortwhenunrelated=not force)
46 46 common, anyinc, srvheads = res
47 47 return (list(common), anyinc, heads or list(srvheads))
48 48
49 49 class outgoing(object):
50 50 '''Represents the set of nodes present in a local repo but not in a
51 51 (possibly) remote one.
52 52
53 53 Members:
54 54
55 55 missing is a list of all nodes present in local but not in remote.
56 56 common is a list of all nodes shared between the two repos.
57 57 excluded is the list of missing changesets that shouldn't be sent remotely.
58 58 missingheads is the list of heads of missing.
59 59 commonheads is the list of heads of common.
60 60
61 61 The sets are computed on demand from the heads, unless provided upfront
62 62 by discovery.'''
63 63
64 64 def __init__(self, revlog, commonheads, missingheads):
65 65 self.commonheads = commonheads
66 66 self.missingheads = missingheads
67 67 self._revlog = revlog
68 68 self._common = None
69 69 self._missing = None
70 70 self.excluded = []
71 71
72 72 def _computecommonmissing(self):
73 73 sets = self._revlog.findcommonmissing(self.commonheads,
74 74 self.missingheads)
75 75 self._common, self._missing = sets
76 76
77 77 @util.propertycache
78 78 def common(self):
79 79 if self._common is None:
80 80 self._computecommonmissing()
81 81 return self._common
82 82
83 83 @util.propertycache
84 84 def missing(self):
85 85 if self._missing is None:
86 86 self._computecommonmissing()
87 87 return self._missing
88 88
89 89 def findcommonoutgoing(repo, other, onlyheads=None, force=False, commoninc=None):
90 90 '''Return an outgoing instance to identify the nodes present in repo but
91 91 not in other.
92 92
93 93 If onlyheads is given, only nodes ancestral to nodes in onlyheads (inclusive)
94 94 are included. If you already know the local repo's heads, passing them in
95 95 onlyheads is faster than letting them be recomputed here.
96 96
97 97 If commoninc is given, it must be the result of a prior call to
98 98 findcommonincoming(repo, other, force) to avoid recomputing it here.'''
99 99 # declare an empty outgoing object to be filled later
100 100 og = outgoing(repo.changelog, None, None)
101 101
102 102 # get common set if not provided
103 103 if commoninc is None:
104 104 commoninc = findcommonincoming(repo, other, force=force)
105 105 og.commonheads, _any, _hds = commoninc
106 106
107 107 # compute outgoing
108 108 if not repo._phaseroots[phases.secret]:
109 109 og.missingheads = onlyheads or repo.heads()
110 110 elif onlyheads is None:
111 111 # use visible heads as it should be cached
112 112 og.missingheads = phases.visibleheads(repo)
113 113 og.excluded = [ctx.node() for ctx in repo.set('secret()')]
114 114 else:
115 115 # compute common, missing and exclude secret stuff
116 116 sets = repo.changelog.findcommonmissing(og.commonheads, onlyheads)
117 117 og._common, allmissing = sets
118 118 og._missing = missing = []
119 119 og._excluded = excluded = []
120 120 for node in allmissing:
121 121 if repo[node].phase() >= phases.secret:
122 122 excluded.append(node)
123 123 else:
124 124 missing.append(node)
125 125 if excluded:
126 126 # update missing heads
127 127 rset = repo.set('heads(%ln)', missing)
128 128 missingheads = [ctx.node() for ctx in rset]
129 129 else:
130 130 missingheads = onlyheads
131 131 og.missingheads = missingheads
132 132
133 133 return og
134 134
135 def prepush(repo, remote, force, revs, newbranch):
136 '''Analyze the local and remote repositories and determine which
137 changesets need to be pushed to the remote. Return value depends
138 on circumstances:
139
140 If we are not going to push anything, return a tuple (None, 1,
141 common) The third element "common" is the list of heads of the
142 common set between local and remote.
135 def checkheads(repo, remote, outgoing, remoteheads, newbranch=False):
136 """Check that a push won't add any outgoing head
143 137
144 Otherwise, return a tuple (changegroup, remoteheads, futureheads),
145 where changegroup is a readable file-like object whose read()
146 returns successive changegroup chunks ready to be sent over the
147 wire, remoteheads is the list of remote heads and futureheads is
148 the list of heads of the common set between local and remote to
149 be after push completion.
150 '''
151 commoninc = findcommonincoming(repo, remote, force=force)
152 outgoing = findcommonoutgoing(repo, remote, onlyheads=revs,
153 commoninc=commoninc, force=force)
154 _common, inc, remoteheads = commoninc
138 raise Abort error and display ui message as needed.
139 """
140 if remoteheads == [nullid]:
141 # remote is empty, nothing to check.
142 return
155 143
156 144 cl = repo.changelog
157 outg = outgoing.missing
158 common = outgoing.commonheads
145 if remote.capable('branchmap'):
146 # Check for each named branch if we're creating new remote heads.
147 # To be a remote head after push, node must be either:
148 # - unknown locally
149 # - a local outgoing head descended from update
150 # - a remote head that's known locally and not
151 # ancestral to an outgoing head
159 152
160 if not outg:
161 if outgoing.excluded:
162 repo.ui.status(_("no changes to push but %i secret changesets\n")
163 % len(outgoing.excluded))
164 else:
165 repo.ui.status(_("no changes found\n"))
166 return None, 1, common
153 # 1. Create set of branches involved in the push.
154 branches = set(repo[n].branch() for n in outgoing.missing)
167 155
168 if not force and remoteheads != [nullid]:
169 if remote.capable('branchmap'):
170 # Check for each named branch if we're creating new remote heads.
171 # To be a remote head after push, node must be either:
172 # - unknown locally
173 # - a local outgoing head descended from update
174 # - a remote head that's known locally and not
175 # ancestral to an outgoing head
176
177 # 1. Create set of branches involved in the push.
178 branches = set(repo[n].branch() for n in outg)
156 # 2. Check for new branches on the remote.
157 remotemap = remote.branchmap()
158 newbranches = branches - set(remotemap)
159 if newbranches and not newbranch: # new branch requires --new-branch
160 branchnames = ', '.join(sorted(newbranches))
161 raise util.Abort(_("push creates new remote branches: %s!")
162 % branchnames,
163 hint=_("use 'hg push --new-branch' to create"
164 " new remote branches"))
165 branches.difference_update(newbranches)
179 166
180 # 2. Check for new branches on the remote.
181 remotemap = remote.branchmap()
182 newbranches = branches - set(remotemap)
183 if newbranches and not newbranch: # new branch requires --new-branch
184 branchnames = ', '.join(sorted(newbranches))
185 raise util.Abort(_("push creates new remote branches: %s!")
186 % branchnames,
187 hint=_("use 'hg push --new-branch' to create"
188 " new remote branches"))
189 branches.difference_update(newbranches)
167 # 3. Construct the initial oldmap and newmap dicts.
168 # They contain information about the remote heads before and
169 # after the push, respectively.
170 # Heads not found locally are not included in either dict,
171 # since they won't be affected by the push.
172 # unsynced contains all branches with incoming changesets.
173 oldmap = {}
174 newmap = {}
175 unsynced = set()
176 for branch in branches:
177 remotebrheads = remotemap[branch]
178 prunedbrheads = [h for h in remotebrheads if h in cl.nodemap]
179 oldmap[branch] = prunedbrheads
180 newmap[branch] = list(prunedbrheads)
181 if len(remotebrheads) > len(prunedbrheads):
182 unsynced.add(branch)
190 183
191 # 3. Construct the initial oldmap and newmap dicts.
192 # They contain information about the remote heads before and
193 # after the push, respectively.
194 # Heads not found locally are not included in either dict,
195 # since they won't be affected by the push.
196 # unsynced contains all branches with incoming changesets.
197 oldmap = {}
198 newmap = {}
199 unsynced = set()
200 for branch in branches:
201 remotebrheads = remotemap[branch]
202 prunedbrheads = [h for h in remotebrheads if h in cl.nodemap]
203 oldmap[branch] = prunedbrheads
204 newmap[branch] = list(prunedbrheads)
205 if len(remotebrheads) > len(prunedbrheads):
206 unsynced.add(branch)
207
208 # 4. Update newmap with outgoing changes.
209 # This will possibly add new heads and remove existing ones.
210 ctxgen = (repo[n] for n in outg)
211 repo._updatebranchcache(newmap, ctxgen)
184 # 4. Update newmap with outgoing changes.
185 # This will possibly add new heads and remove existing ones.
186 ctxgen = (repo[n] for n in outgoing.missing)
187 repo._updatebranchcache(newmap, ctxgen)
212 188
213 else:
214 # 1-4b. old servers: Check for new topological heads.
215 # Construct {old,new}map with branch = None (topological branch).
216 # (code based on _updatebranchcache)
217 oldheads = set(h for h in remoteheads if h in cl.nodemap)
218 newheads = oldheads.union(outg)
219 if len(newheads) > 1:
220 for latest in reversed(outg):
221 if latest not in newheads:
222 continue
223 minhrev = min(cl.rev(h) for h in newheads)
224 reachable = cl.reachable(latest, cl.node(minhrev))
225 reachable.remove(latest)
226 newheads.difference_update(reachable)
227 branches = set([None])
228 newmap = {None: newheads}
229 oldmap = {None: oldheads}
230 unsynced = inc and branches or set()
189 else:
190 # 1-4b. old servers: Check for new topological heads.
191 # Construct {old,new}map with branch = None (topological branch).
192 # (code based on _updatebranchcache)
193 oldheads = set(h for h in remoteheads if h in cl.nodemap)
194 newheads = oldheads.union(outg)
195 if len(newheads) > 1:
196 for latest in reversed(outg):
197 if latest not in newheads:
198 continue
199 minhrev = min(cl.rev(h) for h in newheads)
200 reachable = cl.reachable(latest, cl.node(minhrev))
201 reachable.remove(latest)
202 newheads.difference_update(reachable)
203 branches = set([None])
204 newmap = {None: newheads}
205 oldmap = {None: oldheads}
206 unsynced = inc and branches or set()
231 207
232 # 5. Check for new heads.
233 # If there are more heads after the push than before, a suitable
234 # error message, depending on unsynced status, is displayed.
235 error = None
236 for branch in branches:
237 newhs = set(newmap[branch])
238 oldhs = set(oldmap[branch])
239 if len(newhs) > len(oldhs):
240 dhs = list(newhs - oldhs)
241 if error is None:
242 if branch not in ('default', None):
243 error = _("push creates new remote head %s "
244 "on branch '%s'!") % (short(dhs[0]), branch)
245 else:
246 error = _("push creates new remote head %s!"
247 ) % short(dhs[0])
248 if branch in unsynced:
249 hint = _("you should pull and merge or "
250 "use push -f to force")
251 else:
252 hint = _("did you forget to merge? "
253 "use push -f to force")
254 if branch is not None:
255 repo.ui.note(_("new remote heads on branch '%s'\n") % branch)
256 for h in dhs:
257 repo.ui.note(_("new remote head %s\n") % short(h))
258 if error:
259 raise util.Abort(error, hint=hint)
208 # 5. Check for new heads.
209 # If there are more heads after the push than before, a suitable
210 # error message, depending on unsynced status, is displayed.
211 error = None
212 for branch in branches:
213 newhs = set(newmap[branch])
214 oldhs = set(oldmap[branch])
215 if len(newhs) > len(oldhs):
216 dhs = list(newhs - oldhs)
217 if error is None:
218 if branch not in ('default', None):
219 error = _("push creates new remote head %s "
220 "on branch '%s'!") % (short(dhs[0]), branch)
221 else:
222 error = _("push creates new remote head %s!"
223 ) % short(dhs[0])
224 if branch in unsynced:
225 hint = _("you should pull and merge or "
226 "use push -f to force")
227 else:
228 hint = _("did you forget to merge? "
229 "use push -f to force")
230 if branch is not None:
231 repo.ui.note(_("new remote heads on branch '%s'\n") % branch)
232 for h in dhs:
233 repo.ui.note(_("new remote head %s\n") % short(h))
234 if error:
235 raise util.Abort(error, hint=hint)
260 236
261 # 6. Check for unsynced changes on involved branches.
262 if unsynced:
263 repo.ui.warn(_("note: unsynced remote changes!\n"))
264
265 if revs is None and not outgoing.excluded:
266 # push everything,
267 # use the fast path, no race possible on push
268 cg = repo._changegroup(outg, 'push')
269 else:
270 cg = repo.getlocalbundle('push', outgoing)
271 # no need to compute outg ancestor. All node in outg have either:
272 # - parents in outg
273 # - parents in common
274 # - nullid parent
275 rset = repo.set('heads(%ln + %ln)', common, outg)
276 futureheads = [ctx.node() for ctx in rset]
277 return cg, remoteheads, futureheads
237 # 6. Check for unsynced changes on involved branches.
238 if unsynced:
239 repo.ui.warn(_("note: unsynced remote changes!\n"))
@@ -1,2254 +1,2283 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import bin, hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import repo, changegroup, subrepo, discovery, pushkey
11 11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
12 12 import lock, transaction, store, encoding
13 13 import scmutil, util, extensions, hook, error, revset
14 14 import match as matchmod
15 15 import merge as mergemod
16 16 import tags as tagsmod
17 17 from lock import release
18 18 import weakref, errno, os, time, inspect
19 19 propertycache = util.propertycache
20 20 filecache = scmutil.filecache
21 21
22 22 class localrepository(repo.repository):
23 23 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
24 24 'known', 'getbundle'))
25 25 supportedformats = set(('revlogv1', 'generaldelta'))
26 26 supported = supportedformats | set(('store', 'fncache', 'shared',
27 27 'dotencode'))
28 28
29 29 def __init__(self, baseui, path=None, create=False):
30 30 repo.repository.__init__(self)
31 31 self.root = os.path.realpath(util.expandpath(path))
32 32 self.path = os.path.join(self.root, ".hg")
33 33 self.origroot = path
34 34 self.auditor = scmutil.pathauditor(self.root, self._checknested)
35 35 self.opener = scmutil.opener(self.path)
36 36 self.wopener = scmutil.opener(self.root)
37 37 self.baseui = baseui
38 38 self.ui = baseui.copy()
39 39 self._dirtyphases = False
40 40 # A list of callbacks to shape the phase if no data were found.
41 41 # Callbacks are in the form: func(repo, roots) --> processed root.
42 42 # This list is to be filled by extensions during repo setup
43 43 self._phasedefaults = []
44 44
45 45 try:
46 46 self.ui.readconfig(self.join("hgrc"), self.root)
47 47 extensions.loadall(self.ui)
48 48 except IOError:
49 49 pass
50 50
51 51 if not os.path.isdir(self.path):
52 52 if create:
53 53 if not os.path.exists(path):
54 54 util.makedirs(path)
55 55 util.makedir(self.path, notindexed=True)
56 56 requirements = ["revlogv1"]
57 57 if self.ui.configbool('format', 'usestore', True):
58 58 os.mkdir(os.path.join(self.path, "store"))
59 59 requirements.append("store")
60 60 if self.ui.configbool('format', 'usefncache', True):
61 61 requirements.append("fncache")
62 62 if self.ui.configbool('format', 'dotencode', True):
63 63 requirements.append('dotencode')
64 64 # create an invalid changelog
65 65 self.opener.append(
66 66 "00changelog.i",
67 67 '\0\0\0\2' # represents revlogv2
68 68 ' dummy changelog to prevent using the old repo layout'
69 69 )
70 70 if self.ui.configbool('format', 'generaldelta', False):
71 71 requirements.append("generaldelta")
72 72 requirements = set(requirements)
73 73 else:
74 74 raise error.RepoError(_("repository %s not found") % path)
75 75 elif create:
76 76 raise error.RepoError(_("repository %s already exists") % path)
77 77 else:
78 78 try:
79 79 requirements = scmutil.readrequires(self.opener, self.supported)
80 80 except IOError, inst:
81 81 if inst.errno != errno.ENOENT:
82 82 raise
83 83 requirements = set()
84 84
85 85 self.sharedpath = self.path
86 86 try:
87 87 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
88 88 if not os.path.exists(s):
89 89 raise error.RepoError(
90 90 _('.hg/sharedpath points to nonexistent directory %s') % s)
91 91 self.sharedpath = s
92 92 except IOError, inst:
93 93 if inst.errno != errno.ENOENT:
94 94 raise
95 95
96 96 self.store = store.store(requirements, self.sharedpath, scmutil.opener)
97 97 self.spath = self.store.path
98 98 self.sopener = self.store.opener
99 99 self.sjoin = self.store.join
100 100 self.opener.createmode = self.store.createmode
101 101 self._applyrequirements(requirements)
102 102 if create:
103 103 self._writerequirements()
104 104
105 105
106 106 self._branchcache = None
107 107 self._branchcachetip = None
108 108 self.filterpats = {}
109 109 self._datafilters = {}
110 110 self._transref = self._lockref = self._wlockref = None
111 111
112 112 # A cache for various files under .hg/ that tracks file changes,
113 113 # (used by the filecache decorator)
114 114 #
115 115 # Maps a property name to its util.filecacheentry
116 116 self._filecache = {}
117 117
118 118 def _applyrequirements(self, requirements):
119 119 self.requirements = requirements
120 120 openerreqs = set(('revlogv1', 'generaldelta'))
121 121 self.sopener.options = dict((r, 1) for r in requirements
122 122 if r in openerreqs)
123 123
124 124 def _writerequirements(self):
125 125 reqfile = self.opener("requires", "w")
126 126 for r in self.requirements:
127 127 reqfile.write("%s\n" % r)
128 128 reqfile.close()
129 129
130 130 def _checknested(self, path):
131 131 """Determine if path is a legal nested repository."""
132 132 if not path.startswith(self.root):
133 133 return False
134 134 subpath = path[len(self.root) + 1:]
135 135 normsubpath = util.pconvert(subpath)
136 136
137 137 # XXX: Checking against the current working copy is wrong in
138 138 # the sense that it can reject things like
139 139 #
140 140 # $ hg cat -r 10 sub/x.txt
141 141 #
142 142 # if sub/ is no longer a subrepository in the working copy
143 143 # parent revision.
144 144 #
145 145 # However, it can of course also allow things that would have
146 146 # been rejected before, such as the above cat command if sub/
147 147 # is a subrepository now, but was a normal directory before.
148 148 # The old path auditor would have rejected by mistake since it
149 149 # panics when it sees sub/.hg/.
150 150 #
151 151 # All in all, checking against the working copy seems sensible
152 152 # since we want to prevent access to nested repositories on
153 153 # the filesystem *now*.
154 154 ctx = self[None]
155 155 parts = util.splitpath(subpath)
156 156 while parts:
157 157 prefix = '/'.join(parts)
158 158 if prefix in ctx.substate:
159 159 if prefix == normsubpath:
160 160 return True
161 161 else:
162 162 sub = ctx.sub(prefix)
163 163 return sub.checknested(subpath[len(prefix) + 1:])
164 164 else:
165 165 parts.pop()
166 166 return False
167 167
168 168 @filecache('bookmarks')
169 169 def _bookmarks(self):
170 170 return bookmarks.read(self)
171 171
172 172 @filecache('bookmarks.current')
173 173 def _bookmarkcurrent(self):
174 174 return bookmarks.readcurrent(self)
175 175
176 176 def _writebookmarks(self, marks):
177 177 bookmarks.write(self)
178 178
179 179 @filecache('phaseroots')
180 180 def _phaseroots(self):
181 181 self._dirtyphases = False
182 182 phaseroots = phases.readroots(self)
183 183 phases.filterunknown(self, phaseroots)
184 184 return phaseroots
185 185
186 186 @propertycache
187 187 def _phaserev(self):
188 188 cache = [phases.public] * len(self)
189 189 for phase in phases.trackedphases:
190 190 roots = map(self.changelog.rev, self._phaseroots[phase])
191 191 if roots:
192 192 for rev in roots:
193 193 cache[rev] = phase
194 194 for rev in self.changelog.descendants(*roots):
195 195 cache[rev] = phase
196 196 return cache
197 197
198 198 @filecache('00changelog.i', True)
199 199 def changelog(self):
200 200 c = changelog.changelog(self.sopener)
201 201 if 'HG_PENDING' in os.environ:
202 202 p = os.environ['HG_PENDING']
203 203 if p.startswith(self.root):
204 204 c.readpending('00changelog.i.a')
205 205 return c
206 206
207 207 @filecache('00manifest.i', True)
208 208 def manifest(self):
209 209 return manifest.manifest(self.sopener)
210 210
211 211 @filecache('dirstate')
212 212 def dirstate(self):
213 213 warned = [0]
214 214 def validate(node):
215 215 try:
216 216 self.changelog.rev(node)
217 217 return node
218 218 except error.LookupError:
219 219 if not warned[0]:
220 220 warned[0] = True
221 221 self.ui.warn(_("warning: ignoring unknown"
222 222 " working parent %s!\n") % short(node))
223 223 return nullid
224 224
225 225 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
226 226
227 227 def __getitem__(self, changeid):
228 228 if changeid is None:
229 229 return context.workingctx(self)
230 230 return context.changectx(self, changeid)
231 231
232 232 def __contains__(self, changeid):
233 233 try:
234 234 return bool(self.lookup(changeid))
235 235 except error.RepoLookupError:
236 236 return False
237 237
238 238 def __nonzero__(self):
239 239 return True
240 240
241 241 def __len__(self):
242 242 return len(self.changelog)
243 243
244 244 def __iter__(self):
245 245 for i in xrange(len(self)):
246 246 yield i
247 247
248 248 def revs(self, expr, *args):
249 249 '''Return a list of revisions matching the given revset'''
250 250 expr = revset.formatspec(expr, *args)
251 251 m = revset.match(None, expr)
252 252 return [r for r in m(self, range(len(self)))]
253 253
254 254 def set(self, expr, *args):
255 255 '''
256 256 Yield a context for each matching revision, after doing arg
257 257 replacement via revset.formatspec
258 258 '''
259 259 for r in self.revs(expr, *args):
260 260 yield self[r]
261 261
262 262 def url(self):
263 263 return 'file:' + self.root
264 264
265 265 def hook(self, name, throw=False, **args):
266 266 return hook.hook(self.ui, self, name, throw, **args)
267 267
268 268 tag_disallowed = ':\r\n'
269 269
270 270 def _tag(self, names, node, message, local, user, date, extra={}):
271 271 if isinstance(names, str):
272 272 allchars = names
273 273 names = (names,)
274 274 else:
275 275 allchars = ''.join(names)
276 276 for c in self.tag_disallowed:
277 277 if c in allchars:
278 278 raise util.Abort(_('%r cannot be used in a tag name') % c)
279 279
280 280 branches = self.branchmap()
281 281 for name in names:
282 282 self.hook('pretag', throw=True, node=hex(node), tag=name,
283 283 local=local)
284 284 if name in branches:
285 285 self.ui.warn(_("warning: tag %s conflicts with existing"
286 286 " branch name\n") % name)
287 287
288 288 def writetags(fp, names, munge, prevtags):
289 289 fp.seek(0, 2)
290 290 if prevtags and prevtags[-1] != '\n':
291 291 fp.write('\n')
292 292 for name in names:
293 293 m = munge and munge(name) or name
294 294 if self._tagscache.tagtypes and name in self._tagscache.tagtypes:
295 295 old = self.tags().get(name, nullid)
296 296 fp.write('%s %s\n' % (hex(old), m))
297 297 fp.write('%s %s\n' % (hex(node), m))
298 298 fp.close()
299 299
300 300 prevtags = ''
301 301 if local:
302 302 try:
303 303 fp = self.opener('localtags', 'r+')
304 304 except IOError:
305 305 fp = self.opener('localtags', 'a')
306 306 else:
307 307 prevtags = fp.read()
308 308
309 309 # local tags are stored in the current charset
310 310 writetags(fp, names, None, prevtags)
311 311 for name in names:
312 312 self.hook('tag', node=hex(node), tag=name, local=local)
313 313 return
314 314
315 315 try:
316 316 fp = self.wfile('.hgtags', 'rb+')
317 317 except IOError, e:
318 318 if e.errno != errno.ENOENT:
319 319 raise
320 320 fp = self.wfile('.hgtags', 'ab')
321 321 else:
322 322 prevtags = fp.read()
323 323
324 324 # committed tags are stored in UTF-8
325 325 writetags(fp, names, encoding.fromlocal, prevtags)
326 326
327 327 fp.close()
328 328
329 329 self.invalidatecaches()
330 330
331 331 if '.hgtags' not in self.dirstate:
332 332 self[None].add(['.hgtags'])
333 333
334 334 m = matchmod.exact(self.root, '', ['.hgtags'])
335 335 tagnode = self.commit(message, user, date, extra=extra, match=m)
336 336
337 337 for name in names:
338 338 self.hook('tag', node=hex(node), tag=name, local=local)
339 339
340 340 return tagnode
341 341
342 342 def tag(self, names, node, message, local, user, date):
343 343 '''tag a revision with one or more symbolic names.
344 344
345 345 names is a list of strings or, when adding a single tag, names may be a
346 346 string.
347 347
348 348 if local is True, the tags are stored in a per-repository file.
349 349 otherwise, they are stored in the .hgtags file, and a new
350 350 changeset is committed with the change.
351 351
352 352 keyword arguments:
353 353
354 354 local: whether to store tags in non-version-controlled file
355 355 (default False)
356 356
357 357 message: commit message to use if committing
358 358
359 359 user: name of user to use if committing
360 360
361 361 date: date tuple to use if committing'''
362 362
363 363 if not local:
364 364 for x in self.status()[:5]:
365 365 if '.hgtags' in x:
366 366 raise util.Abort(_('working copy of .hgtags is changed '
367 367 '(please commit .hgtags manually)'))
368 368
369 369 self.tags() # instantiate the cache
370 370 self._tag(names, node, message, local, user, date)
371 371
372 372 @propertycache
373 373 def _tagscache(self):
374 374 '''Returns a tagscache object that contains various tags related caches.'''
375 375
376 376 # This simplifies its cache management by having one decorated
377 377 # function (this one) and the rest simply fetch things from it.
378 378 class tagscache(object):
379 379 def __init__(self):
380 380 # These two define the set of tags for this repository. tags
381 381 # maps tag name to node; tagtypes maps tag name to 'global' or
382 382 # 'local'. (Global tags are defined by .hgtags across all
383 383 # heads, and local tags are defined in .hg/localtags.)
384 384 # They constitute the in-memory cache of tags.
385 385 self.tags = self.tagtypes = None
386 386
387 387 self.nodetagscache = self.tagslist = None
388 388
389 389 cache = tagscache()
390 390 cache.tags, cache.tagtypes = self._findtags()
391 391
392 392 return cache
393 393
394 394 def tags(self):
395 395 '''return a mapping of tag to node'''
396 396 return self._tagscache.tags
397 397
398 398 def _findtags(self):
399 399 '''Do the hard work of finding tags. Return a pair of dicts
400 400 (tags, tagtypes) where tags maps tag name to node, and tagtypes
401 401 maps tag name to a string like \'global\' or \'local\'.
402 402 Subclasses or extensions are free to add their own tags, but
403 403 should be aware that the returned dicts will be retained for the
404 404 duration of the localrepo object.'''
405 405
406 406 # XXX what tagtype should subclasses/extensions use? Currently
407 407 # mq and bookmarks add tags, but do not set the tagtype at all.
408 408 # Should each extension invent its own tag type? Should there
409 409 # be one tagtype for all such "virtual" tags? Or is the status
410 410 # quo fine?
411 411
412 412 alltags = {} # map tag name to (node, hist)
413 413 tagtypes = {}
414 414
415 415 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
416 416 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
417 417
418 418 # Build the return dicts. Have to re-encode tag names because
419 419 # the tags module always uses UTF-8 (in order not to lose info
420 420 # writing to the cache), but the rest of Mercurial wants them in
421 421 # local encoding.
422 422 tags = {}
423 423 for (name, (node, hist)) in alltags.iteritems():
424 424 if node != nullid:
425 425 try:
426 426 # ignore tags to unknown nodes
427 427 self.changelog.lookup(node)
428 428 tags[encoding.tolocal(name)] = node
429 429 except error.LookupError:
430 430 pass
431 431 tags['tip'] = self.changelog.tip()
432 432 tagtypes = dict([(encoding.tolocal(name), value)
433 433 for (name, value) in tagtypes.iteritems()])
434 434 return (tags, tagtypes)
435 435
436 436 def tagtype(self, tagname):
437 437 '''
438 438 return the type of the given tag. result can be:
439 439
440 440 'local' : a local tag
441 441 'global' : a global tag
442 442 None : tag does not exist
443 443 '''
444 444
445 445 return self._tagscache.tagtypes.get(tagname)
446 446
447 447 def tagslist(self):
448 448 '''return a list of tags ordered by revision'''
449 449 if not self._tagscache.tagslist:
450 450 l = []
451 451 for t, n in self.tags().iteritems():
452 452 r = self.changelog.rev(n)
453 453 l.append((r, t, n))
454 454 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
455 455
456 456 return self._tagscache.tagslist
457 457
458 458 def nodetags(self, node):
459 459 '''return the tags associated with a node'''
460 460 if not self._tagscache.nodetagscache:
461 461 nodetagscache = {}
462 462 for t, n in self.tags().iteritems():
463 463 nodetagscache.setdefault(n, []).append(t)
464 464 for tags in nodetagscache.itervalues():
465 465 tags.sort()
466 466 self._tagscache.nodetagscache = nodetagscache
467 467 return self._tagscache.nodetagscache.get(node, [])
468 468
469 469 def nodebookmarks(self, node):
470 470 marks = []
471 471 for bookmark, n in self._bookmarks.iteritems():
472 472 if n == node:
473 473 marks.append(bookmark)
474 474 return sorted(marks)
475 475
476 476 def _branchtags(self, partial, lrev):
477 477 # TODO: rename this function?
478 478 tiprev = len(self) - 1
479 479 if lrev != tiprev:
480 480 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
481 481 self._updatebranchcache(partial, ctxgen)
482 482 self._writebranchcache(partial, self.changelog.tip(), tiprev)
483 483
484 484 return partial
485 485
    def updatebranchcache(self):
        '''refresh the in-memory branch head cache up to the current tip'''
        tip = self.changelog.tip()
        if self._branchcache is not None and self._branchcachetip == tip:
            # cache already matches the current tip: nothing to do
            return

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if oldtip is None or oldtip not in self.changelog.nodemap:
            # no usable in-memory cache (first run, or the old tip was
            # stripped): start from the on-disk cache
            partial, last, lrev = self._readbranchcache()
        else:
            # incremental update from the previously cached tip
            lrev = self.changelog.rev(oldtip)
            partial = self._branchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just tips)
        self._branchcache = partial
502 502
    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]}'''
        # make sure the cache reflects the current tip before handing it out
        self.updatebranchcache()
        return self._branchcache
507 507
508 508 def branchtags(self):
509 509 '''return a dict where branch names map to the tipmost head of
510 510 the branch, open heads come before closed'''
511 511 bt = {}
512 512 for bn, heads in self.branchmap().iteritems():
513 513 tip = heads[-1]
514 514 for h in reversed(heads):
515 515 if 'close' not in self.changelog.read(h)[5]:
516 516 tip = h
517 517 break
518 518 bt[bn] = tip
519 519 return bt
520 520
    def _readbranchcache(self):
        '''read the on-disk branch head cache.

        Returns a (partial, last, lrev) tuple where partial maps branch
        names to lists of head nodes and last/lrev identify the tip node
        and revision the cache was valid for.  Any read or parse problem
        yields an empty cache.
        '''
        partial = {}
        try:
            f = self.opener("cache/branchheads")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            # first line: "<tip hex> <tip rev>"
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            # remaining lines: "<head hex> <branch name>"
            for l in lines:
                if not l:
                    continue
                node, label = l.split(" ", 1)
                label = encoding.tolocal(label.strip())
                partial.setdefault(label, []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            # a corrupt or stale cache is not fatal: start from scratch
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev
549 549
    def _writebranchcache(self, branches, tip, tiprev):
        '''atomically write the branch head cache valid for tip/tiprev.

        Write failures are ignored: the cache is only an optimization.
        '''
        try:
            f = self.opener("cache/branchheads", "w", atomictemp=True)
            # header line mirrors the format read by _readbranchcache
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
            f.close()
        except (IOError, OSError):
            pass
560 560
    def _updatebranchcache(self, partial, ctxgen):
        '''update the branch head cache 'partial' (branch -> [heads]) with
        the changesets produced by ctxgen, pruning entries that stop being
        heads'''
        # collect new branch entries
        newbranches = {}
        for c in ctxgen:
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = partial.setdefault(branch, [])
            bheads.extend(newnodes)
            if len(bheads) <= 1:
                continue
            # keep candidate heads sorted by revision so bheads[0] is oldest
            bheads = sorted(bheads, key=lambda x: self[x].rev())
            # starting from tip means fewer passes over reachable
            while newnodes:
                latest = newnodes.pop()
                if latest not in bheads:
                    continue
                minbhrev = self[bheads[0]].node()
                reachable = self.changelog.reachable(latest, minbhrev)
                reachable.remove(latest)
                if reachable:
                    # any other head reachable from 'latest' is not a head
                    bheads = [b for b in bheads if b not in reachable]
            partial[branch] = bheads
586 586
    def lookup(self, key):
        '''resolve 'key' to a changelog node.

        'key' may be an integer revision, one of the symbolic names
        '.', 'null' or 'tip', a full node, a bookmark, tag or branch
        name, or an unambiguous node prefix — tried in that order.

        Raises RepoLookupError for an unknown key, or Abort when the key
        matches an unknown dirstate parent (damaged dirstate).
        '''
        if isinstance(key, int):
            return self.changelog.node(key)
        elif key == '.':
            return self.dirstate.p1()
        elif key == 'null':
            return nullid
        elif key == 'tip':
            return self.changelog.tip()
        n = self.changelog._match(key)
        if n:
            return n
        if key in self._bookmarks:
            return self._bookmarks[key]
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        # last resort: unambiguous node hex prefix
        n = self.changelog._partialmatch(key)
        if n:
            return n

        # can't find key, check if it might have come from damaged dirstate
        if key in self.dirstate.parents():
            raise error.Abort(_("working directory has unknown parent '%s'!")
                              % short(key))
        try:
            # show binary nodes as hex in the error message
            if len(key) == 20:
                key = hex(key)
        except TypeError:
            pass
        raise error.RepoLookupError(_("unknown revision '%s'") % key)
619 619
620 620 def lookupbranch(self, key, remote=None):
621 621 repo = remote or self
622 622 if key in repo.branchmap():
623 623 return key
624 624
625 625 repo = (remote and remote.local()) and remote or self
626 626 return repo[key].branch()
627 627
628 628 def known(self, nodes):
629 629 nm = self.changelog.nodemap
630 630 result = []
631 631 for n in nodes:
632 632 r = nm.get(n)
633 633 resp = not (r is None or self._phaserev[r] >= phases.secret)
634 634 result.append(resp)
635 635 return result
636 636
    def local(self):
        '''return self; callers test the result for truth to distinguish a
        local repository from a remote peer (see lookupbranch)'''
        return self
639 639
    def cancopy(self):
        # copy-cloning is only allowed when the base class permits it and
        # no secret phase roots exist (i.e. no secret changesets to leak).
        # NOTE(review): 'repo' here is a module-level name providing the
        # repository base class — confirm against the file's imports.
        return (repo.repository.cancopy(self)
                and not self._phaseroots[phases.secret])
643 643
    def join(self, f):
        '''return f joined onto self.path (presumably the .hg directory —
        journal/bookmark files are addressed through this; see
        _writejournal)'''
        return os.path.join(self.path, f)
646 646
    def wjoin(self, f):
        '''return f joined onto the repository root (working directory
        path)'''
        return os.path.join(self.root, f)
649 649
650 650 def file(self, f):
651 651 if f[0] == '/':
652 652 f = f[1:]
653 653 return filelog.filelog(self.sopener, f)
654 654
    def changectx(self, changeid):
        '''return the change context for changeid (delegates to
        self[changeid])'''
        return self[changeid]
657 657
    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        # changeid None addresses the working directory context
        # (self[None] is used that way in commit)
        return self[changeid].parents()
661 661
    def filectx(self, path, changeid=None, fileid=None):
        """return a file context for path.

        changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)
666 666
    def getcwd(self):
        '''return the current working directory as seen by the dirstate'''
        return self.dirstate.getcwd()
669 669
    def pathto(self, f, cwd=None):
        '''return f rendered relative to cwd (delegates to the dirstate)'''
        return self.dirstate.pathto(f, cwd)
672 672
    def wfile(self, f, mode='r'):
        '''open f in the working directory via the working-dir opener'''
        return self.wopener(f, mode)
675 675
    def _link(self, f):
        '''True if f is a symlink in the working directory'''
        return os.path.islink(self.wjoin(f))
678 678
679 679 def _loadfilter(self, filter):
680 680 if filter not in self.filterpats:
681 681 l = []
682 682 for pat, cmd in self.ui.configitems(filter):
683 683 if cmd == '!':
684 684 continue
685 685 mf = matchmod.match(self.root, '', [pat])
686 686 fn = None
687 687 params = cmd
688 688 for name, filterfn in self._datafilters.iteritems():
689 689 if cmd.startswith(name):
690 690 fn = filterfn
691 691 params = cmd[len(name):].lstrip()
692 692 break
693 693 if not fn:
694 694 fn = lambda s, c, **kwargs: util.filter(s, c)
695 695 # Wrap old filters not supporting keyword arguments
696 696 if not inspect.getargspec(fn)[2]:
697 697 oldfn = fn
698 698 fn = lambda s, c, **kwargs: oldfn(s, c)
699 699 l.append((mf, fn, params))
700 700 self.filterpats[filter] = l
701 701 return self.filterpats[filter]
702 702
703 703 def _filter(self, filterpats, filename, data):
704 704 for mf, fn, cmd in filterpats:
705 705 if mf(filename):
706 706 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
707 707 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
708 708 break
709 709
710 710 return data
711 711
    @propertycache
    def _encodefilterpats(self):
        # lazily-computed [encode] filter patterns (see _loadfilter)
        return self._loadfilter('encode')
715 715
    @propertycache
    def _decodefilterpats(self):
        # lazily-computed [decode] filter patterns (see _loadfilter)
        return self._loadfilter('decode')
719 719
    def adddatafilter(self, name, filter):
        '''register a named data filter; _loadfilter matches config
        commands against these names'''
        self._datafilters[name] = filter
722 722
723 723 def wread(self, filename):
724 724 if self._link(filename):
725 725 data = os.readlink(self.wjoin(filename))
726 726 else:
727 727 data = self.wopener.read(filename)
728 728 return self._filter(self._encodefilterpats, filename, data)
729 729
730 730 def wwrite(self, filename, data, flags):
731 731 data = self._filter(self._decodefilterpats, filename, data)
732 732 if 'l' in flags:
733 733 self.wopener.symlink(data, filename)
734 734 else:
735 735 self.wopener.write(filename, data)
736 736 if 'x' in flags:
737 737 util.setflags(self.wjoin(filename), False, True)
738 738
    def wwritedata(self, filename, data):
        '''return data run through the decode filters for filename,
        without writing anything'''
        return self._filter(self._decodefilterpats, filename, data)
741 741
    def transaction(self, desc):
        '''open a new transaction, or nest into the currently running one.

        Aborts when an abandoned journal from a crashed transaction is
        found on disk.
        '''
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        # snapshot state files so the transaction can be rolled back
        journalfiles = self._writejournal(desc)
        renames = [(x, undoname(x)) for x in journalfiles]

        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        # hold only a weak reference: the caller owns the transaction
        self._transref = weakref.ref(tr)
        return tr
761 761
    def _writejournal(self, desc):
        '''snapshot dirstate, branch, description, bookmarks and phase
        roots into journal.* files; return the tuple of journal paths the
        transaction machinery should track'''
        # save dirstate for rollback
        try:
            ds = self.opener.read("dirstate")
        except IOError:
            # no dirstate yet: journal an empty one
            ds = ""
        self.opener.write("journal.dirstate", ds)
        self.opener.write("journal.branch",
                          encoding.fromlocal(self.dirstate.branch()))
        self.opener.write("journal.desc",
                          "%d\n%s\n" % (len(self), desc))

        bkname = self.join('bookmarks')
        if os.path.exists(bkname):
            util.copyfile(bkname, self.join('journal.bookmarks'))
        else:
            self.opener.write('journal.bookmarks', '')
        phasesname = self.sjoin('phaseroots')
        if os.path.exists(phasesname):
            util.copyfile(phasesname, self.sjoin('journal.phaseroots'))
        else:
            self.sopener.write('journal.phaseroots', '')

        return (self.sjoin('journal'), self.join('journal.dirstate'),
                self.join('journal.branch'), self.join('journal.desc'),
                self.join('journal.bookmarks'),
                self.sjoin('journal.phaseroots'))
789 789
790 790 def recover(self):
791 791 lock = self.lock()
792 792 try:
793 793 if os.path.exists(self.sjoin("journal")):
794 794 self.ui.status(_("rolling back interrupted transaction\n"))
795 795 transaction.rollback(self.sopener, self.sjoin("journal"),
796 796 self.ui.warn)
797 797 self.invalidate()
798 798 return True
799 799 else:
800 800 self.ui.warn(_("no interrupted transaction available\n"))
801 801 return False
802 802 finally:
803 803 lock.release()
804 804
805 805 def rollback(self, dryrun=False, force=False):
806 806 wlock = lock = None
807 807 try:
808 808 wlock = self.wlock()
809 809 lock = self.lock()
810 810 if os.path.exists(self.sjoin("undo")):
811 811 return self._rollback(dryrun, force)
812 812 else:
813 813 self.ui.warn(_("no rollback information available\n"))
814 814 return 1
815 815 finally:
816 816 release(lock, wlock)
817 817
    def _rollback(self, dryrun, force):
        '''restore the repository from the undo files of the last
        transaction.

        Returns 0 on success.  With dryrun only report what would be
        done.  Without force, refuse to roll back a commit while the
        working directory is not checked out at tip.
        '''
        ui = self.ui
        try:
            # undo.desc: "<old repo length>\n<desc>[\n<detail>]"
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            # no undo.desc: we don't know what we are rolling back
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
        if os.path.exists(self.join('undo.bookmarks')):
            util.rename(self.join('undo.bookmarks'),
                        self.join('bookmarks'))
        if os.path.exists(self.sjoin('undo.phaseroots')):
            util.rename(self.sjoin('undo.phaseroots'),
                        self.sjoin('phaseroots'))
        self.invalidate()

        # was the working directory parent destroyed by the rollback?
        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # restore the pre-transaction dirstate and branch as well
            util.rename(self.join('undo.dirstate'), self.join('dirstate'))
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(branch)
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        self.destroyed()
        return 0
880 880
881 881 def invalidatecaches(self):
882 882 try:
883 883 delattr(self, '_tagscache')
884 884 except AttributeError:
885 885 pass
886 886
887 887 self._branchcache = None # in UTF-8
888 888 self._branchcachetip = None
889 889
890 890 def invalidatedirstate(self):
891 891 '''Invalidates the dirstate, causing the next call to dirstate
892 892 to check if it was modified since the last time it was read,
893 893 rereading it if it has.
894 894
895 895 This is different to dirstate.invalidate() that it doesn't always
896 896 rereads the dirstate. Use dirstate.invalidate() if you want to
897 897 explicitly read the dirstate again (i.e. restoring it to a previous
898 898 known good state).'''
899 899 try:
900 900 delattr(self, 'dirstate')
901 901 except AttributeError:
902 902 pass
903 903
904 904 def invalidate(self):
905 905 for k in self._filecache:
906 906 # dirstate is invalidated separately in invalidatedirstate()
907 907 if k == 'dirstate':
908 908 continue
909 909
910 910 try:
911 911 delattr(self, k)
912 912 except AttributeError:
913 913 pass
914 914 self.invalidatecaches()
915 915
    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        '''acquire the lock file 'lockname', optionally waiting for the
        current holder to release it, then run acquirefn and return the
        lock object'''
        try:
            # try a non-blocking acquire first
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
930 930
    def _afterlock(self, callback):
        """add a callback to the current repository lock.

        The callback will be executed on lock release.  When no lock is
        currently held the callback is silently discarded."""
        l = self._lockref and self._lockref()
        if l:
            l.postrelease.append(callback)
938 938
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.)'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            # re-entrant acquire: bump the existing lock
            l.lock()
            return l

        def unlock():
            # flush pending store data, phase roots and cached file state
            # before the lock is dropped
            self.store.write()
            if self._dirtyphases:
                phases.writeroots(self)
            for k, ce in self._filecache.items():
                if k == 'dirstate':
                    continue
                ce.refresh()

        l = self._lock(self.sjoin("lock"), wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l
961 961
    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            # re-entrant acquire: bump the existing lock
            l.lock()
            return l

        def unlock():
            # write out the dirstate and refresh its cache entry before
            # the lock is dropped
            self.dirstate.write()
            ce = self._filecache.get('dirstate')
            if ce:
                ce.refresh()

        l = self._lock(self.join("wlock"), wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l
982 982
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction

        Records copy/rename metadata, normalizes the file parents and,
        when the file actually changed, appends fname to changelist and
        adds a new filelog revision.  Returns the resulting file node
        (or the unchanged first parent node).
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                # copy source is gone entirely: commit without copy metadata
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
1062 1062
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.

        Returns the node of the new changeset, or None when there is
        nothing to commit.
        """
        # NOTE(review): 'extra' has a mutable default; it is only read
        # here (extra.get), never mutated

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            # track visited directories and bad files so explicit
            # patterns can be validated below
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            removedsubs = set()
            if '.hgsub' in wctx:
                # only manage subrepos and .hgsubstate if .hgsub is present
                for p in wctx.parents():
                    removedsubs.update(s for s in p.substate if match(s))
                for s in wctx.substate:
                    removedsubs.discard(s)
                    if match(s) and wctx.sub(s).dirty():
                        subs.append(s)
                if (subs or removedsubs):
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    if '.hgsubstate' not in changes[0]:
                        changes[0].insert(0, '.hgsubstate')
                    if '.hgsubstate' in changes[2]:
                        changes[2].remove('.hgsubstate')
            elif '.hgsub' in changes[2]:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
                    changes[2].insert(0, '.hgsubstate')

            if subs and not self.ui.configbool('ui', 'commitsubrepos', False):
                changedsubs = [s for s in subs if wctx.sub(s).dirty(True)]
                if changedsubs:
                    raise util.Abort(_("uncommitted changes in subrepo %s")
                                     % changedsubs[0],
                                     hint=_("use --subrepos for recursive commit"))

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            # nothing changed and not closing or crossing a branch:
            # nothing to commit
            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs
            if subs or removedsubs:
                state = wctx.substate.copy()
                for s in sorted(subs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                        subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    state[s] = (state[s][0], sr)
                subrepo.writestate(self, state)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except:
                # intentionally broad: point at the saved message, then
                # re-raise whatever aborted the commit
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, p1, ret)
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.drop(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
        return ret
1205 1205
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.

        Returns the node of the new changeset.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        # a missing file (ENOENT) is treated as a removal
                        # unless the caller asked for strict errors
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                # no file changes: reuse the first parent's manifest
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            # set the new commit in its proper phase
            targetphase = self.ui.configint('phases', 'new-commit',
                                            phases.draft)
            if targetphase:
                # retracting the boundary does not alter parent changesets:
                # if a parent has a higher phase the resulting phase will
                # be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, targetphase, [n])
            tr.close()
            self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()
1287 1287
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.'''
        # XXX it might be nice if we could take the list of destroyed
        # nodes, but I don't see an easy way for rollback() to do that

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.

        # dropping every derived cache is the conservative response to
        # history having changed under us
        self.invalidatecaches()
1306 1306
    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function

        node None selects the working directory context.
        '''
        return self[node].walk(match)
1314 1314
1315 1315 def status(self, node1='.', node2=None, match=None,
1316 1316 ignored=False, clean=False, unknown=False,
1317 1317 listsubrepos=False):
1318 1318 """return status of files between two nodes or node and working directory
1319 1319
1320 1320 If node1 is None, use the first dirstate parent instead.
1321 1321 If node2 is None, compare node1 with working directory.
1322 1322 """
1323 1323
1324 1324 def mfmatches(ctx):
1325 1325 mf = ctx.manifest().copy()
1326 1326 for fn in mf.keys():
1327 1327 if not match(fn):
1328 1328 del mf[fn]
1329 1329 return mf
1330 1330
1331 1331 if isinstance(node1, context.changectx):
1332 1332 ctx1 = node1
1333 1333 else:
1334 1334 ctx1 = self[node1]
1335 1335 if isinstance(node2, context.changectx):
1336 1336 ctx2 = node2
1337 1337 else:
1338 1338 ctx2 = self[node2]
1339 1339
1340 1340 working = ctx2.rev() is None
1341 1341 parentworking = working and ctx1 == self['.']
1342 1342 match = match or matchmod.always(self.root, self.getcwd())
1343 1343 listignored, listclean, listunknown = ignored, clean, unknown
1344 1344
1345 1345 # load earliest manifest first for caching reasons
1346 1346 if not working and ctx2.rev() < ctx1.rev():
1347 1347 ctx2.manifest()
1348 1348
1349 1349 if not parentworking:
1350 1350 def bad(f, msg):
1351 1351 if f not in ctx1:
1352 1352 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1353 1353 match.bad = bad
1354 1354
1355 1355 if working: # we need to scan the working dir
1356 1356 subrepos = []
1357 1357 if '.hgsub' in self.dirstate:
1358 1358 subrepos = ctx2.substate.keys()
1359 1359 s = self.dirstate.status(match, subrepos, listignored,
1360 1360 listclean, listunknown)
1361 1361 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1362 1362
1363 1363 # check for any possibly clean files
1364 1364 if parentworking and cmp:
1365 1365 fixup = []
1366 1366 # do a full compare of any files that might have changed
1367 1367 for f in sorted(cmp):
1368 1368 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1369 1369 or ctx1[f].cmp(ctx2[f])):
1370 1370 modified.append(f)
1371 1371 else:
1372 1372 fixup.append(f)
1373 1373
1374 1374 # update dirstate for files that are actually clean
1375 1375 if fixup:
1376 1376 if listclean:
1377 1377 clean += fixup
1378 1378
1379 1379 try:
1380 1380 # updating the dirstate is optional
1381 1381 # so we don't wait on the lock
1382 1382 wlock = self.wlock(False)
1383 1383 try:
1384 1384 for f in fixup:
1385 1385 self.dirstate.normal(f)
1386 1386 finally:
1387 1387 wlock.release()
1388 1388 except error.LockError:
1389 1389 pass
1390 1390
1391 1391 if not parentworking:
1392 1392 mf1 = mfmatches(ctx1)
1393 1393 if working:
1394 1394 # we are comparing working dir against non-parent
1395 1395 # generate a pseudo-manifest for the working dir
1396 1396 mf2 = mfmatches(self['.'])
1397 1397 for f in cmp + modified + added:
1398 1398 mf2[f] = None
1399 1399 mf2.set(f, ctx2.flags(f))
1400 1400 for f in removed:
1401 1401 if f in mf2:
1402 1402 del mf2[f]
1403 1403 else:
1404 1404 # we are comparing two revisions
1405 1405 deleted, unknown, ignored = [], [], []
1406 1406 mf2 = mfmatches(ctx2)
1407 1407
1408 1408 modified, added, clean = [], [], []
1409 1409 for fn in mf2:
1410 1410 if fn in mf1:
1411 1411 if (fn not in deleted and
1412 1412 (mf1.flags(fn) != mf2.flags(fn) or
1413 1413 (mf1[fn] != mf2[fn] and
1414 1414 (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
1415 1415 modified.append(fn)
1416 1416 elif listclean:
1417 1417 clean.append(fn)
1418 1418 del mf1[fn]
1419 1419 elif fn not in deleted:
1420 1420 added.append(fn)
1421 1421 removed = mf1.keys()
1422 1422
1423 1423 if working and modified and not self.dirstate._checklink:
1424 1424 # Symlink placeholders may get non-symlink-like contents
1425 1425 # via user error or dereferencing by NFS or Samba servers,
1426 1426 # so we filter out any placeholders that don't look like a
1427 1427 # symlink
1428 1428 sane = []
1429 1429 for f in modified:
1430 1430 if ctx2.flags(f) == 'l':
1431 1431 d = ctx2[f].data()
1432 1432 if len(d) >= 1024 or '\n' in d or util.binary(d):
1433 1433 self.ui.debug('ignoring suspect symlink placeholder'
1434 1434 ' "%s"\n' % f)
1435 1435 continue
1436 1436 sane.append(f)
1437 1437 modified = sane
1438 1438
1439 1439 r = modified, added, removed, deleted, unknown, ignored, clean
1440 1440
1441 1441 if listsubrepos:
1442 1442 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1443 1443 if working:
1444 1444 rev2 = None
1445 1445 else:
1446 1446 rev2 = ctx2.substate[subpath][1]
1447 1447 try:
1448 1448 submatch = matchmod.narrowmatcher(subpath, match)
1449 1449 s = sub.status(rev2, match=submatch, ignored=listignored,
1450 1450 clean=listclean, unknown=listunknown,
1451 1451 listsubrepos=True)
1452 1452 for rfiles, sfiles in zip(r, s):
1453 1453 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1454 1454 except error.LookupError:
1455 1455 self.ui.status(_("skipping missing subrepository: %s\n")
1456 1456 % subpath)
1457 1457
1458 1458 for l in r:
1459 1459 l.sort()
1460 1460 return r
1461 1461
1462 1462 def heads(self, start=None):
1463 1463 heads = self.changelog.heads(start)
1464 1464 # sort the output in rev descending order
1465 1465 return sorted(heads, key=self.changelog.rev, reverse=True)
1466 1466
1467 1467 def branchheads(self, branch=None, start=None, closed=False):
1468 1468 '''return a (possibly filtered) list of heads for the given branch
1469 1469
1470 1470 Heads are returned in topological order, from newest to oldest.
1471 1471 If branch is None, use the dirstate branch.
1472 1472 If start is not None, return only heads reachable from start.
1473 1473 If closed is True, return heads that are marked as closed as well.
1474 1474 '''
1475 1475 if branch is None:
1476 1476 branch = self[None].branch()
1477 1477 branches = self.branchmap()
1478 1478 if branch not in branches:
1479 1479 return []
1480 1480 # the cache returns heads ordered lowest to highest
1481 1481 bheads = list(reversed(branches[branch]))
1482 1482 if start is not None:
1483 1483 # filter out the heads that cannot be reached from startrev
1484 1484 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1485 1485 bheads = [h for h in bheads if h in fbheads]
1486 1486 if not closed:
1487 1487 bheads = [h for h in bheads if
1488 1488 ('close' not in self.changelog.read(h)[5])]
1489 1489 return bheads
1490 1490
1491 1491 def branches(self, nodes):
1492 1492 if not nodes:
1493 1493 nodes = [self.changelog.tip()]
1494 1494 b = []
1495 1495 for n in nodes:
1496 1496 t = n
1497 1497 while True:
1498 1498 p = self.changelog.parents(n)
1499 1499 if p[1] != nullid or p[0] == nullid:
1500 1500 b.append((t, n, p[0], p[1]))
1501 1501 break
1502 1502 n = p[0]
1503 1503 return b
1504 1504
1505 1505 def between(self, pairs):
1506 1506 r = []
1507 1507
1508 1508 for top, bottom in pairs:
1509 1509 n, l, i = top, [], 0
1510 1510 f = 1
1511 1511
1512 1512 while n != bottom and n != nullid:
1513 1513 p = self.changelog.parents(n)[0]
1514 1514 if i == f:
1515 1515 l.append(n)
1516 1516 f = f * 2
1517 1517 n = p
1518 1518 i += 1
1519 1519
1520 1520 r.append(l)
1521 1521
1522 1522 return r
1523 1523
1524 1524 def pull(self, remote, heads=None, force=False):
1525 1525 lock = self.lock()
1526 1526 try:
1527 1527 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1528 1528 force=force)
1529 1529 common, fetch, rheads = tmp
1530 1530 if not fetch:
1531 1531 self.ui.status(_("no changes found\n"))
1532 1532 added = []
1533 1533 result = 0
1534 1534 else:
1535 1535 if heads is None and list(common) == [nullid]:
1536 1536 self.ui.status(_("requesting all changes\n"))
1537 1537 elif heads is None and remote.capable('changegroupsubset'):
1538 1538 # issue1320, avoid a race if remote changed after discovery
1539 1539 heads = rheads
1540 1540
1541 1541 if remote.capable('getbundle'):
1542 1542 cg = remote.getbundle('pull', common=common,
1543 1543 heads=heads or rheads)
1544 1544 elif heads is None:
1545 1545 cg = remote.changegroup(fetch, 'pull')
1546 1546 elif not remote.capable('changegroupsubset'):
1547 1547 raise util.Abort(_("partial pull cannot be done because "
1548 1548 "other repository doesn't support "
1549 1549 "changegroupsubset."))
1550 1550 else:
1551 1551 cg = remote.changegroupsubset(fetch, heads, 'pull')
1552 1552 clstart = len(self.changelog)
1553 1553 result = self.addchangegroup(cg, 'pull', remote.url())
1554 1554 clend = len(self.changelog)
1555 1555 added = [self.changelog.node(r) for r in xrange(clstart, clend)]
1556 1556
1557 1557
1558 1558 # Get remote phases data from remote
1559 1559 remotephases = remote.listkeys('phases')
1560 1560 publishing = bool(remotephases.get('publishing', False))
1561 1561 if remotephases and not publishing:
1562 1562 # remote is new and unpublishing
1563 1563 subset = common + added
1564 1564 pheads, _dr = phases.analyzeremotephases(self, subset,
1565 1565 remotephases)
1566 1566 phases.advanceboundary(self, phases.public, pheads)
1567 1567 phases.advanceboundary(self, phases.draft, common + added)
1568 1568 else:
1569 1569 # Remote is old or publishing all common changesets
1570 1570 # should be seen as public
1571 1571 phases.advanceboundary(self, phases.public, common + added)
1572 1572 finally:
1573 1573 lock.release()
1574 1574
1575 1575 return result
1576 1576
1577 1577 def checkpush(self, force, revs):
1578 1578 """Extensions can override this function if additional checks have
1579 1579 to be performed before pushing, or call it if they override push
1580 1580 command.
1581 1581 """
1582 1582 pass
1583 1583
1584 1584 def push(self, remote, force=False, revs=None, newbranch=False):
1585 1585 '''Push outgoing changesets (limited by revs) from the current
1586 1586 repository to remote. Return an integer:
1587 1587 - 0 means HTTP error *or* nothing to push
1588 1588 - 1 means we pushed and remote head count is unchanged *or*
1589 1589 we have outgoing changesets but refused to push
1590 1590 - other values as described by addchangegroup()
1591 1591 '''
1592 1592 # there are two ways to push to remote repo:
1593 1593 #
1594 1594 # addchangegroup assumes local user can lock remote
1595 1595 # repo (local filesystem, old ssh servers).
1596 1596 #
1597 1597 # unbundle assumes local user cannot lock remote repo (new ssh
1598 1598 # servers, http servers).
1599 1599
1600 1600 self.checkpush(force, revs)
1601 1601 lock = None
1602 1602 unbundle = remote.capable('unbundle')
1603 1603 if not unbundle:
1604 1604 lock = remote.lock()
1605 1605 try:
1606 1606 # get local lock as we might write phase data
1607 1607 locallock = self.lock()
1608 1608 try:
1609 cg, remote_heads, fut = discovery.prepush(self, remote, force,
1610 revs, newbranch)
1611 ret = remote_heads
1612 # create a callback for addchangegroup.
1613 # If will be used branch of the conditionnal too.
1614 if cg is not None:
1609 # discovery
1610 fci = discovery.findcommonincoming
1611 commoninc = fci(self, remote, force=force)
1612 common, inc, remoteheads = commoninc
1613 fco = discovery.findcommonoutgoing
1614 outgoing = fco(self, remote, onlyheads=revs,
1615 commoninc=commoninc, force=force)
1616
1617
1618 if not outgoing.missing:
1619 # nothing to push
1620 if outgoing.excluded:
1621 msg = "no changes to push but %i secret changesets\n"
1622 self.ui.status(_(msg) % len(outgoing.excluded))
1623 else:
1624 self.ui.status(_("no changes found\n"))
1625 fut = outgoing.common
1626 ret = 1
1627 else:
1628 # something to push
1629 if not force:
1630 discovery.checkheads(self, remote, outgoing,
1631 remoteheads, newbranch)
1632
1633 # create a changegroup from local
1634 if revs is None and not outgoing.excluded:
1635 # push everything,
1636 # use the fast path, no race possible on push
1637 cg = self._changegroup(outgoing.missing, 'push')
1638 else:
1639 cg = self.getlocalbundle('push', outgoing)
1640
1641 # apply changegroup to remote
1615 1642 if unbundle:
1616 1643 # local repo finds heads on server, finds out what
1617 1644 # revs it must push. once revs transferred, if server
1618 1645 # finds it has different heads (someone else won
1619 1646 # commit/push race), server aborts.
1620 1647 if force:
1621 remote_heads = ['force']
1648 remoteheads = ['force']
1622 1649 # ssh: return remote's addchangegroup()
1623 1650 # http: return remote's addchangegroup() or 0 for error
1624 ret = remote.unbundle(cg, remote_heads, 'push')
1651 ret = remote.unbundle(cg, remoteheads, 'push')
1625 1652 else:
1626 1653 # we return an integer indicating remote head count change
1627 1654 ret = remote.addchangegroup(cg, 'push', self.url())
1628 1655
1656 # compute what should be the now common
1657 #
1658 # XXX If push failed we should use strict common and not
1659 # future to avoid pushing phase data on unknown changeset.
1660 # This is to done later.
1661 fut = outgoing.commonheads + outgoing.missingheads
1629 1662 # even when we don't push, exchanging phase data is useful
1630 1663 remotephases = remote.listkeys('phases')
1631 1664 if not remotephases: # old server or public only repo
1632 1665 phases.advanceboundary(self, phases.public, fut)
1633 1666 # don't push any phase data as there is nothing to push
1634 1667 else:
1635 1668 ana = phases.analyzeremotephases(self, fut, remotephases)
1636 1669 pheads, droots = ana
1637 1670 ### Apply remote phase on local
1638 1671 if remotephases.get('publishing', False):
1639 1672 phases.advanceboundary(self, phases.public, fut)
1640 1673 else: # publish = False
1641 1674 phases.advanceboundary(self, phases.public, pheads)
1642 1675 phases.advanceboundary(self, phases.draft, fut)
1643 1676 ### Apply local phase on remote
1644 #
1645 # XXX If push failed we should use strict common and not
1646 # future to avoid pushing phase data on unknown changeset.
1647 # This is to done later.
1648 1677
1649 1678 # Get the list of all revs draft on remote by public here.
1650 1679 # XXX Beware that revset break if droots is not strictly
1651 1680 # XXX root we may want to ensure it is but it is costly
1652 1681 outdated = self.set('heads((%ln::%ln) and public())',
1653 1682 droots, fut)
1654 1683 for newremotehead in outdated:
1655 1684 r = remote.pushkey('phases',
1656 1685 newremotehead.hex(),
1657 1686 str(phases.draft),
1658 1687 str(phases.public))
1659 1688 if not r:
1660 1689 self.ui.warn(_('updating %s to public failed!\n')
1661 1690 % newremotehead)
1662 1691 finally:
1663 1692 locallock.release()
1664 1693 finally:
1665 1694 if lock is not None:
1666 1695 lock.release()
1667 1696
1668 1697 self.ui.debug("checking for updated bookmarks\n")
1669 1698 rb = remote.listkeys('bookmarks')
1670 1699 for k in rb.keys():
1671 1700 if k in self._bookmarks:
1672 1701 nr, nl = rb[k], hex(self._bookmarks[k])
1673 1702 if nr in self:
1674 1703 cr = self[nr]
1675 1704 cl = self[nl]
1676 1705 if cl in cr.descendants():
1677 1706 r = remote.pushkey('bookmarks', k, nr, nl)
1678 1707 if r:
1679 1708 self.ui.status(_("updating bookmark %s\n") % k)
1680 1709 else:
1681 1710 self.ui.warn(_('updating bookmark %s'
1682 1711 ' failed!\n') % k)
1683 1712
1684 1713 return ret
1685 1714
1686 1715 def changegroupinfo(self, nodes, source):
1687 1716 if self.ui.verbose or source == 'bundle':
1688 1717 self.ui.status(_("%d changesets found\n") % len(nodes))
1689 1718 if self.ui.debugflag:
1690 1719 self.ui.debug("list of changesets:\n")
1691 1720 for node in nodes:
1692 1721 self.ui.debug("%s\n" % hex(node))
1693 1722
1694 1723 def changegroupsubset(self, bases, heads, source):
1695 1724 """Compute a changegroup consisting of all the nodes that are
1696 1725 descendants of any of the bases and ancestors of any of the heads.
1697 1726 Return a chunkbuffer object whose read() method will return
1698 1727 successive changegroup chunks.
1699 1728
1700 1729 It is fairly complex as determining which filenodes and which
1701 1730 manifest nodes need to be included for the changeset to be complete
1702 1731 is non-trivial.
1703 1732
1704 1733 Another wrinkle is doing the reverse, figuring out which changeset in
1705 1734 the changegroup a particular filenode or manifestnode belongs to.
1706 1735 """
1707 1736 cl = self.changelog
1708 1737 if not bases:
1709 1738 bases = [nullid]
1710 1739 csets, bases, heads = cl.nodesbetween(bases, heads)
1711 1740 # We assume that all ancestors of bases are known
1712 1741 common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
1713 1742 return self._changegroupsubset(common, csets, heads, source)
1714 1743
1715 1744 def getlocalbundle(self, source, outgoing):
1716 1745 """Like getbundle, but taking a discovery.outgoing as an argument.
1717 1746
1718 1747 This is only implemented for local repos and reuses potentially
1719 1748 precomputed sets in outgoing."""
1720 1749 if not outgoing.missing:
1721 1750 return None
1722 1751 return self._changegroupsubset(outgoing.common,
1723 1752 outgoing.missing,
1724 1753 outgoing.missingheads,
1725 1754 source)
1726 1755
1727 1756 def getbundle(self, source, heads=None, common=None):
1728 1757 """Like changegroupsubset, but returns the set difference between the
1729 1758 ancestors of heads and the ancestors common.
1730 1759
1731 1760 If heads is None, use the local heads. If common is None, use [nullid].
1732 1761
1733 1762 The nodes in common might not all be known locally due to the way the
1734 1763 current discovery protocol works.
1735 1764 """
1736 1765 cl = self.changelog
1737 1766 if common:
1738 1767 nm = cl.nodemap
1739 1768 common = [n for n in common if n in nm]
1740 1769 else:
1741 1770 common = [nullid]
1742 1771 if not heads:
1743 1772 heads = cl.heads()
1744 1773 return self.getlocalbundle(source,
1745 1774 discovery.outgoing(cl, common, heads))
1746 1775
1747 1776 def _changegroupsubset(self, commonrevs, csets, heads, source):
1748 1777
1749 1778 cl = self.changelog
1750 1779 mf = self.manifest
1751 1780 mfs = {} # needed manifests
1752 1781 fnodes = {} # needed file nodes
1753 1782 changedfiles = set()
1754 1783 fstate = ['', {}]
1755 1784 count = [0]
1756 1785
1757 1786 # can we go through the fast path ?
1758 1787 heads.sort()
1759 1788 if heads == sorted(self.heads()):
1760 1789 return self._changegroup(csets, source)
1761 1790
1762 1791 # slow path
1763 1792 self.hook('preoutgoing', throw=True, source=source)
1764 1793 self.changegroupinfo(csets, source)
1765 1794
1766 1795 # filter any nodes that claim to be part of the known set
1767 1796 def prune(revlog, missing):
1768 1797 return [n for n in missing
1769 1798 if revlog.linkrev(revlog.rev(n)) not in commonrevs]
1770 1799
1771 1800 def lookup(revlog, x):
1772 1801 if revlog == cl:
1773 1802 c = cl.read(x)
1774 1803 changedfiles.update(c[3])
1775 1804 mfs.setdefault(c[0], x)
1776 1805 count[0] += 1
1777 1806 self.ui.progress(_('bundling'), count[0],
1778 1807 unit=_('changesets'), total=len(csets))
1779 1808 return x
1780 1809 elif revlog == mf:
1781 1810 clnode = mfs[x]
1782 1811 mdata = mf.readfast(x)
1783 1812 for f in changedfiles:
1784 1813 if f in mdata:
1785 1814 fnodes.setdefault(f, {}).setdefault(mdata[f], clnode)
1786 1815 count[0] += 1
1787 1816 self.ui.progress(_('bundling'), count[0],
1788 1817 unit=_('manifests'), total=len(mfs))
1789 1818 return mfs[x]
1790 1819 else:
1791 1820 self.ui.progress(
1792 1821 _('bundling'), count[0], item=fstate[0],
1793 1822 unit=_('files'), total=len(changedfiles))
1794 1823 return fstate[1][x]
1795 1824
1796 1825 bundler = changegroup.bundle10(lookup)
1797 1826 reorder = self.ui.config('bundle', 'reorder', 'auto')
1798 1827 if reorder == 'auto':
1799 1828 reorder = None
1800 1829 else:
1801 1830 reorder = util.parsebool(reorder)
1802 1831
1803 1832 def gengroup():
1804 1833 # Create a changenode group generator that will call our functions
1805 1834 # back to lookup the owning changenode and collect information.
1806 1835 for chunk in cl.group(csets, bundler, reorder=reorder):
1807 1836 yield chunk
1808 1837 self.ui.progress(_('bundling'), None)
1809 1838
1810 1839 # Create a generator for the manifestnodes that calls our lookup
1811 1840 # and data collection functions back.
1812 1841 count[0] = 0
1813 1842 for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
1814 1843 yield chunk
1815 1844 self.ui.progress(_('bundling'), None)
1816 1845
1817 1846 mfs.clear()
1818 1847
1819 1848 # Go through all our files in order sorted by name.
1820 1849 count[0] = 0
1821 1850 for fname in sorted(changedfiles):
1822 1851 filerevlog = self.file(fname)
1823 1852 if not len(filerevlog):
1824 1853 raise util.Abort(_("empty or missing revlog for %s") % fname)
1825 1854 fstate[0] = fname
1826 1855 fstate[1] = fnodes.pop(fname, {})
1827 1856
1828 1857 nodelist = prune(filerevlog, fstate[1])
1829 1858 if nodelist:
1830 1859 count[0] += 1
1831 1860 yield bundler.fileheader(fname)
1832 1861 for chunk in filerevlog.group(nodelist, bundler, reorder):
1833 1862 yield chunk
1834 1863
1835 1864 # Signal that no more groups are left.
1836 1865 yield bundler.close()
1837 1866 self.ui.progress(_('bundling'), None)
1838 1867
1839 1868 if csets:
1840 1869 self.hook('outgoing', node=hex(csets[0]), source=source)
1841 1870
1842 1871 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1843 1872
1844 1873 def changegroup(self, basenodes, source):
1845 1874 # to avoid a race we use changegroupsubset() (issue1320)
1846 1875 return self.changegroupsubset(basenodes, self.heads(), source)
1847 1876
1848 1877 def _changegroup(self, nodes, source):
1849 1878 """Compute the changegroup of all nodes that we have that a recipient
1850 1879 doesn't. Return a chunkbuffer object whose read() method will return
1851 1880 successive changegroup chunks.
1852 1881
1853 1882 This is much easier than the previous function as we can assume that
1854 1883 the recipient has any changenode we aren't sending them.
1855 1884
1856 1885 nodes is the set of nodes to send"""
1857 1886
1858 1887 cl = self.changelog
1859 1888 mf = self.manifest
1860 1889 mfs = {}
1861 1890 changedfiles = set()
1862 1891 fstate = ['']
1863 1892 count = [0]
1864 1893
1865 1894 self.hook('preoutgoing', throw=True, source=source)
1866 1895 self.changegroupinfo(nodes, source)
1867 1896
1868 1897 revset = set([cl.rev(n) for n in nodes])
1869 1898
1870 1899 def gennodelst(log):
1871 1900 return [log.node(r) for r in log if log.linkrev(r) in revset]
1872 1901
1873 1902 def lookup(revlog, x):
1874 1903 if revlog == cl:
1875 1904 c = cl.read(x)
1876 1905 changedfiles.update(c[3])
1877 1906 mfs.setdefault(c[0], x)
1878 1907 count[0] += 1
1879 1908 self.ui.progress(_('bundling'), count[0],
1880 1909 unit=_('changesets'), total=len(nodes))
1881 1910 return x
1882 1911 elif revlog == mf:
1883 1912 count[0] += 1
1884 1913 self.ui.progress(_('bundling'), count[0],
1885 1914 unit=_('manifests'), total=len(mfs))
1886 1915 return cl.node(revlog.linkrev(revlog.rev(x)))
1887 1916 else:
1888 1917 self.ui.progress(
1889 1918 _('bundling'), count[0], item=fstate[0],
1890 1919 total=len(changedfiles), unit=_('files'))
1891 1920 return cl.node(revlog.linkrev(revlog.rev(x)))
1892 1921
1893 1922 bundler = changegroup.bundle10(lookup)
1894 1923 reorder = self.ui.config('bundle', 'reorder', 'auto')
1895 1924 if reorder == 'auto':
1896 1925 reorder = None
1897 1926 else:
1898 1927 reorder = util.parsebool(reorder)
1899 1928
1900 1929 def gengroup():
1901 1930 '''yield a sequence of changegroup chunks (strings)'''
1902 1931 # construct a list of all changed files
1903 1932
1904 1933 for chunk in cl.group(nodes, bundler, reorder=reorder):
1905 1934 yield chunk
1906 1935 self.ui.progress(_('bundling'), None)
1907 1936
1908 1937 count[0] = 0
1909 1938 for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
1910 1939 yield chunk
1911 1940 self.ui.progress(_('bundling'), None)
1912 1941
1913 1942 count[0] = 0
1914 1943 for fname in sorted(changedfiles):
1915 1944 filerevlog = self.file(fname)
1916 1945 if not len(filerevlog):
1917 1946 raise util.Abort(_("empty or missing revlog for %s") % fname)
1918 1947 fstate[0] = fname
1919 1948 nodelist = gennodelst(filerevlog)
1920 1949 if nodelist:
1921 1950 count[0] += 1
1922 1951 yield bundler.fileheader(fname)
1923 1952 for chunk in filerevlog.group(nodelist, bundler, reorder):
1924 1953 yield chunk
1925 1954 yield bundler.close()
1926 1955 self.ui.progress(_('bundling'), None)
1927 1956
1928 1957 if nodes:
1929 1958 self.hook('outgoing', node=hex(nodes[0]), source=source)
1930 1959
1931 1960 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1932 1961
1933 1962 def addchangegroup(self, source, srctype, url, emptyok=False):
1934 1963 """Add the changegroup returned by source.read() to this repo.
1935 1964 srctype is a string like 'push', 'pull', or 'unbundle'. url is
1936 1965 the URL of the repo where this changegroup is coming from.
1937 1966
1938 1967 Return an integer summarizing the change to this repo:
1939 1968 - nothing changed or no source: 0
1940 1969 - more heads than before: 1+added heads (2..n)
1941 1970 - fewer heads than before: -1-removed heads (-2..-n)
1942 1971 - number of heads stays the same: 1
1943 1972 """
1944 1973 def csmap(x):
1945 1974 self.ui.debug("add changeset %s\n" % short(x))
1946 1975 return len(cl)
1947 1976
1948 1977 def revmap(x):
1949 1978 return cl.rev(x)
1950 1979
1951 1980 if not source:
1952 1981 return 0
1953 1982
1954 1983 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1955 1984
1956 1985 changesets = files = revisions = 0
1957 1986 efiles = set()
1958 1987
1959 1988 # write changelog data to temp files so concurrent readers will not see
1960 1989 # inconsistent view
1961 1990 cl = self.changelog
1962 1991 cl.delayupdate()
1963 1992 oldheads = cl.heads()
1964 1993
1965 1994 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
1966 1995 try:
1967 1996 trp = weakref.proxy(tr)
1968 1997 # pull off the changeset group
1969 1998 self.ui.status(_("adding changesets\n"))
1970 1999 clstart = len(cl)
1971 2000 class prog(object):
1972 2001 step = _('changesets')
1973 2002 count = 1
1974 2003 ui = self.ui
1975 2004 total = None
1976 2005 def __call__(self):
1977 2006 self.ui.progress(self.step, self.count, unit=_('chunks'),
1978 2007 total=self.total)
1979 2008 self.count += 1
1980 2009 pr = prog()
1981 2010 source.callback = pr
1982 2011
1983 2012 source.changelogheader()
1984 2013 srccontent = cl.addgroup(source, csmap, trp)
1985 2014 if not (srccontent or emptyok):
1986 2015 raise util.Abort(_("received changelog group is empty"))
1987 2016 clend = len(cl)
1988 2017 changesets = clend - clstart
1989 2018 for c in xrange(clstart, clend):
1990 2019 efiles.update(self[c].files())
1991 2020 efiles = len(efiles)
1992 2021 self.ui.progress(_('changesets'), None)
1993 2022
1994 2023 # pull off the manifest group
1995 2024 self.ui.status(_("adding manifests\n"))
1996 2025 pr.step = _('manifests')
1997 2026 pr.count = 1
1998 2027 pr.total = changesets # manifests <= changesets
1999 2028 # no need to check for empty manifest group here:
2000 2029 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2001 2030 # no new manifest will be created and the manifest group will
2002 2031 # be empty during the pull
2003 2032 source.manifestheader()
2004 2033 self.manifest.addgroup(source, revmap, trp)
2005 2034 self.ui.progress(_('manifests'), None)
2006 2035
2007 2036 needfiles = {}
2008 2037 if self.ui.configbool('server', 'validate', default=False):
2009 2038 # validate incoming csets have their manifests
2010 2039 for cset in xrange(clstart, clend):
2011 2040 mfest = self.changelog.read(self.changelog.node(cset))[0]
2012 2041 mfest = self.manifest.readdelta(mfest)
2013 2042 # store file nodes we must see
2014 2043 for f, n in mfest.iteritems():
2015 2044 needfiles.setdefault(f, set()).add(n)
2016 2045
2017 2046 # process the files
2018 2047 self.ui.status(_("adding file changes\n"))
2019 2048 pr.step = _('files')
2020 2049 pr.count = 1
2021 2050 pr.total = efiles
2022 2051 source.callback = None
2023 2052
2024 2053 while True:
2025 2054 chunkdata = source.filelogheader()
2026 2055 if not chunkdata:
2027 2056 break
2028 2057 f = chunkdata["filename"]
2029 2058 self.ui.debug("adding %s revisions\n" % f)
2030 2059 pr()
2031 2060 fl = self.file(f)
2032 2061 o = len(fl)
2033 2062 if not fl.addgroup(source, revmap, trp):
2034 2063 raise util.Abort(_("received file revlog group is empty"))
2035 2064 revisions += len(fl) - o
2036 2065 files += 1
2037 2066 if f in needfiles:
2038 2067 needs = needfiles[f]
2039 2068 for new in xrange(o, len(fl)):
2040 2069 n = fl.node(new)
2041 2070 if n in needs:
2042 2071 needs.remove(n)
2043 2072 if not needs:
2044 2073 del needfiles[f]
2045 2074 self.ui.progress(_('files'), None)
2046 2075
2047 2076 for f, needs in needfiles.iteritems():
2048 2077 fl = self.file(f)
2049 2078 for n in needs:
2050 2079 try:
2051 2080 fl.rev(n)
2052 2081 except error.LookupError:
2053 2082 raise util.Abort(
2054 2083 _('missing file data for %s:%s - run hg verify') %
2055 2084 (f, hex(n)))
2056 2085
2057 2086 dh = 0
2058 2087 if oldheads:
2059 2088 heads = cl.heads()
2060 2089 dh = len(heads) - len(oldheads)
2061 2090 for h in heads:
2062 2091 if h not in oldheads and 'close' in self[h].extra():
2063 2092 dh -= 1
2064 2093 htext = ""
2065 2094 if dh:
2066 2095 htext = _(" (%+d heads)") % dh
2067 2096
2068 2097 self.ui.status(_("added %d changesets"
2069 2098 " with %d changes to %d files%s\n")
2070 2099 % (changesets, revisions, files, htext))
2071 2100
2072 2101 if changesets > 0:
2073 2102 p = lambda: cl.writepending() and self.root or ""
2074 2103 self.hook('pretxnchangegroup', throw=True,
2075 2104 node=hex(cl.node(clstart)), source=srctype,
2076 2105 url=url, pending=p)
2077 2106
2078 2107 added = [cl.node(r) for r in xrange(clstart, clend)]
2079 2108 publishing = self.ui.configbool('phases', 'publish', True)
2080 2109 if srctype == 'push':
2081 2110 # Old server can not push the boundary themself.
2082 2111 # New server won't push the boundary if changeset already
2083 2112 # existed locally as secrete
2084 2113 #
2085 2114 # We should not use added here but the list of all change in
2086 2115 # the bundle
2087 2116 if publishing:
2088 2117 phases.advanceboundary(self, phases.public, srccontent)
2089 2118 else:
2090 2119 phases.advanceboundary(self, phases.draft, srccontent)
2091 2120 phases.retractboundary(self, phases.draft, added)
2092 2121 elif srctype != 'strip':
2093 2122 # publishing only alter behavior during push
2094 2123 #
2095 2124 # strip should not touch boundary at all
2096 2125 phases.retractboundary(self, phases.draft, added)
2097 2126
2098 2127 # make changelog see real files again
2099 2128 cl.finalize(trp)
2100 2129
2101 2130 tr.close()
2102 2131
2103 2132 if changesets > 0:
2104 2133 def runhooks():
2105 2134 # forcefully update the on-disk branch cache
2106 2135 self.ui.debug("updating the branch cache\n")
2107 2136 self.updatebranchcache()
2108 2137 self.hook("changegroup", node=hex(cl.node(clstart)),
2109 2138 source=srctype, url=url)
2110 2139
2111 2140 for n in added:
2112 2141 self.hook("incoming", node=hex(n), source=srctype,
2113 2142 url=url)
2114 2143 self._afterlock(runhooks)
2115 2144
2116 2145 finally:
2117 2146 tr.release()
2118 2147 # never return 0 here:
2119 2148 if dh < 0:
2120 2149 return dh - 1
2121 2150 else:
2122 2151 return dh + 1
2123 2152
2124 2153 def stream_in(self, remote, requirements):
2125 2154 lock = self.lock()
2126 2155 try:
2127 2156 fp = remote.stream_out()
2128 2157 l = fp.readline()
2129 2158 try:
2130 2159 resp = int(l)
2131 2160 except ValueError:
2132 2161 raise error.ResponseError(
2133 2162 _('Unexpected response from remote server:'), l)
2134 2163 if resp == 1:
2135 2164 raise util.Abort(_('operation forbidden by server'))
2136 2165 elif resp == 2:
2137 2166 raise util.Abort(_('locking the remote repository failed'))
2138 2167 elif resp != 0:
2139 2168 raise util.Abort(_('the server sent an unknown error code'))
2140 2169 self.ui.status(_('streaming all changes\n'))
2141 2170 l = fp.readline()
2142 2171 try:
2143 2172 total_files, total_bytes = map(int, l.split(' ', 1))
2144 2173 except (ValueError, TypeError):
2145 2174 raise error.ResponseError(
2146 2175 _('Unexpected response from remote server:'), l)
2147 2176 self.ui.status(_('%d files to transfer, %s of data\n') %
2148 2177 (total_files, util.bytecount(total_bytes)))
2149 2178 start = time.time()
2150 2179 for i in xrange(total_files):
2151 2180 # XXX doesn't support '\n' or '\r' in filenames
2152 2181 l = fp.readline()
2153 2182 try:
2154 2183 name, size = l.split('\0', 1)
2155 2184 size = int(size)
2156 2185 except (ValueError, TypeError):
2157 2186 raise error.ResponseError(
2158 2187 _('Unexpected response from remote server:'), l)
2159 2188 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
2160 2189 # for backwards compat, name was partially encoded
2161 2190 ofp = self.sopener(store.decodedir(name), 'w')
2162 2191 for chunk in util.filechunkiter(fp, limit=size):
2163 2192 ofp.write(chunk)
2164 2193 ofp.close()
2165 2194 elapsed = time.time() - start
2166 2195 if elapsed <= 0:
2167 2196 elapsed = 0.001
2168 2197 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2169 2198 (util.bytecount(total_bytes), elapsed,
2170 2199 util.bytecount(total_bytes / elapsed)))
2171 2200
2172 2201 # new requirements = old non-format requirements + new format-related
2173 2202 # requirements from the streamed-in repository
2174 2203 requirements.update(set(self.requirements) - self.supportedformats)
2175 2204 self._applyrequirements(requirements)
2176 2205 self._writerequirements()
2177 2206
2178 2207 self.invalidate()
2179 2208 return len(self.heads()) + 1
2180 2209 finally:
2181 2210 lock.release()
2182 2211
2183 2212 def clone(self, remote, heads=[], stream=False):
2184 2213 '''clone remote repository.
2185 2214
2186 2215 keyword arguments:
2187 2216 heads: list of revs to clone (forces use of pull)
2188 2217 stream: use streaming clone if possible'''
2189 2218
2190 2219 # now, all clients that can request uncompressed clones can
2191 2220 # read repo formats supported by all servers that can serve
2192 2221 # them.
2193 2222
2194 2223 # if revlog format changes, client will have to check version
2195 2224 # and format flags on "stream" capability, and use
2196 2225 # uncompressed only if compatible.
2197 2226
2198 2227 if stream and not heads:
2199 2228 # 'stream' means remote revlog format is revlogv1 only
2200 2229 if remote.capable('stream'):
2201 2230 return self.stream_in(remote, set(('revlogv1',)))
2202 2231 # otherwise, 'streamreqs' contains the remote revlog format
2203 2232 streamreqs = remote.capable('streamreqs')
2204 2233 if streamreqs:
2205 2234 streamreqs = set(streamreqs.split(','))
2206 2235 # if we support it, stream in and adjust our requirements
2207 2236 if not streamreqs - self.supportedformats:
2208 2237 return self.stream_in(remote, streamreqs)
2209 2238 return self.pull(remote, heads)
2210 2239
2211 2240 def pushkey(self, namespace, key, old, new):
2212 2241 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2213 2242 old=old, new=new)
2214 2243 ret = pushkey.push(self, namespace, key, old, new)
2215 2244 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2216 2245 ret=ret)
2217 2246 return ret
2218 2247
2219 2248 def listkeys(self, namespace):
2220 2249 self.hook('prelistkeys', throw=True, namespace=namespace)
2221 2250 values = pushkey.list(self, namespace)
2222 2251 self.hook('listkeys', namespace=namespace, values=values)
2223 2252 return values
2224 2253
2225 2254 def debugwireargs(self, one, two, three=None, four=None, five=None):
2226 2255 '''used to test argument passing over the wire'''
2227 2256 return "%s %s %s %s %s" % (one, two, three, four, five)
2228 2257
2229 2258 def savecommitmessage(self, text):
2230 2259 fp = self.opener('last-message.txt', 'wb')
2231 2260 try:
2232 2261 fp.write(text)
2233 2262 finally:
2234 2263 fp.close()
2235 2264 return self.pathto(fp.name[len(self.root)+1:])
2236 2265
2237 2266 # used to avoid circular references so destructors work
2238 2267 def aftertrans(files):
2239 2268 renamefiles = [tuple(t) for t in files]
2240 2269 def a():
2241 2270 for src, dest in renamefiles:
2242 2271 util.rename(src, dest)
2243 2272 return a
2244 2273
2245 2274 def undoname(fn):
2246 2275 base, name = os.path.split(fn)
2247 2276 assert name.startswith('journal')
2248 2277 return os.path.join(base, name.replace('journal', 'undo', 1))
2249 2278
2250 2279 def instance(ui, path, create):
2251 2280 return localrepository(ui, util.urllocalpath(path), create)
2252 2281
2253 2282 def islocal(path):
2254 2283 return True
@@ -1,663 +1,660 b''
1 1 $ check_code="$TESTDIR"/../contrib/check-code.py
2 2 $ cd "$TESTDIR"/..
3 3
4 4 $ "$check_code" `hg manifest` || echo 'FAILURE IS NOT AN OPTION!!!'
5 5
6 6 $ "$check_code" --warnings --nolineno --per-file=0 `hg manifest`
7 7 contrib/check-code.py:0:
8 8 > # (r'^\s+[^_ \n][^_. \n]+_[^_\n]+\s*=', "don't use underbars in identifiers"),
9 9 warning: line over 80 characters
10 10 contrib/perf.py:0:
11 11 > except:
12 12 warning: naked except clause
13 13 contrib/perf.py:0:
14 14 > #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False, False))))
15 15 warning: line over 80 characters
16 16 contrib/perf.py:0:
17 17 > except:
18 18 warning: naked except clause
19 19 contrib/setup3k.py:0:
20 20 > except:
21 21 warning: naked except clause
22 22 contrib/setup3k.py:0:
23 23 > except:
24 24 warning: naked except clause
25 25 contrib/setup3k.py:0:
26 26 > except:
27 27 warning: naked except clause
28 28 warning: naked except clause
29 29 warning: naked except clause
30 30 contrib/shrink-revlog.py:0:
31 31 > '(You can delete those files when you are satisfied that your\n'
32 32 warning: line over 80 characters
33 33 contrib/shrink-revlog.py:0:
34 34 > ('', 'sort', 'reversepostorder', 'name of sort algorithm to use'),
35 35 warning: line over 80 characters
36 36 contrib/shrink-revlog.py:0:
37 37 > [('', 'revlog', '', _('index (.i) file of the revlog to shrink')),
38 38 warning: line over 80 characters
39 39 contrib/shrink-revlog.py:0:
40 40 > except:
41 41 warning: naked except clause
42 42 doc/gendoc.py:0:
43 43 > "together with Mercurial. Help for other extensions is available "
44 44 warning: line over 80 characters
45 45 hgext/bugzilla.py:0:
46 46 > raise util.Abort(_('cannot find bugzilla user id for %s or %s') %
47 47 warning: line over 80 characters
48 48 hgext/bugzilla.py:0:
49 49 > bzdir = self.ui.config('bugzilla', 'bzdir', '/var/www/html/bugzilla')
50 50 warning: line over 80 characters
51 51 hgext/convert/__init__.py:0:
52 52 > ('', 'ancestors', '', _('show current changeset in ancestor branches')),
53 53 warning: line over 80 characters
54 54 hgext/convert/bzr.py:0:
55 55 > except:
56 56 warning: naked except clause
57 57 hgext/convert/common.py:0:
58 58 > except:
59 59 warning: naked except clause
60 60 hgext/convert/common.py:0:
61 61 > except:
62 62 warning: naked except clause
63 63 warning: naked except clause
64 64 hgext/convert/convcmd.py:0:
65 65 > except:
66 66 warning: naked except clause
67 67 hgext/convert/cvs.py:0:
68 68 > # /1 :pserver:user@example.com:2401/cvsroot/foo Ah<Z
69 69 warning: line over 80 characters
70 70 hgext/convert/cvsps.py:0:
71 71 > assert len(branches) == 1, 'unknown branch: %s' % e.mergepoint
72 72 warning: line over 80 characters
73 73 hgext/convert/cvsps.py:0:
74 74 > ui.write('Ancestors: %s\n' % (','.join(r)))
75 75 warning: unwrapped ui message
76 76 hgext/convert/cvsps.py:0:
77 77 > ui.write('Parent: %d\n' % cs.parents[0].id)
78 78 warning: unwrapped ui message
79 79 hgext/convert/cvsps.py:0:
80 80 > ui.write('Parents: %s\n' %
81 81 warning: unwrapped ui message
82 82 hgext/convert/cvsps.py:0:
83 83 > except:
84 84 warning: naked except clause
85 85 hgext/convert/cvsps.py:0:
86 86 > ui.write('Branchpoints: %s \n' % ', '.join(branchpoints))
87 87 warning: unwrapped ui message
88 88 hgext/convert/cvsps.py:0:
89 89 > ui.write('Author: %s\n' % cs.author)
90 90 warning: unwrapped ui message
91 91 hgext/convert/cvsps.py:0:
92 92 > ui.write('Branch: %s\n' % (cs.branch or 'HEAD'))
93 93 warning: unwrapped ui message
94 94 hgext/convert/cvsps.py:0:
95 95 > ui.write('Date: %s\n' % util.datestr(cs.date,
96 96 warning: unwrapped ui message
97 97 hgext/convert/cvsps.py:0:
98 98 > ui.write('Log:\n')
99 99 warning: unwrapped ui message
100 100 hgext/convert/cvsps.py:0:
101 101 > ui.write('Members: \n')
102 102 warning: unwrapped ui message
103 103 hgext/convert/cvsps.py:0:
104 104 > ui.write('PatchSet %d \n' % cs.id)
105 105 warning: unwrapped ui message
106 106 hgext/convert/cvsps.py:0:
107 107 > ui.write('Tag%s: %s \n' % (['', 's'][len(cs.tags) > 1],
108 108 warning: unwrapped ui message
109 109 hgext/convert/git.py:0:
110 110 > except:
111 111 warning: naked except clause
112 112 hgext/convert/git.py:0:
113 113 > fh = self.gitopen('git diff-tree --name-only --root -r %s "%s^%s" --'
114 114 warning: line over 80 characters
115 115 hgext/convert/hg.py:0:
116 116 > # detect missing revlogs and abort on errors or populate self.ignored
117 117 warning: line over 80 characters
118 118 hgext/convert/hg.py:0:
119 119 > except:
120 120 warning: naked except clause
121 121 warning: naked except clause
122 122 hgext/convert/hg.py:0:
123 123 > except:
124 124 warning: naked except clause
125 125 hgext/convert/monotone.py:0:
126 126 > except:
127 127 warning: naked except clause
128 128 hgext/convert/monotone.py:0:
129 129 > except:
130 130 warning: naked except clause
131 131 hgext/convert/subversion.py:0:
132 132 > raise util.Abort(_('svn: branch has no revision %s') % to_revnum)
133 133 warning: line over 80 characters
134 134 hgext/convert/subversion.py:0:
135 135 > except:
136 136 warning: naked except clause
137 137 hgext/convert/subversion.py:0:
138 138 > args = [self.baseurl, relpaths, start, end, limit, discover_changed_paths,
139 139 warning: line over 80 characters
140 140 hgext/convert/subversion.py:0:
141 141 > self.trunkname = self.ui.config('convert', 'svn.trunk', 'trunk').strip('/')
142 142 warning: line over 80 characters
143 143 hgext/convert/subversion.py:0:
144 144 > except:
145 145 warning: naked except clause
146 146 hgext/convert/subversion.py:0:
147 147 > def get_log_child(fp, url, paths, start, end, limit=0, discover_changed_paths=True,
148 148 warning: line over 80 characters
149 149 hgext/eol.py:0:
150 150 > if ui.configbool('eol', 'fix-trailing-newline', False) and s and s[-1] != '\n':
151 151 warning: line over 80 characters
152 152 warning: line over 80 characters
153 153 hgext/gpg.py:0:
154 154 > except:
155 155 warning: naked except clause
156 156 hgext/hgcia.py:0:
157 157 > except:
158 158 warning: naked except clause
159 159 hgext/hgk.py:0:
160 160 > ui.write("%s%s\n" % (prefix, description.replace('\n', nlprefix).strip()))
161 161 warning: line over 80 characters
162 162 hgext/hgk.py:0:
163 163 > ui.write("parent %s\n" % p)
164 164 warning: unwrapped ui message
165 165 hgext/hgk.py:0:
166 166 > ui.write('k=%s\nv=%s\n' % (name, value))
167 167 warning: unwrapped ui message
168 168 hgext/hgk.py:0:
169 169 > ui.write("author %s %s %s\n" % (ctx.user(), int(date[0]), date[1]))
170 170 warning: unwrapped ui message
171 171 hgext/hgk.py:0:
172 172 > ui.write("branch %s\n\n" % ctx.branch())
173 173 warning: unwrapped ui message
174 174 hgext/hgk.py:0:
175 175 > ui.write("committer %s %s %s\n" % (committer, int(date[0]), date[1]))
176 176 warning: unwrapped ui message
177 177 hgext/hgk.py:0:
178 178 > ui.write("revision %d\n" % ctx.rev())
179 179 warning: unwrapped ui message
180 180 hgext/hgk.py:0:
181 181 > ui.write("tree %s\n" % short(ctx.changeset()[0])) # use ctx.node() instead ??
182 182 warning: line over 80 characters
183 183 warning: unwrapped ui message
184 184 hgext/highlight/__init__.py:0:
185 185 > extensions.wrapfunction(webcommands, '_filerevision', filerevision_highlight)
186 186 warning: line over 80 characters
187 187 hgext/highlight/__init__.py:0:
188 188 > return ['/* pygments_style = %s */\n\n' % pg_style, fmter.get_style_defs('')]
189 189 warning: line over 80 characters
190 190 hgext/inotify/__init__.py:0:
191 191 > if self._inotifyon and not ignored and not subrepos and not self._dirty:
192 192 warning: line over 80 characters
193 193 hgext/inotify/server.py:0:
194 194 > except:
195 195 warning: naked except clause
196 196 hgext/inotify/server.py:0:
197 197 > except:
198 198 warning: naked except clause
199 199 hgext/keyword.py:0:
200 200 > ui.note("hg ci -m '%s'\n" % msg)
201 201 warning: unwrapped ui message
202 202 hgext/largefiles/overrides.py:0:
203 203 > # When we call orig below it creates the standins but we don't add them
204 204 warning: line over 80 characters
205 205 hgext/largefiles/reposetup.py:0:
206 206 > if os.path.exists(self.wjoin(lfutil.standin(lfile))):
207 207 warning: line over 80 characters
208 208 hgext/mq.py:0:
209 209 > raise util.Abort(_("%s does not have a parent recorded" % root))
210 210 warning: line over 80 characters
211 211 hgext/mq.py:0:
212 212 > raise util.Abort(_("cannot push --exact with applied patches"))
213 213 warning: line over 80 characters
214 214 hgext/mq.py:0:
215 215 > raise util.Abort(_("cannot use --exact and --move together"))
216 216 warning: line over 80 characters
217 217 hgext/mq.py:0:
218 218 > self.ui.warn(_('Tag %s overrides mq patch of the same name\n')
219 219 warning: line over 80 characters
220 220 hgext/mq.py:0:
221 221 > except:
222 222 warning: naked except clause
223 223 warning: naked except clause
224 224 hgext/mq.py:0:
225 225 > except:
226 226 warning: naked except clause
227 227 warning: naked except clause
228 228 warning: naked except clause
229 229 warning: naked except clause
230 230 hgext/mq.py:0:
231 231 > raise util.Abort(_('cannot mix -l/--list with options or arguments'))
232 232 warning: line over 80 characters
233 233 hgext/mq.py:0:
234 234 > raise util.Abort(_('qfold cannot fold already applied patch %s') % p)
235 235 warning: line over 80 characters
236 236 hgext/mq.py:0:
237 237 > ('', 'move', None, _('reorder patch series and apply only the patch'))],
238 238 warning: line over 80 characters
239 239 hgext/mq.py:0:
240 240 > ('U', 'noupdate', None, _('do not update the new working directories')),
241 241 warning: line over 80 characters
242 242 hgext/mq.py:0:
243 243 > ('e', 'exact', None, _('apply the target patch to its recorded parent')),
244 244 warning: line over 80 characters
245 245 hgext/mq.py:0:
246 246 > except:
247 247 warning: naked except clause
248 248 hgext/mq.py:0:
249 249 > ui.write("mq: %s\n" % ', '.join(m))
250 250 warning: unwrapped ui message
251 251 hgext/mq.py:0:
252 252 > repo.mq.qseries(repo, missing=opts.get('missing'), summary=opts.get('summary'))
253 253 warning: line over 80 characters
254 254 hgext/notify.py:0:
255 255 > ui.note(_('notify: suppressing notification for merge %d:%s\n') %
256 256 warning: line over 80 characters
257 257 hgext/patchbomb.py:0:
258 258 > binnode, seqno=idx, total=total)
259 259 warning: line over 80 characters
260 260 hgext/patchbomb.py:0:
261 261 > except:
262 262 warning: naked except clause
263 263 hgext/patchbomb.py:0:
264 264 > ui.write('Subject: %s\n' % subj)
265 265 warning: unwrapped ui message
266 266 hgext/patchbomb.py:0:
267 267 > p = mail.mimetextpatch('\n'.join(patchlines), 'x-patch', opts.get('test'))
268 268 warning: line over 80 characters
269 269 hgext/patchbomb.py:0:
270 270 > ui.write('From: %s\n' % sender)
271 271 warning: unwrapped ui message
272 272 hgext/record.py:0:
273 273 > ignoreblanklines=opts.get('ignore_blank_lines'))
274 274 warning: line over 80 characters
275 275 hgext/record.py:0:
276 276 > ignorewsamount=opts.get('ignore_space_change'),
277 277 warning: line over 80 characters
278 278 hgext/zeroconf/__init__.py:0:
279 279 > publish(name, desc, path, util.getport(u.config("web", "port", 8000)))
280 280 warning: line over 80 characters
281 281 hgext/zeroconf/__init__.py:0:
282 282 > except:
283 283 warning: naked except clause
284 284 warning: naked except clause
285 285 mercurial/bundlerepo.py:0:
286 286 > is a bundlerepo for the obtained bundle when the original "other" is remote.
287 287 warning: line over 80 characters
288 288 mercurial/bundlerepo.py:0:
289 289 > "local" is a local repo from which to obtain the actual incoming changesets; it
290 290 warning: line over 80 characters
291 291 mercurial/bundlerepo.py:0:
292 292 > tmp = discovery.findcommonincoming(repo, other, heads=onlyheads, force=force)
293 293 warning: line over 80 characters
294 294 mercurial/commands.py:0:
295 295 > " size " + basehdr + " link p1 p2 nodeid\n")
296 296 warning: line over 80 characters
297 297 mercurial/commands.py:0:
298 298 > raise util.Abort('cannot use localheads with old style discovery')
299 299 warning: line over 80 characters
300 300 mercurial/commands.py:0:
301 301 > ui.note('branch %s\n' % data)
302 302 warning: unwrapped ui message
303 303 mercurial/commands.py:0:
304 304 > ui.note('node %s\n' % str(data))
305 305 warning: unwrapped ui message
306 306 mercurial/commands.py:0:
307 307 > ui.note('tag %s\n' % name)
308 308 warning: unwrapped ui message
309 309 mercurial/commands.py:0:
310 310 > ui.write("unpruned common: %s\n" % " ".join([short(n)
311 311 warning: unwrapped ui message
312 312 mercurial/commands.py:0:
313 313 > yield 'n', (r, list(set(p for p in cl.parentrevs(r) if p != -1)))
314 314 warning: line over 80 characters
315 315 mercurial/commands.py:0:
316 316 > yield 'n', (r, list(set(p for p in rlog.parentrevs(r) if p != -1)))
317 317 warning: line over 80 characters
318 318 mercurial/commands.py:0:
319 319 > except:
320 320 warning: naked except clause
321 321 mercurial/commands.py:0:
322 322 > ui.status(_("(run 'hg heads .' to see heads, 'hg merge' to merge)\n"))
323 323 warning: line over 80 characters
324 324 mercurial/commands.py:0:
325 325 > ui.write("format: id, p1, p2, cset, delta base, len(delta)\n")
326 326 warning: unwrapped ui message
327 327 mercurial/commands.py:0:
328 328 > ui.write("local is subset\n")
329 329 warning: unwrapped ui message
330 330 mercurial/commands.py:0:
331 331 > ui.write("remote is subset\n")
332 332 warning: unwrapped ui message
333 333 mercurial/commands.py:0:
334 334 > ui.write(' other : ' + fmt2 % pcfmt(numoprev, numprev))
335 335 warning: line over 80 characters
336 336 mercurial/commands.py:0:
337 337 > ui.write(' where prev = p1 : ' + fmt2 % pcfmt(nump1prev, numprev))
338 338 warning: line over 80 characters
339 339 mercurial/commands.py:0:
340 340 > ui.write(' where prev = p2 : ' + fmt2 % pcfmt(nump2prev, numprev))
341 341 warning: line over 80 characters
342 342 mercurial/commands.py:0:
343 343 > ui.write('deltas against other : ' + fmt % pcfmt(numother, numdeltas))
344 344 warning: line over 80 characters
345 345 warning: unwrapped ui message
346 346 mercurial/commands.py:0:
347 347 > ui.write('deltas against p1 : ' + fmt % pcfmt(nump1, numdeltas))
348 348 warning: unwrapped ui message
349 349 mercurial/commands.py:0:
350 350 > ui.write('deltas against p2 : ' + fmt % pcfmt(nump2, numdeltas))
351 351 warning: unwrapped ui message
352 352 mercurial/commands.py:0:
353 353 > cmd, ext, mod = extensions.disabledcmd(ui, name, ui.config('ui', 'strict'))
354 354 warning: line over 80 characters
355 355 mercurial/commands.py:0:
356 356 > except:
357 357 warning: naked except clause
358 358 mercurial/commands.py:0:
359 359 > revs, checkout = hg.addbranchrevs(repo, other, branches, opts.get('rev'))
360 360 warning: line over 80 characters
361 361 mercurial/commands.py:0:
362 362 > ui.write("common heads: %s\n" % " ".join([short(n) for n in common]))
363 363 warning: unwrapped ui message
364 364 mercurial/commands.py:0:
365 365 > ui.write("match: %s\n" % m(d[0]))
366 366 warning: unwrapped ui message
367 367 mercurial/commands.py:0:
368 368 > ui.write('deltas against prev : ' + fmt % pcfmt(numprev, numdeltas))
369 369 warning: unwrapped ui message
370 370 mercurial/commands.py:0:
371 371 > ui.write('path %s\n' % k)
372 372 warning: unwrapped ui message
373 373 mercurial/commands.py:0:
374 374 > ui.write('uncompressed data size (min/max/avg) : %d / %d / %d\n'
375 375 warning: unwrapped ui message
376 376 mercurial/commands.py:0:
377 377 > Every ID must be a full-length hex node id string. Returns a list of 0s and 1s
378 378 warning: line over 80 characters
379 379 mercurial/commands.py:0:
380 380 > remoteurl, branches = hg.parseurl(ui.expandpath(remoteurl), opts.get('branch'))
381 381 warning: line over 80 characters
382 382 mercurial/commands.py:0:
383 383 > ui.write("digraph G {\n")
384 384 warning: unwrapped ui message
385 385 mercurial/commands.py:0:
386 386 > ui.write("internal: %s %s\n" % d)
387 387 warning: unwrapped ui message
388 388 mercurial/commands.py:0:
389 389 > ui.write("standard: %s\n" % util.datestr(d))
390 390 warning: unwrapped ui message
391 391 mercurial/commands.py:0:
392 392 > ui.write('avg chain length : ' + fmt % avgchainlen)
393 393 warning: unwrapped ui message
394 394 mercurial/commands.py:0:
395 395 > ui.write('case-sensitive: %s\n' % (util.checkcase('.debugfsinfo')
396 396 warning: unwrapped ui message
397 397 mercurial/commands.py:0:
398 398 > ui.write('compression ratio : ' + fmt % compratio)
399 399 warning: unwrapped ui message
400 400 mercurial/commands.py:0:
401 401 > ui.write('delta size (min/max/avg) : %d / %d / %d\n'
402 402 warning: unwrapped ui message
403 403 mercurial/commands.py:0:
404 404 > ui.write('exec: %s\n' % (util.checkexec(path) and 'yes' or 'no'))
405 405 warning: unwrapped ui message
406 406 mercurial/commands.py:0:
407 407 > ui.write('flags : %s\n' % ', '.join(flags))
408 408 warning: unwrapped ui message
409 409 mercurial/commands.py:0:
410 410 > ui.write('format : %d\n' % format)
411 411 warning: unwrapped ui message
412 412 mercurial/commands.py:0:
413 413 > ui.write('full revision size (min/max/avg) : %d / %d / %d\n'
414 414 warning: unwrapped ui message
415 415 mercurial/commands.py:0:
416 416 > ui.write('revision size : ' + fmt2 % totalsize)
417 417 warning: unwrapped ui message
418 418 mercurial/commands.py:0:
419 419 > ui.write('revisions : ' + fmt2 % numrevs)
420 420 warning: unwrapped ui message
421 421 warning: unwrapped ui message
422 422 mercurial/commands.py:0:
423 423 > ui.write('symlink: %s\n' % (util.checklink(path) and 'yes' or 'no'))
424 424 warning: unwrapped ui message
425 425 mercurial/commandserver.py:0:
426 426 > # the ui here is really the repo ui so take its baseui so we don't end up
427 427 warning: line over 80 characters
428 428 mercurial/context.py:0:
429 429 > return self._manifestdelta[path], self._manifestdelta.flags(path)
430 430 warning: line over 80 characters
431 431 mercurial/dagparser.py:0:
432 432 > raise util.Abort(_("invalid character in dag description: %s...") % s)
433 433 warning: line over 80 characters
434 434 mercurial/dagparser.py:0:
435 435 > >>> dagtext([('n', (0, [-1])), ('C', 'my command line'), ('n', (1, [0]))])
436 436 warning: line over 80 characters
437 437 mercurial/dirstate.py:0:
438 438 > if not st is None and not getkind(st.st_mode) in (regkind, lnkkind):
439 439 warning: line over 80 characters
440 440 mercurial/discovery.py:0:
441 > repo.ui.note(_("new remote heads on branch '%s'\n") % branch)
442 warning: line over 80 characters
443 mercurial/discovery.py:0:
444 441 > If onlyheads is given, only nodes ancestral to nodes in onlyheads (inclusive)
445 442 warning: line over 80 characters
446 443 mercurial/discovery.py:0:
447 444 > def findcommonoutgoing(repo, other, onlyheads=None, force=False, commoninc=None):
448 445 warning: line over 80 characters
449 446 mercurial/dispatch.py:0:
450 447 > " (.hg not found)") % os.getcwd())
451 448 warning: line over 80 characters
452 449 mercurial/dispatch.py:0:
453 450 > aliases, entry = cmdutil.findcmd(cmd, cmdtable, lui.config("ui", "strict"))
454 451 warning: line over 80 characters
455 452 mercurial/dispatch.py:0:
456 453 > except:
457 454 warning: naked except clause
458 455 mercurial/dispatch.py:0:
459 456 > return lambda: runcommand(lui, None, cmd, args[:1], ui, options, d, [], {})
460 457 warning: line over 80 characters
461 458 mercurial/dispatch.py:0:
462 459 > def __init__(self, args, ui=None, repo=None, fin=None, fout=None, ferr=None):
463 460 warning: line over 80 characters
464 461 mercurial/dispatch.py:0:
465 462 > except:
466 463 warning: naked except clause
467 464 mercurial/hg.py:0:
468 465 > except:
469 466 warning: naked except clause
470 467 mercurial/hgweb/hgweb_mod.py:0:
471 468 > self.maxshortchanges = int(self.config("web", "maxshortchanges", 60))
472 469 warning: line over 80 characters
473 470 mercurial/keepalive.py:0:
474 471 > except:
475 472 warning: naked except clause
476 473 mercurial/keepalive.py:0:
477 474 > except:
478 475 warning: naked except clause
479 476 mercurial/localrepo.py:0:
480 477 > hint=_("use --subrepos for recursive commit"))
481 478 warning: line over 80 characters
482 479 mercurial/localrepo.py:0:
483 480 > # we return an integer indicating remote head count change
484 481 warning: line over 80 characters
485 482 mercurial/localrepo.py:0:
486 483 > raise util.Abort(_("empty or missing revlog for %s") % fname)
487 484 warning: line over 80 characters
488 485 warning: line over 80 characters
489 486 mercurial/localrepo.py:0:
490 487 > if self._tagscache.tagtypes and name in self._tagscache.tagtypes:
491 488 warning: line over 80 characters
492 489 mercurial/localrepo.py:0:
493 490 > self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
494 491 warning: line over 80 characters
495 492 mercurial/localrepo.py:0:
496 493 > # new requirements = old non-format requirements + new format-related
497 494 warning: line over 80 characters
498 495 mercurial/localrepo.py:0:
499 496 > except:
500 497 warning: naked except clause
501 498 mercurial/localrepo.py:0:
502 499 > """return status of files between two nodes or node and working directory
503 500 warning: line over 80 characters
504 501 mercurial/localrepo.py:0:
505 502 > '''Returns a tagscache object that contains various tags related caches.'''
506 503 warning: line over 80 characters
507 504 mercurial/manifest.py:0:
508 505 > return "".join(struct.pack(">lll", start, end, len(content)) + content
509 506 warning: line over 80 characters
510 507 mercurial/merge.py:0:
511 508 > subrepo.submerge(repo, wctx, mctx, wctx.ancestor(mctx), overwrite)
512 509 warning: line over 80 characters
513 510 mercurial/patch.py:0:
514 511 > modified, added, removed, copy, getfilectx, opts, losedata, prefix)
515 512 warning: line over 80 characters
516 513 mercurial/patch.py:0:
517 514 > diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a, self.b)
518 515 warning: line over 80 characters
519 516 mercurial/patch.py:0:
520 517 > output.append(_(' %d files changed, %d insertions(+), %d deletions(-)\n')
521 518 warning: line over 80 characters
522 519 mercurial/patch.py:0:
523 520 > except:
524 521 warning: naked except clause
525 522 mercurial/pure/base85.py:0:
526 523 > raise OverflowError('Base85 overflow in hunk starting at byte %d' % i)
527 524 warning: line over 80 characters
528 525 mercurial/pure/mpatch.py:0:
529 526 > frags.extend(reversed(new)) # what was left at the end
530 527 warning: line over 80 characters
531 528 mercurial/repair.py:0:
532 529 > except:
533 530 warning: naked except clause
534 531 mercurial/repair.py:0:
535 532 > except:
536 533 warning: naked except clause
537 534 mercurial/revset.py:0:
538 535 > elif c.isalnum() or c in '._' or ord(c) > 127: # gather up a symbol/keyword
539 536 warning: line over 80 characters
540 537 mercurial/revset.py:0:
541 538 > Changesets that are the Nth ancestor (first parents only) of a changeset in set.
542 539 warning: line over 80 characters
543 540 mercurial/scmutil.py:0:
544 541 > raise util.Abort(_("path '%s' is inside nested repo %r") %
545 542 warning: line over 80 characters
546 543 mercurial/scmutil.py:0:
547 544 > "requires features '%s' (upgrade Mercurial)") % "', '".join(missings))
548 545 warning: line over 80 characters
549 546 mercurial/scmutil.py:0:
550 547 > elif repo.dirstate[abs] != 'r' and (not good or not os.path.lexists(target)
551 548 warning: line over 80 characters
552 549 mercurial/setdiscovery.py:0:
553 550 > # treat remote heads (and maybe own heads) as a first implicit sample response
554 551 warning: line over 80 characters
555 552 mercurial/setdiscovery.py:0:
556 553 > undecided = dag.nodeset() # own nodes where I don't know if remote knows them
557 554 warning: line over 80 characters
558 555 mercurial/similar.py:0:
559 556 > repo.ui.progress(_('searching for similar files'), i, total=len(removed))
560 557 warning: line over 80 characters
561 558 mercurial/simplemerge.py:0:
562 559 > for zmatch, zend, amatch, aend, bmatch, bend in self.find_sync_regions():
563 560 warning: line over 80 characters
564 561 mercurial/sshrepo.py:0:
565 562 > self._abort(error.RepoError(_("no suitable response from remote hg")))
566 563 warning: line over 80 characters
567 564 mercurial/sshrepo.py:0:
568 565 > except:
569 566 warning: naked except clause
570 567 mercurial/subrepo.py:0:
571 568 > other, self._repo = hg.clone(self._repo._subparent.ui, {}, other,
572 569 warning: line over 80 characters
573 570 mercurial/subrepo.py:0:
574 571 > msg = (_(' subrepository sources for %s differ (in checked out version)\n'
575 572 warning: line over 80 characters
576 573 mercurial/transaction.py:0:
577 574 > except:
578 575 warning: naked except clause
579 576 mercurial/ui.py:0:
580 577 > traceback.print_exception(exc[0], exc[1], exc[2], file=self.ferr)
581 578 warning: line over 80 characters
582 579 mercurial/url.py:0:
583 580 > conn = httpsconnection(host, port, keyfile, certfile, *args, **kwargs)
584 581 warning: line over 80 characters
585 582 mercurial/util.py:0:
586 583 > except:
587 584 warning: naked except clause
588 585 mercurial/util.py:0:
589 586 > except:
590 587 warning: naked except clause
591 588 mercurial/verify.py:0:
592 589 > except:
593 590 warning: naked except clause
594 591 mercurial/verify.py:0:
595 592 > except:
596 593 warning: naked except clause
597 594 mercurial/wireproto.py:0:
598 595 > # Assuming the future to be filled with the result from the batched request
599 596 warning: line over 80 characters
600 597 mercurial/wireproto.py:0:
601 598 > '''remote must support _submitbatch(encbatch) and _submitone(op, encargs)'''
602 599 warning: line over 80 characters
603 600 mercurial/wireproto.py:0:
604 601 > All methods invoked on instances of this class are simply queued and return a
605 602 warning: line over 80 characters
606 603 mercurial/wireproto.py:0:
607 604 > The decorator returns a function which wraps this coroutine as a plain method,
608 605 warning: line over 80 characters
609 606 setup.py:0:
610 607 > raise SystemExit("Python headers are required to build Mercurial")
611 608 warning: line over 80 characters
612 609 setup.py:0:
613 610 > except:
614 611 warning: naked except clause
615 612 setup.py:0:
616 613 > # build_py), it will not find osutil & friends, thinking that those modules are
617 614 warning: line over 80 characters
618 615 setup.py:0:
619 616 > except:
620 617 warning: naked except clause
621 618 warning: naked except clause
622 619 setup.py:0:
623 620 > isironpython = platform.python_implementation().lower().find("ironpython") != -1
624 621 warning: line over 80 characters
625 622 setup.py:0:
626 623 > except:
627 624 warning: naked except clause
628 625 warning: naked except clause
629 626 warning: naked except clause
630 627 tests/autodiff.py:0:
631 628 > ui.write('data lost for: %s\n' % fn)
632 629 warning: unwrapped ui message
633 630 tests/run-tests.py:0:
634 631 > except:
635 632 warning: naked except clause
636 633 tests/test-commandserver.py:0:
637 634 > 'hooks.pre-identify=python:test-commandserver.hook', 'id'],
638 635 warning: line over 80 characters
639 636 tests/test-commandserver.py:0:
640 637 > # the cached repo local hgrc contains ui.foo=bar, so showconfig should show it
641 638 warning: line over 80 characters
642 639 tests/test-commandserver.py:0:
643 640 > print '%c, %r' % (ch, re.sub('encoding: [a-zA-Z0-9-]+', 'encoding: ***', data))
644 641 warning: line over 80 characters
645 642 tests/test-filecache.py:0:
646 643 > except:
647 644 warning: naked except clause
648 645 tests/test-filecache.py:0:
649 646 > if subprocess.call(['python', '%s/hghave' % os.environ['TESTDIR'], 'cacheable']):
650 647 warning: line over 80 characters
651 648 tests/test-ui-color.py:0:
652 649 > testui.warn('warning\n')
653 650 warning: unwrapped ui message
654 651 tests/test-ui-color.py:0:
655 652 > testui.write('buffered\n')
656 653 warning: unwrapped ui message
657 654 tests/test-walkrepo.py:0:
658 655 > print "Found %d repositories when I should have found 2" % (len(reposet),)
659 656 warning: line over 80 characters
660 657 tests/test-walkrepo.py:0:
661 658 > print "Found %d repositories when I should have found 3" % (len(reposet),)
662 659 warning: line over 80 characters
663 660 [1]
General Comments 0
You need to be logged in to leave comments. Login now