##// END OF EJS Templates
Merge with upstream
Brendan Cully -
r3241:a184cd0c merge default
parent child Browse files
Show More
@@ -1,295 +1,459 b''
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import *
9 9 from i18n import gettext as _
10 10 from demandload import demandload
11 11 demandload(globals(), "ancestor bdiff repo revlog util")
12 12
13 13 class changectx(object):
14 14 """A changecontext object makes access to data related to a particular
15 15 changeset convenient."""
16 16 def __init__(self, repo, changeid=None):
17 17 """changeid is a revision number, node, or tag"""
18 18 self._repo = repo
19 19
20 20 if not changeid and changeid != 0:
21 21 p1, p2 = self._repo.dirstate.parents()
22 22 self._rev = self._repo.changelog.rev(p1)
23 23 if self._rev == -1:
24 24 changeid = 'tip'
25 25 else:
26 26 self._node = p1
27 27 return
28 28
29 29 self._node = self._repo.lookup(changeid)
30 30 self._rev = self._repo.changelog.rev(self._node)
31 31
32 32 def __str__(self):
33 33 return short(self.node())
34 34
35 35 def __repr__(self):
36 return "<changectx %s>" % short(self.node())
36 return "<changectx %s>" % str(self)
37 37
38 38 def __eq__(self, other):
39 39 return self._rev == other._rev
40 40
41 41 def __nonzero__(self):
42 42 return self._rev != -1
43 43
44 def changeset(self):
45 try:
46 return self._changeset
47 except AttributeError:
44 def __getattr__(self, name):
45 if name == '_changeset':
48 46 self._changeset = self._repo.changelog.read(self.node())
49 47 return self._changeset
50
51 def manifest(self):
52 try:
48 elif name == '_manifest':
49 self._manifest = self._repo.manifest.read(self._changeset[0])
53 50 return self._manifest
54 except AttributeError:
55 self._manifest = self._repo.manifest.read(self.changeset()[0])
56 return self._manifest
51 else:
52 raise AttributeError, name
53
54 def changeset(self): return self._changeset
55 def manifest(self): return self._manifest
57 56
58 57 def rev(self): return self._rev
59 58 def node(self): return self._node
60 def user(self): return self.changeset()[1]
61 def date(self): return self.changeset()[2]
62 def files(self): return self.changeset()[3]
63 def description(self): return self.changeset()[4]
59 def user(self): return self._changeset[1]
60 def date(self): return self._changeset[2]
61 def files(self): return self._changeset[3]
62 def description(self): return self._changeset[4]
64 63
65 64 def parents(self):
66 65 """return contexts for each parent changeset"""
67 66 p = self._repo.changelog.parents(self._node)
68 67 return [ changectx(self._repo, x) for x in p ]
69 68
70 69 def children(self):
71 70 """return contexts for each child changeset"""
72 71 c = self._repo.changelog.children(self._node)
73 72 return [ changectx(self._repo, x) for x in c ]
74 73
75 74 def filenode(self, path):
76 node, flag = self._repo.manifest.find(self.changeset()[0], path)
75 if hasattr(self, "_manifest"):
76 return self._manifest[path]
77 node, flag = self._repo.manifest.find(self._changeset[0], path)
77 78 return node
78 79
79 80 def filectx(self, path, fileid=None):
80 81 """get a file context from this changeset"""
81 82 if fileid is None:
82 83 fileid = self.filenode(path)
83 if not fileid:
84 raise repo.LookupError(_("'%s' does not exist in changeset %s") %
85 (path, hex(self.node())))
86 return filectx(self._repo, path, fileid=fileid)
84 return filectx(self._repo, path, fileid=fileid, changectx=self)
87 85
88 86 def filectxs(self):
89 87 """generate a file context for each file in this changeset's
90 88 manifest"""
91 89 mf = self.manifest()
92 90 m = mf.keys()
93 91 m.sort()
94 92 for f in m:
95 93 yield self.filectx(f, fileid=mf[f])
96 94
97 95 def ancestor(self, c2):
98 96 """
99 97 return the ancestor context of self and c2
100 98 """
101 99 n = self._repo.changelog.ancestor(self._node, c2._node)
102 100 return changectx(self._repo, n)
103 101
104 102 class filectx(object):
105 103 """A filecontext object makes access to data related to a particular
106 104 filerevision convenient."""
107 def __init__(self, repo_, path, changeid=None, fileid=None, filelog=None):
105 def __init__(self, repo, path, changeid=None, fileid=None,
106 filelog=None, changectx=None):
108 107 """changeid can be a changeset revision, node, or tag.
109 108 fileid can be a file revision or node."""
110 self._repo = repo_
109 self._repo = repo
111 110 self._path = path
112 111
113 112 assert changeid is not None or fileid is not None
114 113
115 114 if filelog:
116 115 self._filelog = filelog
117 else:
118 self._filelog = self._repo.file(self._path)
116 if changectx:
117 self._changectx = changectx
118 self._changeid = changectx.node()
119 119
120 120 if fileid is None:
121 121 self._changeid = changeid
122 122 else:
123 try:
124 self._filenode = self._filelog.lookup(fileid)
125 except revlog.RevlogError, inst:
126 raise repo.LookupError(str(inst))
127 self._changeid = self._filelog.linkrev(self._filenode)
123 self._fileid = fileid
128 124
129 125 def __getattr__(self, name):
130 126 if name == '_changectx':
131 127 self._changectx = changectx(self._repo, self._changeid)
132 128 return self._changectx
129 elif name == '_filelog':
130 self._filelog = self._repo.file(self._path)
131 return self._filelog
132 elif name == '_changeid':
133 self._changeid = self._filelog.linkrev(self._filenode)
134 return self._changeid
133 135 elif name == '_filenode':
134 self._filenode = self._changectx.filenode(self._path)
136 try:
137 if hasattr(self, "_fileid"):
138 self._filenode = self._filelog.lookup(self._fileid)
139 else:
140 self._filenode = self._changectx.filenode(self._path)
141 except revlog.RevlogError, inst:
142 raise repo.LookupError(str(inst))
135 143 return self._filenode
136 144 elif name == '_filerev':
137 145 self._filerev = self._filelog.rev(self._filenode)
138 146 return self._filerev
139 147 else:
140 148 raise AttributeError, name
141 149
142 150 def __nonzero__(self):
143 151 return self._filerev != nullid
144 152
145 153 def __str__(self):
146 154 return "%s@%s" % (self.path(), short(self.node()))
147 155
148 156 def __repr__(self):
149 return "<filectx %s@%s>" % (self.path(), short(self.node()))
157 return "<filectx %s>" % str(self)
150 158
151 159 def __eq__(self, other):
152 160 return self._path == other._path and self._changeid == other._changeid
153 161
154 162 def filectx(self, fileid):
155 163 '''opens an arbitrary revision of the file without
156 164 opening a new filelog'''
157 165 return filectx(self._repo, self._path, fileid=fileid,
158 166 filelog=self._filelog)
159 167
160 168 def filerev(self): return self._filerev
161 169 def filenode(self): return self._filenode
162 170 def filelog(self): return self._filelog
163 171
164 172 def rev(self):
165 173 if hasattr(self, "_changectx"):
166 174 return self._changectx.rev()
167 175 return self._filelog.linkrev(self._filenode)
168 176
169 177 def node(self): return self._changectx.node()
170 178 def user(self): return self._changectx.user()
171 179 def date(self): return self._changectx.date()
172 180 def files(self): return self._changectx.files()
173 181 def description(self): return self._changectx.description()
174 182 def manifest(self): return self._changectx.manifest()
175 183 def changectx(self): return self._changectx
176 184
177 185 def data(self): return self._filelog.read(self._filenode)
178 186 def renamed(self): return self._filelog.renamed(self._filenode)
179 187 def path(self): return self._path
180 188
181 189 def parents(self):
182 190 p = self._path
183 191 fl = self._filelog
184 192 pl = [ (p, n, fl) for n in self._filelog.parents(self._filenode) ]
185 193
186 194 r = self.renamed()
187 195 if r:
188 196 pl[0] = (r[0], r[1], None)
189 197
190 198 return [ filectx(self._repo, p, fileid=n, filelog=l)
191 199 for p,n,l in pl if n != nullid ]
192 200
193 201 def children(self):
194 202 # hard for renames
195 203 c = self._filelog.children(self._filenode)
196 204 return [ filectx(self._repo, self._path, fileid=x,
197 205 filelog=self._filelog) for x in c ]
198 206
199 207 def annotate(self, follow=False):
200 208 '''returns a list of tuples of (ctx, line) for each line
201 209 in the file, where ctx is the filectx of the node where
202 210 that line was last changed'''
203 211
204 212 def decorate(text, rev):
205 213 return ([rev] * len(text.splitlines()), text)
206 214
207 215 def pair(parent, child):
208 216 for a1, a2, b1, b2 in bdiff.blocks(parent[1], child[1]):
209 217 child[0][b1:b2] = parent[0][a1:a2]
210 218 return child
211 219
212 220 getlog = util.cachefunc(lambda x: self._repo.file(x))
213 221 def getctx(path, fileid):
214 222 log = path == self._path and self._filelog or getlog(path)
215 223 return filectx(self._repo, path, fileid=fileid, filelog=log)
216 224 getctx = util.cachefunc(getctx)
217 225
218 226 def parents(f):
219 227 # we want to reuse filectx objects as much as possible
220 228 p = f._path
221 pl = [ (p, r) for r in f._filelog.parentrevs(f._filerev) ]
229 if f._filerev is None: # working dir
230 pl = [ (n.path(), n.filerev()) for n in f.parents() ]
231 else:
232 pl = [ (p, n) for n in f._filelog.parentrevs(f._filerev) ]
222 233
223 234 if follow:
224 235 r = f.renamed()
225 236 if r:
226 237 pl[0] = (r[0], getlog(r[0]).rev(r[1]))
227 238
228 239 return [ getctx(p, n) for p, n in pl if n != -1 ]
229 240
230 241 # find all ancestors
231 242 needed = {self: 1}
232 243 visit = [self]
233 244 files = [self._path]
234 245 while visit:
235 246 f = visit.pop(0)
236 247 for p in parents(f):
237 248 if p not in needed:
238 249 needed[p] = 1
239 250 visit.append(p)
240 251 if p._path not in files:
241 252 files.append(p._path)
242 253 else:
243 254 # count how many times we'll use this
244 255 needed[p] += 1
245 256
246 257 # sort by revision (per file) which is a topological order
247 258 visit = []
248 259 files.reverse()
249 260 for f in files:
250 261 fn = [(n._filerev, n) for n in needed.keys() if n._path == f]
251 262 fn.sort()
252 263 visit.extend(fn)
253 264 hist = {}
254 265
255 266 for r, f in visit:
256 267 curr = decorate(f.data(), f)
257 268 for p in parents(f):
258 269 if p != nullid:
259 270 curr = pair(hist[p], curr)
260 271 # trim the history of unneeded revs
261 272 needed[p] -= 1
262 273 if not needed[p]:
263 274 del hist[p]
264 275 hist[f] = curr
265 276
266 277 return zip(hist[f][0], hist[f][1].splitlines(1))
267 278
268 279 def ancestor(self, fc2):
269 280 """
270 281 find the common ancestor file context, if any, of self, and fc2
271 282 """
272 283
273 284 acache = {}
285
286 # prime the ancestor cache for the working directory
287 for c in (self, fc2):
288 if c._filerev == None:
289 pl = [ (n.path(), n.filenode()) for n in c.parents() ]
290 acache[(c._path, None)] = pl
291
274 292 flcache = {self._path:self._filelog, fc2._path:fc2._filelog}
275 293 def parents(vertex):
276 294 if vertex in acache:
277 295 return acache[vertex]
278 296 f, n = vertex
279 297 if f not in flcache:
280 298 flcache[f] = self._repo.file(f)
281 299 fl = flcache[f]
282 300 pl = [ (f,p) for p in fl.parents(n) if p != nullid ]
283 301 re = fl.renamed(n)
284 302 if re:
285 303 pl.append(re)
286 304 acache[vertex]=pl
287 305 return pl
288 306
289 307 a, b = (self._path, self._filenode), (fc2._path, fc2._filenode)
290 308 v = ancestor.ancestor(a, b, parents)
291 309 if v:
292 310 f,n = v
293 311 return filectx(self._repo, f, fileid=n, filelog=flcache[f])
294 312
295 313 return None
314
315 class workingctx(changectx):
316 """A workingctx object makes access to data related to
317 the current working directory convenient."""
318 def __init__(self, repo):
319 self._repo = repo
320 self._rev = None
321 self._node = None
322
323 def __str__(self):
324 return "."
325
326 def __nonzero__(self):
327 return True
328
329 def __getattr__(self, name):
330 if name == '_parents':
331 self._parents = self._repo.parents()
332 return self._parents
333 if name == '_status':
334 self._status = self._repo.status()
335 return self._status
336 if name == '_manifest':
337 self._buildmanifest()
338 return self._manifest
339 else:
340 raise AttributeError, name
341
342 def _buildmanifest(self):
343 """generate a manifest corresponding to the working directory"""
344
345 man = self._parents[0].manifest().copy()
346 copied = self._repo.dirstate.copies()
347 modified, added, removed, deleted, unknown = self._status[:5]
348 for i,l in (("a", added), ("m", modified), ("u", unknown)):
349 for f in l:
350 man[f] = man.get(copied.get(f, f), nullid) + i
351 man.set(f, util.is_exec(self._repo.wjoin(f), man.execf(f)))
352
353 for f in deleted + removed:
354 del man[f]
355
356 self._manifest = man
357
358 def manifest(self): return self._manifest
359
360 def user(self): return self._repo.ui.username()
361 def date(self): return util.makedate()
362 def description(self): return ""
363 def files(self):
364 f = self.modified() + self.added() + self.removed()
365 f.sort()
366 return f
367
368 def modified(self): return self._status[0]
369 def added(self): return self._status[1]
370 def removed(self): return self._status[2]
371 def deleted(self): return self._status[3]
372 def unknown(self): return self._status[4]
373 def clean(self): return self._status[5]
374
375 def parents(self):
376 """return contexts for each parent changeset"""
377 return self._parents
378
379 def children(self):
380 return []
381
382 def filectx(self, path):
383 """get a file context from the working directory"""
384 return workingfilectx(self._repo, path, workingctx=self)
385
386 def ancestor(self, c2):
387 """return the ancestor context of self and c2"""
388 return self._parents[0].ancestor(c2) # punt on two parents for now
389
390 class workingfilectx(filectx):
391 """A workingfilectx object makes access to data related to a particular
392 file in the working directory convenient."""
393 def __init__(self, repo, path, filelog=None, workingctx=None):
394 """changeid can be a changeset revision, node, or tag.
395 fileid can be a file revision or node."""
396 self._repo = repo
397 self._path = path
398 self._changeid = None
399 self._filerev = self._filenode = None
400
401 if filelog:
402 self._filelog = filelog
403 if workingctx:
404 self._changectx = workingctx
405
406 def __getattr__(self, name):
407 if name == '_changectx':
408 self._changectx = workingctx(repo)
409 return self._changectx
410 elif name == '_repopath':
411 self._repopath = self._repo.dirstate.copied(p) or self._path
412 elif name == '_filelog':
413 self._filelog = self._repo.file(self._repopath)
414 return self._filelog
415 else:
416 raise AttributeError, name
417
418 def __nonzero__(self):
419 return True
420
421 def __str__(self):
422 return "%s@." % self.path()
423
424 def filectx(self, fileid):
425 '''opens an arbitrary revision of the file without
426 opening a new filelog'''
427 return filectx(self._repo, self._repopath, fileid=fileid,
428 filelog=self._filelog)
429
430 def rev(self):
431 if hasattr(self, "_changectx"):
432 return self._changectx.rev()
433 return self._filelog.linkrev(self._filenode)
434
435 def data(self): return self._repo.wread(self._path)
436 def renamed(self):
437 rp = self._repopath
438 if rp == self._path:
439 return None
440 return rp, self._workingctx._parents._manifest.get(rp, nullid)
441
442 def parents(self):
443 '''return parent filectxs, following copies if necessary'''
444 p = self._path
445 rp = self._repopath
446 pcl = self._workingctx._parents
447 fl = self._filelog
448 pl = [ (rp, pcl[0]._manifest.get(rp, nullid), fl) ]
449 if len(pcl) > 1:
450 if rp != p:
451 fl = None
452 pl.append((p, pcl[1]._manifest.get(p, nullid), fl))
453
454 return [ filectx(self._repo, p, fileid=n, filelog=l)
455 for p,n,l in pl if n != nullid ]
456
457 def children(self):
458 return []
459
@@ -1,1765 +1,1768 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import *
9 9 from i18n import gettext as _
10 10 from demandload import *
11 11 import repo
12 12 demandload(globals(), "appendfile changegroup")
13 13 demandload(globals(), "changelog dirstate filelog manifest context")
14 14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
15 15 demandload(globals(), "os revlog time util")
16 16
17 17 class localrepository(repo.repository):
18 18 capabilities = ()
19 19
20 20 def __del__(self):
21 21 self.transhandle = None
22 22 def __init__(self, parentui, path=None, create=0):
23 23 repo.repository.__init__(self)
24 24 if not path:
25 25 p = os.getcwd()
26 26 while not os.path.isdir(os.path.join(p, ".hg")):
27 27 oldp = p
28 28 p = os.path.dirname(p)
29 29 if p == oldp:
30 30 raise repo.RepoError(_("There is no Mercurial repository"
31 31 " here (.hg not found)"))
32 32 path = p
33 33 self.path = os.path.join(path, ".hg")
34 34
35 35 if not os.path.isdir(self.path):
36 36 if create:
37 37 if not os.path.exists(path):
38 38 os.mkdir(path)
39 39 os.mkdir(self.path)
40 40 os.mkdir(self.join("data"))
41 41 else:
42 42 raise repo.RepoError(_("repository %s not found") % path)
43 43 elif create:
44 44 raise repo.RepoError(_("repository %s already exists") % path)
45 45
46 46 self.root = os.path.abspath(path)
47 47 self.origroot = path
48 48 self.ui = ui.ui(parentui=parentui)
49 49 self.opener = util.opener(self.path)
50 50 self.wopener = util.opener(self.root)
51 51
52 52 try:
53 53 self.ui.readconfig(self.join("hgrc"), self.root)
54 54 except IOError:
55 55 pass
56 56
57 57 v = self.ui.revlogopts
58 58 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
59 59 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
60 60 fl = v.get('flags', None)
61 61 flags = 0
62 62 if fl != None:
63 63 for x in fl.split():
64 64 flags |= revlog.flagstr(x)
65 65 elif self.revlogv1:
66 66 flags = revlog.REVLOG_DEFAULT_FLAGS
67 67
68 68 v = self.revlogversion | flags
69 69 self.manifest = manifest.manifest(self.opener, v)
70 70 self.changelog = changelog.changelog(self.opener, v)
71 71
72 72 # the changelog might not have the inline index flag
73 73 # on. If the format of the changelog is the same as found in
74 74 # .hgrc, apply any flags found in the .hgrc as well.
75 75 # Otherwise, just version from the changelog
76 76 v = self.changelog.version
77 77 if v == self.revlogversion:
78 78 v |= flags
79 79 self.revlogversion = v
80 80
81 81 self.tagscache = None
82 82 self.nodetagscache = None
83 83 self.encodepats = None
84 84 self.decodepats = None
85 85 self.transhandle = None
86 86
87 87 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
88 88
89 89 def url(self):
90 90 return 'file:' + self.root
91 91
92 92 def hook(self, name, throw=False, **args):
93 93 def callhook(hname, funcname):
94 94 '''call python hook. hook is callable object, looked up as
95 95 name in python module. if callable returns "true", hook
96 96 fails, else passes. if hook raises exception, treated as
97 97 hook failure. exception propagates if throw is "true".
98 98
99 99 reason for "true" meaning "hook failed" is so that
100 100 unmodified commands (e.g. mercurial.commands.update) can
101 101 be run as hooks without wrappers to convert return values.'''
102 102
103 103 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
104 104 d = funcname.rfind('.')
105 105 if d == -1:
106 106 raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
107 107 % (hname, funcname))
108 108 modname = funcname[:d]
109 109 try:
110 110 obj = __import__(modname)
111 111 except ImportError:
112 112 try:
113 113 # extensions are loaded with hgext_ prefix
114 114 obj = __import__("hgext_%s" % modname)
115 115 except ImportError:
116 116 raise util.Abort(_('%s hook is invalid '
117 117 '(import of "%s" failed)') %
118 118 (hname, modname))
119 119 try:
120 120 for p in funcname.split('.')[1:]:
121 121 obj = getattr(obj, p)
122 122 except AttributeError, err:
123 123 raise util.Abort(_('%s hook is invalid '
124 124 '("%s" is not defined)') %
125 125 (hname, funcname))
126 126 if not callable(obj):
127 127 raise util.Abort(_('%s hook is invalid '
128 128 '("%s" is not callable)') %
129 129 (hname, funcname))
130 130 try:
131 131 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
132 132 except (KeyboardInterrupt, util.SignalInterrupt):
133 133 raise
134 134 except Exception, exc:
135 135 if isinstance(exc, util.Abort):
136 136 self.ui.warn(_('error: %s hook failed: %s\n') %
137 137 (hname, exc.args[0]))
138 138 else:
139 139 self.ui.warn(_('error: %s hook raised an exception: '
140 140 '%s\n') % (hname, exc))
141 141 if throw:
142 142 raise
143 143 self.ui.print_exc()
144 144 return True
145 145 if r:
146 146 if throw:
147 147 raise util.Abort(_('%s hook failed') % hname)
148 148 self.ui.warn(_('warning: %s hook failed\n') % hname)
149 149 return r
150 150
151 151 def runhook(name, cmd):
152 152 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
153 153 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
154 154 r = util.system(cmd, environ=env, cwd=self.root)
155 155 if r:
156 156 desc, r = util.explain_exit(r)
157 157 if throw:
158 158 raise util.Abort(_('%s hook %s') % (name, desc))
159 159 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
160 160 return r
161 161
162 162 r = False
163 163 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
164 164 if hname.split(".", 1)[0] == name and cmd]
165 165 hooks.sort()
166 166 for hname, cmd in hooks:
167 167 if cmd.startswith('python:'):
168 168 r = callhook(hname, cmd[7:].strip()) or r
169 169 else:
170 170 r = runhook(hname, cmd) or r
171 171 return r
172 172
173 173 tag_disallowed = ':\r\n'
174 174
175 175 def tag(self, name, node, message, local, user, date):
176 176 '''tag a revision with a symbolic name.
177 177
178 178 if local is True, the tag is stored in a per-repository file.
179 179 otherwise, it is stored in the .hgtags file, and a new
180 180 changeset is committed with the change.
181 181
182 182 keyword arguments:
183 183
184 184 local: whether to store tag in non-version-controlled file
185 185 (default False)
186 186
187 187 message: commit message to use if committing
188 188
189 189 user: name of user to use if committing
190 190
191 191 date: date tuple to use if committing'''
192 192
193 193 for c in self.tag_disallowed:
194 194 if c in name:
195 195 raise util.Abort(_('%r cannot be used in a tag name') % c)
196 196
197 197 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
198 198
199 199 if local:
200 200 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
201 201 self.hook('tag', node=hex(node), tag=name, local=local)
202 202 return
203 203
204 204 for x in self.status()[:5]:
205 205 if '.hgtags' in x:
206 206 raise util.Abort(_('working copy of .hgtags is changed '
207 207 '(please commit .hgtags manually)'))
208 208
209 209 self.wfile('.hgtags', 'ab').write('%s %s\n' % (hex(node), name))
210 210 if self.dirstate.state('.hgtags') == '?':
211 211 self.add(['.hgtags'])
212 212
213 213 self.commit(['.hgtags'], message, user, date)
214 214 self.hook('tag', node=hex(node), tag=name, local=local)
215 215
216 216 def tags(self):
217 217 '''return a mapping of tag to node'''
218 218 if not self.tagscache:
219 219 self.tagscache = {}
220 220
221 221 def parsetag(line, context):
222 222 if not line:
223 223 return
224 224 s = l.split(" ", 1)
225 225 if len(s) != 2:
226 226 self.ui.warn(_("%s: cannot parse entry\n") % context)
227 227 return
228 228 node, key = s
229 229 key = key.strip()
230 230 try:
231 231 bin_n = bin(node)
232 232 except TypeError:
233 233 self.ui.warn(_("%s: node '%s' is not well formed\n") %
234 234 (context, node))
235 235 return
236 236 if bin_n not in self.changelog.nodemap:
237 237 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
238 238 (context, key))
239 239 return
240 240 self.tagscache[key] = bin_n
241 241
242 242 # read the tags file from each head, ending with the tip,
243 243 # and add each tag found to the map, with "newer" ones
244 244 # taking precedence
245 245 heads = self.heads()
246 246 heads.reverse()
247 247 fl = self.file(".hgtags")
248 248 for node in heads:
249 249 change = self.changelog.read(node)
250 250 rev = self.changelog.rev(node)
251 251 fn, ff = self.manifest.find(change[0], '.hgtags')
252 252 if fn is None: continue
253 253 count = 0
254 254 for l in fl.read(fn).splitlines():
255 255 count += 1
256 256 parsetag(l, _(".hgtags (rev %d:%s), line %d") %
257 257 (rev, short(node), count))
258 258 try:
259 259 f = self.opener("localtags")
260 260 count = 0
261 261 for l in f:
262 262 count += 1
263 263 parsetag(l, _("localtags, line %d") % count)
264 264 except IOError:
265 265 pass
266 266
267 267 self.tagscache['tip'] = self.changelog.tip()
268 268
269 269 return self.tagscache
270 270
271 271 def tagslist(self):
272 272 '''return a list of tags ordered by revision'''
273 273 l = []
274 274 for t, n in self.tags().items():
275 275 try:
276 276 r = self.changelog.rev(n)
277 277 except:
278 278 r = -2 # sort to the beginning of the list if unknown
279 279 l.append((r, t, n))
280 280 l.sort()
281 281 return [(t, n) for r, t, n in l]
282 282
283 283 def nodetags(self, node):
284 284 '''return the tags associated with a node'''
285 285 if not self.nodetagscache:
286 286 self.nodetagscache = {}
287 287 for t, n in self.tags().items():
288 288 self.nodetagscache.setdefault(n, []).append(t)
289 289 return self.nodetagscache.get(node, [])
290 290
291 291 def lookup(self, key):
292 292 try:
293 293 return self.tags()[key]
294 294 except KeyError:
295 295 if key == '.':
296 296 key = self.dirstate.parents()[0]
297 297 if key == nullid:
298 298 raise repo.RepoError(_("no revision checked out"))
299 299 try:
300 300 return self.changelog.lookup(key)
301 301 except:
302 302 raise repo.RepoError(_("unknown revision '%s'") % key)
303 303
304 304 def dev(self):
305 305 return os.lstat(self.path).st_dev
306 306
307 307 def local(self):
308 308 return True
309 309
310 310 def join(self, f):
311 311 return os.path.join(self.path, f)
312 312
313 313 def wjoin(self, f):
314 314 return os.path.join(self.root, f)
315 315
316 316 def file(self, f):
317 317 if f[0] == '/':
318 318 f = f[1:]
319 319 return filelog.filelog(self.opener, f, self.revlogversion)
320 320
321 321 def changectx(self, changeid=None):
322 322 return context.changectx(self, changeid)
323 323
324 def workingctx(self):
325 return context.workingctx(self)
326
324 327 def parents(self, changeid=None):
325 328 '''
326 329 get list of changectxs for parents of changeid or working directory
327 330 '''
328 331 if changeid is None:
329 332 pl = self.dirstate.parents()
330 333 else:
331 334 n = self.changelog.lookup(changeid)
332 335 pl = self.changelog.parents(n)
333 336 if pl[1] == nullid:
334 337 return [self.changectx(pl[0])]
335 338 return [self.changectx(pl[0]), self.changectx(pl[1])]
336 339
337 340 def filectx(self, path, changeid=None, fileid=None):
338 341 """changeid can be a changeset revision, node, or tag.
339 342 fileid can be a file revision or node."""
340 343 return context.filectx(self, path, changeid, fileid)
341 344
342 345 def getcwd(self):
343 346 return self.dirstate.getcwd()
344 347
345 348 def wfile(self, f, mode='r'):
346 349 return self.wopener(f, mode)
347 350
348 351 def wread(self, filename):
349 352 if self.encodepats == None:
350 353 l = []
351 354 for pat, cmd in self.ui.configitems("encode"):
352 355 mf = util.matcher(self.root, "", [pat], [], [])[1]
353 356 l.append((mf, cmd))
354 357 self.encodepats = l
355 358
356 359 data = self.wopener(filename, 'r').read()
357 360
358 361 for mf, cmd in self.encodepats:
359 362 if mf(filename):
360 363 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
361 364 data = util.filter(data, cmd)
362 365 break
363 366
364 367 return data
365 368
366 369 def wwrite(self, filename, data, fd=None):
367 370 if self.decodepats == None:
368 371 l = []
369 372 for pat, cmd in self.ui.configitems("decode"):
370 373 mf = util.matcher(self.root, "", [pat], [], [])[1]
371 374 l.append((mf, cmd))
372 375 self.decodepats = l
373 376
374 377 for mf, cmd in self.decodepats:
375 378 if mf(filename):
376 379 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
377 380 data = util.filter(data, cmd)
378 381 break
379 382
380 383 if fd:
381 384 return fd.write(data)
382 385 return self.wopener(filename, 'w').write(data)
383 386
384 387 def transaction(self):
385 388 tr = self.transhandle
386 389 if tr != None and tr.running():
387 390 return tr.nest()
388 391
389 392 # save dirstate for rollback
390 393 try:
391 394 ds = self.opener("dirstate").read()
392 395 except IOError:
393 396 ds = ""
394 397 self.opener("journal.dirstate", "w").write(ds)
395 398
396 399 tr = transaction.transaction(self.ui.warn, self.opener,
397 400 self.join("journal"),
398 401 aftertrans(self.path))
399 402 self.transhandle = tr
400 403 return tr
401 404
402 405 def recover(self):
403 406 l = self.lock()
404 407 if os.path.exists(self.join("journal")):
405 408 self.ui.status(_("rolling back interrupted transaction\n"))
406 409 transaction.rollback(self.opener, self.join("journal"))
407 410 self.reload()
408 411 return True
409 412 else:
410 413 self.ui.warn(_("no interrupted transaction available\n"))
411 414 return False
412 415
413 416 def rollback(self, wlock=None):
414 417 if not wlock:
415 418 wlock = self.wlock()
416 419 l = self.lock()
417 420 if os.path.exists(self.join("undo")):
418 421 self.ui.status(_("rolling back last transaction\n"))
419 422 transaction.rollback(self.opener, self.join("undo"))
420 423 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
421 424 self.reload()
422 425 self.wreload()
423 426 else:
424 427 self.ui.warn(_("no rollback information available\n"))
425 428
426 429 def wreload(self):
427 430 self.dirstate.read()
428 431
429 432 def reload(self):
430 433 self.changelog.load()
431 434 self.manifest.load()
432 435 self.tagscache = None
433 436 self.nodetagscache = None
434 437
435 438 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
436 439 desc=None):
437 440 try:
438 441 l = lock.lock(self.join(lockname), 0, releasefn, desc=desc)
439 442 except lock.LockHeld, inst:
440 443 if not wait:
441 444 raise
442 445 self.ui.warn(_("waiting for lock on %s held by %s\n") %
443 446 (desc, inst.args[0]))
444 447 # default to 600 seconds timeout
445 448 l = lock.lock(self.join(lockname),
446 449 int(self.ui.config("ui", "timeout") or 600),
447 450 releasefn, desc=desc)
448 451 if acquirefn:
449 452 acquirefn()
450 453 return l
451 454
452 455 def lock(self, wait=1):
453 456 return self.do_lock("lock", wait, acquirefn=self.reload,
454 457 desc=_('repository %s') % self.origroot)
455 458
456 459 def wlock(self, wait=1):
457 460 return self.do_lock("wlock", wait, self.dirstate.write,
458 461 self.wreload,
459 462 desc=_('working directory of %s') % self.origroot)
460 463
461 464 def checkfilemerge(self, filename, text, filelog, manifest1, manifest2):
462 465 "determine whether a new filenode is needed"
463 466 fp1 = manifest1.get(filename, nullid)
464 467 fp2 = manifest2.get(filename, nullid)
465 468
466 469 if fp2 != nullid:
467 470 # is one parent an ancestor of the other?
468 471 fpa = filelog.ancestor(fp1, fp2)
469 472 if fpa == fp1:
470 473 fp1, fp2 = fp2, nullid
471 474 elif fpa == fp2:
472 475 fp2 = nullid
473 476
474 477 # is the file unmodified from the parent? report existing entry
475 478 if fp2 == nullid and text == filelog.read(fp1):
476 479 return (fp1, None, None)
477 480
478 481 return (None, fp1, fp2)
479 482
    def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
        """Commit the given files with explicitly chosen parents,
        bypassing the usual working-directory status computation.

        p1/p2 default to the dirstate parents.  Files that cannot be
        read from the working directory are treated as removed.  The
        dirstate is only updated when p1 matches the current first
        dirstate parent.
        """
        orig_parent = self.dirstate.parents()[0] or nullid
        p1 = p1 or self.dirstate.parents()[0] or nullid
        p2 = p2 or self.dirstate.parents()[1] or nullid
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0]).copy()
        m2 = self.manifest.read(c2[0])
        changed = []

        # only track the new commit in the dirstate when it is a child
        # of the current working-directory parent
        if orig_parent == p1:
            update_dirstate = 1
        else:
            update_dirstate = 0

        if not wlock:
            wlock = self.wlock()
        l = self.lock()
        tr = self.transaction()
        linkrev = self.changelog.count()
        for f in files:
            try:
                t = self.wread(f)
                m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
                r = self.file(f)

                # reuse an existing filenode when the file is unchanged
                (entry, fp1, fp2) = self.checkfilemerge(f, t, r, m1, m2)
                if entry:
                    m1[f] = entry
                    continue

                m1[f] = r.add(t, {}, tr, linkrev, fp1, fp2)
                changed.append(f)
                if update_dirstate:
                    self.dirstate.update([f], "n")
            except IOError:
                # unreadable in the working dir: record as removed
                try:
                    del m1[f]
                    if update_dirstate:
                        self.dirstate.forget([f])
                except:
                    # deleted from p2?
                    pass

        mnode = self.manifest.add(m1, tr, linkrev, c1[0], c2[0])
        user = user or self.ui.username()
        n = self.changelog.add(mnode, changed, text, tr, p1, p2, user, date)
        tr.close()
        if update_dirstate:
            self.dirstate.setparents(n, nullid)
530 533
    def commit(self, files=None, text="", user=None, date=None,
               match=util.always, force=False, lock=None, wlock=None,
               force_editor=False):
        """Commit outstanding changes and return the new changeset node,
        or None when there is nothing to commit or the user supplied an
        empty commit message.

        files limits the commit to the named files; when None, all
        changes matched by match are committed.  text is the commit
        message (an editor is spawned when it is empty or force_editor
        is true).  user and date default to the configured username and
        the current time.  force commits even when nothing changed.
        """
        commit = []
        remove = []
        changed = []

        # split the requested files into commit vs. removal lists
        if files:
            for f in files:
                s = self.dirstate.state(f)
                if s in 'nmai':
                    commit.append(f)
                elif s == 'r':
                    remove.append(f)
                else:
                    self.ui.warn(_("%s not tracked!\n") % f)
        else:
            modified, added, removed, deleted, unknown = self.status(match=match)[:5]
            commit = modified + added
            remove = removed

        p1, p2 = self.dirstate.parents()
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0]).copy()
        m2 = self.manifest.read(c2[0])

        # a merge in progress (p2 set) is always committed, even when no
        # files changed
        if not commit and not remove and not force and p2 == nullid:
            self.ui.status(_("nothing changed\n"))
            return None

        xp1 = hex(p1)
        if p2 == nullid: xp2 = ''
        else: xp2 = hex(p2)

        self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

        if not wlock:
            wlock = self.wlock()
        if not lock:
            lock = self.lock()
        tr = self.transaction()

        # check in files
        new = {}
        linkrev = self.changelog.count()
        commit.sort()
        for f in commit:
            self.ui.note(f + "\n")
            try:
                m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
                t = self.wread(f)
            except IOError:
                self.ui.warn(_("trouble committing %s!\n") % f)
                raise

            r = self.file(f)

            meta = {}
            cp = self.dirstate.copied(f)
            if cp:
                # record the copy source and its revision in the filelog
                # metadata; copied files always get a new filenode
                meta["copy"] = cp
                meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
                self.ui.debug(_(" %s: copy %s:%s\n") % (f, cp, meta["copyrev"]))
                fp1, fp2 = nullid, nullid
            else:
                entry, fp1, fp2 = self.checkfilemerge(f, t, r, m1, m2)
                if entry:
                    # file unchanged: reuse the existing filenode
                    new[f] = entry
                    continue

            new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
            # remember what we've added so that we can later calculate
            # the files to pull from a set of changesets
            changed.append(f)

        # update manifest
        m1.update(new)
        for f in remove:
            if f in m1:
                del m1[f]
        mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0],
                               (new, remove))

        # add changeset
        new = new.keys()
        new.sort()

        user = user or self.ui.username()
        if not text or force_editor:
            # build the "HG:" template and run the user's editor
            edittext = []
            if text:
                edittext.append(text)
            edittext.append("")
            if p2 != nullid:
                edittext.append("HG: branch merge")
            edittext.extend(["HG: changed %s" % f for f in changed])
            edittext.extend(["HG: removed %s" % f for f in remove])
            if not changed and not remove:
                edittext.append("HG: no files changed")
            edittext.append("")
            # run editor in the repository root
            olddir = os.getcwd()
            os.chdir(self.root)
            text = self.ui.edit("\n".join(edittext), user)
            os.chdir(olddir)

        # strip trailing whitespace and leading blank lines; an empty
        # message aborts the commit
        lines = [line.rstrip() for line in text.rstrip().splitlines()]
        while lines and not lines[0]:
            del lines[0]
        if not lines:
            return None
        text = '\n'.join(lines)
        n = self.changelog.add(mn, changed + remove, text, tr, p1, p2, user, date)
        self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                  parent2=xp2)
        tr.close()

        self.dirstate.setparents(n)
        self.dirstate.update(new, "n")
        self.dirstate.forget(remove)

        self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
        return n
655 658
    # NOTE(review): files=[] is a mutable default; it is only read here,
    # never mutated, so sharing across calls is harmless.
    def walk(self, node=None, files=[], match=util.always, badmatch=None):
        """Yield (src, filename) pairs for matched files.

        With node, walk the manifest of that revision (src 'm'); names
        in files that are absent from the manifest yield src 'b' when
        badmatch accepts them, otherwise a warning is printed.  Without
        node, delegate to dirstate.walk over the working directory.
        """
        if node:
            fdict = dict.fromkeys(files)
            for fn in self.manifest.read(self.changelog.read(node)[0]):
                for ffn in fdict:
                    # match if the file is the exact name or a directory
                    if ffn == fn or fn.startswith("%s/" % ffn):
                        del fdict[ffn]
                        break
                if match(fn):
                    yield 'm', fn
            # whatever is left in fdict was requested but not found
            for fn in fdict:
                if badmatch and badmatch(fn):
                    if match(fn):
                        yield 'b', fn
                else:
                    self.ui.warn(_('%s: No such file in rev %s\n') % (
                        util.pathto(self.getcwd(), fn), short(node)))
        else:
            for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
                yield src, fn
677 680
    def status(self, node1=None, node2=None, files=[], match=util.always,
               wlock=None, list_ignored=False, list_clean=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns a tuple of sorted file lists:
        (modified, added, removed, deleted, unknown, ignored, clean).
        ignored and clean are only populated when list_ignored /
        list_clean are true.
        """

        def fcmp(fn, mf):
            # compare working-directory contents of fn against the
            # filenode recorded for it in manifest mf
            t1 = self.wread(fn)
            return self.file(fn).cmp(mf.get(fn, nullid), t1)

        def mfmatches(node):
            # manifest of node, restricted to files accepted by match
            change = self.changelog.read(node)
            mf = dict(self.manifest.read(change[0]))
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        modified, added, removed, deleted, unknown = [], [], [], [], []
        ignored, clean = [], []

        # comparing the working dir against its own parent?
        compareworking = False
        if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
            compareworking = True

        if not compareworking:
            # read the manifest from node1 before the manifest from node2,
            # so that we'll hit the manifest cache if we're going through
            # all the revisions in parent->child order.
            mf1 = mfmatches(node1)

        # are we comparing the working directory?
        if not node2:
            if not wlock:
                # non-blocking: a held lock just means we cannot write
                # dirstate updates for re-validated clean files below
                try:
                    wlock = self.wlock(wait=0)
                except lock.LockException:
                    wlock = None
            (lookup, modified, added, removed, deleted, unknown,
             ignored, clean) = self.dirstate.status(files, match,
                                                    list_ignored, list_clean)

            # are we comparing working dir against its parent?
            if compareworking:
                if lookup:
                    # do a full compare of any files that might have changed
                    mf2 = mfmatches(self.dirstate.parents()[0])
                    for f in lookup:
                        if fcmp(f, mf2):
                            modified.append(f)
                        else:
                            clean.append(f)
                            if wlock is not None:
                                # remember the file is clean so the next
                                # status call can skip the content compare
                                self.dirstate.update([f], "n")
            else:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self.dirstate.parents()[0])
                for f in lookup + modified + added:
                    mf2[f] = ""
                for f in removed:
                    if f in mf2:
                        del mf2[f]
        else:
            # we are comparing two revisions
            mf2 = mfmatches(node2)

        if not compareworking:
            # flush lists from dirstate before comparing manifests
            modified, added, clean = [], [], []

            # make sure to sort the files so we talk to the disk in a
            # reasonable order
            mf2keys = mf2.keys()
            mf2keys.sort()
            for fn in mf2keys:
                if mf1.has_key(fn):
                    if mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1)):
                        modified.append(fn)
                    elif list_clean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)

            # whatever is left in mf1 has no counterpart in mf2: removed
            removed = mf1.keys()

        # sort and return results:
        for l in modified, added, removed, deleted, unknown, ignored, clean:
            l.sort()
        return (modified, added, removed, deleted, unknown, ignored, clean)
771 774
772 775 def add(self, list, wlock=None):
773 776 if not wlock:
774 777 wlock = self.wlock()
775 778 for f in list:
776 779 p = self.wjoin(f)
777 780 if not os.path.exists(p):
778 781 self.ui.warn(_("%s does not exist!\n") % f)
779 782 elif not os.path.isfile(p):
780 783 self.ui.warn(_("%s not added: only files supported currently\n")
781 784 % f)
782 785 elif self.dirstate.state(f) in 'an':
783 786 self.ui.warn(_("%s already tracked!\n") % f)
784 787 else:
785 788 self.dirstate.update([f], "a")
786 789
787 790 def forget(self, list, wlock=None):
788 791 if not wlock:
789 792 wlock = self.wlock()
790 793 for f in list:
791 794 if self.dirstate.state(f) not in 'ai':
792 795 self.ui.warn(_("%s not added!\n") % f)
793 796 else:
794 797 self.dirstate.forget([f])
795 798
    def remove(self, list, unlink=False, wlock=None):
        """Schedule files for removal at the next commit.

        With unlink, the files are also deleted from the working
        directory first (an already-missing file is not an error).
        Files still present in the working directory, or never tracked,
        are skipped with a warning; a pending add is simply forgotten.
        """
        if unlink:
            for f in list:
                try:
                    util.unlink(self.wjoin(f))
                except OSError, inst:
                    # already gone is fine; anything else is fatal
                    if inst.errno != errno.ENOENT:
                        raise
        if not wlock:
            wlock = self.wlock()
        for f in list:
            p = self.wjoin(f)
            if os.path.exists(p):
                self.ui.warn(_("%s still exists!\n") % f)
            elif self.dirstate.state(f) == 'a':
                # never committed: just forget the pending add
                self.dirstate.forget([f])
            elif f not in self.dirstate:
                self.ui.warn(_("%s not tracked!\n") % f)
            else:
                self.dirstate.update([f], "r")
816 819
817 820 def undelete(self, list, wlock=None):
818 821 p = self.dirstate.parents()[0]
819 822 mn = self.changelog.read(p)[0]
820 823 m = self.manifest.read(mn)
821 824 if not wlock:
822 825 wlock = self.wlock()
823 826 for f in list:
824 827 if self.dirstate.state(f) not in "r":
825 828 self.ui.warn("%s not removed!\n" % f)
826 829 else:
827 830 t = self.file(f).read(m[f])
828 831 self.wwrite(f, t)
829 832 util.set_exec(self.wjoin(f), m.execf(f))
830 833 self.dirstate.update([f], "n")
831 834
832 835 def copy(self, source, dest, wlock=None):
833 836 p = self.wjoin(dest)
834 837 if not os.path.exists(p):
835 838 self.ui.warn(_("%s does not exist!\n") % dest)
836 839 elif not os.path.isfile(p):
837 840 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
838 841 else:
839 842 if not wlock:
840 843 wlock = self.wlock()
841 844 if self.dirstate.state(dest) == '?':
842 845 self.dirstate.update([dest], "a")
843 846 self.dirstate.copy(source, dest)
844 847
845 848 def heads(self, start=None):
846 849 heads = self.changelog.heads(start)
847 850 # sort the output in rev descending order
848 851 heads = [(-self.changelog.rev(h), h) for h in heads]
849 852 heads.sort()
850 853 return [n for (r, n) in heads]
851 854
852 855 # branchlookup returns a dict giving a list of branches for
853 856 # each head. A branch is defined as the tag of a node or
854 857 # the branch of the node's parents. If a node has multiple
855 858 # branch tags, tags are eliminated if they are visible from other
856 859 # branch tags.
857 860 #
858 861 # So, for this graph: a->b->c->d->e
859 862 # \ /
860 863 # aa -----/
861 864 # a has tag 2.6.12
862 865 # d has tag 2.6.13
863 866 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
864 867 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
865 868 # from the list.
866 869 #
867 870 # It is possible that more than one head will have the same branch tag.
868 871 # callers need to check the result for multiple heads under the same
    # branch tag if that is a problem for them (i.e. checkout of a specific
    # branch).
871 874 #
872 875 # passing in a specific branch will limit the depth of the search
873 876 # through the parents. It won't limit the branches returned in the
874 877 # result though.
    def branchlookup(self, heads=None, branch=None):
        """Map each head node to the list of branch tags visible from it.

        heads defaults to all repository heads.  branch, when given,
        stops the parent traversal below a node carrying that tag (it
        does not limit which tags appear in the result).  See the
        comment block above for the full semantics.
        """
        if not heads:
            heads = self.heads()
        headt = [ h for h in heads ]
        chlog = self.changelog
        branches = {}
        merges = []
        seenmerge = {}

        # traverse the tree once for each head, recording in the branches
        # dict which tags are visible from this head. The branches
        # dict also records which tags are visible from each tag
        # while we traverse.
        while headt or merges:
            if merges:
                # resume at a merge's second parent, keeping the tag set
                # ('found') collected on the way down to the merge
                n, found = merges.pop()
                visit = [n]
            else:
                h = headt.pop()
                visit = [h]
                found = [h]
                seen = {}
            while visit:
                n = visit.pop()
                if n in seen:
                    continue
                pp = chlog.parents(n)
                tags = self.nodetags(n)
                if tags:
                    for x in tags:
                        if x == 'tip':
                            continue
                        # record this tagged node as visible from every
                        # node collected so far, and from itself
                        for f in found:
                            branches.setdefault(f, {})[n] = 1
                        branches.setdefault(n, {})[n] = 1
                        break
                    if n not in found:
                        found.append(n)
                    if branch in tags:
                        # limit the depth of the search
                        continue
                seen[n] = 1
                if pp[1] != nullid and n not in seenmerge:
                    merges.append((pp[1], [x for x in found]))
                    seenmerge[n] = 1
                if pp[0] != nullid:
                    visit.append(pp[0])
        # traverse the branches dict, eliminating branch tags from each
        # head that are visible from another branch tag for that head.
        out = {}
        viscache = {}
        for h in heads:
            def visible(node):
                # memoized set (as dict) of nodes reachable from node
                # through the branches visibility dict
                if node in viscache:
                    return viscache[node]
                ret = {}
                visit = [node]
                while visit:
                    x = visit.pop()
                    if x in viscache:
                        ret.update(viscache[x])
                    elif x not in ret:
                        ret[x] = 1
                        if x in branches:
                            visit[len(visit):] = branches[x].keys()
                viscache[node] = ret
                return ret
            if h not in branches:
                continue
            # O(n^2), but somewhat limited. This only searches the
            # tags visible from a specific head, not all the tags in the
            # whole repo.
            for b in branches[h]:
                vis = False
                for bb in branches[h].keys():
                    if b != bb:
                        if b in visible(bb):
                            vis = True
                            break
                if not vis:
                    l = out.setdefault(h, [])
                    l[len(l):] = self.nodetags(b)
        return out
957 960
958 961 def branches(self, nodes):
959 962 if not nodes:
960 963 nodes = [self.changelog.tip()]
961 964 b = []
962 965 for n in nodes:
963 966 t = n
964 967 while 1:
965 968 p = self.changelog.parents(n)
966 969 if p[1] != nullid or p[0] == nullid:
967 970 b.append((t, n, p[0], p[1]))
968 971 break
969 972 n = p[0]
970 973 return b
971 974
972 975 def between(self, pairs):
973 976 r = []
974 977
975 978 for top, bottom in pairs:
976 979 n, l, i = top, [], 0
977 980 f = 1
978 981
979 982 while n != bottom:
980 983 p = self.changelog.parents(n)[0]
981 984 if i == f:
982 985 l.append(n)
983 986 f = f * 2
984 987 n = p
985 988 i += 1
986 989
987 990 r.append(l)
988 991
989 992 return r
990 993
991 994 def findincoming(self, remote, base=None, heads=None, force=False):
992 995 """Return list of roots of the subsets of missing nodes from remote
993 996
994 997 If base dict is specified, assume that these nodes and their parents
995 998 exist on the remote side and that no child of a node of base exists
996 999 in both remote and self.
997 1000 Furthermore base will be updated to include the nodes that exists
998 1001 in self and remote but no children exists in self and remote.
999 1002 If a list of heads is specified, return only nodes which are heads
1000 1003 or ancestors of these heads.
1001 1004
1002 1005 All the ancestors of base are in self and in remote.
1003 1006 All the descendants of the list returned are missing in self.
1004 1007 (and so we know that the rest of the nodes are missing in remote, see
1005 1008 outgoing)
1006 1009 """
1007 1010 m = self.changelog.nodemap
1008 1011 search = []
1009 1012 fetch = {}
1010 1013 seen = {}
1011 1014 seenbranch = {}
1012 1015 if base == None:
1013 1016 base = {}
1014 1017
1015 1018 if not heads:
1016 1019 heads = remote.heads()
1017 1020
1018 1021 if self.changelog.tip() == nullid:
1019 1022 base[nullid] = 1
1020 1023 if heads != [nullid]:
1021 1024 return [nullid]
1022 1025 return []
1023 1026
1024 1027 # assume we're closer to the tip than the root
1025 1028 # and start by examining the heads
1026 1029 self.ui.status(_("searching for changes\n"))
1027 1030
1028 1031 unknown = []
1029 1032 for h in heads:
1030 1033 if h not in m:
1031 1034 unknown.append(h)
1032 1035 else:
1033 1036 base[h] = 1
1034 1037
1035 1038 if not unknown:
1036 1039 return []
1037 1040
1038 1041 req = dict.fromkeys(unknown)
1039 1042 reqcnt = 0
1040 1043
1041 1044 # search through remote branches
1042 1045 # a 'branch' here is a linear segment of history, with four parts:
1043 1046 # head, root, first parent, second parent
1044 1047 # (a branch always has two parents (or none) by definition)
1045 1048 unknown = remote.branches(unknown)
1046 1049 while unknown:
1047 1050 r = []
1048 1051 while unknown:
1049 1052 n = unknown.pop(0)
1050 1053 if n[0] in seen:
1051 1054 continue
1052 1055
1053 1056 self.ui.debug(_("examining %s:%s\n")
1054 1057 % (short(n[0]), short(n[1])))
1055 1058 if n[0] == nullid: # found the end of the branch
1056 1059 pass
1057 1060 elif n in seenbranch:
1058 1061 self.ui.debug(_("branch already found\n"))
1059 1062 continue
1060 1063 elif n[1] and n[1] in m: # do we know the base?
1061 1064 self.ui.debug(_("found incomplete branch %s:%s\n")
1062 1065 % (short(n[0]), short(n[1])))
1063 1066 search.append(n) # schedule branch range for scanning
1064 1067 seenbranch[n] = 1
1065 1068 else:
1066 1069 if n[1] not in seen and n[1] not in fetch:
1067 1070 if n[2] in m and n[3] in m:
1068 1071 self.ui.debug(_("found new changeset %s\n") %
1069 1072 short(n[1]))
1070 1073 fetch[n[1]] = 1 # earliest unknown
1071 1074 for p in n[2:4]:
1072 1075 if p in m:
1073 1076 base[p] = 1 # latest known
1074 1077
1075 1078 for p in n[2:4]:
1076 1079 if p not in req and p not in m:
1077 1080 r.append(p)
1078 1081 req[p] = 1
1079 1082 seen[n[0]] = 1
1080 1083
1081 1084 if r:
1082 1085 reqcnt += 1
1083 1086 self.ui.debug(_("request %d: %s\n") %
1084 1087 (reqcnt, " ".join(map(short, r))))
1085 1088 for p in range(0, len(r), 10):
1086 1089 for b in remote.branches(r[p:p+10]):
1087 1090 self.ui.debug(_("received %s:%s\n") %
1088 1091 (short(b[0]), short(b[1])))
1089 1092 unknown.append(b)
1090 1093
1091 1094 # do binary search on the branches we found
1092 1095 while search:
1093 1096 n = search.pop(0)
1094 1097 reqcnt += 1
1095 1098 l = remote.between([(n[0], n[1])])[0]
1096 1099 l.append(n[1])
1097 1100 p = n[0]
1098 1101 f = 1
1099 1102 for i in l:
1100 1103 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1101 1104 if i in m:
1102 1105 if f <= 2:
1103 1106 self.ui.debug(_("found new branch changeset %s\n") %
1104 1107 short(p))
1105 1108 fetch[p] = 1
1106 1109 base[i] = 1
1107 1110 else:
1108 1111 self.ui.debug(_("narrowed branch search to %s:%s\n")
1109 1112 % (short(p), short(i)))
1110 1113 search.append((p, i))
1111 1114 break
1112 1115 p, f = i, f * 2
1113 1116
1114 1117 # sanity check our fetch list
1115 1118 for f in fetch.keys():
1116 1119 if f in m:
1117 1120 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1118 1121
1119 1122 if base.keys() == [nullid]:
1120 1123 if force:
1121 1124 self.ui.warn(_("warning: repository is unrelated\n"))
1122 1125 else:
1123 1126 raise util.Abort(_("repository is unrelated"))
1124 1127
1125 1128 self.ui.debug(_("found new changesets starting at ") +
1126 1129 " ".join([short(f) for f in fetch]) + "\n")
1127 1130
1128 1131 self.ui.debug(_("%d total queries\n") % reqcnt)
1129 1132
1130 1133 return fetch.keys()
1131 1134
    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.

        Returns either subset (the list of roots of changesets missing
        remotely) or, when heads is given, (subset, updated_heads).
        """
        if base == None:
            # no common-base information supplied: compute it
            base = {}
            self.findincoming(remote, base, heads, force=force)

        self.ui.debug(_("common changesets up to ")
                      + " ".join(map(short, base.keys())) + "\n")

        remain = dict.fromkeys(self.changelog.nodemap)

        # prune everything remote has from the tree
        del remain[nullid]
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                del remain[n]
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = {}
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
            if heads:
                if p1 in heads:
                    updated_heads[p1] = True
                if p2 in heads:
                    updated_heads[p2] = True

        # this is the set of all roots we have to push
        if heads:
            return subset, updated_heads.keys()
        else:
            return subset
1179 1182
1180 1183 def pull(self, remote, heads=None, force=False, lock=None):
1181 1184 mylock = False
1182 1185 if not lock:
1183 1186 lock = self.lock()
1184 1187 mylock = True
1185 1188
1186 1189 try:
1187 1190 fetch = self.findincoming(remote, force=force)
1188 1191 if fetch == [nullid]:
1189 1192 self.ui.status(_("requesting all changes\n"))
1190 1193
1191 1194 if not fetch:
1192 1195 self.ui.status(_("no changes found\n"))
1193 1196 return 0
1194 1197
1195 1198 if heads is None:
1196 1199 cg = remote.changegroup(fetch, 'pull')
1197 1200 else:
1198 1201 cg = remote.changegroupsubset(fetch, heads, 'pull')
1199 1202 return self.addchangegroup(cg, 'pull', remote.url())
1200 1203 finally:
1201 1204 if mylock:
1202 1205 lock.release()
1203 1206
1204 1207 def push(self, remote, force=False, revs=None):
1205 1208 # there are two ways to push to remote repo:
1206 1209 #
1207 1210 # addchangegroup assumes local user can lock remote
1208 1211 # repo (local filesystem, old ssh servers).
1209 1212 #
1210 1213 # unbundle assumes local user cannot lock remote repo (new ssh
1211 1214 # servers, http servers).
1212 1215
1213 1216 if remote.capable('unbundle'):
1214 1217 return self.push_unbundle(remote, force, revs)
1215 1218 return self.push_addchangegroup(remote, force, revs)
1216 1219
    def prepush(self, remote, force, revs):
        """Compute the changegroup to push to remote.

        Returns (changegroup, remote_heads) on success, or (None, 1)
        when there is nothing to push or the push must be refused
        (unsynced remote changes, or the push would create new remote
        heads without force).
        """
        base = {}
        remote_heads = remote.heads()
        inc = self.findincoming(remote, base, remote_heads, force=force)
        # refuse to push over unseen remote changes unless forced
        if not force and inc:
            self.ui.warn(_("abort: unsynced remote changes!\n"))
            self.ui.status(_("(did you forget to sync?"
                             " use push -f to force)\n"))
            return None, 1

        update, updated_heads = self.findoutgoing(remote, base, remote_heads)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # FIXME we don't properly detect creation of new heads
            # in the push -r case, assume the user knows what he's doing
            if not revs and len(remote_heads) < len(heads) \
               and remote_heads != [nullid]:
                self.ui.warn(_("abort: push creates new remote branches!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 1

        if revs is None:
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads
1251 1254
1252 1255 def push_addchangegroup(self, remote, force, revs):
1253 1256 lock = remote.lock()
1254 1257
1255 1258 ret = self.prepush(remote, force, revs)
1256 1259 if ret[0] is not None:
1257 1260 cg, remote_heads = ret
1258 1261 return remote.addchangegroup(cg, 'push', self.url())
1259 1262 return ret[1]
1260 1263
1261 1264 def push_unbundle(self, remote, force, revs):
1262 1265 # local repo finds heads on server, finds out what revs it
1263 1266 # must push. once revs transferred, if server finds it has
1264 1267 # different heads (someone else won commit/push race), server
1265 1268 # aborts.
1266 1269
1267 1270 ret = self.prepush(remote, force, revs)
1268 1271 if ret[0] is not None:
1269 1272 cg, remote_heads = ret
1270 1273 if force: remote_heads = ['force']
1271 1274 return remote.unbundle(cg, remote_heads, 'push')
1272 1275 return ret[1]
1273 1276
1274 1277 def changegroupsubset(self, bases, heads, source):
1275 1278 """This function generates a changegroup consisting of all the nodes
1276 1279 that are descendents of any of the bases, and ancestors of any of
1277 1280 the heads.
1278 1281
1279 1282 It is fairly complex as determining which filenodes and which
1280 1283 manifest nodes need to be included for the changeset to be complete
1281 1284 is non-trivial.
1282 1285
1283 1286 Another wrinkle is doing the reverse, figuring out which changeset in
1284 1287 the changegroup a particular filenode or manifestnode belongs to."""
1285 1288
1286 1289 self.hook('preoutgoing', throw=True, source=source)
1287 1290
1288 1291 # Set up some initial variables
1289 1292 # Make it easy to refer to self.changelog
1290 1293 cl = self.changelog
1291 1294 # msng is short for missing - compute the list of changesets in this
1292 1295 # changegroup.
1293 1296 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1294 1297 # Some bases may turn out to be superfluous, and some heads may be
1295 1298 # too. nodesbetween will return the minimal set of bases and heads
1296 1299 # necessary to re-create the changegroup.
1297 1300
1298 1301 # Known heads are the list of heads that it is assumed the recipient
1299 1302 # of this changegroup will know about.
1300 1303 knownheads = {}
1301 1304 # We assume that all parents of bases are known heads.
1302 1305 for n in bases:
1303 1306 for p in cl.parents(n):
1304 1307 if p != nullid:
1305 1308 knownheads[p] = 1
1306 1309 knownheads = knownheads.keys()
1307 1310 if knownheads:
1308 1311 # Now that we know what heads are known, we can compute which
1309 1312 # changesets are known. The recipient must know about all
1310 1313 # changesets required to reach the known heads from the null
1311 1314 # changeset.
1312 1315 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1313 1316 junk = None
1314 1317 # Transform the list into an ersatz set.
1315 1318 has_cl_set = dict.fromkeys(has_cl_set)
1316 1319 else:
1317 1320 # If there were no known heads, the recipient cannot be assumed to
1318 1321 # know about any changesets.
1319 1322 has_cl_set = {}
1320 1323
1321 1324 # Make it easy to refer to self.manifest
1322 1325 mnfst = self.manifest
1323 1326 # We don't know which manifests are missing yet
1324 1327 msng_mnfst_set = {}
1325 1328 # Nor do we know which filenodes are missing.
1326 1329 msng_filenode_set = {}
1327 1330
1328 1331 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1329 1332 junk = None
1330 1333
1331 1334 # A changeset always belongs to itself, so the changenode lookup
1332 1335 # function for a changenode is identity.
1333 1336 def identity(x):
1334 1337 return x
1335 1338
1336 1339 # A function generating function. Sets up an environment for the
1337 1340 # inner function.
1338 1341 def cmp_by_rev_func(revlog):
1339 1342 # Compare two nodes by their revision number in the environment's
1340 1343 # revision history. Since the revision number both represents the
1341 1344 # most efficient order to read the nodes in, and represents a
1342 1345 # topological sorting of the nodes, this function is often useful.
1343 1346 def cmp_by_rev(a, b):
1344 1347 return cmp(revlog.rev(a), revlog.rev(b))
1345 1348 return cmp_by_rev
1346 1349
1347 1350 # If we determine that a particular file or manifest node must be a
1348 1351 # node that the recipient of the changegroup will already have, we can
1349 1352 # also assume the recipient will have all the parents. This function
1350 1353 # prunes them from the set of missing nodes.
1351 1354 def prune_parents(revlog, hasset, msngset):
1352 1355 haslst = hasset.keys()
1353 1356 haslst.sort(cmp_by_rev_func(revlog))
1354 1357 for node in haslst:
1355 1358 parentlst = [p for p in revlog.parents(node) if p != nullid]
1356 1359 while parentlst:
1357 1360 n = parentlst.pop()
1358 1361 if n not in hasset:
1359 1362 hasset[n] = 1
1360 1363 p = [p for p in revlog.parents(n) if p != nullid]
1361 1364 parentlst.extend(p)
1362 1365 for n in hasset:
1363 1366 msngset.pop(n, None)
1364 1367
1365 1368 # This is a function generating function used to set up an environment
1366 1369 # for the inner function to execute in.
1367 1370 def manifest_and_file_collector(changedfileset):
1368 1371 # This is an information gathering function that gathers
1369 1372 # information from each changeset node that goes out as part of
1370 1373 # the changegroup. The information gathered is a list of which
1371 1374 # manifest nodes are potentially required (the recipient may
1372 1375 # already have them) and total list of all files which were
1373 1376 # changed in any changeset in the changegroup.
1374 1377 #
1375 1378 # We also remember the first changenode we saw any manifest
1376 1379 # referenced by so we can later determine which changenode 'owns'
1377 1380 # the manifest.
1378 1381 def collect_manifests_and_files(clnode):
1379 1382 c = cl.read(clnode)
1380 1383 for f in c[3]:
1381 1384 # This is to make sure we only have one instance of each
1382 1385 # filename string for each filename.
1383 1386 changedfileset.setdefault(f, f)
1384 1387 msng_mnfst_set.setdefault(c[0], clnode)
1385 1388 return collect_manifests_and_files
1386 1389
1387 1390 # Figure out which manifest nodes (of the ones we think might be part
1388 1391 # of the changegroup) the recipient must know about and remove them
1389 1392 # from the changegroup.
1390 1393 def prune_manifests():
1391 1394 has_mnfst_set = {}
1392 1395 for n in msng_mnfst_set:
1393 1396 # If a 'missing' manifest thinks it belongs to a changenode
1394 1397 # the recipient is assumed to have, obviously the recipient
1395 1398 # must have that manifest.
1396 1399 linknode = cl.node(mnfst.linkrev(n))
1397 1400 if linknode in has_cl_set:
1398 1401 has_mnfst_set[n] = 1
1399 1402 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1400 1403
1401 1404 # Use the information collected in collect_manifests_and_files to say
1402 1405 # which changenode any manifestnode belongs to.
1403 1406 def lookup_manifest_link(mnfstnode):
1404 1407 return msng_mnfst_set[mnfstnode]
1405 1408
1406 1409 # A function generating function that sets up the initial environment
1407 1410 # the inner function.
1408 1411 def filenode_collector(changedfiles):
1409 1412 next_rev = [0]
1410 1413 # This gathers information from each manifestnode included in the
1411 1414 # changegroup about which filenodes the manifest node references
1412 1415 # so we can include those in the changegroup too.
1413 1416 #
1414 1417 # It also remembers which changenode each filenode belongs to. It
1415 1418 # does this by assuming the a filenode belongs to the changenode
1416 1419 # the first manifest that references it belongs to.
1417 1420 def collect_msng_filenodes(mnfstnode):
1418 1421 r = mnfst.rev(mnfstnode)
1419 1422 if r == next_rev[0]:
1420 1423 # If the last rev we looked at was the one just previous,
1421 1424 # we only need to see a diff.
1422 1425 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1423 1426 # For each line in the delta
1424 1427 for dline in delta.splitlines():
1425 1428 # get the filename and filenode for that line
1426 1429 f, fnode = dline.split('\0')
1427 1430 fnode = bin(fnode[:40])
1428 1431 f = changedfiles.get(f, None)
1429 1432 # And if the file is in the list of files we care
1430 1433 # about.
1431 1434 if f is not None:
1432 1435 # Get the changenode this manifest belongs to
1433 1436 clnode = msng_mnfst_set[mnfstnode]
1434 1437 # Create the set of filenodes for the file if
1435 1438 # there isn't one already.
1436 1439 ndset = msng_filenode_set.setdefault(f, {})
1437 1440 # And set the filenode's changelog node to the
1438 1441 # manifest's if it hasn't been set already.
1439 1442 ndset.setdefault(fnode, clnode)
1440 1443 else:
1441 1444 # Otherwise we need a full manifest.
1442 1445 m = mnfst.read(mnfstnode)
1443 1446 # For every file in we care about.
1444 1447 for f in changedfiles:
1445 1448 fnode = m.get(f, None)
1446 1449 # If it's in the manifest
1447 1450 if fnode is not None:
1448 1451 # See comments above.
1449 1452 clnode = msng_mnfst_set[mnfstnode]
1450 1453 ndset = msng_filenode_set.setdefault(f, {})
1451 1454 ndset.setdefault(fnode, clnode)
1452 1455 # Remember the revision we hope to see next.
1453 1456 next_rev[0] = r + 1
1454 1457 return collect_msng_filenodes
1455 1458
1456 1459 # We have a list of filenodes we think we need for a file, lets remove
1457 1460 # all those we now the recipient must have.
1458 1461 def prune_filenodes(f, filerevlog):
1459 1462 msngset = msng_filenode_set[f]
1460 1463 hasset = {}
1461 1464 # If a 'missing' filenode thinks it belongs to a changenode we
1462 1465 # assume the recipient must have, then the recipient must have
1463 1466 # that filenode.
1464 1467 for n in msngset:
1465 1468 clnode = cl.node(filerevlog.linkrev(n))
1466 1469 if clnode in has_cl_set:
1467 1470 hasset[n] = 1
1468 1471 prune_parents(filerevlog, hasset, msngset)
1469 1472
1470 1473 # A function generator function that sets up the a context for the
1471 1474 # inner function.
1472 1475 def lookup_filenode_link_func(fname):
1473 1476 msngset = msng_filenode_set[fname]
1474 1477 # Lookup the changenode the filenode belongs to.
1475 1478 def lookup_filenode_link(fnode):
1476 1479 return msngset[fnode]
1477 1480 return lookup_filenode_link
1478 1481
1479 1482 # Now that we have all theses utility functions to help out and
1480 1483 # logically divide up the task, generate the group.
1481 1484 def gengroup():
1482 1485 # The set of changed files starts empty.
1483 1486 changedfiles = {}
1484 1487 # Create a changenode group generator that will call our functions
1485 1488 # back to lookup the owning changenode and collect information.
1486 1489 group = cl.group(msng_cl_lst, identity,
1487 1490 manifest_and_file_collector(changedfiles))
1488 1491 for chnk in group:
1489 1492 yield chnk
1490 1493
1491 1494 # The list of manifests has been collected by the generator
1492 1495 # calling our functions back.
1493 1496 prune_manifests()
1494 1497 msng_mnfst_lst = msng_mnfst_set.keys()
1495 1498 # Sort the manifestnodes by revision number.
1496 1499 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1497 1500 # Create a generator for the manifestnodes that calls our lookup
1498 1501 # and data collection functions back.
1499 1502 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1500 1503 filenode_collector(changedfiles))
1501 1504 for chnk in group:
1502 1505 yield chnk
1503 1506
1504 1507 # These are no longer needed, dereference and toss the memory for
1505 1508 # them.
1506 1509 msng_mnfst_lst = None
1507 1510 msng_mnfst_set.clear()
1508 1511
1509 1512 changedfiles = changedfiles.keys()
1510 1513 changedfiles.sort()
1511 1514 # Go through all our files in order sorted by name.
1512 1515 for fname in changedfiles:
1513 1516 filerevlog = self.file(fname)
1514 1517 # Toss out the filenodes that the recipient isn't really
1515 1518 # missing.
1516 1519 if msng_filenode_set.has_key(fname):
1517 1520 prune_filenodes(fname, filerevlog)
1518 1521 msng_filenode_lst = msng_filenode_set[fname].keys()
1519 1522 else:
1520 1523 msng_filenode_lst = []
1521 1524 # If any filenodes are left, generate the group for them,
1522 1525 # otherwise don't bother.
1523 1526 if len(msng_filenode_lst) > 0:
1524 1527 yield changegroup.genchunk(fname)
1525 1528 # Sort the filenodes by their revision #
1526 1529 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1527 1530 # Create a group generator and only pass in a changenode
1528 1531 # lookup function as we need to collect no information
1529 1532 # from filenodes.
1530 1533 group = filerevlog.group(msng_filenode_lst,
1531 1534 lookup_filenode_link_func(fname))
1532 1535 for chnk in group:
1533 1536 yield chnk
1534 1537 if msng_filenode_set.has_key(fname):
1535 1538 # Don't need this anymore, toss it to free memory.
1536 1539 del msng_filenode_set[fname]
1537 1540 # Signal that no more groups are left.
1538 1541 yield changegroup.closechunk()
1539 1542
1540 1543 if msng_cl_lst:
1541 1544 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1542 1545
1543 1546 return util.chunkbuffer(gengroup())
1544 1547
1545 1548 def changegroup(self, basenodes, source):
1546 1549 """Generate a changegroup of all nodes that we have that a recipient
1547 1550 doesn't.
1548 1551
1549 1552 This is much easier than the previous function as we can assume that
1550 1553 the recipient has any changenode we aren't sending them."""
1551 1554
1552 1555 self.hook('preoutgoing', throw=True, source=source)
1553 1556
1554 1557 cl = self.changelog
1555 1558 nodes = cl.nodesbetween(basenodes, None)[0]
1556 1559 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1557 1560
1558 1561 def identity(x):
1559 1562 return x
1560 1563
1561 1564 def gennodelst(revlog):
1562 1565 for r in xrange(0, revlog.count()):
1563 1566 n = revlog.node(r)
1564 1567 if revlog.linkrev(n) in revset:
1565 1568 yield n
1566 1569
1567 1570 def changed_file_collector(changedfileset):
1568 1571 def collect_changed_files(clnode):
1569 1572 c = cl.read(clnode)
1570 1573 for fname in c[3]:
1571 1574 changedfileset[fname] = 1
1572 1575 return collect_changed_files
1573 1576
1574 1577 def lookuprevlink_func(revlog):
1575 1578 def lookuprevlink(n):
1576 1579 return cl.node(revlog.linkrev(n))
1577 1580 return lookuprevlink
1578 1581
1579 1582 def gengroup():
1580 1583 # construct a list of all changed files
1581 1584 changedfiles = {}
1582 1585
1583 1586 for chnk in cl.group(nodes, identity,
1584 1587 changed_file_collector(changedfiles)):
1585 1588 yield chnk
1586 1589 changedfiles = changedfiles.keys()
1587 1590 changedfiles.sort()
1588 1591
1589 1592 mnfst = self.manifest
1590 1593 nodeiter = gennodelst(mnfst)
1591 1594 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1592 1595 yield chnk
1593 1596
1594 1597 for fname in changedfiles:
1595 1598 filerevlog = self.file(fname)
1596 1599 nodeiter = gennodelst(filerevlog)
1597 1600 nodeiter = list(nodeiter)
1598 1601 if nodeiter:
1599 1602 yield changegroup.genchunk(fname)
1600 1603 lookup = lookuprevlink_func(filerevlog)
1601 1604 for chnk in filerevlog.group(nodeiter, lookup):
1602 1605 yield chnk
1603 1606
1604 1607 yield changegroup.closechunk()
1605 1608
1606 1609 if nodes:
1607 1610 self.hook('outgoing', node=hex(nodes[0]), source=source)
1608 1611
1609 1612 return util.chunkbuffer(gengroup())
1610 1613
1611 1614 def addchangegroup(self, source, srctype, url):
1612 1615 """add changegroup to repo.
1613 1616 returns number of heads modified or added + 1."""
1614 1617
1615 1618 def csmap(x):
1616 1619 self.ui.debug(_("add changeset %s\n") % short(x))
1617 1620 return cl.count()
1618 1621
1619 1622 def revmap(x):
1620 1623 return cl.rev(x)
1621 1624
1622 1625 if not source:
1623 1626 return 0
1624 1627
1625 1628 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1626 1629
1627 1630 changesets = files = revisions = 0
1628 1631
1629 1632 tr = self.transaction()
1630 1633
1631 1634 # write changelog data to temp files so concurrent readers will not see
1632 1635 # inconsistent view
1633 1636 cl = None
1634 1637 try:
1635 1638 cl = appendfile.appendchangelog(self.opener, self.changelog.version)
1636 1639
1637 1640 oldheads = len(cl.heads())
1638 1641
1639 1642 # pull off the changeset group
1640 1643 self.ui.status(_("adding changesets\n"))
1641 1644 cor = cl.count() - 1
1642 1645 chunkiter = changegroup.chunkiter(source)
1643 1646 if cl.addgroup(chunkiter, csmap, tr, 1) is None:
1644 1647 raise util.Abort(_("received changelog group is empty"))
1645 1648 cnr = cl.count() - 1
1646 1649 changesets = cnr - cor
1647 1650
1648 1651 # pull off the manifest group
1649 1652 self.ui.status(_("adding manifests\n"))
1650 1653 chunkiter = changegroup.chunkiter(source)
1651 1654 # no need to check for empty manifest group here:
1652 1655 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1653 1656 # no new manifest will be created and the manifest group will
1654 1657 # be empty during the pull
1655 1658 self.manifest.addgroup(chunkiter, revmap, tr)
1656 1659
1657 1660 # process the files
1658 1661 self.ui.status(_("adding file changes\n"))
1659 1662 while 1:
1660 1663 f = changegroup.getchunk(source)
1661 1664 if not f:
1662 1665 break
1663 1666 self.ui.debug(_("adding %s revisions\n") % f)
1664 1667 fl = self.file(f)
1665 1668 o = fl.count()
1666 1669 chunkiter = changegroup.chunkiter(source)
1667 1670 if fl.addgroup(chunkiter, revmap, tr) is None:
1668 1671 raise util.Abort(_("received file revlog group is empty"))
1669 1672 revisions += fl.count() - o
1670 1673 files += 1
1671 1674
1672 1675 cl.writedata()
1673 1676 finally:
1674 1677 if cl:
1675 1678 cl.cleanup()
1676 1679
1677 1680 # make changelog see real files again
1678 1681 self.changelog = changelog.changelog(self.opener, self.changelog.version)
1679 1682 self.changelog.checkinlinesize(tr)
1680 1683
1681 1684 newheads = len(self.changelog.heads())
1682 1685 heads = ""
1683 1686 if oldheads and newheads != oldheads:
1684 1687 heads = _(" (%+d heads)") % (newheads - oldheads)
1685 1688
1686 1689 self.ui.status(_("added %d changesets"
1687 1690 " with %d changes to %d files%s\n")
1688 1691 % (changesets, revisions, files, heads))
1689 1692
1690 1693 if changesets > 0:
1691 1694 self.hook('pretxnchangegroup', throw=True,
1692 1695 node=hex(self.changelog.node(cor+1)), source=srctype,
1693 1696 url=url)
1694 1697
1695 1698 tr.close()
1696 1699
1697 1700 if changesets > 0:
1698 1701 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1699 1702 source=srctype, url=url)
1700 1703
1701 1704 for i in range(cor + 1, cnr + 1):
1702 1705 self.hook("incoming", node=hex(self.changelog.node(i)),
1703 1706 source=srctype, url=url)
1704 1707
1705 1708 return newheads - oldheads + 1
1706 1709
1707 1710
1708 1711 def stream_in(self, remote):
1709 1712 fp = remote.stream_out()
1710 1713 resp = int(fp.readline())
1711 1714 if resp != 0:
1712 1715 raise util.Abort(_('operation forbidden by server'))
1713 1716 self.ui.status(_('streaming all changes\n'))
1714 1717 total_files, total_bytes = map(int, fp.readline().split(' ', 1))
1715 1718 self.ui.status(_('%d files to transfer, %s of data\n') %
1716 1719 (total_files, util.bytecount(total_bytes)))
1717 1720 start = time.time()
1718 1721 for i in xrange(total_files):
1719 1722 name, size = fp.readline().split('\0', 1)
1720 1723 size = int(size)
1721 1724 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1722 1725 ofp = self.opener(name, 'w')
1723 1726 for chunk in util.filechunkiter(fp, limit=size):
1724 1727 ofp.write(chunk)
1725 1728 ofp.close()
1726 1729 elapsed = time.time() - start
1727 1730 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1728 1731 (util.bytecount(total_bytes), elapsed,
1729 1732 util.bytecount(total_bytes / elapsed)))
1730 1733 self.reload()
1731 1734 return len(self.heads()) + 1
1732 1735
1733 1736 def clone(self, remote, heads=[], stream=False):
1734 1737 '''clone remote repository.
1735 1738
1736 1739 keyword arguments:
1737 1740 heads: list of revs to clone (forces use of pull)
1738 1741 stream: use streaming clone if possible'''
1739 1742
1740 1743 # now, all clients that can request uncompressed clones can
1741 1744 # read repo formats supported by all servers that can serve
1742 1745 # them.
1743 1746
1744 1747 # if revlog format changes, client will have to check version
1745 1748 # and format flags on "stream" capability, and use
1746 1749 # uncompressed only if compatible.
1747 1750
1748 1751 if stream and not heads and remote.capable('stream'):
1749 1752 return self.stream_in(remote)
1750 1753 return self.pull(remote, heads)
1751 1754
1752 1755 # used to avoid circular references so destructors work
1753 1756 def aftertrans(base):
1754 1757 p = base
1755 1758 def a():
1756 1759 util.rename(os.path.join(p, "journal"), os.path.join(p, "undo"))
1757 1760 util.rename(os.path.join(p, "journal.dirstate"),
1758 1761 os.path.join(p, "undo.dirstate"))
1759 1762 return a
1760 1763
1761 1764 def instance(ui, path, create):
1762 1765 return localrepository(ui, util.drop_scheme('file', path), create)
1763 1766
1764 1767 def islocal(path):
1765 1768 return True
@@ -1,417 +1,408 b''
1 1 # merge.py - directory-level update/merge handling for Mercurial
2 2 #
3 3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import *
9 9 from i18n import gettext as _
10 10 from demandload import *
11 11 demandload(globals(), "errno util os tempfile")
12 12
13 def merge3(repo, fn, my, other, p1, p2):
14 """perform a 3-way merge in the working directory"""
13 def filemerge(repo, fw, fo, fd, my, other, p1, p2, move):
14 """perform a 3-way merge in the working directory
15 15
16 def temp(prefix, node):
17 pre = "%s~%s." % (os.path.basename(fn), prefix)
16 fw = filename in the working directory and first parent
17 fo = filename in other parent
18 fd = destination filename
19 my = fileid in first parent
20 other = fileid in second parent
21 p1, p2 = hex changeset ids for merge command
22 move = whether to move or copy the file to the destination
23
24 TODO:
25 if fw is copied in the working directory, we get confused
26 implement move and fd
27 """
28
29 def temp(prefix, ctx):
30 pre = "%s~%s." % (os.path.basename(ctx.path()), prefix)
18 31 (fd, name) = tempfile.mkstemp(prefix=pre)
19 32 f = os.fdopen(fd, "wb")
20 repo.wwrite(fn, fl.read(node), f)
33 repo.wwrite(ctx.path(), ctx.data(), f)
21 34 f.close()
22 35 return name
23 36
24 fl = repo.file(fn)
25 base = fl.ancestor(my, other)
26 a = repo.wjoin(fn)
27 b = temp("base", base)
28 c = temp("other", other)
37 fcm = repo.filectx(fw, fileid=my)
38 fco = repo.filectx(fo, fileid=other)
39 fca = fcm.ancestor(fco)
40 if not fca:
41 fca = repo.filectx(fw, fileid=-1)
42 a = repo.wjoin(fw)
43 b = temp("base", fca)
44 c = temp("other", fco)
29 45
30 repo.ui.note(_("resolving %s\n") % fn)
31 repo.ui.debug(_("file %s: my %s other %s ancestor %s\n") %
32 (fn, short(my), short(other), short(base)))
46 repo.ui.note(_("resolving %s\n") % fw)
47 repo.ui.debug(_("my %s other %s ancestor %s\n") % (fcm, fco, fca))
33 48
34 49 cmd = (os.environ.get("HGMERGE") or repo.ui.config("ui", "merge")
35 50 or "hgmerge")
36 51 r = util.system('%s "%s" "%s" "%s"' % (cmd, a, b, c), cwd=repo.root,
37 environ={'HG_FILE': fn,
52 environ={'HG_FILE': fw,
38 53 'HG_MY_NODE': p1,
39 'HG_OTHER_NODE': p2,
40 'HG_FILE_MY_NODE': hex(my),
41 'HG_FILE_OTHER_NODE': hex(other),
42 'HG_FILE_BASE_NODE': hex(base)})
54 'HG_OTHER_NODE': p2})
43 55 if r:
44 repo.ui.warn(_("merging %s failed!\n") % fn)
56 repo.ui.warn(_("merging %s failed!\n") % fw)
45 57
46 58 os.unlink(b)
47 59 os.unlink(c)
48 60 return r
49 61
50 def checkunknown(repo, m2, status):
62 def checkunknown(repo, m2, wctx):
51 63 """
52 64 check for collisions between unknown files and files in m2
53 65 """
54 modified, added, removed, deleted, unknown = status[:5]
55 for f in unknown:
66 for f in wctx.unknown():
56 67 if f in m2:
57 68 if repo.file(f).cmp(m2[f], repo.wread(f)):
58 69 raise util.Abort(_("'%s' already exists in the working"
59 70 " dir and differs from remote") % f)
60 71
61 def workingmanifest(repo, man, status):
62 """
63 Update manifest to correspond to the working directory
64 """
65
66 copied = repo.dirstate.copies()
67 modified, added, removed, deleted, unknown = status[:5]
68 for i,l in (("a", added), ("m", modified), ("u", unknown)):
69 for f in l:
70 man[f] = man.get(copied.get(f, f), nullid) + i
71 man.set(f, util.is_exec(repo.wjoin(f), man.execf(f)))
72
73 for f in deleted + removed:
74 del man[f]
75
76 return man
77
78 def forgetremoved(m2, status):
72 def forgetremoved(m2, wctx):
79 73 """
80 74 Forget removed files
81 75
82 76 If we're jumping between revisions (as opposed to merging), and if
83 77 neither the working directory nor the target rev has the file,
84 78 then we need to remove it from the dirstate, to prevent the
85 79 dirstate from listing the file when it is no longer in the
86 80 manifest.
87 81 """
88 82
89 modified, added, removed, deleted, unknown = status[:5]
90 83 action = []
91 84
92 for f in deleted + removed:
85 for f in wctx.deleted() + wctx.removed():
93 86 if f not in m2:
94 87 action.append((f, "f"))
95 88
96 89 return action
97 90
98 91 def nonoverlap(d1, d2):
99 92 """
100 93 Return list of elements in d1 not in d2
101 94 """
102 95
103 96 l = []
104 97 for d in d1:
105 98 if d not in d2:
106 99 l.append(d)
107 100
108 101 l.sort()
109 102 return l
110 103
111 104 def findold(fctx, limit):
112 105 """
113 106 find files that path was copied from, back to linkrev limit
114 107 """
115 108
116 109 old = {}
117 110 orig = fctx.path()
118 111 visit = [fctx]
119 112 while visit:
120 113 fc = visit.pop()
121 114 if fc.rev() < limit:
122 115 continue
123 116 if fc.path() != orig and fc.path() not in old:
124 117 old[fc.path()] = 1
125 118 visit += fc.parents()
126 119
127 120 old = old.keys()
128 121 old.sort()
129 122 return old
130 123
131 124 def findcopies(repo, m1, m2, limit):
132 125 """
133 126 Find moves and copies between m1 and m2 back to limit linkrev
134 127 """
135 128
136 129 # avoid silly behavior for update from empty dir
137 130 if not m1:
138 131 return {}
139 132
140 133 dcopies = repo.dirstate.copies()
141 134 copy = {}
142 135 match = {}
143 136 u1 = nonoverlap(m1, m2)
144 137 u2 = nonoverlap(m2, m1)
145 138 ctx = util.cachefunc(lambda f,n: repo.filectx(f, fileid=n[:20]))
146 139
147 140 def checkpair(c, f2, man):
148 141 ''' check if an apparent pair actually matches '''
149 142 c2 = ctx(f2, man[f2])
150 143 ca = c.ancestor(c2)
151 144 if ca:
152 145 copy[c.path()] = f2
153 146 copy[f2] = c.path()
154 147
155 148 for f in u1:
156 149 c = ctx(dcopies.get(f, f), m1[f])
157 150 for of in findold(c, limit):
158 151 if of in m2:
159 152 checkpair(c, of, m2)
160 153 else:
161 154 match.setdefault(of, []).append(f)
162 155
163 156 for f in u2:
164 157 c = ctx(f, m2[f])
165 158 for of in findold(c, limit):
166 159 if of in m1:
167 160 checkpair(c, of, m1)
168 161 elif of in match:
169 162 for mf in match[of]:
170 163 checkpair(c, mf, m1)
171 164
172 165 return copy
173 166
174 167 def filtermanifest(man, partial):
175 168 if partial:
176 169 for k in man.keys():
177 170 if not partial(k): del man[k]
178 171
179 172 def manifestmerge(ui, m1, m2, ma, overwrite, backwards):
180 173 """
181 174 Merge manifest m1 with m2 using ancestor ma and generate merge action list
182 175 """
183 176
184 177 def fmerge(f):
185 178 """merge executable flags"""
186 179 a, b, c = ma.execf(f), m1.execf(f), m2.execf(f)
187 180 return ((a^b) | (a^c)) ^ a
188 181
189 182 action = []
190 183
191 184 def act(msg, f, m, *args):
192 185 ui.debug(" %s: %s -> %s\n" % (f, msg, m))
193 186 action.append((f, m) + args)
194 187
195 188 # Compare manifests
196 189 for f, n in m1.iteritems():
197 190 if f in m2:
198 191 # are files different?
199 192 if n != m2[f]:
200 193 a = ma.get(f, nullid)
201 194 # are both different from the ancestor?
202 195 if not overwrite and n != a and m2[f] != a:
203 196 act("versions differ", f, "m", fmerge(f), n[:20], m2[f])
204 197 # are we clobbering?
205 198 # is remote's version newer?
206 199 # or are we going back in time and clean?
207 200 elif overwrite or m2[f] != a or (backwards and not n[20:]):
208 201 act("remote is newer", f, "g", m2.execf(f), m2[f])
209 202 # local is newer, not overwrite, check mode bits
210 203 elif fmerge(f) != m1.execf(f):
211 204 act("update permissions", f, "e", m2.execf(f))
212 205 # contents same, check mode bits
213 206 elif m1.execf(f) != m2.execf(f):
214 207 if overwrite or fmerge(f) != m1.execf(f):
215 208 act("update permissions", f, "e", m2.execf(f))
216 209 del m2[f]
217 210 elif f in ma:
218 211 if n != ma[f] and not overwrite:
219 212 if ui.prompt(
220 213 (_(" local changed %s which remote deleted\n") % f) +
221 214 _("(k)eep or (d)elete?"), _("[kd]"), _("k")) == _("d"):
222 215 act("prompt delete", f, "r")
223 216 else:
224 217 act("other deleted", f, "r")
225 218 else:
226 219 # file is created on branch or in working directory
227 220 if (overwrite and n[20:] != "u") or (backwards and not n[20:]):
228 221 act("remote deleted", f, "r")
229 222
230 223 for f, n in m2.iteritems():
231 224 if f in ma:
232 225 if overwrite or backwards:
233 226 act("recreating", f, "g", m2.execf(f), n)
234 227 elif n != ma[f]:
235 228 if ui.prompt(
236 229 (_("remote changed %s which local deleted\n") % f) +
237 230 _("(k)eep or (d)elete?"), _("[kd]"), _("k")) == _("k"):
238 231 act("prompt recreating", f, "g", m2.execf(f), n)
239 232 else:
240 233 act("remote created", f, "g", m2.execf(f), n)
241 234
242 235 return action
243 236
244 237 def applyupdates(repo, action, xp1, xp2):
245 238 updated, merged, removed, unresolved = 0, 0, 0, 0
246 239 action.sort()
247 240 for a in action:
248 241 f, m = a[:2]
249 242 if f[0] == "/":
250 243 continue
251 244 if m == "r": # remove
252 245 repo.ui.note(_("removing %s\n") % f)
253 246 util.audit_path(f)
254 247 try:
255 248 util.unlink(repo.wjoin(f))
256 249 except OSError, inst:
257 250 if inst.errno != errno.ENOENT:
258 251 repo.ui.warn(_("update failed to remove %s: %s!\n") %
259 252 (f, inst.strerror))
260 253 removed +=1
261 254 elif m == "m": # merge
262 255 flag, my, other = a[2:]
263 256 repo.ui.status(_("merging %s\n") % f)
264 if merge3(repo, f, my, other, xp1, xp2):
257 if filemerge(repo, f, f, f, my, other, xp1, xp2, False):
265 258 unresolved += 1
266 259 util.set_exec(repo.wjoin(f), flag)
267 260 merged += 1
268 261 elif m == "g": # get
269 262 flag, node = a[2:]
270 263 repo.ui.note(_("getting %s\n") % f)
271 264 t = repo.file(f).read(node)
272 265 repo.wwrite(f, t)
273 266 util.set_exec(repo.wjoin(f), flag)
274 267 updated += 1
275 268 elif m == "e": # exec
276 269 flag = a[2:]
277 270 util.set_exec(repo.wjoin(f), flag)
278 271
279 272 return updated, merged, removed, unresolved
280 273
281 274 def recordupdates(repo, action, branchmerge):
282 275 for a in action:
283 276 f, m = a[:2]
284 277 if m == "r": # remove
285 278 if branchmerge:
286 279 repo.dirstate.update([f], 'r')
287 280 else:
288 281 repo.dirstate.forget([f])
289 282 elif m == "f": # forget
290 283 repo.dirstate.forget([f])
291 284 elif m == "g": # get
292 285 if branchmerge:
293 286 repo.dirstate.update([f], 'n', st_mtime=-1)
294 287 else:
295 288 repo.dirstate.update([f], 'n')
296 289 elif m == "m": # merge
297 290 flag, my, other = a[2:]
298 291 if branchmerge:
299 292 # We've done a branch merge, mark this file as merged
300 293 # so that we properly record the merger later
301 294 repo.dirstate.update([f], 'm')
302 295 else:
303 296 # We've update-merged a locally modified file, so
304 297 # we set the dirstate to emulate a normal checkout
305 298 # of that file some time in the past. Thus our
306 299 # merge will appear as a normal local file
307 300 # modification.
308 301 fl = repo.file(f)
309 302 f_len = fl.size(fl.rev(other))
310 303 repo.dirstate.update([f], 'n', st_size=f_len, st_mtime=-1)
311 304
312 305 def update(repo, node, branchmerge=False, force=False, partial=None,
313 306 wlock=None, show_stats=True, remind=True):
314 307
315 308 overwrite = force and not branchmerge
316 309 forcemerge = force and branchmerge
317 310
318 311 if not wlock:
319 312 wlock = repo.wlock()
320 313
321 314 ### check phase
322 315
323 pl = repo.parents()
316 wc = repo.workingctx()
317 pl = wc.parents()
324 318 if not overwrite and len(pl) > 1:
325 319 raise util.Abort(_("outstanding uncommitted merges"))
326 320
327 321 p1, p2 = pl[0], repo.changectx(node)
328 322 pa = p1.ancestor(p2)
329 323
330 324 # are we going backwards?
331 325 backwards = (pa == p2)
332 326
333 327 # is there a linear path from p1 to p2?
334 328 if pa == p1 or pa == p2:
335 329 if branchmerge:
336 330 raise util.Abort(_("there is nothing to merge, just use "
337 331 "'hg update' or look at 'hg heads'"))
338 332 elif not (overwrite or branchmerge):
339 333 raise util.Abort(_("update spans branches, use 'hg merge' "
340 334 "or 'hg update -C' to lose changes"))
341 335
342 status = repo.status()
343 modified, added, removed, deleted, unknown = status[:5]
344 336 if branchmerge and not forcemerge:
345 if modified or added or removed:
337 if wc.modified() or wc.added() or wc.removed():
346 338 raise util.Abort(_("outstanding uncommitted changes"))
347 339
348 m1 = p1.manifest().copy()
340 m1 = wc.manifest().copy()
349 341 m2 = p2.manifest().copy()
350 342 ma = pa.manifest()
351 343
352 344 # resolve the manifest to determine which files
353 345 # we care about merging
354 346 repo.ui.note(_("resolving manifests\n"))
355 347 repo.ui.debug(_(" overwrite %s branchmerge %s partial %s\n") %
356 348 (overwrite, branchmerge, bool(partial)))
357 349 repo.ui.debug(_(" ancestor %s local %s remote %s\n") % (p1, p2, pa))
358 350
359 351 action = []
360 352 copy = {}
361 353
362 m1 = workingmanifest(repo, m1, status)
363 354 filtermanifest(m1, partial)
364 355 filtermanifest(m2, partial)
365 356
366 357 if not force:
367 checkunknown(repo, m2, status)
358 checkunknown(repo, m2, wc)
368 359 if not branchmerge:
369 action += forgetremoved(m2, status)
360 action += forgetremoved(m2, wc)
370 361 if not (backwards or overwrite):
371 362 copy = findcopies(repo, m1, m2, pa.rev())
372 363
373 364 action += manifestmerge(repo.ui, m1, m2, ma, overwrite, backwards)
374 365 del m1, m2, ma
375 366
376 367 ### apply phase
377 368
378 369 if not branchmerge:
379 370 # we don't need to do any magic, just jump to the new rev
380 371 p1, p2 = p2, repo.changectx(nullid)
381 372
382 373 xp1, xp2 = str(p1), str(p2)
383 374 if not p2: xp2 = ''
384 375
385 376 repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
386 377
387 378 updated, merged, removed, unresolved = applyupdates(repo, action, xp1, xp2)
388 379
389 380 # update dirstate
390 381 if not partial:
391 382 repo.dirstate.setparents(p1.node(), p2.node())
392 383 recordupdates(repo, action, branchmerge)
393 384
394 385 if show_stats:
395 386 stats = ((updated, _("updated")),
396 387 (merged - unresolved, _("merged")),
397 388 (removed, _("removed")),
398 389 (unresolved, _("unresolved")))
399 390 note = ", ".join([_("%d files %s") % s for s in stats])
400 391 repo.ui.status("%s\n" % note)
401 392 if not partial:
402 393 if branchmerge:
403 394 if unresolved:
404 395 repo.ui.status(_("There are unresolved merges,"
405 396 " you can redo the full merge using:\n"
406 397 " hg update -C %s\n"
407 398 " hg merge %s\n"
408 399 % (p1.rev(), p2.rev())))
409 400 elif remind:
410 401 repo.ui.status(_("(branch merge, don't forget to commit)\n"))
411 402 elif unresolved:
412 403 repo.ui.status(_("There are unresolved merges with"
413 404 " locally modified files.\n"))
414 405
415 406 repo.hook('update', parent1=xp1, parent2=xp2, error=unresolved)
416 407 return unresolved
417 408
@@ -1,78 +1,78 b''
1 1 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
2 2 pulling from ../test-a
3 3 searching for changes
4 4 adding changesets
5 5 adding manifests
6 6 adding file changes
7 7 added 1 changesets with 1 changes to 1 files (+1 heads)
8 8 (run 'hg heads' to see heads, 'hg merge' to merge)
9 9 merge: warning: conflicts during merge
10 10 merging test.txt
11 11 merging test.txt failed!
12 12 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
13 13 There are unresolved merges, you can redo the full merge using:
14 14 hg update -C 1
15 15 hg merge 2
16 16 pulling from ../test-a
17 17 searching for changes
18 18 adding changesets
19 19 adding manifests
20 20 adding file changes
21 21 added 1 changesets with 1 changes to 1 files (+1 heads)
22 22 (run 'hg heads' to see heads, 'hg merge' to merge)
23 23 merge: warning: conflicts during merge
24 24 resolving manifests
25 25 overwrite None branchmerge True partial False
26 26 ancestor 451c744aabcc local a070d41e8360 remote faaea63e63a9
27 27 test.txt: versions differ -> m
28 28 merging test.txt
29 29 resolving test.txt
30 file test.txt: my fc3148072371 other d40249267ae3 ancestor 8fe46a3eb557
30 my test.txt@451c744aabcc other test.txt@a070d41e8360 ancestor test.txt@faaea63e63a9
31 31 merging test.txt failed!
32 32 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
33 33 There are unresolved merges, you can redo the full merge using:
34 34 hg update -C 3
35 35 hg merge 4
36 36 one
37 37 <<<<<<<
38 38 two-point-five
39 39 =======
40 40 two-point-one
41 41 >>>>>>>
42 42 three
43 43 rev offset length base linkrev nodeid p1 p2
44 44 0 0 7 0 0 01365c4cca56 000000000000 000000000000
45 45 1 7 9 1 1 7b013192566a 01365c4cca56 000000000000
46 46 2 16 15 2 2 8fe46a3eb557 01365c4cca56 000000000000
47 47 3 31 27 2 3 fc3148072371 7b013192566a 8fe46a3eb557
48 48 4 58 25 4 4 d40249267ae3 8fe46a3eb557 000000000000
49 49 changeset: 4:a070d41e8360
50 50 tag: tip
51 51 parent: 2:faaea63e63a9
52 52 user: test
53 53 date: Mon Jan 12 13:46:40 1970 +0000
54 54 summary: two -> two-point-one
55 55
56 56 changeset: 3:451c744aabcc
57 57 parent: 1:e409be6afcc0
58 58 parent: 2:faaea63e63a9
59 59 user: test
60 60 date: Mon Jan 12 13:46:40 1970 +0000
61 61 summary: Merge 1
62 62
63 63 changeset: 2:faaea63e63a9
64 64 parent: 0:095c92b91f1a
65 65 user: test
66 66 date: Mon Jan 12 13:46:40 1970 +0000
67 67 summary: Numbers as words
68 68
69 69 changeset: 1:e409be6afcc0
70 70 user: test
71 71 date: Mon Jan 12 13:46:40 1970 +0000
72 72 summary: 2 -> 2.5
73 73
74 74 changeset: 0:095c92b91f1a
75 75 user: test
76 76 date: Mon Jan 12 13:46:40 1970 +0000
77 77 summary: Initial
78 78
@@ -1,141 +1,141 b''
1 1 adding a
2 2 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
3 3 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
4 4 diff -r 33aaa84a386b a
5 5 --- a/a
6 6 +++ b/a
7 7 @@ -1,1 +1,1 @@ a
8 8 -a
9 9 +abc
10 10 adding b
11 11 M a
12 12 changeset: 0:33aaa84a386b
13 13 user: test
14 14 date: Mon Jan 12 13:46:40 1970 +0000
15 15 summary: 1
16 16
17 17 resolving manifests
18 18 overwrite False branchmerge False partial False
19 19 ancestor 33aaa84a386b local 802f095af299 remote 33aaa84a386b
20 20 a: versions differ -> m
21 21 b: remote created -> g
22 22 merging a
23 23 resolving a
24 file a: my b789fdd96dc2 other d730145abbf9 ancestor b789fdd96dc2
24 my a@33aaa84a386b other a@802f095af299 ancestor a@33aaa84a386b
25 25 getting b
26 26 1 files updated, 1 files merged, 0 files removed, 0 files unresolved
27 27 changeset: 1:802f095af299
28 28 tag: tip
29 29 user: test
30 30 date: Mon Jan 12 13:46:40 1970 +0000
31 31 summary: 2
32 32
33 33 resolving manifests
34 34 overwrite False branchmerge False partial False
35 35 ancestor 802f095af299 local 33aaa84a386b remote 33aaa84a386b
36 36 b: remote deleted -> r
37 37 removing b
38 38 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
39 39 changeset: 0:33aaa84a386b
40 40 user: test
41 41 date: Mon Jan 12 13:46:40 1970 +0000
42 42 summary: 1
43 43
44 44 abort: there is nothing to merge - use "hg update" instead
45 45 failed
46 46 changeset: 0:33aaa84a386b
47 47 user: test
48 48 date: Mon Jan 12 13:46:40 1970 +0000
49 49 summary: 1
50 50
51 51 resolving manifests
52 52 overwrite False branchmerge False partial False
53 53 ancestor 33aaa84a386b local 802f095af299 remote 33aaa84a386b
54 54 a: versions differ -> m
55 55 b: remote created -> g
56 56 merging a
57 57 resolving a
58 file a: my b789fdd96dc2 other d730145abbf9 ancestor b789fdd96dc2
58 my a@33aaa84a386b other a@802f095af299 ancestor a@33aaa84a386b
59 59 getting b
60 60 1 files updated, 1 files merged, 0 files removed, 0 files unresolved
61 61 changeset: 1:802f095af299
62 62 tag: tip
63 63 user: test
64 64 date: Mon Jan 12 13:46:40 1970 +0000
65 65 summary: 2
66 66
67 67 changeset: 1:802f095af299
68 68 tag: tip
69 69 user: test
70 70 date: Mon Jan 12 13:46:40 1970 +0000
71 71 files: a b
72 72 description:
73 73 2
74 74
75 75
76 76 changeset: 0:33aaa84a386b
77 77 user: test
78 78 date: Mon Jan 12 13:46:40 1970 +0000
79 79 files: a
80 80 description:
81 81 1
82 82
83 83
84 84 diff -r 802f095af299 a
85 85 --- a/a
86 86 +++ b/a
87 87 @@ -1,1 +1,1 @@ a2
88 88 -a2
89 89 +abc
90 90 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
91 91 adding b
92 92 M a
93 93 changeset: 1:802f095af299
94 94 user: test
95 95 date: Mon Jan 12 13:46:40 1970 +0000
96 96 summary: 2
97 97
98 98 abort: update spans branches, use 'hg merge' or 'hg update -C' to lose changes
99 99 failed
100 100 abort: outstanding uncommitted changes
101 101 failed
102 102 resolving manifests
103 103 overwrite False branchmerge True partial False
104 104 ancestor 802f095af299 local 030602aee63d remote 33aaa84a386b
105 105 a: versions differ -> m
106 106 b: versions differ -> m
107 107 merging a
108 108 resolving a
109 file a: my d730145abbf9 other 13e0d5f949fa ancestor b789fdd96dc2
109 my a@802f095af299 other a@030602aee63d ancestor a@33aaa84a386b
110 110 merging b
111 111 resolving b
112 file b: my 1e88685f5dde other 61de8c7723ca ancestor 000000000000
112 my b@802f095af299 other b@030602aee63d ancestor b@000000000000
113 113 0 files updated, 2 files merged, 0 files removed, 0 files unresolved
114 114 (branch merge, don't forget to commit)
115 115 changeset: 1:802f095af299
116 116 user: test
117 117 date: Mon Jan 12 13:46:40 1970 +0000
118 118 summary: 2
119 119
120 120 changeset: 2:030602aee63d
121 121 tag: tip
122 122 parent: 0:33aaa84a386b
123 123 user: test
124 124 date: Mon Jan 12 13:46:40 1970 +0000
125 125 summary: 3
126 126
127 127 diff -r 802f095af299 a
128 128 --- a/a
129 129 +++ b/a
130 130 @@ -1,1 +1,1 @@ a2
131 131 -a2
132 132 +abc
133 133 adding a
134 134 pulling from ../a
135 135 requesting all changes
136 136 adding changesets
137 137 adding manifests
138 138 adding file changes
139 139 added 1 changesets with 1 changes to 1 files
140 140 merging a
141 141 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
General Comments 0
You need to be logged in to leave comments. Login now