##// END OF EJS Templates
merge: use new working context object in update
Matt Mackall -
r3218:8d4855fd default
parent child Browse files
Show More
@@ -1,459 +1,459 b''
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from demandload import *
9 9 from node import *
10 10 demandload(globals(), 'bdiff')
11 11
12 12 from node import *
13 13 from demandload import demandload
14 14 demandload(globals(), "ancestor util")
15 15
class changectx(object):
    """A changectx object provides convenient read access to the data
    belonging to one changeset (commit)."""

    def __init__(self, repo, changeid=None):
        """changeid is a revision number, node, or tag"""
        self._repo = repo

        if not changeid and changeid != 0:
            # no id given: default to the first dirstate parent,
            # falling back to 'tip' for a brand-new repository
            p1, p2 = self._repo.dirstate.parents()
            self._rev = self._repo.changelog.rev(p1)
            if self._rev == -1:
                changeid = 'tip'
            else:
                self._node = p1
                return

        self._node = self._repo.lookup(changeid)
        self._rev = self._repo.changelog.rev(self._node)

    def __str__(self):
        return short(self.node())

    def __repr__(self):
        return "<changectx %s>" % str(self)

    def __eq__(self, other):
        return self._rev == other._rev

    def __nonzero__(self):
        return self._rev != -1

    def __getattr__(self, name):
        # expensive pieces (the changelog entry and the manifest) are
        # loaded lazily the first time they are touched
        if name == '_changeset':
            self._changeset = self._repo.changelog.read(self.node())
            return self._changeset
        elif name == '_manifest':
            self._manifest = self._repo.manifest.read(self._changeset[0])
            return self._manifest
        else:
            raise AttributeError(name)

    def changeset(self): return self._changeset
    def manifest(self): return self._manifest

    def rev(self): return self._rev
    def node(self): return self._node
    def user(self): return self._changeset[1]
    def date(self): return self._changeset[2]
    def files(self): return self._changeset[3]
    def description(self): return self._changeset[4]

    def parents(self):
        """return contexts for each parent changeset"""
        return [changectx(self._repo, n)
                for n in self._repo.changelog.parents(self._node)]

    def children(self):
        """return contexts for each child changeset"""
        return [changectx(self._repo, n)
                for n in self._repo.changelog.children(self._node)]

    def filenode(self, path):
        """return the file node for path in this changeset"""
        # use the cached manifest when it is already loaded; otherwise
        # do a targeted lookup instead of parsing the whole manifest
        if hasattr(self, "_manifest"):
            return self._manifest[path]
        node, flag = self._repo.manifest.find(self._changeset[0], path)
        return node

    def filectx(self, path, fileid=None):
        """get a file context from this changeset"""
        if fileid is None:
            fileid = self.filenode(path)
        return filectx(self._repo, path, fileid=fileid, changectx=self)

    def filectxs(self):
        """generate a file context for each file in this changeset's
        manifest"""
        mf = self.manifest()
        names = mf.keys()
        names.sort()
        for name in names:
            yield self.filectx(name, fileid=mf[name])

    def ancestor(self, c2):
        """return the ancestor context of self and c2"""
        n = self._repo.changelog.ancestor(self._node, c2._node)
        return changectx(self._repo, n)
class filectx(object):
    """A filectx object provides convenient access to the data belonging
    to one revision of one tracked file."""

    def __init__(self, repo, path, changeid=None, fileid=None,
                 filelog=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        # the revision must be nameable one way or the other
        assert changeid is not None or fileid is not None

        if filelog:
            self._filelog = filelog
        if changectx:
            self._changectx = changectx
            self._changeid = changectx.node()

        if fileid is None:
            self._changeid = changeid
        else:
            self._fileid = fileid

    def __getattr__(self, name):
        # derive expensive attributes lazily on first access
        if name == '_changectx':
            self._changectx = changectx(self._repo, self._changeid)
            return self._changectx
        elif name == '_filelog':
            self._filelog = self._repo.file(self._path)
            return self._filelog
        elif name == '_changeid':
            self._changeid = self._filelog.linkrev(self._filenode)
            return self._changeid
        elif name == '_filenode':
            if hasattr(self, "_fileid"):
                self._filenode = self._filelog.lookup(self._fileid)
            else:
                self._filenode = self._changectx.filenode(self._path)
            return self._filenode
        elif name == '_filerev':
            self._filerev = self._filelog.rev(self._filenode)
            return self._filerev
        else:
            raise AttributeError(name)

    def __nonzero__(self):
        # NOTE(review): this compares a file revision *number* against
        # nullid (a 20-byte node string), so it is always true for real
        # revisions; it looks like it was meant to be `!= -1` -- confirm
        # before relying on it.
        return self._filerev != nullid

    def __str__(self):
        return "%s@%s" % (self.path(), short(self.node()))

    def __repr__(self):
        return "<filectx %s>" % str(self)

    def __eq__(self, other):
        return self._path == other._path and self._changeid == other._changeid

    def filectx(self, fileid):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(self._repo, self._path, fileid=fileid,
                       filelog=self._filelog)

    def filerev(self): return self._filerev
    def filenode(self): return self._filenode
    def filelog(self): return self._filelog

    def rev(self):
        # prefer the changeset context's revision when we have one
        if hasattr(self, "_changectx"):
            return self._changectx.rev()
        return self._filelog.linkrev(self._filenode)

    def node(self): return self._changectx.node()
    def user(self): return self._changectx.user()
    def date(self): return self._changectx.date()
    def files(self): return self._changectx.files()
    def description(self): return self._changectx.description()
    def manifest(self): return self._changectx.manifest()
    def changectx(self): return self._changectx

    def data(self): return self._filelog.read(self._filenode)
    def renamed(self): return self._filelog.renamed(self._filenode)
    def path(self): return self._path

    def parents(self):
        """return parent filectxs, substituting the copy source for the
        first parent when this revision is a rename"""
        path = self._path
        fl = self._filelog
        pl = [(path, n, fl) for n in self._filelog.parents(self._filenode)]

        r = self.renamed()
        if r:
            # renamed: first parent lives in another filelog
            pl[0] = (r[0], r[1], None)

        return [filectx(self._repo, p, fileid=n, filelog=l)
                for p, n, l in pl if n != nullid]

    def children(self):
        # hard for renames
        return [filectx(self._repo, self._path, fileid=n,
                        filelog=self._filelog)
                for n in self._filelog.children(self._filenode)]

    def annotate(self, follow=False):
        '''returns a list of tuples of (ctx, line) for each line
        in the file, where ctx is the filectx of the node where
        that line was last changed'''

        def decorate(text, fctx):
            # tag every line of text with the context it came from
            return ([fctx] * len(text.splitlines()), text)

        def pair(parent, child):
            # lines unchanged since the parent inherit its attribution
            for a1, a2, b1, b2 in bdiff.blocks(parent[1], child[1]):
                child[0][b1:b2] = parent[0][a1:a2]
            return child

        getlog = util.cachefunc(lambda x: self._repo.file(x))
        def getctx(path, fileid):
            log = path == self._path and self._filelog or getlog(path)
            return filectx(self._repo, path, fileid=fileid, filelog=log)
        getctx = util.cachefunc(getctx)

        def parents(f):
            # we want to reuse filectx objects as much as possible
            p = f._path
            if f._filerev is None: # working dir
                pl = [(n.path(), n.filerev()) for n in f.parents()]
            else:
                pl = [(p, n) for n in f._filelog.parentrevs(f._filerev)]

            if follow:
                r = f.renamed()
                if r:
                    pl[0] = (r[0], getlog(r[0]).rev(r[1]))

            return [getctx(p, n) for p, n in pl if n != -1]

        # find all ancestors
        needed = {self: 1}
        visit = [self]
        files = [self._path]
        while visit:
            f = visit.pop(0)
            for p in parents(f):
                if p not in needed:
                    needed[p] = 1
                    visit.append(p)
                    if p._path not in files:
                        files.append(p._path)
                else:
                    # count how many times we'll use this
                    needed[p] += 1

        # sort by revision (per file) which is a topological order
        visit = []
        files.reverse()
        for f in files:
            fn = [(n._filerev, n) for n in needed.keys() if n._path == f]
            fn.sort()
            visit.extend(fn)
        hist = {}

        for r, f in visit:
            curr = decorate(f.data(), f)
            for p in parents(f):
                if p != nullid:
                    curr = pair(hist[p], curr)
                    # trim the history of unneeded revs
                    needed[p] -= 1
                    if not needed[p]:
                        del hist[p]
            hist[f] = curr

        return zip(hist[f][0], hist[f][1].splitlines(1))

    def ancestor(self, fc2):
        """
        find the common ancestor file context, if any, of self, and fc2
        """
        acache = {}

        # prime the ancestor cache for the working directory
        for c in (self, fc2):
            if c._filerev is None:
                pl = [(n.path(), n.filenode()) for n in c.parents()]
                acache[(c._path, None)] = pl

        flcache = {self._path: self._filelog, fc2._path: fc2._filelog}
        def parents(vertex):
            if vertex in acache:
                return acache[vertex]
            f, n = vertex
            if f not in flcache:
                flcache[f] = self._repo.file(f)
            fl = flcache[f]
            pl = [(f, p) for p in fl.parents(n) if p != nullid]
            re = fl.renamed(n)
            if re:
                # follow the rename across filelogs
                pl.append(re)
            acache[vertex] = pl
            return pl

        a, b = (self._path, self._filenode), (fc2._path, fc2._filenode)
        v = ancestor.ancestor(a, b, parents)
        if v:
            f, n = v
            return filectx(self._repo, f, fileid=n, filelog=flcache[f])

        return None
class workingctx(changectx):
    """A workingctx object makes access to data related to
    the current working directory convenient."""

    def __init__(self, repo):
        self._repo = repo
        # the working directory has no committed revision of its own
        self._rev = None
        self._node = None

    def __str__(self):
        return "."

    def __nonzero__(self):
        return True

    def __getattr__(self, name):
        # parents, status, and the synthetic manifest are computed
        # lazily on first access
        if name == '_parents':
            self._parents = self._repo.parents()
            return self._parents
        if name == '_status':
            self._status = self._repo.status()
            return self._status
        if name == '_manifest':
            self._buildmanifest()
            return self._manifest
        else:
            raise AttributeError(name)

    def _buildmanifest(self):
        """generate a manifest corresponding to the working directory"""

        # fix: use copy() -- the source retained a typo'd call coy()
        man = self._parents[0].manifest().copy()
        copied = self._repo.dirstate.copies()
        modified, added, removed, deleted, unknown = self._status[:5]
        # annotate each dirty file's node with its state letter so the
        # entry differs from the parent manifest's
        for i, l in (("a", added), ("m", modified), ("u", unknown)):
            for f in l:
                man[f] = man.get(copied.get(f, f), nullid) + i
                man.set(f, util.is_exec(self._repo.wjoin(f), man.execf(f)))

        for f in deleted + removed:
            del man[f]

        self._manifest = man

    def manifest(self): return self._manifest

    def user(self): return self._repo.ui.username()
    def date(self): return util.makedate()
    def description(self): return ""
    def files(self):
        f = self.modified() + self.added() + self.removed()
        f.sort()
        return f

    def modified(self): return self._status[0]
    def added(self): return self._status[1]
    def removed(self): return self._status[2]
    def deleted(self): return self._status[3]
    def unknown(self): return self._status[4]
    def clean(self): return self._status[5]

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def children(self):
        return []

    def filectx(self, path):
        """get a file context from the working directory"""
        return workingfilectx(self._repo, path, workingctx=self)

    def ancestor(self, c2):
        """return the ancestor context of self and c2"""
        return self._parents[0].ancestor(c2) # punt on two parents for now
class workingfilectx(filectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""

    def __init__(self, repo, path, filelog=None, workingctx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path
        self._changeid = None
        self._filerev = self._filenode = None

        if filelog:
            self._filelog = filelog
        if workingctx:
            self._changectx = workingctx

    def __getattr__(self, name):
        if name == '_changectx':
            # fix: original called workingctx(repo) with an undefined
            # global; use the repository this context belongs to
            self._changectx = workingctx(self._repo)
            return self._changectx
        elif name == '_repopath':
            # fix: original referenced an undefined name `p` and fell
            # through without returning the computed value
            self._repopath = (self._repo.dirstate.copied(self._path)
                              or self._path)
            return self._repopath
        elif name == '_filelog':
            self._filelog = self._repo.file(self._repopath)
            return self._filelog
        else:
            raise AttributeError(name)

    def __nonzero__(self):
        return True

    def __str__(self):
        return "%s@." % self.path()

    def filectx(self, fileid):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(self._repo, self._repopath, fileid=fileid,
                       filelog=self._filelog)

    def rev(self):
        if hasattr(self, "_changectx"):
            return self._changectx.rev()
        return self._filelog.linkrev(self._filenode)

    def data(self): return self._repo.wread(self._path)

    def renamed(self):
        """return (source path, source node) if this file was copied,
        else None"""
        rp = self._repopath
        if rp == self._path:
            return None
        # fix: the attribute is _changectx (no _workingctx is ever set)
        # and _parents is a list of changectxs -- look in the first parent
        return rp, self._changectx._parents[0]._manifest.get(rp, nullid)

    def parents(self):
        '''return parent filectxs, following copies if necessary'''
        p = self._path
        rp = self._repopath
        # fix: original read nonexistent self._workingctx
        pcl = self._changectx._parents
        fl = self._filelog
        pl = [(rp, pcl[0]._manifest.get(rp, nullid), fl)]
        if len(pcl) > 1:
            if rp != p:
                # the copy source lives in a different filelog
                fl = None
            pl.append((p, pcl[1]._manifest.get(p, nullid), fl))

        return [filectx(self._repo, p, fileid=n, filelog=l)
                for p, n, l in pl if n != nullid]

    def children(self):
        return []
@@ -1,1765 +1,1768 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import *
9 9 from i18n import gettext as _
10 10 from demandload import *
11 11 import repo
12 12 demandload(globals(), "appendfile changegroup")
13 13 demandload(globals(), "changelog dirstate filelog manifest context")
14 14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
15 15 demandload(globals(), "os revlog time util")
16 16
17 17 class localrepository(repo.repository):
18 18 capabilities = ()
19 19
20 20 def __del__(self):
21 21 self.transhandle = None
22 22 def __init__(self, parentui, path=None, create=0):
23 23 repo.repository.__init__(self)
24 24 if not path:
25 25 p = os.getcwd()
26 26 while not os.path.isdir(os.path.join(p, ".hg")):
27 27 oldp = p
28 28 p = os.path.dirname(p)
29 29 if p == oldp:
30 30 raise repo.RepoError(_("There is no Mercurial repository"
31 31 " here (.hg not found)"))
32 32 path = p
33 33 self.path = os.path.join(path, ".hg")
34 34
35 35 if not os.path.isdir(self.path):
36 36 if create:
37 37 if not os.path.exists(path):
38 38 os.mkdir(path)
39 39 os.mkdir(self.path)
40 40 os.mkdir(self.join("data"))
41 41 else:
42 42 raise repo.RepoError(_("repository %s not found") % path)
43 43 elif create:
44 44 raise repo.RepoError(_("repository %s already exists") % path)
45 45
46 46 self.root = os.path.abspath(path)
47 47 self.origroot = path
48 48 self.ui = ui.ui(parentui=parentui)
49 49 self.opener = util.opener(self.path)
50 50 self.wopener = util.opener(self.root)
51 51
52 52 try:
53 53 self.ui.readconfig(self.join("hgrc"), self.root)
54 54 except IOError:
55 55 pass
56 56
57 57 v = self.ui.revlogopts
58 58 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
59 59 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
60 60 fl = v.get('flags', None)
61 61 flags = 0
62 62 if fl != None:
63 63 for x in fl.split():
64 64 flags |= revlog.flagstr(x)
65 65 elif self.revlogv1:
66 66 flags = revlog.REVLOG_DEFAULT_FLAGS
67 67
68 68 v = self.revlogversion | flags
69 69 self.manifest = manifest.manifest(self.opener, v)
70 70 self.changelog = changelog.changelog(self.opener, v)
71 71
72 72 # the changelog might not have the inline index flag
73 73 # on. If the format of the changelog is the same as found in
74 74 # .hgrc, apply any flags found in the .hgrc as well.
75 75 # Otherwise, just version from the changelog
76 76 v = self.changelog.version
77 77 if v == self.revlogversion:
78 78 v |= flags
79 79 self.revlogversion = v
80 80
81 81 self.tagscache = None
82 82 self.nodetagscache = None
83 83 self.encodepats = None
84 84 self.decodepats = None
85 85 self.transhandle = None
86 86
87 87 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
88 88
89 89 def url(self):
90 90 return 'file:' + self.root
91 91
92 92 def hook(self, name, throw=False, **args):
93 93 def callhook(hname, funcname):
94 94 '''call python hook. hook is callable object, looked up as
95 95 name in python module. if callable returns "true", hook
96 96 fails, else passes. if hook raises exception, treated as
97 97 hook failure. exception propagates if throw is "true".
98 98
99 99 reason for "true" meaning "hook failed" is so that
100 100 unmodified commands (e.g. mercurial.commands.update) can
101 101 be run as hooks without wrappers to convert return values.'''
102 102
103 103 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
104 104 d = funcname.rfind('.')
105 105 if d == -1:
106 106 raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
107 107 % (hname, funcname))
108 108 modname = funcname[:d]
109 109 try:
110 110 obj = __import__(modname)
111 111 except ImportError:
112 112 try:
113 113 # extensions are loaded with hgext_ prefix
114 114 obj = __import__("hgext_%s" % modname)
115 115 except ImportError:
116 116 raise util.Abort(_('%s hook is invalid '
117 117 '(import of "%s" failed)') %
118 118 (hname, modname))
119 119 try:
120 120 for p in funcname.split('.')[1:]:
121 121 obj = getattr(obj, p)
122 122 except AttributeError, err:
123 123 raise util.Abort(_('%s hook is invalid '
124 124 '("%s" is not defined)') %
125 125 (hname, funcname))
126 126 if not callable(obj):
127 127 raise util.Abort(_('%s hook is invalid '
128 128 '("%s" is not callable)') %
129 129 (hname, funcname))
130 130 try:
131 131 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
132 132 except (KeyboardInterrupt, util.SignalInterrupt):
133 133 raise
134 134 except Exception, exc:
135 135 if isinstance(exc, util.Abort):
136 136 self.ui.warn(_('error: %s hook failed: %s\n') %
137 137 (hname, exc.args[0]))
138 138 else:
139 139 self.ui.warn(_('error: %s hook raised an exception: '
140 140 '%s\n') % (hname, exc))
141 141 if throw:
142 142 raise
143 143 self.ui.print_exc()
144 144 return True
145 145 if r:
146 146 if throw:
147 147 raise util.Abort(_('%s hook failed') % hname)
148 148 self.ui.warn(_('warning: %s hook failed\n') % hname)
149 149 return r
150 150
151 151 def runhook(name, cmd):
152 152 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
153 153 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
154 154 r = util.system(cmd, environ=env, cwd=self.root)
155 155 if r:
156 156 desc, r = util.explain_exit(r)
157 157 if throw:
158 158 raise util.Abort(_('%s hook %s') % (name, desc))
159 159 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
160 160 return r
161 161
162 162 r = False
163 163 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
164 164 if hname.split(".", 1)[0] == name and cmd]
165 165 hooks.sort()
166 166 for hname, cmd in hooks:
167 167 if cmd.startswith('python:'):
168 168 r = callhook(hname, cmd[7:].strip()) or r
169 169 else:
170 170 r = runhook(hname, cmd) or r
171 171 return r
172 172
173 173 tag_disallowed = ':\r\n'
174 174
175 175 def tag(self, name, node, message, local, user, date):
176 176 '''tag a revision with a symbolic name.
177 177
178 178 if local is True, the tag is stored in a per-repository file.
179 179 otherwise, it is stored in the .hgtags file, and a new
180 180 changeset is committed with the change.
181 181
182 182 keyword arguments:
183 183
184 184 local: whether to store tag in non-version-controlled file
185 185 (default False)
186 186
187 187 message: commit message to use if committing
188 188
189 189 user: name of user to use if committing
190 190
191 191 date: date tuple to use if committing'''
192 192
193 193 for c in self.tag_disallowed:
194 194 if c in name:
195 195 raise util.Abort(_('%r cannot be used in a tag name') % c)
196 196
197 197 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
198 198
199 199 if local:
200 200 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
201 201 self.hook('tag', node=hex(node), tag=name, local=local)
202 202 return
203 203
204 204 for x in self.status()[:5]:
205 205 if '.hgtags' in x:
206 206 raise util.Abort(_('working copy of .hgtags is changed '
207 207 '(please commit .hgtags manually)'))
208 208
209 209 self.wfile('.hgtags', 'ab').write('%s %s\n' % (hex(node), name))
210 210 if self.dirstate.state('.hgtags') == '?':
211 211 self.add(['.hgtags'])
212 212
213 213 self.commit(['.hgtags'], message, user, date)
214 214 self.hook('tag', node=hex(node), tag=name, local=local)
215 215
216 216 def tags(self):
217 217 '''return a mapping of tag to node'''
218 218 if not self.tagscache:
219 219 self.tagscache = {}
220 220
221 221 def parsetag(line, context):
222 222 if not line:
223 223 return
224 224 s = l.split(" ", 1)
225 225 if len(s) != 2:
226 226 self.ui.warn(_("%s: cannot parse entry\n") % context)
227 227 return
228 228 node, key = s
229 229 key = key.strip()
230 230 try:
231 231 bin_n = bin(node)
232 232 except TypeError:
233 233 self.ui.warn(_("%s: node '%s' is not well formed\n") %
234 234 (context, node))
235 235 return
236 236 if bin_n not in self.changelog.nodemap:
237 237 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
238 238 (context, key))
239 239 return
240 240 self.tagscache[key] = bin_n
241 241
242 242 # read the tags file from each head, ending with the tip,
243 243 # and add each tag found to the map, with "newer" ones
244 244 # taking precedence
245 245 heads = self.heads()
246 246 heads.reverse()
247 247 fl = self.file(".hgtags")
248 248 for node in heads:
249 249 change = self.changelog.read(node)
250 250 rev = self.changelog.rev(node)
251 251 fn, ff = self.manifest.find(change[0], '.hgtags')
252 252 if fn is None: continue
253 253 count = 0
254 254 for l in fl.read(fn).splitlines():
255 255 count += 1
256 256 parsetag(l, _(".hgtags (rev %d:%s), line %d") %
257 257 (rev, short(node), count))
258 258 try:
259 259 f = self.opener("localtags")
260 260 count = 0
261 261 for l in f:
262 262 count += 1
263 263 parsetag(l, _("localtags, line %d") % count)
264 264 except IOError:
265 265 pass
266 266
267 267 self.tagscache['tip'] = self.changelog.tip()
268 268
269 269 return self.tagscache
270 270
271 271 def tagslist(self):
272 272 '''return a list of tags ordered by revision'''
273 273 l = []
274 274 for t, n in self.tags().items():
275 275 try:
276 276 r = self.changelog.rev(n)
277 277 except:
278 278 r = -2 # sort to the beginning of the list if unknown
279 279 l.append((r, t, n))
280 280 l.sort()
281 281 return [(t, n) for r, t, n in l]
282 282
283 283 def nodetags(self, node):
284 284 '''return the tags associated with a node'''
285 285 if not self.nodetagscache:
286 286 self.nodetagscache = {}
287 287 for t, n in self.tags().items():
288 288 self.nodetagscache.setdefault(n, []).append(t)
289 289 return self.nodetagscache.get(node, [])
290 290
291 291 def lookup(self, key):
292 292 try:
293 293 return self.tags()[key]
294 294 except KeyError:
295 295 if key == '.':
296 296 key = self.dirstate.parents()[0]
297 297 if key == nullid:
298 298 raise repo.RepoError(_("no revision checked out"))
299 299 try:
300 300 return self.changelog.lookup(key)
301 301 except:
302 302 raise repo.RepoError(_("unknown revision '%s'") % key)
303 303
304 304 def dev(self):
305 305 return os.lstat(self.path).st_dev
306 306
307 307 def local(self):
308 308 return True
309 309
310 310 def join(self, f):
311 311 return os.path.join(self.path, f)
312 312
313 313 def wjoin(self, f):
314 314 return os.path.join(self.root, f)
315 315
316 316 def file(self, f):
317 317 if f[0] == '/':
318 318 f = f[1:]
319 319 return filelog.filelog(self.opener, f, self.revlogversion)
320 320
321 321 def changectx(self, changeid=None):
322 322 return context.changectx(self, changeid)
323 323
324 def workingctx(self):
325 return context.workingctx(self)
326
324 327 def parents(self, changeid=None):
325 328 '''
326 329 get list of changectxs for parents of changeid or working directory
327 330 '''
328 331 if changeid is None:
329 332 pl = self.dirstate.parents()
330 333 else:
331 334 n = self.changelog.lookup(changeid)
332 335 pl = self.changelog.parents(n)
333 336 if pl[1] == nullid:
334 337 return [self.changectx(pl[0])]
335 338 return [self.changectx(pl[0]), self.changectx(pl[1])]
336 339
337 340 def filectx(self, path, changeid=None, fileid=None):
338 341 """changeid can be a changeset revision, node, or tag.
339 342 fileid can be a file revision or node."""
340 343 return context.filectx(self, path, changeid, fileid)
341 344
342 345 def getcwd(self):
343 346 return self.dirstate.getcwd()
344 347
345 348 def wfile(self, f, mode='r'):
346 349 return self.wopener(f, mode)
347 350
348 351 def wread(self, filename):
349 352 if self.encodepats == None:
350 353 l = []
351 354 for pat, cmd in self.ui.configitems("encode"):
352 355 mf = util.matcher(self.root, "", [pat], [], [])[1]
353 356 l.append((mf, cmd))
354 357 self.encodepats = l
355 358
356 359 data = self.wopener(filename, 'r').read()
357 360
358 361 for mf, cmd in self.encodepats:
359 362 if mf(filename):
360 363 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
361 364 data = util.filter(data, cmd)
362 365 break
363 366
364 367 return data
365 368
366 369 def wwrite(self, filename, data, fd=None):
367 370 if self.decodepats == None:
368 371 l = []
369 372 for pat, cmd in self.ui.configitems("decode"):
370 373 mf = util.matcher(self.root, "", [pat], [], [])[1]
371 374 l.append((mf, cmd))
372 375 self.decodepats = l
373 376
374 377 for mf, cmd in self.decodepats:
375 378 if mf(filename):
376 379 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
377 380 data = util.filter(data, cmd)
378 381 break
379 382
380 383 if fd:
381 384 return fd.write(data)
382 385 return self.wopener(filename, 'w').write(data)
383 386
384 387 def transaction(self):
385 388 tr = self.transhandle
386 389 if tr != None and tr.running():
387 390 return tr.nest()
388 391
389 392 # save dirstate for rollback
390 393 try:
391 394 ds = self.opener("dirstate").read()
392 395 except IOError:
393 396 ds = ""
394 397 self.opener("journal.dirstate", "w").write(ds)
395 398
396 399 tr = transaction.transaction(self.ui.warn, self.opener,
397 400 self.join("journal"),
398 401 aftertrans(self.path))
399 402 self.transhandle = tr
400 403 return tr
401 404
402 405 def recover(self):
403 406 l = self.lock()
404 407 if os.path.exists(self.join("journal")):
405 408 self.ui.status(_("rolling back interrupted transaction\n"))
406 409 transaction.rollback(self.opener, self.join("journal"))
407 410 self.reload()
408 411 return True
409 412 else:
410 413 self.ui.warn(_("no interrupted transaction available\n"))
411 414 return False
412 415
413 416 def rollback(self, wlock=None):
414 417 if not wlock:
415 418 wlock = self.wlock()
416 419 l = self.lock()
417 420 if os.path.exists(self.join("undo")):
418 421 self.ui.status(_("rolling back last transaction\n"))
419 422 transaction.rollback(self.opener, self.join("undo"))
420 423 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
421 424 self.reload()
422 425 self.wreload()
423 426 else:
424 427 self.ui.warn(_("no rollback information available\n"))
425 428
426 429 def wreload(self):
427 430 self.dirstate.read()
428 431
429 432 def reload(self):
430 433 self.changelog.load()
431 434 self.manifest.load()
432 435 self.tagscache = None
433 436 self.nodetagscache = None
434 437
435 438 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
436 439 desc=None):
437 440 try:
438 441 l = lock.lock(self.join(lockname), 0, releasefn, desc=desc)
439 442 except lock.LockHeld, inst:
440 443 if not wait:
441 444 raise
442 445 self.ui.warn(_("waiting for lock on %s held by %s\n") %
443 446 (desc, inst.args[0]))
444 447 # default to 600 seconds timeout
445 448 l = lock.lock(self.join(lockname),
446 449 int(self.ui.config("ui", "timeout") or 600),
447 450 releasefn, desc=desc)
448 451 if acquirefn:
449 452 acquirefn()
450 453 return l
451 454
452 455 def lock(self, wait=1):
453 456 return self.do_lock("lock", wait, acquirefn=self.reload,
454 457 desc=_('repository %s') % self.origroot)
455 458
456 459 def wlock(self, wait=1):
457 460 return self.do_lock("wlock", wait, self.dirstate.write,
458 461 self.wreload,
459 462 desc=_('working directory of %s') % self.origroot)
460 463
461 464 def checkfilemerge(self, filename, text, filelog, manifest1, manifest2):
462 465 "determine whether a new filenode is needed"
463 466 fp1 = manifest1.get(filename, nullid)
464 467 fp2 = manifest2.get(filename, nullid)
465 468
466 469 if fp2 != nullid:
467 470 # is one parent an ancestor of the other?
468 471 fpa = filelog.ancestor(fp1, fp2)
469 472 if fpa == fp1:
470 473 fp1, fp2 = fp2, nullid
471 474 elif fpa == fp2:
472 475 fp2 = nullid
473 476
474 477 # is the file unmodified from the parent? report existing entry
475 478 if fp2 == nullid and text == filelog.read(fp1):
476 479 return (fp1, None, None)
477 480
478 481 return (None, fp1, fp2)
479 482
    def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
        """Commit the listed files without a full working-dir scan.

        File contents are read from the working directory, but the
        changeset parents default to the dirstate parents and may be
        overridden via p1/p2 (used by import-style operations).  The
        dirstate is only updated when p1 is the current first parent.
        A file that raises IOError on read is treated as removed.
        """
        orig_parent = self.dirstate.parents()[0] or nullid
        p1 = p1 or self.dirstate.parents()[0] or nullid
        p2 = p2 or self.dirstate.parents()[1] or nullid
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0]).copy()
        m2 = self.manifest.read(c2[0])
        changed = []

        # only track the commit in the dirstate when committing on top
        # of the current first parent
        if orig_parent == p1:
            update_dirstate = 1
        else:
            update_dirstate = 0

        if not wlock:
            wlock = self.wlock()
        l = self.lock()
        tr = self.transaction()
        linkrev = self.changelog.count()
        for f in files:
            try:
                t = self.wread(f)
                m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
                r = self.file(f)

                # reuse an existing filenode when the file is unchanged
                (entry, fp1, fp2) = self.checkfilemerge(f, t, r, m1, m2)
                if entry:
                    m1[f] = entry
                    continue

                m1[f] = r.add(t, {}, tr, linkrev, fp1, fp2)
                changed.append(f)
                if update_dirstate:
                    self.dirstate.update([f], "n")
            except IOError:
                # unreadable in the working dir: record as removed
                try:
                    del m1[f]
                    if update_dirstate:
                        self.dirstate.forget([f])
                except:
                    # deleted from p2?
                    pass

        mnode = self.manifest.add(m1, tr, linkrev, c1[0], c2[0])
        user = user or self.ui.username()
        n = self.changelog.add(mnode, changed, text, tr, p1, p2, user, date)
        tr.close()
        if update_dirstate:
            self.dirstate.setparents(n, nullid)
530 533
    def commit(self, files=None, text="", user=None, date=None,
               match=util.always, force=False, lock=None, wlock=None,
               force_editor=False):
        """Commit working directory changes as a new changeset.

        When files is given, only those files are considered; otherwise
        all modified/added/removed files accepted by `match` are used.
        Fires the precommit/pretxncommit/commit hooks, and launches the
        user's editor when text is empty or force_editor is set.
        Returns the new changeset node, or None when there is nothing
        to commit or the commit message ends up empty.
        """
        commit = []
        remove = []
        changed = []

        if files:
            # classify explicitly listed files by dirstate state
            for f in files:
                s = self.dirstate.state(f)
                if s in 'nmai':
                    commit.append(f)
                elif s == 'r':
                    remove.append(f)
                else:
                    self.ui.warn(_("%s not tracked!\n") % f)
        else:
            modified, added, removed, deleted, unknown = self.status(match=match)[:5]
            commit = modified + added
            remove = removed

        p1, p2 = self.dirstate.parents()
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0]).copy()
        m2 = self.manifest.read(c2[0])

        # an empty commit is still allowed when forced or when
        # finishing a merge (p2 set)
        if not commit and not remove and not force and p2 == nullid:
            self.ui.status(_("nothing changed\n"))
            return None

        xp1 = hex(p1)
        if p2 == nullid: xp2 = ''
        else: xp2 = hex(p2)

        self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

        if not wlock:
            wlock = self.wlock()
        if not lock:
            lock = self.lock()
        tr = self.transaction()

        # check in files
        new = {}
        linkrev = self.changelog.count()
        commit.sort()
        for f in commit:
            self.ui.note(f + "\n")
            try:
                m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
                t = self.wread(f)
            except IOError:
                self.ui.warn(_("trouble committing %s!\n") % f)
                raise

            r = self.file(f)

            meta = {}
            cp = self.dirstate.copied(f)
            if cp:
                # record copy source and the source filenode in metadata
                meta["copy"] = cp
                meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
                self.ui.debug(_(" %s: copy %s:%s\n") % (f, cp, meta["copyrev"]))
                fp1, fp2 = nullid, nullid
            else:
                entry, fp1, fp2 = self.checkfilemerge(f, t, r, m1, m2)
                if entry:
                    # unchanged from a parent: reuse existing filenode
                    new[f] = entry
                    continue

            new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
            # remember what we've added so that we can later calculate
            # the files to pull from a set of changesets
            changed.append(f)

        # update manifest
        m1.update(new)
        for f in remove:
            if f in m1:
                del m1[f]
        mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0],
                               (new, remove))

        # add changeset
        new = new.keys()
        new.sort()

        user = user or self.ui.username()
        if not text or force_editor:
            # assemble the HG:-prefixed template shown in the editor
            edittext = []
            if text:
                edittext.append(text)
            edittext.append("")
            if p2 != nullid:
                edittext.append("HG: branch merge")
            edittext.extend(["HG: changed %s" % f for f in changed])
            edittext.extend(["HG: removed %s" % f for f in remove])
            if not changed and not remove:
                edittext.append("HG: no files changed")
            edittext.append("")
            # run editor in the repository root
            olddir = os.getcwd()
            os.chdir(self.root)
            text = self.ui.edit("\n".join(edittext), user)
            os.chdir(olddir)

        # strip leading blank lines; an empty message aborts the commit
        lines = [line.rstrip() for line in text.rstrip().splitlines()]
        while lines and not lines[0]:
            del lines[0]
        if not lines:
            return None
        text = '\n'.join(lines)
        n = self.changelog.add(mn, changed + remove, text, tr, p1, p2, user, date)
        self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                  parent2=xp2)
        tr.close()

        self.dirstate.setparents(n)
        self.dirstate.update(new, "n")
        self.dirstate.forget(remove)

        self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
        return n
655 658
    def walk(self, node=None, files=[], match=util.always, badmatch=None):
        """Yield (source, filename) pairs for matching files.

        With node, walk the manifest of that changeset and yield
        ('m', fn) entries; names in `files` not found in the manifest
        yield ('b', fn) when badmatch accepts them, else a warning.
        Without node, delegate to a dirstate walk of the working dir.
        """
        if node:
            fdict = dict.fromkeys(files)
            for fn in self.manifest.read(self.changelog.read(node)[0]):
                for ffn in fdict:
                    # match if the file is the exact name or a directory
                    if ffn == fn or fn.startswith("%s/" % ffn):
                        # break right after the del, so mutating fdict
                        # during iteration is safe here
                        del fdict[ffn]
                        break
                if match(fn):
                    yield 'm', fn
            # whatever is left in fdict never appeared in the manifest
            for fn in fdict:
                if badmatch and badmatch(fn):
                    if match(fn):
                        yield 'b', fn
                else:
                    self.ui.warn(_('%s: No such file in rev %s\n') % (
                        util.pathto(self.getcwd(), fn), short(node)))
        else:
            for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
                yield src, fn
677 680
    def status(self, node1=None, node2=None, files=[], match=util.always,
               wlock=None, list_ignored=False, list_clean=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns (modified, added, removed, deleted, unknown, ignored,
        clean), each a sorted list of filenames; ignored and clean are
        only populated when the corresponding list_* flag is set.
        """

        def fcmp(fn, mf):
            # true when working-dir contents of fn differ from the
            # revision recorded in manifest mf
            t1 = self.wread(fn)
            return self.file(fn).cmp(mf.get(fn, nullid), t1)

        def mfmatches(node):
            # manifest of node restricted to names accepted by match
            change = self.changelog.read(node)
            mf = dict(self.manifest.read(change[0]))
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        modified, added, removed, deleted, unknown = [], [], [], [], []
        ignored, clean = [], []

        compareworking = False
        if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
            compareworking = True

        if not compareworking:
            # read the manifest from node1 before the manifest from node2,
            # so that we'll hit the manifest cache if we're going through
            # all the revisions in parent->child order.
            mf1 = mfmatches(node1)

        # are we comparing the working directory?
        if not node2:
            if not wlock:
                try:
                    wlock = self.wlock(wait=0)
                except lock.LockException:
                    # couldn't lock: status still works, but we can't
                    # write verified-clean entries back to the dirstate
                    wlock = None
            (lookup, modified, added, removed, deleted, unknown,
             ignored, clean) = self.dirstate.status(files, match,
                                                    list_ignored, list_clean)

            # are we comparing working dir against its parent?
            if compareworking:
                if lookup:
                    # do a full compare of any files that might have changed
                    mf2 = mfmatches(self.dirstate.parents()[0])
                    for f in lookup:
                        if fcmp(f, mf2):
                            modified.append(f)
                        else:
                            clean.append(f)
                            if wlock is not None:
                                # cache the clean verdict in the dirstate
                                self.dirstate.update([f], "n")
            else:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self.dirstate.parents()[0])
                for f in lookup + modified + added:
                    mf2[f] = ""
                for f in removed:
                    if f in mf2:
                        del mf2[f]
        else:
            # we are comparing two revisions
            mf2 = mfmatches(node2)

        if not compareworking:
            # flush lists from dirstate before comparing manifests
            modified, added, clean = [], [], []

            # make sure to sort the files so we talk to the disk in a
            # reasonable order
            mf2keys = mf2.keys()
            mf2keys.sort()
            for fn in mf2keys:
                if mf1.has_key(fn):
                    if mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1)):
                        modified.append(fn)
                    elif list_clean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)

            # whatever is still in mf1 exists in node1 but not node2
            removed = mf1.keys()

        # sort and return results:
        for l in modified, added, removed, deleted, unknown, ignored, clean:
            l.sort()
        return (modified, added, removed, deleted, unknown, ignored, clean)
771 774
772 775 def add(self, list, wlock=None):
773 776 if not wlock:
774 777 wlock = self.wlock()
775 778 for f in list:
776 779 p = self.wjoin(f)
777 780 if not os.path.exists(p):
778 781 self.ui.warn(_("%s does not exist!\n") % f)
779 782 elif not os.path.isfile(p):
780 783 self.ui.warn(_("%s not added: only files supported currently\n")
781 784 % f)
782 785 elif self.dirstate.state(f) in 'an':
783 786 self.ui.warn(_("%s already tracked!\n") % f)
784 787 else:
785 788 self.dirstate.update([f], "a")
786 789
787 790 def forget(self, list, wlock=None):
788 791 if not wlock:
789 792 wlock = self.wlock()
790 793 for f in list:
791 794 if self.dirstate.state(f) not in 'ai':
792 795 self.ui.warn(_("%s not added!\n") % f)
793 796 else:
794 797 self.dirstate.forget([f])
795 798
796 799 def remove(self, list, unlink=False, wlock=None):
797 800 if unlink:
798 801 for f in list:
799 802 try:
800 803 util.unlink(self.wjoin(f))
801 804 except OSError, inst:
802 805 if inst.errno != errno.ENOENT:
803 806 raise
804 807 if not wlock:
805 808 wlock = self.wlock()
806 809 for f in list:
807 810 p = self.wjoin(f)
808 811 if os.path.exists(p):
809 812 self.ui.warn(_("%s still exists!\n") % f)
810 813 elif self.dirstate.state(f) == 'a':
811 814 self.dirstate.forget([f])
812 815 elif f not in self.dirstate:
813 816 self.ui.warn(_("%s not tracked!\n") % f)
814 817 else:
815 818 self.dirstate.update([f], "r")
816 819
817 820 def undelete(self, list, wlock=None):
818 821 p = self.dirstate.parents()[0]
819 822 mn = self.changelog.read(p)[0]
820 823 m = self.manifest.read(mn)
821 824 if not wlock:
822 825 wlock = self.wlock()
823 826 for f in list:
824 827 if self.dirstate.state(f) not in "r":
825 828 self.ui.warn("%s not removed!\n" % f)
826 829 else:
827 830 t = self.file(f).read(m[f])
828 831 self.wwrite(f, t)
829 832 util.set_exec(self.wjoin(f), m.execf(f))
830 833 self.dirstate.update([f], "n")
831 834
832 835 def copy(self, source, dest, wlock=None):
833 836 p = self.wjoin(dest)
834 837 if not os.path.exists(p):
835 838 self.ui.warn(_("%s does not exist!\n") % dest)
836 839 elif not os.path.isfile(p):
837 840 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
838 841 else:
839 842 if not wlock:
840 843 wlock = self.wlock()
841 844 if self.dirstate.state(dest) == '?':
842 845 self.dirstate.update([dest], "a")
843 846 self.dirstate.copy(source, dest)
844 847
845 848 def heads(self, start=None):
846 849 heads = self.changelog.heads(start)
847 850 # sort the output in rev descending order
848 851 heads = [(-self.changelog.rev(h), h) for h in heads]
849 852 heads.sort()
850 853 return [n for (r, n) in heads]
851 854
852 855 # branchlookup returns a dict giving a list of branches for
853 856 # each head. A branch is defined as the tag of a node or
854 857 # the branch of the node's parents. If a node has multiple
855 858 # branch tags, tags are eliminated if they are visible from other
856 859 # branch tags.
857 860 #
858 861 # So, for this graph: a->b->c->d->e
859 862 # \ /
860 863 # aa -----/
861 864 # a has tag 2.6.12
862 865 # d has tag 2.6.13
863 866 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
864 867 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
865 868 # from the list.
866 869 #
867 870 # It is possible that more than one head will have the same branch tag.
868 871 # callers need to check the result for multiple heads under the same
869 872 # branch tag if that is a problem for them (ie checkout of a specific
870 873 # branch).
871 874 #
872 875 # passing in a specific branch will limit the depth of the search
873 876 # through the parents. It won't limit the branches returned in the
874 877 # result though.
    def branchlookup(self, heads=None, branch=None):
        """Map heads to the branch tags visible from them.

        A branch tag is eliminated from a head's list when it is
        reachable from another of that head's branch tags.  Returns
        {headnode: [tagname, ...]} for heads that keep at least one tag.
        """
        if not heads:
            heads = self.heads()
        headt = [ h for h in heads ]
        chlog = self.changelog
        branches = {}
        merges = []
        seenmerge = {}

        # traverse the tree once for each head, recording in the branches
        # dict which tags are visible from this head. The branches
        # dict also records which tags are visible from each tag
        # while we traverse.
        while headt or merges:
            if merges:
                # resume traversal at a second parent queued earlier
                n, found = merges.pop()
                visit = [n]
            else:
                h = headt.pop()
                visit = [h]
                found = [h]
                seen = {}
            while visit:
                n = visit.pop()
                if n in seen:
                    continue
                pp = chlog.parents(n)
                tags = self.nodetags(n)
                if tags:
                    for x in tags:
                        if x == 'tip':
                            continue
                        # every node 'found' so far can see tag node n
                        for f in found:
                            branches.setdefault(f, {})[n] = 1
                        branches.setdefault(n, {})[n] = 1
                        break
                    if n not in found:
                        found.append(n)
                    # stop descending past the branch we were asked for
                    if branch in tags:
                        continue
                seen[n] = 1
                if pp[1] != nullid and n not in seenmerge:
                    # queue the second parent for a later traversal pass
                    merges.append((pp[1], [x for x in found]))
                    seenmerge[n] = 1
                if pp[0] != nullid:
                    visit.append(pp[0])
        # traverse the branches dict, eliminating branch tags from each
        # head that are visible from another branch tag for that head.
        out = {}
        viscache = {}
        for h in heads:
            def visible(node):
                # memoized set of tag nodes reachable from node
                if node in viscache:
                    return viscache[node]
                ret = {}
                visit = [node]
                while visit:
                    x = visit.pop()
                    if x in viscache:
                        ret.update(viscache[x])
                    elif x not in ret:
                        ret[x] = 1
                        if x in branches:
                            visit[len(visit):] = branches[x].keys()
                viscache[node] = ret
                return ret
            if h not in branches:
                continue
            # O(n^2), but somewhat limited. This only searches the
            # tags visible from a specific head, not all the tags in the
            # whole repo.
            for b in branches[h]:
                vis = False
                for bb in branches[h].keys():
                    if b != bb:
                        if b in visible(bb):
                            vis = True
                            break
                if not vis:
                    l = out.setdefault(h, [])
                    l[len(l):] = self.nodetags(b)
        return out
957 960
958 961 def branches(self, nodes):
959 962 if not nodes:
960 963 nodes = [self.changelog.tip()]
961 964 b = []
962 965 for n in nodes:
963 966 t = n
964 967 while 1:
965 968 p = self.changelog.parents(n)
966 969 if p[1] != nullid or p[0] == nullid:
967 970 b.append((t, n, p[0], p[1]))
968 971 break
969 972 n = p[0]
970 973 return b
971 974
972 975 def between(self, pairs):
973 976 r = []
974 977
975 978 for top, bottom in pairs:
976 979 n, l, i = top, [], 0
977 980 f = 1
978 981
979 982 while n != bottom:
980 983 p = self.changelog.parents(n)[0]
981 984 if i == f:
982 985 l.append(n)
983 986 f = f * 2
984 987 n = p
985 988 i += 1
986 989
987 990 r.append(l)
988 991
989 992 return r
990 993
    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore base will be updated to include the nodes that exists
        in self and remote but no children exists in self and remote.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote, see
        outgoing)
        """
        m = self.changelog.nodemap
        search = []
        fetch = {}
        seen = {}
        seenbranch = {}
        if base == None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            # empty local repo: everything remote has is incoming
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid]
            return []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        if not unknown:
            return []

        req = dict.fromkeys(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n) # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch[n[1]] = 1 # earliest unknown
                        for p in n[2:4]:
                            if p in m:
                                base[p] = 1 # latest known

                    # queue still-unknown parents for the next round of
                    # remote.branches requests
                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req[p] = 1
                seen[n[0]] = 1

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                              (reqcnt, " ".join(map(short, r))))
                # batch the branch requests ten at a time
                for p in range(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            n = search.pop(0)
            reqcnt += 1
            l = remote.between([(n[0], n[1])])[0]
            l.append(n[1])
            p = n[0]
            f = 1
            for i in l:
                self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                if i in m:
                    if f <= 2:
                        # segment is narrow enough: p is the earliest
                        # unknown node on it
                        self.ui.debug(_("found new branch changeset %s\n") %
                                      short(p))
                        fetch[p] = 1
                        base[i] = 1
                    else:
                        self.ui.debug(_("narrowed branch search to %s:%s\n")
                                      % (short(p), short(i)))
                        search.append((p, i))
                    break
                p, f = i, f * 2

        # sanity check our fetch list
        for f in fetch.keys():
            if f in m:
                raise repo.RepoError(_("already have changeset ") + short(f[:4]))

        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug(_("found new changesets starting at ") +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return fetch.keys()
1131 1134
    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        if base == None:
            # no common-node info provided: compute it via discovery
            base = {}
            self.findincoming(remote, base, heads, force=force)

        self.ui.debug(_("common changesets up to ")
                      + " ".join(map(short, base.keys())) + "\n")

        remain = dict.fromkeys(self.changelog.nodemap)

        # prune everything remote has from the tree
        del remain[nullid]
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                del remain[n]
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = {}
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
            if heads:
                if p1 in heads:
                    updated_heads[p1] = True
                if p2 in heads:
                    updated_heads[p2] = True

        # this is the set of all roots we have to push
        if heads:
            return subset, updated_heads.keys()
        else:
            return subset
1179 1182
1180 1183 def pull(self, remote, heads=None, force=False, lock=None):
1181 1184 mylock = False
1182 1185 if not lock:
1183 1186 lock = self.lock()
1184 1187 mylock = True
1185 1188
1186 1189 try:
1187 1190 fetch = self.findincoming(remote, force=force)
1188 1191 if fetch == [nullid]:
1189 1192 self.ui.status(_("requesting all changes\n"))
1190 1193
1191 1194 if not fetch:
1192 1195 self.ui.status(_("no changes found\n"))
1193 1196 return 0
1194 1197
1195 1198 if heads is None:
1196 1199 cg = remote.changegroup(fetch, 'pull')
1197 1200 else:
1198 1201 cg = remote.changegroupsubset(fetch, heads, 'pull')
1199 1202 return self.addchangegroup(cg, 'pull', remote.url())
1200 1203 finally:
1201 1204 if mylock:
1202 1205 lock.release()
1203 1206
1204 1207 def push(self, remote, force=False, revs=None):
1205 1208 # there are two ways to push to remote repo:
1206 1209 #
1207 1210 # addchangegroup assumes local user can lock remote
1208 1211 # repo (local filesystem, old ssh servers).
1209 1212 #
1210 1213 # unbundle assumes local user cannot lock remote repo (new ssh
1211 1214 # servers, http servers).
1212 1215
1213 1216 if remote.capable('unbundle'):
1214 1217 return self.push_unbundle(remote, force, revs)
1215 1218 return self.push_addchangegroup(remote, force, revs)
1216 1219
    def prepush(self, remote, force, revs):
        """Common push preflight: compute the changegroup to send.

        Returns (changegroup, remote_heads) when the push may proceed,
        or (None, 1) when it must not: unsynced remote changes, a push
        that would create new remote heads without force, or nothing
        to push.
        """
        base = {}
        remote_heads = remote.heads()
        inc = self.findincoming(remote, base, remote_heads, force=force)
        if not force and inc:
            self.ui.warn(_("abort: unsynced remote changes!\n"))
            self.ui.status(_("(did you forget to sync?"
                             " use push -f to force)\n"))
            return None, 1

        update, updated_heads = self.findoutgoing(remote, base, remote_heads)
        if revs is not None:
            # restrict the outgoing set to ancestors of the given revs
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # FIXME we don't properly detect creation of new heads
            # in the push -r case, assume the user knows what he's doing
            if not revs and len(remote_heads) < len(heads) \
               and remote_heads != [nullid]:
                self.ui.warn(_("abort: push creates new remote branches!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 1

        if revs is None:
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads
1251 1254
1252 1255 def push_addchangegroup(self, remote, force, revs):
1253 1256 lock = remote.lock()
1254 1257
1255 1258 ret = self.prepush(remote, force, revs)
1256 1259 if ret[0] is not None:
1257 1260 cg, remote_heads = ret
1258 1261 return remote.addchangegroup(cg, 'push', self.url())
1259 1262 return ret[1]
1260 1263
1261 1264 def push_unbundle(self, remote, force, revs):
1262 1265 # local repo finds heads on server, finds out what revs it
1263 1266 # must push. once revs transferred, if server finds it has
1264 1267 # different heads (someone else won commit/push race), server
1265 1268 # aborts.
1266 1269
1267 1270 ret = self.prepush(remote, force, revs)
1268 1271 if ret[0] is not None:
1269 1272 cg, remote_heads = ret
1270 1273 if force: remote_heads = ['force']
1271 1274 return remote.unbundle(cg, remote_heads, 'push')
1272 1275 return ret[1]
1273 1276
1274 1277 def changegroupsubset(self, bases, heads, source):
1275 1278 """This function generates a changegroup consisting of all the nodes
1276 1279 that are descendents of any of the bases, and ancestors of any of
1277 1280 the heads.
1278 1281
1279 1282 It is fairly complex as determining which filenodes and which
1280 1283 manifest nodes need to be included for the changeset to be complete
1281 1284 is non-trivial.
1282 1285
1283 1286 Another wrinkle is doing the reverse, figuring out which changeset in
1284 1287 the changegroup a particular filenode or manifestnode belongs to."""
1285 1288
1286 1289 self.hook('preoutgoing', throw=True, source=source)
1287 1290
1288 1291 # Set up some initial variables
1289 1292 # Make it easy to refer to self.changelog
1290 1293 cl = self.changelog
1291 1294 # msng is short for missing - compute the list of changesets in this
1292 1295 # changegroup.
1293 1296 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1294 1297 # Some bases may turn out to be superfluous, and some heads may be
1295 1298 # too. nodesbetween will return the minimal set of bases and heads
1296 1299 # necessary to re-create the changegroup.
1297 1300
1298 1301 # Known heads are the list of heads that it is assumed the recipient
1299 1302 # of this changegroup will know about.
1300 1303 knownheads = {}
1301 1304 # We assume that all parents of bases are known heads.
1302 1305 for n in bases:
1303 1306 for p in cl.parents(n):
1304 1307 if p != nullid:
1305 1308 knownheads[p] = 1
1306 1309 knownheads = knownheads.keys()
1307 1310 if knownheads:
1308 1311 # Now that we know what heads are known, we can compute which
1309 1312 # changesets are known. The recipient must know about all
1310 1313 # changesets required to reach the known heads from the null
1311 1314 # changeset.
1312 1315 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1313 1316 junk = None
1314 1317 # Transform the list into an ersatz set.
1315 1318 has_cl_set = dict.fromkeys(has_cl_set)
1316 1319 else:
1317 1320 # If there were no known heads, the recipient cannot be assumed to
1318 1321 # know about any changesets.
1319 1322 has_cl_set = {}
1320 1323
1321 1324 # Make it easy to refer to self.manifest
1322 1325 mnfst = self.manifest
1323 1326 # We don't know which manifests are missing yet
1324 1327 msng_mnfst_set = {}
1325 1328 # Nor do we know which filenodes are missing.
1326 1329 msng_filenode_set = {}
1327 1330
1328 1331 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1329 1332 junk = None
1330 1333
1331 1334 # A changeset always belongs to itself, so the changenode lookup
1332 1335 # function for a changenode is identity.
1333 1336 def identity(x):
1334 1337 return x
1335 1338
1336 1339 # A function generating function. Sets up an environment for the
1337 1340 # inner function.
1338 1341 def cmp_by_rev_func(revlog):
1339 1342 # Compare two nodes by their revision number in the environment's
1340 1343 # revision history. Since the revision number both represents the
1341 1344 # most efficient order to read the nodes in, and represents a
1342 1345 # topological sorting of the nodes, this function is often useful.
1343 1346 def cmp_by_rev(a, b):
1344 1347 return cmp(revlog.rev(a), revlog.rev(b))
1345 1348 return cmp_by_rev
1346 1349
1347 1350 # If we determine that a particular file or manifest node must be a
1348 1351 # node that the recipient of the changegroup will already have, we can
1349 1352 # also assume the recipient will have all the parents. This function
1350 1353 # prunes them from the set of missing nodes.
1351 1354 def prune_parents(revlog, hasset, msngset):
1352 1355 haslst = hasset.keys()
1353 1356 haslst.sort(cmp_by_rev_func(revlog))
1354 1357 for node in haslst:
1355 1358 parentlst = [p for p in revlog.parents(node) if p != nullid]
1356 1359 while parentlst:
1357 1360 n = parentlst.pop()
1358 1361 if n not in hasset:
1359 1362 hasset[n] = 1
1360 1363 p = [p for p in revlog.parents(n) if p != nullid]
1361 1364 parentlst.extend(p)
1362 1365 for n in hasset:
1363 1366 msngset.pop(n, None)
1364 1367
1365 1368 # This is a function generating function used to set up an environment
1366 1369 # for the inner function to execute in.
1367 1370 def manifest_and_file_collector(changedfileset):
1368 1371 # This is an information gathering function that gathers
1369 1372 # information from each changeset node that goes out as part of
1370 1373 # the changegroup. The information gathered is a list of which
1371 1374 # manifest nodes are potentially required (the recipient may
1372 1375 # already have them) and total list of all files which were
1373 1376 # changed in any changeset in the changegroup.
1374 1377 #
1375 1378 # We also remember the first changenode we saw any manifest
1376 1379 # referenced by so we can later determine which changenode 'owns'
1377 1380 # the manifest.
1378 1381 def collect_manifests_and_files(clnode):
1379 1382 c = cl.read(clnode)
1380 1383 for f in c[3]:
1381 1384 # This is to make sure we only have one instance of each
1382 1385 # filename string for each filename.
1383 1386 changedfileset.setdefault(f, f)
1384 1387 msng_mnfst_set.setdefault(c[0], clnode)
1385 1388 return collect_manifests_and_files
1386 1389
1387 1390 # Figure out which manifest nodes (of the ones we think might be part
1388 1391 # of the changegroup) the recipient must know about and remove them
1389 1392 # from the changegroup.
1390 1393 def prune_manifests():
1391 1394 has_mnfst_set = {}
1392 1395 for n in msng_mnfst_set:
1393 1396 # If a 'missing' manifest thinks it belongs to a changenode
1394 1397 # the recipient is assumed to have, obviously the recipient
1395 1398 # must have that manifest.
1396 1399 linknode = cl.node(mnfst.linkrev(n))
1397 1400 if linknode in has_cl_set:
1398 1401 has_mnfst_set[n] = 1
1399 1402 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1400 1403
1401 1404 # Use the information collected in collect_manifests_and_files to say
1402 1405 # which changenode any manifestnode belongs to.
1403 1406 def lookup_manifest_link(mnfstnode):
1404 1407 return msng_mnfst_set[mnfstnode]
1405 1408
1406 1409 # A function generating function that sets up the initial environment
1407 1410 # the inner function.
1408 1411 def filenode_collector(changedfiles):
1409 1412 next_rev = [0]
1410 1413 # This gathers information from each manifestnode included in the
1411 1414 # changegroup about which filenodes the manifest node references
1412 1415 # so we can include those in the changegroup too.
1413 1416 #
1414 1417 # It also remembers which changenode each filenode belongs to. It
1415 1418 # does this by assuming the a filenode belongs to the changenode
1416 1419 # the first manifest that references it belongs to.
1417 1420 def collect_msng_filenodes(mnfstnode):
1418 1421 r = mnfst.rev(mnfstnode)
1419 1422 if r == next_rev[0]:
1420 1423 # If the last rev we looked at was the one just previous,
1421 1424 # we only need to see a diff.
1422 1425 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1423 1426 # For each line in the delta
1424 1427 for dline in delta.splitlines():
1425 1428 # get the filename and filenode for that line
1426 1429 f, fnode = dline.split('\0')
1427 1430 fnode = bin(fnode[:40])
1428 1431 f = changedfiles.get(f, None)
1429 1432 # And if the file is in the list of files we care
1430 1433 # about.
1431 1434 if f is not None:
1432 1435 # Get the changenode this manifest belongs to
1433 1436 clnode = msng_mnfst_set[mnfstnode]
1434 1437 # Create the set of filenodes for the file if
1435 1438 # there isn't one already.
1436 1439 ndset = msng_filenode_set.setdefault(f, {})
1437 1440 # And set the filenode's changelog node to the
1438 1441 # manifest's if it hasn't been set already.
1439 1442 ndset.setdefault(fnode, clnode)
1440 1443 else:
1441 1444 # Otherwise we need a full manifest.
1442 1445 m = mnfst.read(mnfstnode)
1443 1446 # For every file in we care about.
1444 1447 for f in changedfiles:
1445 1448 fnode = m.get(f, None)
1446 1449 # If it's in the manifest
1447 1450 if fnode is not None:
1448 1451 # See comments above.
1449 1452 clnode = msng_mnfst_set[mnfstnode]
1450 1453 ndset = msng_filenode_set.setdefault(f, {})
1451 1454 ndset.setdefault(fnode, clnode)
1452 1455 # Remember the revision we hope to see next.
1453 1456 next_rev[0] = r + 1
1454 1457 return collect_msng_filenodes
1455 1458
1456 1459 # We have a list of filenodes we think we need for a file, lets remove
1457 1460 # all those we now the recipient must have.
1458 1461 def prune_filenodes(f, filerevlog):
1459 1462 msngset = msng_filenode_set[f]
1460 1463 hasset = {}
1461 1464 # If a 'missing' filenode thinks it belongs to a changenode we
1462 1465 # assume the recipient must have, then the recipient must have
1463 1466 # that filenode.
1464 1467 for n in msngset:
1465 1468 clnode = cl.node(filerevlog.linkrev(n))
1466 1469 if clnode in has_cl_set:
1467 1470 hasset[n] = 1
1468 1471 prune_parents(filerevlog, hasset, msngset)
1469 1472
1470 1473 # A function generator function that sets up the a context for the
1471 1474 # inner function.
1472 1475 def lookup_filenode_link_func(fname):
1473 1476 msngset = msng_filenode_set[fname]
1474 1477 # Lookup the changenode the filenode belongs to.
1475 1478 def lookup_filenode_link(fnode):
1476 1479 return msngset[fnode]
1477 1480 return lookup_filenode_link
1478 1481
1479 1482 # Now that we have all theses utility functions to help out and
1480 1483 # logically divide up the task, generate the group.
1481 1484 def gengroup():
1482 1485 # The set of changed files starts empty.
1483 1486 changedfiles = {}
1484 1487 # Create a changenode group generator that will call our functions
1485 1488 # back to lookup the owning changenode and collect information.
1486 1489 group = cl.group(msng_cl_lst, identity,
1487 1490 manifest_and_file_collector(changedfiles))
1488 1491 for chnk in group:
1489 1492 yield chnk
1490 1493
1491 1494 # The list of manifests has been collected by the generator
1492 1495 # calling our functions back.
1493 1496 prune_manifests()
1494 1497 msng_mnfst_lst = msng_mnfst_set.keys()
1495 1498 # Sort the manifestnodes by revision number.
1496 1499 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1497 1500 # Create a generator for the manifestnodes that calls our lookup
1498 1501 # and data collection functions back.
1499 1502 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1500 1503 filenode_collector(changedfiles))
1501 1504 for chnk in group:
1502 1505 yield chnk
1503 1506
1504 1507 # These are no longer needed, dereference and toss the memory for
1505 1508 # them.
1506 1509 msng_mnfst_lst = None
1507 1510 msng_mnfst_set.clear()
1508 1511
1509 1512 changedfiles = changedfiles.keys()
1510 1513 changedfiles.sort()
1511 1514 # Go through all our files in order sorted by name.
1512 1515 for fname in changedfiles:
1513 1516 filerevlog = self.file(fname)
1514 1517 # Toss out the filenodes that the recipient isn't really
1515 1518 # missing.
1516 1519 if msng_filenode_set.has_key(fname):
1517 1520 prune_filenodes(fname, filerevlog)
1518 1521 msng_filenode_lst = msng_filenode_set[fname].keys()
1519 1522 else:
1520 1523 msng_filenode_lst = []
1521 1524 # If any filenodes are left, generate the group for them,
1522 1525 # otherwise don't bother.
1523 1526 if len(msng_filenode_lst) > 0:
1524 1527 yield changegroup.genchunk(fname)
1525 1528 # Sort the filenodes by their revision #
1526 1529 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1527 1530 # Create a group generator and only pass in a changenode
1528 1531 # lookup function as we need to collect no information
1529 1532 # from filenodes.
1530 1533 group = filerevlog.group(msng_filenode_lst,
1531 1534 lookup_filenode_link_func(fname))
1532 1535 for chnk in group:
1533 1536 yield chnk
1534 1537 if msng_filenode_set.has_key(fname):
1535 1538 # Don't need this anymore, toss it to free memory.
1536 1539 del msng_filenode_set[fname]
1537 1540 # Signal that no more groups are left.
1538 1541 yield changegroup.closechunk()
1539 1542
1540 1543 if msng_cl_lst:
1541 1544 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1542 1545
1543 1546 return util.chunkbuffer(gengroup())
1544 1547
1545 1548 def changegroup(self, basenodes, source):
1546 1549 """Generate a changegroup of all nodes that we have that a recipient
1547 1550 doesn't.
1548 1551
1549 1552 This is much easier than the previous function as we can assume that
1550 1553 the recipient has any changenode we aren't sending them."""
1551 1554
1552 1555 self.hook('preoutgoing', throw=True, source=source)
1553 1556
1554 1557 cl = self.changelog
1555 1558 nodes = cl.nodesbetween(basenodes, None)[0]
1556 1559 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1557 1560
1558 1561 def identity(x):
1559 1562 return x
1560 1563
1561 1564 def gennodelst(revlog):
1562 1565 for r in xrange(0, revlog.count()):
1563 1566 n = revlog.node(r)
1564 1567 if revlog.linkrev(n) in revset:
1565 1568 yield n
1566 1569
1567 1570 def changed_file_collector(changedfileset):
1568 1571 def collect_changed_files(clnode):
1569 1572 c = cl.read(clnode)
1570 1573 for fname in c[3]:
1571 1574 changedfileset[fname] = 1
1572 1575 return collect_changed_files
1573 1576
1574 1577 def lookuprevlink_func(revlog):
1575 1578 def lookuprevlink(n):
1576 1579 return cl.node(revlog.linkrev(n))
1577 1580 return lookuprevlink
1578 1581
1579 1582 def gengroup():
1580 1583 # construct a list of all changed files
1581 1584 changedfiles = {}
1582 1585
1583 1586 for chnk in cl.group(nodes, identity,
1584 1587 changed_file_collector(changedfiles)):
1585 1588 yield chnk
1586 1589 changedfiles = changedfiles.keys()
1587 1590 changedfiles.sort()
1588 1591
1589 1592 mnfst = self.manifest
1590 1593 nodeiter = gennodelst(mnfst)
1591 1594 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1592 1595 yield chnk
1593 1596
1594 1597 for fname in changedfiles:
1595 1598 filerevlog = self.file(fname)
1596 1599 nodeiter = gennodelst(filerevlog)
1597 1600 nodeiter = list(nodeiter)
1598 1601 if nodeiter:
1599 1602 yield changegroup.genchunk(fname)
1600 1603 lookup = lookuprevlink_func(filerevlog)
1601 1604 for chnk in filerevlog.group(nodeiter, lookup):
1602 1605 yield chnk
1603 1606
1604 1607 yield changegroup.closechunk()
1605 1608
1606 1609 if nodes:
1607 1610 self.hook('outgoing', node=hex(nodes[0]), source=source)
1608 1611
1609 1612 return util.chunkbuffer(gengroup())
1610 1613
1611 1614 def addchangegroup(self, source, srctype, url):
1612 1615 """add changegroup to repo.
1613 1616 returns number of heads modified or added + 1."""
1614 1617
1615 1618 def csmap(x):
1616 1619 self.ui.debug(_("add changeset %s\n") % short(x))
1617 1620 return cl.count()
1618 1621
1619 1622 def revmap(x):
1620 1623 return cl.rev(x)
1621 1624
1622 1625 if not source:
1623 1626 return 0
1624 1627
1625 1628 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1626 1629
1627 1630 changesets = files = revisions = 0
1628 1631
1629 1632 tr = self.transaction()
1630 1633
1631 1634 # write changelog data to temp files so concurrent readers will not see
1632 1635 # inconsistent view
1633 1636 cl = None
1634 1637 try:
1635 1638 cl = appendfile.appendchangelog(self.opener, self.changelog.version)
1636 1639
1637 1640 oldheads = len(cl.heads())
1638 1641
1639 1642 # pull off the changeset group
1640 1643 self.ui.status(_("adding changesets\n"))
1641 1644 cor = cl.count() - 1
1642 1645 chunkiter = changegroup.chunkiter(source)
1643 1646 if cl.addgroup(chunkiter, csmap, tr, 1) is None:
1644 1647 raise util.Abort(_("received changelog group is empty"))
1645 1648 cnr = cl.count() - 1
1646 1649 changesets = cnr - cor
1647 1650
1648 1651 # pull off the manifest group
1649 1652 self.ui.status(_("adding manifests\n"))
1650 1653 chunkiter = changegroup.chunkiter(source)
1651 1654 # no need to check for empty manifest group here:
1652 1655 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1653 1656 # no new manifest will be created and the manifest group will
1654 1657 # be empty during the pull
1655 1658 self.manifest.addgroup(chunkiter, revmap, tr)
1656 1659
1657 1660 # process the files
1658 1661 self.ui.status(_("adding file changes\n"))
1659 1662 while 1:
1660 1663 f = changegroup.getchunk(source)
1661 1664 if not f:
1662 1665 break
1663 1666 self.ui.debug(_("adding %s revisions\n") % f)
1664 1667 fl = self.file(f)
1665 1668 o = fl.count()
1666 1669 chunkiter = changegroup.chunkiter(source)
1667 1670 if fl.addgroup(chunkiter, revmap, tr) is None:
1668 1671 raise util.Abort(_("received file revlog group is empty"))
1669 1672 revisions += fl.count() - o
1670 1673 files += 1
1671 1674
1672 1675 cl.writedata()
1673 1676 finally:
1674 1677 if cl:
1675 1678 cl.cleanup()
1676 1679
1677 1680 # make changelog see real files again
1678 1681 self.changelog = changelog.changelog(self.opener, self.changelog.version)
1679 1682 self.changelog.checkinlinesize(tr)
1680 1683
1681 1684 newheads = len(self.changelog.heads())
1682 1685 heads = ""
1683 1686 if oldheads and newheads != oldheads:
1684 1687 heads = _(" (%+d heads)") % (newheads - oldheads)
1685 1688
1686 1689 self.ui.status(_("added %d changesets"
1687 1690 " with %d changes to %d files%s\n")
1688 1691 % (changesets, revisions, files, heads))
1689 1692
1690 1693 if changesets > 0:
1691 1694 self.hook('pretxnchangegroup', throw=True,
1692 1695 node=hex(self.changelog.node(cor+1)), source=srctype,
1693 1696 url=url)
1694 1697
1695 1698 tr.close()
1696 1699
1697 1700 if changesets > 0:
1698 1701 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1699 1702 source=srctype, url=url)
1700 1703
1701 1704 for i in range(cor + 1, cnr + 1):
1702 1705 self.hook("incoming", node=hex(self.changelog.node(i)),
1703 1706 source=srctype, url=url)
1704 1707
1705 1708 return newheads - oldheads + 1
1706 1709
1707 1710
1708 1711 def stream_in(self, remote):
1709 1712 fp = remote.stream_out()
1710 1713 resp = int(fp.readline())
1711 1714 if resp != 0:
1712 1715 raise util.Abort(_('operation forbidden by server'))
1713 1716 self.ui.status(_('streaming all changes\n'))
1714 1717 total_files, total_bytes = map(int, fp.readline().split(' ', 1))
1715 1718 self.ui.status(_('%d files to transfer, %s of data\n') %
1716 1719 (total_files, util.bytecount(total_bytes)))
1717 1720 start = time.time()
1718 1721 for i in xrange(total_files):
1719 1722 name, size = fp.readline().split('\0', 1)
1720 1723 size = int(size)
1721 1724 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1722 1725 ofp = self.opener(name, 'w')
1723 1726 for chunk in util.filechunkiter(fp, limit=size):
1724 1727 ofp.write(chunk)
1725 1728 ofp.close()
1726 1729 elapsed = time.time() - start
1727 1730 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1728 1731 (util.bytecount(total_bytes), elapsed,
1729 1732 util.bytecount(total_bytes / elapsed)))
1730 1733 self.reload()
1731 1734 return len(self.heads()) + 1
1732 1735
1733 1736 def clone(self, remote, heads=[], stream=False):
1734 1737 '''clone remote repository.
1735 1738
1736 1739 keyword arguments:
1737 1740 heads: list of revs to clone (forces use of pull)
1738 1741 stream: use streaming clone if possible'''
1739 1742
1740 1743 # now, all clients that can request uncompressed clones can
1741 1744 # read repo formats supported by all servers that can serve
1742 1745 # them.
1743 1746
1744 1747 # if revlog format changes, client will have to check version
1745 1748 # and format flags on "stream" capability, and use
1746 1749 # uncompressed only if compatible.
1747 1750
1748 1751 if stream and not heads and remote.capable('stream'):
1749 1752 return self.stream_in(remote)
1750 1753 return self.pull(remote, heads)
1751 1754
# used to avoid circular references so destructors work
def aftertrans(base):
    """Return a callback that renames the journal files under `base` to
    their undo names.  Only the path string is captured, so the closure
    holds no reference back to the repository object."""
    p = base
    def a():
        for oldname, newname in (("journal", "undo"),
                                 ("journal.dirstate", "undo.dirstate")):
            util.rename(os.path.join(p, oldname), os.path.join(p, newname))
    return a
1760 1763
def instance(ui, path, create):
    """Open (or create) the local repository at `path` (file:// allowed)."""
    repopath = util.drop_scheme('file', path)
    return localrepository(ui, repopath, create)
1763 1766
def islocal(path):
    """Repositories handled by this module are always local."""
    return True
@@ -1,429 +1,408 b''
1 1 # merge.py - directory-level update/merge handling for Mercurial
2 2 #
3 3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import *
9 9 from i18n import gettext as _
10 10 from demandload import *
11 11 demandload(globals(), "errno util os tempfile")
12 12
def filemerge(repo, fw, fo, fd, my, other, p1, p2, move):
    """perform a 3-way merge in the working directory

    fw = filename in the working directory and first parent
    fo = filename in other parent
    fd = destination filename
    my = fileid in first parent
    other = fileid in second parent
    p1, p2 = hex changeset ids for merge command
    move = whether to move or copy the file to the destination

    TODO:
    if fw is copied in the working directory, we get confused
    implement move and fd
    """

    def temp(prefix, ctx):
        # dump a file context into a uniquely named temporary file
        pre = "%s~%s." % (os.path.basename(ctx.path()), prefix)
        (fdesc, name) = tempfile.mkstemp(prefix=pre)
        out = os.fdopen(fdesc, "wb")
        repo.wwrite(ctx.path(), ctx.data(), out)
        out.close()
        return name

    fcm = repo.filectx(fw, fileid=my)
    fco = repo.filectx(fo, fileid=other)
    fca = fcm.ancestor(fco)
    if not fca:
        # no common ancestor: merge against the empty revision
        fca = repo.filectx(fw, fileid=-1)

    a = repo.wjoin(fw)
    b = temp("base", fca)
    c = temp("other", fco)

    repo.ui.note(_("resolving %s\n") % fw)
    repo.ui.debug(_("my %s other %s ancestor %s\n") % (fcm, fco, fca))

    # external merge tool: $HGMERGE, then [ui] merge, then "hgmerge"
    cmd = (os.environ.get("HGMERGE") or repo.ui.config("ui", "merge")
           or "hgmerge")
    r = util.system('%s "%s" "%s" "%s"' % (cmd, a, b, c), cwd=repo.root,
                    environ={'HG_FILE': fw,
                             'HG_MY_NODE': p1,
                             'HG_OTHER_NODE': p2})
    if r:
        repo.ui.warn(_("merging %s failed!\n") % fw)

    os.unlink(b)
    os.unlink(c)
    return r
61 61
def checkunknown(repo, m2, wctx):
    """
    check for collisions between unknown files and files in m2
    """
    for f in wctx.unknown():
        # abort only if the unknown file would be clobbered by a
        # differing remote version
        if f in m2 and repo.file(f).cmp(m2[f], repo.wread(f)):
            raise util.Abort(_("'%s' already exists in the working"
                               " dir and differs from remote") % f)
72 71
73 def workingmanifest(repo, man, status):
74 """
75 Update manifest to correspond to the working directory
76 """
77
78 copied = repo.dirstate.copies()
79 modified, added, removed, deleted, unknown = status[:5]
80 for i,l in (("a", added), ("m", modified), ("u", unknown)):
81 for f in l:
82 man[f] = man.get(copied.get(f, f), nullid) + i
83 man.set(f, util.is_exec(repo.wjoin(f), man.execf(f)))
84
85 for f in deleted + removed:
86 del man[f]
87
88 return man
89
def forgetremoved(m2, wctx):
    """
    Forget removed files

    If we're jumping between revisions (as opposed to merging), and if
    neither the working directory nor the target rev has the file,
    then we need to remove it from the dirstate, to prevent the
    dirstate from listing the file when it is no longer in the
    manifest.
    """
    gone = wctx.deleted() + wctx.removed()
    return [(f, "f") for f in gone if f not in m2]
109 90
def nonoverlap(d1, d2):
    """
    Return a sorted list of the elements of d1 that are not in d2.
    """
    # sorted() does the filter-then-sort in one idiomatic pass
    return sorted(d for d in d1 if d not in d2)
122 103
def findold(fctx, limit):
    """
    find files that path was copied from, back to linkrev limit
    """
    orig = fctx.path()
    seen = {}
    # depth-first walk over the file's ancestry
    stack = [fctx]
    while stack:
        cur = stack.pop()
        if cur.rev() < limit:
            # too old -- don't record and don't descend further
            continue
        p = cur.path()
        if p != orig:
            seen[p] = 1
        stack.extend(cur.parents())
    return sorted(seen)
142 123
def findcopies(repo, m1, m2, limit):
    """
    Find moves and copies between m1 and m2 back to limit linkrev
    """

    # avoid silly behavior for update from empty dir
    if not m1:
        return {}

    dcopies = repo.dirstate.copies()
    copy = {}
    match = {}
    u1 = nonoverlap(m1, m2)
    u2 = nonoverlap(m2, m1)
    # memoized file-context factory; n[:20] strips any flag suffix
    ctx = util.cachefunc(lambda f, n: repo.filectx(f, fileid=n[:20]))

    def checkpair(c, f2, man):
        ''' check if an apparent pair actually matches '''
        c2 = ctx(f2, man[f2])
        ca = c.ancestor(c2)
        if ca:
            # record the rename in both directions
            copy[c.path()] = f2
            copy[f2] = c.path()

    # files only in m1: look for their old names on the other side
    for f in u1:
        c = ctx(dcopies.get(f, f), m1[f])
        for of in findold(c, limit):
            if of in m2:
                checkpair(c, of, m2)
            else:
                # remember the old name in case the other side was
                # renamed from it as well (divergent renames)
                match.setdefault(of, []).append(f)

    # files only in m2: check against m1 and against remembered names
    for f in u2:
        c = ctx(f, m2[f])
        for of in findold(c, limit):
            if of in m1:
                checkpair(c, of, m1)
            elif of in match:
                for mf in match[of]:
                    checkpair(c, mf, m1)

    return copy
185 166
def filtermanifest(man, partial):
    """Restrict manifest `man` in place to filenames matching `partial`.

    `partial` is a predicate on filenames; a false value disables
    filtering entirely.
    """
    if partial:
        # iterate over a snapshot of the keys: deleting from a dict
        # while iterating its live key view is an error in modern Python
        for k in list(man.keys()):
            if not partial(k):
                del man[k]
190 171
def manifestmerge(ui, m1, m2, ma, overwrite, backwards):
    """
    Merge manifest m1 with m2 using ancestor ma and generate merge action list
    """

    def fmerge(f):
        """merge executable flags"""
        a, b, c = ma.execf(f), m1.execf(f), m2.execf(f)
        # keep a side's flag if it changed it relative to the ancestor
        return ((a^b) | (a^c)) ^ a

    action = []

    def act(msg, f, m, *args):
        ui.debug(" %s: %s -> %s\n" % (f, msg, m))
        action.append((f, m) + args)

    # Compare manifests
    for f, n in m1.iteritems():
        if f in m2:
            # are files different?
            if n != m2[f]:
                a = ma.get(f, nullid)
                # are both different from the ancestor?
                if not overwrite and n != a and m2[f] != a:
                    act("versions differ", f, "m", fmerge(f), n[:20], m2[f])
                # are we clobbering?
                # is remote's version newer?
                # or are we going back in time and clean?
                elif overwrite or m2[f] != a or (backwards and not n[20:]):
                    act("remote is newer", f, "g", m2.execf(f), m2[f])
                # local is newer, not overwrite, check mode bits
                elif fmerge(f) != m1.execf(f):
                    act("update permissions", f, "e", m2.execf(f))
            # contents same, check mode bits
            elif m1.execf(f) != m2.execf(f):
                if overwrite or fmerge(f) != m1.execf(f):
                    act("update permissions", f, "e", m2.execf(f))
            # handled; leave only m2-exclusive files for the second loop
            del m2[f]
        elif f in ma:
            if n != ma[f] and not overwrite:
                if ui.prompt(
                    (_(" local changed %s which remote deleted\n") % f) +
                    _("(k)eep or (d)elete?"), _("[kd]"), _("k")) == _("d"):
                    act("prompt delete", f, "r")
            else:
                act("other deleted", f, "r")
        else:
            # file is created on branch or in working directory
            if (overwrite and n[20:] != "u") or (backwards and not n[20:]):
                act("remote deleted", f, "r")

    # what is left in m2 exists only on the remote side
    for f, n in m2.iteritems():
        if f in ma:
            if overwrite or backwards:
                act("recreating", f, "g", m2.execf(f), n)
            elif n != ma[f]:
                if ui.prompt(
                    (_("remote changed %s which local deleted\n") % f) +
                    _("(k)eep or (d)elete?"), _("[kd]"), _("k")) == _("k"):
                    act("prompt recreating", f, "g", m2.execf(f), n)
        else:
            act("remote created", f, "g", m2.execf(f), n)

    return action
255 236
def applyupdates(repo, action, xp1, xp2):
    """Apply a manifestmerge action list to the working directory.

    Returns the counters (updated, merged, removed, unresolved).
    """
    updated, merged, removed, unresolved = 0, 0, 0, 0
    action.sort()
    for a in action:
        f, m = a[:2]
        if f[0] == "/":
            # internal metadata entry, nothing to do on disk
            continue
        if m == "r": # remove
            repo.ui.note(_("removing %s\n") % f)
            util.audit_path(f)
            try:
                util.unlink(repo.wjoin(f))
            except OSError as inst:
                # ENOENT just means the file was already gone
                if inst.errno != errno.ENOENT:
                    repo.ui.warn(_("update failed to remove %s: %s!\n") %
                                 (f, inst.strerror))
            removed += 1
        elif m == "m": # merge
            flag, my, other = a[2:]
            repo.ui.status(_("merging %s\n") % f)
            if filemerge(repo, f, f, f, my, other, xp1, xp2, False):
                unresolved += 1
            util.set_exec(repo.wjoin(f), flag)
            merged += 1
        elif m == "g": # get
            flag, node = a[2:]
            repo.ui.note(_("getting %s\n") % f)
            t = repo.file(f).read(node)
            repo.wwrite(f, t)
            util.set_exec(repo.wjoin(f), flag)
            updated += 1
        elif m == "e": # exec
            # a is (f, "e", flag); take the flag itself, not the
            # one-element slice a[2:] -- a non-empty tuple is always
            # truthy, which would force the exec bit on even when the
            # flag is False
            flag = a[2]
            util.set_exec(repo.wjoin(f), flag)

    return updated, merged, removed, unresolved
292 273
def recordupdates(repo, action, branchmerge):
    """Reflect an applied action list in the dirstate."""
    for a in action:
        f, m = a[:2]
        if m == "r": # remove
            if branchmerge:
                # mark removed so the merge commit records the deletion
                repo.dirstate.update([f], 'r')
            else:
                repo.dirstate.forget([f])
        elif m == "f": # forget
            repo.dirstate.forget([f])
        elif m == "g": # get
            if branchmerge:
                # mtime -1 forces status to re-check the contents later
                repo.dirstate.update([f], 'n', st_mtime=-1)
            else:
                repo.dirstate.update([f], 'n')
        elif m == "m": # merge
            flag, my, other = a[2:]
            if branchmerge:
                # We've done a branch merge, mark this file as merged
                # so that we properly record the merger later
                repo.dirstate.update([f], 'm')
            else:
                # We've update-merged a locally modified file, so
                # we set the dirstate to emulate a normal checkout
                # of that file some time in the past. Thus our
                # merge will appear as a normal local file
                # modification.
                fl = repo.file(f)
                f_len = fl.size(fl.rev(other))
                repo.dirstate.update([f], 'n', st_size=f_len, st_mtime=-1)
323 304
def update(repo, node, branchmerge=False, force=False, partial=None,
           wlock=None, show_stats=True, remind=True):
    """Update the working directory to `node`, merging if requested.

    branchmerge: merge with the second parent instead of jumping revs
    force: allow overwrite / merge despite outstanding changes
    partial: predicate limiting the update to matching filenames
    Returns the number of unresolved files.
    """

    overwrite = force and not branchmerge
    forcemerge = force and branchmerge

    if not wlock:
        wlock = repo.wlock()

    ### check phase

    wc = repo.workingctx()
    pl = wc.parents()
    if not overwrite and len(pl) > 1:
        raise util.Abort(_("outstanding uncommitted merges"))

    p1, p2 = pl[0], repo.changectx(node)
    pa = p1.ancestor(p2)

    # are we going backwards?
    backwards = (pa == p2)

    # is there a linear path from p1 to p2?
    if pa == p1 or pa == p2:
        if branchmerge:
            raise util.Abort(_("there is nothing to merge, just use "
                               "'hg update' or look at 'hg heads'"))
    elif not (overwrite or branchmerge):
        raise util.Abort(_("update spans branches, use 'hg merge' "
                           "or 'hg update -C' to lose changes"))

    if branchmerge and not forcemerge:
        if wc.modified() or wc.added() or wc.removed():
            raise util.Abort(_("outstanding uncommitted changes"))

    # m1 reflects the working directory via the working context
    m1 = wc.manifest().copy()
    m2 = p2.manifest().copy()
    ma = pa.manifest()

    # resolve the manifest to determine which files
    # we care about merging
    repo.ui.note(_("resolving manifests\n"))
    repo.ui.debug(_(" overwrite %s branchmerge %s partial %s\n") %
                  (overwrite, branchmerge, bool(partial)))
    repo.ui.debug(_(" ancestor %s local %s remote %s\n") % (p1, p2, pa))

    action = []
    copy = {}

    filtermanifest(m1, partial)
    filtermanifest(m2, partial)

    if not force:
        checkunknown(repo, m2, wc)
    if not branchmerge:
        action += forgetremoved(m2, wc)
    if not (backwards or overwrite):
        copy = findcopies(repo, m1, m2, pa.rev())

    action += manifestmerge(repo.ui, m1, m2, ma, overwrite, backwards)
    del m1, m2, ma

    ### apply phase

    if not branchmerge:
        # we don't need to do any magic, just jump to the new rev
        p1, p2 = p2, repo.changectx(nullid)

    xp1, xp2 = str(p1), str(p2)
    if not p2:
        xp2 = ''

    repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)

    updated, merged, removed, unresolved = applyupdates(repo, action, xp1, xp2)

    # update dirstate
    if not partial:
        repo.dirstate.setparents(p1.node(), p2.node())
        recordupdates(repo, action, branchmerge)

    if show_stats:
        stats = ((updated, _("updated")),
                 (merged - unresolved, _("merged")),
                 (removed, _("removed")),
                 (unresolved, _("unresolved")))
        note = ", ".join([_("%d files %s") % s for s in stats])
        repo.ui.status("%s\n" % note)
    if not partial:
        if branchmerge:
            if unresolved:
                repo.ui.status(_("There are unresolved merges,"
                                 " you can redo the full merge using:\n"
                                 "  hg update -C %s\n"
                                 "  hg merge %s\n"
                                 % (p1.rev(), p2.rev())))
            elif remind:
                repo.ui.status(_("(branch merge, don't forget to commit)\n"))
        elif unresolved:
            repo.ui.status(_("There are unresolved merges with"
                             " locally modified files.\n"))

    repo.hook('update', parent1=xp1, parent2=xp2, error=unresolved)
    return unresolved
429 408
General Comments 0
You need to be logged in to leave comments. Login now