##// END OF EJS Templates
lookup: fast-paths for int and 'tip'
Matt Mackall -
r7377:374a6b3a default
parent child Browse files
Show More
@@ -1,2125 +1,2129 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import bin, hex, nullid, nullrev, short
8 from node import bin, hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import repo, changegroup
10 import repo, changegroup
11 import changelog, dirstate, filelog, manifest, context, weakref
11 import changelog, dirstate, filelog, manifest, context, weakref
12 import lock, transaction, stat, errno, ui, store
12 import lock, transaction, stat, errno, ui, store
13 import os, revlog, time, util, extensions, hook, inspect
13 import os, revlog, time, util, extensions, hook, inspect
14 import match as match_
14 import match as match_
15 import merge as merge_
15 import merge as merge_
16
16
17 class localrepository(repo.repository):
17 class localrepository(repo.repository):
18 capabilities = util.set(('lookup', 'changegroupsubset'))
18 capabilities = util.set(('lookup', 'changegroupsubset'))
19 supported = ('revlogv1', 'store', 'fncache')
19 supported = ('revlogv1', 'store', 'fncache')
20
20
21 def __init__(self, parentui, path=None, create=0):
21 def __init__(self, parentui, path=None, create=0):
22 repo.repository.__init__(self)
22 repo.repository.__init__(self)
23 self.root = os.path.realpath(path)
23 self.root = os.path.realpath(path)
24 self.path = os.path.join(self.root, ".hg")
24 self.path = os.path.join(self.root, ".hg")
25 self.origroot = path
25 self.origroot = path
26 self.opener = util.opener(self.path)
26 self.opener = util.opener(self.path)
27 self.wopener = util.opener(self.root)
27 self.wopener = util.opener(self.root)
28
28
29 if not os.path.isdir(self.path):
29 if not os.path.isdir(self.path):
30 if create:
30 if create:
31 if not os.path.exists(path):
31 if not os.path.exists(path):
32 os.mkdir(path)
32 os.mkdir(path)
33 os.mkdir(self.path)
33 os.mkdir(self.path)
34 requirements = ["revlogv1"]
34 requirements = ["revlogv1"]
35 if parentui.configbool('format', 'usestore', True):
35 if parentui.configbool('format', 'usestore', True):
36 os.mkdir(os.path.join(self.path, "store"))
36 os.mkdir(os.path.join(self.path, "store"))
37 requirements.append("store")
37 requirements.append("store")
38 if parentui.configbool('format', 'usefncache', True):
38 if parentui.configbool('format', 'usefncache', True):
39 requirements.append("fncache")
39 requirements.append("fncache")
40 # create an invalid changelog
40 # create an invalid changelog
41 self.opener("00changelog.i", "a").write(
41 self.opener("00changelog.i", "a").write(
42 '\0\0\0\2' # represents revlogv2
42 '\0\0\0\2' # represents revlogv2
43 ' dummy changelog to prevent using the old repo layout'
43 ' dummy changelog to prevent using the old repo layout'
44 )
44 )
45 reqfile = self.opener("requires", "w")
45 reqfile = self.opener("requires", "w")
46 for r in requirements:
46 for r in requirements:
47 reqfile.write("%s\n" % r)
47 reqfile.write("%s\n" % r)
48 reqfile.close()
48 reqfile.close()
49 else:
49 else:
50 raise repo.RepoError(_("repository %s not found") % path)
50 raise repo.RepoError(_("repository %s not found") % path)
51 elif create:
51 elif create:
52 raise repo.RepoError(_("repository %s already exists") % path)
52 raise repo.RepoError(_("repository %s already exists") % path)
53 else:
53 else:
54 # find requirements
54 # find requirements
55 requirements = []
55 requirements = []
56 try:
56 try:
57 requirements = self.opener("requires").read().splitlines()
57 requirements = self.opener("requires").read().splitlines()
58 for r in requirements:
58 for r in requirements:
59 if r not in self.supported:
59 if r not in self.supported:
60 raise repo.RepoError(_("requirement '%s' not supported") % r)
60 raise repo.RepoError(_("requirement '%s' not supported") % r)
61 except IOError, inst:
61 except IOError, inst:
62 if inst.errno != errno.ENOENT:
62 if inst.errno != errno.ENOENT:
63 raise
63 raise
64
64
65 self.store = store.store(requirements, self.path, util.opener)
65 self.store = store.store(requirements, self.path, util.opener)
66 self.spath = self.store.path
66 self.spath = self.store.path
67 self.sopener = self.store.opener
67 self.sopener = self.store.opener
68 self.sjoin = self.store.join
68 self.sjoin = self.store.join
69 self.opener.createmode = self.store.createmode
69 self.opener.createmode = self.store.createmode
70
70
71 self.ui = ui.ui(parentui=parentui)
71 self.ui = ui.ui(parentui=parentui)
72 try:
72 try:
73 self.ui.readconfig(self.join("hgrc"), self.root)
73 self.ui.readconfig(self.join("hgrc"), self.root)
74 extensions.loadall(self.ui)
74 extensions.loadall(self.ui)
75 except IOError:
75 except IOError:
76 pass
76 pass
77
77
78 self.tagscache = None
78 self.tagscache = None
79 self._tagstypecache = None
79 self._tagstypecache = None
80 self.branchcache = None
80 self.branchcache = None
81 self._ubranchcache = None # UTF-8 version of branchcache
81 self._ubranchcache = None # UTF-8 version of branchcache
82 self._branchcachetip = None
82 self._branchcachetip = None
83 self.nodetagscache = None
83 self.nodetagscache = None
84 self.filterpats = {}
84 self.filterpats = {}
85 self._datafilters = {}
85 self._datafilters = {}
86 self._transref = self._lockref = self._wlockref = None
86 self._transref = self._lockref = self._wlockref = None
87
87
88 def __getattr__(self, name):
88 def __getattr__(self, name):
89 if name == 'changelog':
89 if name == 'changelog':
90 self.changelog = changelog.changelog(self.sopener)
90 self.changelog = changelog.changelog(self.sopener)
91 self.sopener.defversion = self.changelog.version
91 self.sopener.defversion = self.changelog.version
92 return self.changelog
92 return self.changelog
93 if name == 'manifest':
93 if name == 'manifest':
94 self.changelog
94 self.changelog
95 self.manifest = manifest.manifest(self.sopener)
95 self.manifest = manifest.manifest(self.sopener)
96 return self.manifest
96 return self.manifest
97 if name == 'dirstate':
97 if name == 'dirstate':
98 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
98 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
99 return self.dirstate
99 return self.dirstate
100 else:
100 else:
101 raise AttributeError(name)
101 raise AttributeError(name)
102
102
103 def __getitem__(self, changeid):
103 def __getitem__(self, changeid):
104 if changeid == None:
104 if changeid == None:
105 return context.workingctx(self)
105 return context.workingctx(self)
106 return context.changectx(self, changeid)
106 return context.changectx(self, changeid)
107
107
108 def __nonzero__(self):
108 def __nonzero__(self):
109 return True
109 return True
110
110
111 def __len__(self):
111 def __len__(self):
112 return len(self.changelog)
112 return len(self.changelog)
113
113
114 def __iter__(self):
114 def __iter__(self):
115 for i in xrange(len(self)):
115 for i in xrange(len(self)):
116 yield i
116 yield i
117
117
118 def url(self):
118 def url(self):
119 return 'file:' + self.root
119 return 'file:' + self.root
120
120
121 def hook(self, name, throw=False, **args):
121 def hook(self, name, throw=False, **args):
122 return hook.hook(self.ui, self, name, throw, **args)
122 return hook.hook(self.ui, self, name, throw, **args)
123
123
124 tag_disallowed = ':\r\n'
124 tag_disallowed = ':\r\n'
125
125
126 def _tag(self, names, node, message, local, user, date, parent=None,
126 def _tag(self, names, node, message, local, user, date, parent=None,
127 extra={}):
127 extra={}):
128 use_dirstate = parent is None
128 use_dirstate = parent is None
129
129
130 if isinstance(names, str):
130 if isinstance(names, str):
131 allchars = names
131 allchars = names
132 names = (names,)
132 names = (names,)
133 else:
133 else:
134 allchars = ''.join(names)
134 allchars = ''.join(names)
135 for c in self.tag_disallowed:
135 for c in self.tag_disallowed:
136 if c in allchars:
136 if c in allchars:
137 raise util.Abort(_('%r cannot be used in a tag name') % c)
137 raise util.Abort(_('%r cannot be used in a tag name') % c)
138
138
139 for name in names:
139 for name in names:
140 self.hook('pretag', throw=True, node=hex(node), tag=name,
140 self.hook('pretag', throw=True, node=hex(node), tag=name,
141 local=local)
141 local=local)
142
142
143 def writetags(fp, names, munge, prevtags):
143 def writetags(fp, names, munge, prevtags):
144 fp.seek(0, 2)
144 fp.seek(0, 2)
145 if prevtags and prevtags[-1] != '\n':
145 if prevtags and prevtags[-1] != '\n':
146 fp.write('\n')
146 fp.write('\n')
147 for name in names:
147 for name in names:
148 m = munge and munge(name) or name
148 m = munge and munge(name) or name
149 if self._tagstypecache and name in self._tagstypecache:
149 if self._tagstypecache and name in self._tagstypecache:
150 old = self.tagscache.get(name, nullid)
150 old = self.tagscache.get(name, nullid)
151 fp.write('%s %s\n' % (hex(old), m))
151 fp.write('%s %s\n' % (hex(old), m))
152 fp.write('%s %s\n' % (hex(node), m))
152 fp.write('%s %s\n' % (hex(node), m))
153 fp.close()
153 fp.close()
154
154
155 prevtags = ''
155 prevtags = ''
156 if local:
156 if local:
157 try:
157 try:
158 fp = self.opener('localtags', 'r+')
158 fp = self.opener('localtags', 'r+')
159 except IOError, err:
159 except IOError, err:
160 fp = self.opener('localtags', 'a')
160 fp = self.opener('localtags', 'a')
161 else:
161 else:
162 prevtags = fp.read()
162 prevtags = fp.read()
163
163
164 # local tags are stored in the current charset
164 # local tags are stored in the current charset
165 writetags(fp, names, None, prevtags)
165 writetags(fp, names, None, prevtags)
166 for name in names:
166 for name in names:
167 self.hook('tag', node=hex(node), tag=name, local=local)
167 self.hook('tag', node=hex(node), tag=name, local=local)
168 return
168 return
169
169
170 if use_dirstate:
170 if use_dirstate:
171 try:
171 try:
172 fp = self.wfile('.hgtags', 'rb+')
172 fp = self.wfile('.hgtags', 'rb+')
173 except IOError, err:
173 except IOError, err:
174 fp = self.wfile('.hgtags', 'ab')
174 fp = self.wfile('.hgtags', 'ab')
175 else:
175 else:
176 prevtags = fp.read()
176 prevtags = fp.read()
177 else:
177 else:
178 try:
178 try:
179 prevtags = self.filectx('.hgtags', parent).data()
179 prevtags = self.filectx('.hgtags', parent).data()
180 except revlog.LookupError:
180 except revlog.LookupError:
181 pass
181 pass
182 fp = self.wfile('.hgtags', 'wb')
182 fp = self.wfile('.hgtags', 'wb')
183 if prevtags:
183 if prevtags:
184 fp.write(prevtags)
184 fp.write(prevtags)
185
185
186 # committed tags are stored in UTF-8
186 # committed tags are stored in UTF-8
187 writetags(fp, names, util.fromlocal, prevtags)
187 writetags(fp, names, util.fromlocal, prevtags)
188
188
189 if use_dirstate and '.hgtags' not in self.dirstate:
189 if use_dirstate and '.hgtags' not in self.dirstate:
190 self.add(['.hgtags'])
190 self.add(['.hgtags'])
191
191
192 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
192 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
193 extra=extra)
193 extra=extra)
194
194
195 for name in names:
195 for name in names:
196 self.hook('tag', node=hex(node), tag=name, local=local)
196 self.hook('tag', node=hex(node), tag=name, local=local)
197
197
198 return tagnode
198 return tagnode
199
199
200 def tag(self, names, node, message, local, user, date):
200 def tag(self, names, node, message, local, user, date):
201 '''tag a revision with one or more symbolic names.
201 '''tag a revision with one or more symbolic names.
202
202
203 names is a list of strings or, when adding a single tag, names may be a
203 names is a list of strings or, when adding a single tag, names may be a
204 string.
204 string.
205
205
206 if local is True, the tags are stored in a per-repository file.
206 if local is True, the tags are stored in a per-repository file.
207 otherwise, they are stored in the .hgtags file, and a new
207 otherwise, they are stored in the .hgtags file, and a new
208 changeset is committed with the change.
208 changeset is committed with the change.
209
209
210 keyword arguments:
210 keyword arguments:
211
211
212 local: whether to store tags in non-version-controlled file
212 local: whether to store tags in non-version-controlled file
213 (default False)
213 (default False)
214
214
215 message: commit message to use if committing
215 message: commit message to use if committing
216
216
217 user: name of user to use if committing
217 user: name of user to use if committing
218
218
219 date: date tuple to use if committing'''
219 date: date tuple to use if committing'''
220
220
221 for x in self.status()[:5]:
221 for x in self.status()[:5]:
222 if '.hgtags' in x:
222 if '.hgtags' in x:
223 raise util.Abort(_('working copy of .hgtags is changed '
223 raise util.Abort(_('working copy of .hgtags is changed '
224 '(please commit .hgtags manually)'))
224 '(please commit .hgtags manually)'))
225
225
226 self._tag(names, node, message, local, user, date)
226 self._tag(names, node, message, local, user, date)
227
227
228 def tags(self):
228 def tags(self):
229 '''return a mapping of tag to node'''
229 '''return a mapping of tag to node'''
230 if self.tagscache:
230 if self.tagscache:
231 return self.tagscache
231 return self.tagscache
232
232
233 globaltags = {}
233 globaltags = {}
234 tagtypes = {}
234 tagtypes = {}
235
235
236 def readtags(lines, fn, tagtype):
236 def readtags(lines, fn, tagtype):
237 filetags = {}
237 filetags = {}
238 count = 0
238 count = 0
239
239
240 def warn(msg):
240 def warn(msg):
241 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
241 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
242
242
243 for l in lines:
243 for l in lines:
244 count += 1
244 count += 1
245 if not l:
245 if not l:
246 continue
246 continue
247 s = l.split(" ", 1)
247 s = l.split(" ", 1)
248 if len(s) != 2:
248 if len(s) != 2:
249 warn(_("cannot parse entry"))
249 warn(_("cannot parse entry"))
250 continue
250 continue
251 node, key = s
251 node, key = s
252 key = util.tolocal(key.strip()) # stored in UTF-8
252 key = util.tolocal(key.strip()) # stored in UTF-8
253 try:
253 try:
254 bin_n = bin(node)
254 bin_n = bin(node)
255 except TypeError:
255 except TypeError:
256 warn(_("node '%s' is not well formed") % node)
256 warn(_("node '%s' is not well formed") % node)
257 continue
257 continue
258 if bin_n not in self.changelog.nodemap:
258 if bin_n not in self.changelog.nodemap:
259 warn(_("tag '%s' refers to unknown node") % key)
259 warn(_("tag '%s' refers to unknown node") % key)
260 continue
260 continue
261
261
262 h = []
262 h = []
263 if key in filetags:
263 if key in filetags:
264 n, h = filetags[key]
264 n, h = filetags[key]
265 h.append(n)
265 h.append(n)
266 filetags[key] = (bin_n, h)
266 filetags[key] = (bin_n, h)
267
267
268 for k, nh in filetags.items():
268 for k, nh in filetags.items():
269 if k not in globaltags:
269 if k not in globaltags:
270 globaltags[k] = nh
270 globaltags[k] = nh
271 tagtypes[k] = tagtype
271 tagtypes[k] = tagtype
272 continue
272 continue
273
273
274 # we prefer the global tag if:
274 # we prefer the global tag if:
275 # it supercedes us OR
275 # it supercedes us OR
276 # mutual supercedes and it has a higher rank
276 # mutual supercedes and it has a higher rank
277 # otherwise we win because we're tip-most
277 # otherwise we win because we're tip-most
278 an, ah = nh
278 an, ah = nh
279 bn, bh = globaltags[k]
279 bn, bh = globaltags[k]
280 if (bn != an and an in bh and
280 if (bn != an and an in bh and
281 (bn not in ah or len(bh) > len(ah))):
281 (bn not in ah or len(bh) > len(ah))):
282 an = bn
282 an = bn
283 ah.extend([n for n in bh if n not in ah])
283 ah.extend([n for n in bh if n not in ah])
284 globaltags[k] = an, ah
284 globaltags[k] = an, ah
285 tagtypes[k] = tagtype
285 tagtypes[k] = tagtype
286
286
287 # read the tags file from each head, ending with the tip
287 # read the tags file from each head, ending with the tip
288 f = None
288 f = None
289 for rev, node, fnode in self._hgtagsnodes():
289 for rev, node, fnode in self._hgtagsnodes():
290 f = (f and f.filectx(fnode) or
290 f = (f and f.filectx(fnode) or
291 self.filectx('.hgtags', fileid=fnode))
291 self.filectx('.hgtags', fileid=fnode))
292 readtags(f.data().splitlines(), f, "global")
292 readtags(f.data().splitlines(), f, "global")
293
293
294 try:
294 try:
295 data = util.fromlocal(self.opener("localtags").read())
295 data = util.fromlocal(self.opener("localtags").read())
296 # localtags are stored in the local character set
296 # localtags are stored in the local character set
297 # while the internal tag table is stored in UTF-8
297 # while the internal tag table is stored in UTF-8
298 readtags(data.splitlines(), "localtags", "local")
298 readtags(data.splitlines(), "localtags", "local")
299 except IOError:
299 except IOError:
300 pass
300 pass
301
301
302 self.tagscache = {}
302 self.tagscache = {}
303 self._tagstypecache = {}
303 self._tagstypecache = {}
304 for k,nh in globaltags.items():
304 for k,nh in globaltags.items():
305 n = nh[0]
305 n = nh[0]
306 if n != nullid:
306 if n != nullid:
307 self.tagscache[k] = n
307 self.tagscache[k] = n
308 self._tagstypecache[k] = tagtypes[k]
308 self._tagstypecache[k] = tagtypes[k]
309 self.tagscache['tip'] = self.changelog.tip()
309 self.tagscache['tip'] = self.changelog.tip()
310 return self.tagscache
310 return self.tagscache
311
311
312 def tagtype(self, tagname):
312 def tagtype(self, tagname):
313 '''
313 '''
314 return the type of the given tag. result can be:
314 return the type of the given tag. result can be:
315
315
316 'local' : a local tag
316 'local' : a local tag
317 'global' : a global tag
317 'global' : a global tag
318 None : tag does not exist
318 None : tag does not exist
319 '''
319 '''
320
320
321 self.tags()
321 self.tags()
322
322
323 return self._tagstypecache.get(tagname)
323 return self._tagstypecache.get(tagname)
324
324
325 def _hgtagsnodes(self):
325 def _hgtagsnodes(self):
326 heads = self.heads()
326 heads = self.heads()
327 heads.reverse()
327 heads.reverse()
328 last = {}
328 last = {}
329 ret = []
329 ret = []
330 for node in heads:
330 for node in heads:
331 c = self[node]
331 c = self[node]
332 rev = c.rev()
332 rev = c.rev()
333 try:
333 try:
334 fnode = c.filenode('.hgtags')
334 fnode = c.filenode('.hgtags')
335 except revlog.LookupError:
335 except revlog.LookupError:
336 continue
336 continue
337 ret.append((rev, node, fnode))
337 ret.append((rev, node, fnode))
338 if fnode in last:
338 if fnode in last:
339 ret[last[fnode]] = None
339 ret[last[fnode]] = None
340 last[fnode] = len(ret) - 1
340 last[fnode] = len(ret) - 1
341 return [item for item in ret if item]
341 return [item for item in ret if item]
342
342
343 def tagslist(self):
343 def tagslist(self):
344 '''return a list of tags ordered by revision'''
344 '''return a list of tags ordered by revision'''
345 l = []
345 l = []
346 for t, n in self.tags().items():
346 for t, n in self.tags().items():
347 try:
347 try:
348 r = self.changelog.rev(n)
348 r = self.changelog.rev(n)
349 except:
349 except:
350 r = -2 # sort to the beginning of the list if unknown
350 r = -2 # sort to the beginning of the list if unknown
351 l.append((r, t, n))
351 l.append((r, t, n))
352 return [(t, n) for r, t, n in util.sort(l)]
352 return [(t, n) for r, t, n in util.sort(l)]
353
353
354 def nodetags(self, node):
354 def nodetags(self, node):
355 '''return the tags associated with a node'''
355 '''return the tags associated with a node'''
356 if not self.nodetagscache:
356 if not self.nodetagscache:
357 self.nodetagscache = {}
357 self.nodetagscache = {}
358 for t, n in self.tags().items():
358 for t, n in self.tags().items():
359 self.nodetagscache.setdefault(n, []).append(t)
359 self.nodetagscache.setdefault(n, []).append(t)
360 return self.nodetagscache.get(node, [])
360 return self.nodetagscache.get(node, [])
361
361
362 def _branchtags(self, partial, lrev):
362 def _branchtags(self, partial, lrev):
363 tiprev = len(self) - 1
363 tiprev = len(self) - 1
364 if lrev != tiprev:
364 if lrev != tiprev:
365 self._updatebranchcache(partial, lrev+1, tiprev+1)
365 self._updatebranchcache(partial, lrev+1, tiprev+1)
366 self._writebranchcache(partial, self.changelog.tip(), tiprev)
366 self._writebranchcache(partial, self.changelog.tip(), tiprev)
367
367
368 return partial
368 return partial
369
369
370 def branchtags(self):
370 def branchtags(self):
371 tip = self.changelog.tip()
371 tip = self.changelog.tip()
372 if self.branchcache is not None and self._branchcachetip == tip:
372 if self.branchcache is not None and self._branchcachetip == tip:
373 return self.branchcache
373 return self.branchcache
374
374
375 oldtip = self._branchcachetip
375 oldtip = self._branchcachetip
376 self._branchcachetip = tip
376 self._branchcachetip = tip
377 if self.branchcache is None:
377 if self.branchcache is None:
378 self.branchcache = {} # avoid recursion in changectx
378 self.branchcache = {} # avoid recursion in changectx
379 else:
379 else:
380 self.branchcache.clear() # keep using the same dict
380 self.branchcache.clear() # keep using the same dict
381 if oldtip is None or oldtip not in self.changelog.nodemap:
381 if oldtip is None or oldtip not in self.changelog.nodemap:
382 partial, last, lrev = self._readbranchcache()
382 partial, last, lrev = self._readbranchcache()
383 else:
383 else:
384 lrev = self.changelog.rev(oldtip)
384 lrev = self.changelog.rev(oldtip)
385 partial = self._ubranchcache
385 partial = self._ubranchcache
386
386
387 self._branchtags(partial, lrev)
387 self._branchtags(partial, lrev)
388
388
389 # the branch cache is stored on disk as UTF-8, but in the local
389 # the branch cache is stored on disk as UTF-8, but in the local
390 # charset internally
390 # charset internally
391 for k, v in partial.items():
391 for k, v in partial.items():
392 self.branchcache[util.tolocal(k)] = v
392 self.branchcache[util.tolocal(k)] = v
393 self._ubranchcache = partial
393 self._ubranchcache = partial
394 return self.branchcache
394 return self.branchcache
395
395
396 def _readbranchcache(self):
396 def _readbranchcache(self):
397 partial = {}
397 partial = {}
398 try:
398 try:
399 f = self.opener("branch.cache")
399 f = self.opener("branch.cache")
400 lines = f.read().split('\n')
400 lines = f.read().split('\n')
401 f.close()
401 f.close()
402 except (IOError, OSError):
402 except (IOError, OSError):
403 return {}, nullid, nullrev
403 return {}, nullid, nullrev
404
404
405 try:
405 try:
406 last, lrev = lines.pop(0).split(" ", 1)
406 last, lrev = lines.pop(0).split(" ", 1)
407 last, lrev = bin(last), int(lrev)
407 last, lrev = bin(last), int(lrev)
408 if lrev >= len(self) or self[lrev].node() != last:
408 if lrev >= len(self) or self[lrev].node() != last:
409 # invalidate the cache
409 # invalidate the cache
410 raise ValueError('invalidating branch cache (tip differs)')
410 raise ValueError('invalidating branch cache (tip differs)')
411 for l in lines:
411 for l in lines:
412 if not l: continue
412 if not l: continue
413 node, label = l.split(" ", 1)
413 node, label = l.split(" ", 1)
414 partial[label.strip()] = bin(node)
414 partial[label.strip()] = bin(node)
415 except (KeyboardInterrupt, util.SignalInterrupt):
415 except (KeyboardInterrupt, util.SignalInterrupt):
416 raise
416 raise
417 except Exception, inst:
417 except Exception, inst:
418 if self.ui.debugflag:
418 if self.ui.debugflag:
419 self.ui.warn(str(inst), '\n')
419 self.ui.warn(str(inst), '\n')
420 partial, last, lrev = {}, nullid, nullrev
420 partial, last, lrev = {}, nullid, nullrev
421 return partial, last, lrev
421 return partial, last, lrev
422
422
423 def _writebranchcache(self, branches, tip, tiprev):
423 def _writebranchcache(self, branches, tip, tiprev):
424 try:
424 try:
425 f = self.opener("branch.cache", "w", atomictemp=True)
425 f = self.opener("branch.cache", "w", atomictemp=True)
426 f.write("%s %s\n" % (hex(tip), tiprev))
426 f.write("%s %s\n" % (hex(tip), tiprev))
427 for label, node in branches.iteritems():
427 for label, node in branches.iteritems():
428 f.write("%s %s\n" % (hex(node), label))
428 f.write("%s %s\n" % (hex(node), label))
429 f.rename()
429 f.rename()
430 except (IOError, OSError):
430 except (IOError, OSError):
431 pass
431 pass
432
432
433 def _updatebranchcache(self, partial, start, end):
433 def _updatebranchcache(self, partial, start, end):
434 for r in xrange(start, end):
434 for r in xrange(start, end):
435 c = self[r]
435 c = self[r]
436 b = c.branch()
436 b = c.branch()
437 partial[b] = c.node()
437 partial[b] = c.node()
438
438
439 def lookup(self, key):
439 def lookup(self, key):
440 if key == '.':
440 if isinstance(key, int):
441 return self.changelog.node(key)
442 elif key == '.':
441 return self.dirstate.parents()[0]
443 return self.dirstate.parents()[0]
442 elif key == 'null':
444 elif key == 'null':
443 return nullid
445 return nullid
446 elif key == 'tip':
447 return self.changelog.tip()
444 n = self.changelog._match(key)
448 n = self.changelog._match(key)
445 if n:
449 if n:
446 return n
450 return n
447 if key in self.tags():
451 if key in self.tags():
448 return self.tags()[key]
452 return self.tags()[key]
449 if key in self.branchtags():
453 if key in self.branchtags():
450 return self.branchtags()[key]
454 return self.branchtags()[key]
451 n = self.changelog._partialmatch(key)
455 n = self.changelog._partialmatch(key)
452 if n:
456 if n:
453 return n
457 return n
454 try:
458 try:
455 if len(key) == 20:
459 if len(key) == 20:
456 key = hex(key)
460 key = hex(key)
457 except:
461 except:
458 pass
462 pass
459 raise repo.RepoError(_("unknown revision '%s'") % key)
463 raise repo.RepoError(_("unknown revision '%s'") % key)
460
464
461 def local(self):
465 def local(self):
462 return True
466 return True
463
467
464 def join(self, f):
468 def join(self, f):
465 return os.path.join(self.path, f)
469 return os.path.join(self.path, f)
466
470
467 def wjoin(self, f):
471 def wjoin(self, f):
468 return os.path.join(self.root, f)
472 return os.path.join(self.root, f)
469
473
470 def rjoin(self, f):
474 def rjoin(self, f):
471 return os.path.join(self.root, util.pconvert(f))
475 return os.path.join(self.root, util.pconvert(f))
472
476
473 def file(self, f):
477 def file(self, f):
474 if f[0] == '/':
478 if f[0] == '/':
475 f = f[1:]
479 f = f[1:]
476 return filelog.filelog(self.sopener, f)
480 return filelog.filelog(self.sopener, f)
477
481
478 def changectx(self, changeid):
482 def changectx(self, changeid):
479 return self[changeid]
483 return self[changeid]
480
484
481 def parents(self, changeid=None):
485 def parents(self, changeid=None):
482 '''get list of changectxs for parents of changeid'''
486 '''get list of changectxs for parents of changeid'''
483 return self[changeid].parents()
487 return self[changeid].parents()
484
488
485 def filectx(self, path, changeid=None, fileid=None):
489 def filectx(self, path, changeid=None, fileid=None):
486 """changeid can be a changeset revision, node, or tag.
490 """changeid can be a changeset revision, node, or tag.
487 fileid can be a file revision or node."""
491 fileid can be a file revision or node."""
488 return context.filectx(self, path, changeid, fileid)
492 return context.filectx(self, path, changeid, fileid)
489
493
490 def getcwd(self):
494 def getcwd(self):
491 return self.dirstate.getcwd()
495 return self.dirstate.getcwd()
492
496
493 def pathto(self, f, cwd=None):
497 def pathto(self, f, cwd=None):
494 return self.dirstate.pathto(f, cwd)
498 return self.dirstate.pathto(f, cwd)
495
499
496 def wfile(self, f, mode='r'):
500 def wfile(self, f, mode='r'):
497 return self.wopener(f, mode)
501 return self.wopener(f, mode)
498
502
499 def _link(self, f):
503 def _link(self, f):
500 return os.path.islink(self.wjoin(f))
504 return os.path.islink(self.wjoin(f))
501
505
502 def _filter(self, filter, filename, data):
506 def _filter(self, filter, filename, data):
503 if filter not in self.filterpats:
507 if filter not in self.filterpats:
504 l = []
508 l = []
505 for pat, cmd in self.ui.configitems(filter):
509 for pat, cmd in self.ui.configitems(filter):
506 if cmd == '!':
510 if cmd == '!':
507 continue
511 continue
508 mf = util.matcher(self.root, "", [pat], [], [])[1]
512 mf = util.matcher(self.root, "", [pat], [], [])[1]
509 fn = None
513 fn = None
510 params = cmd
514 params = cmd
511 for name, filterfn in self._datafilters.iteritems():
515 for name, filterfn in self._datafilters.iteritems():
512 if cmd.startswith(name):
516 if cmd.startswith(name):
513 fn = filterfn
517 fn = filterfn
514 params = cmd[len(name):].lstrip()
518 params = cmd[len(name):].lstrip()
515 break
519 break
516 if not fn:
520 if not fn:
517 fn = lambda s, c, **kwargs: util.filter(s, c)
521 fn = lambda s, c, **kwargs: util.filter(s, c)
518 # Wrap old filters not supporting keyword arguments
522 # Wrap old filters not supporting keyword arguments
519 if not inspect.getargspec(fn)[2]:
523 if not inspect.getargspec(fn)[2]:
520 oldfn = fn
524 oldfn = fn
521 fn = lambda s, c, **kwargs: oldfn(s, c)
525 fn = lambda s, c, **kwargs: oldfn(s, c)
522 l.append((mf, fn, params))
526 l.append((mf, fn, params))
523 self.filterpats[filter] = l
527 self.filterpats[filter] = l
524
528
525 for mf, fn, cmd in self.filterpats[filter]:
529 for mf, fn, cmd in self.filterpats[filter]:
526 if mf(filename):
530 if mf(filename):
527 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
531 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
528 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
532 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
529 break
533 break
530
534
531 return data
535 return data
532
536
533 def adddatafilter(self, name, filter):
537 def adddatafilter(self, name, filter):
534 self._datafilters[name] = filter
538 self._datafilters[name] = filter
535
539
536 def wread(self, filename):
540 def wread(self, filename):
537 if self._link(filename):
541 if self._link(filename):
538 data = os.readlink(self.wjoin(filename))
542 data = os.readlink(self.wjoin(filename))
539 else:
543 else:
540 data = self.wopener(filename, 'r').read()
544 data = self.wopener(filename, 'r').read()
541 return self._filter("encode", filename, data)
545 return self._filter("encode", filename, data)
542
546
543 def wwrite(self, filename, data, flags):
547 def wwrite(self, filename, data, flags):
544 data = self._filter("decode", filename, data)
548 data = self._filter("decode", filename, data)
545 try:
549 try:
546 os.unlink(self.wjoin(filename))
550 os.unlink(self.wjoin(filename))
547 except OSError:
551 except OSError:
548 pass
552 pass
549 if 'l' in flags:
553 if 'l' in flags:
550 self.wopener.symlink(data, filename)
554 self.wopener.symlink(data, filename)
551 else:
555 else:
552 self.wopener(filename, 'w').write(data)
556 self.wopener(filename, 'w').write(data)
553 if 'x' in flags:
557 if 'x' in flags:
554 util.set_flags(self.wjoin(filename), False, True)
558 util.set_flags(self.wjoin(filename), False, True)
555
559
556 def wwritedata(self, filename, data):
560 def wwritedata(self, filename, data):
557 return self._filter("decode", filename, data)
561 return self._filter("decode", filename, data)
558
562
def transaction(self):
    """Return a transaction for this repository.

    If a transaction is already running, a nested handle for it is
    returned instead.  The pre-transaction dirstate and branch are
    journalled so rollback() can restore them later.
    """
    if self._transref and self._transref():
        # a transaction is already live - nest within it
        return self._transref().nest()

    # abort here if the journal already exists
    if os.path.exists(self.sjoin("journal")):
        raise repo.RepoError(_("journal already exists - run hg recover"))

    # save dirstate for rollback
    try:
        ds = self.opener("dirstate").read()
    except IOError:
        # no dirstate file yet (e.g. a fresh repository)
        ds = ""
    self.opener("journal.dirstate", "w").write(ds)
    self.opener("journal.branch", "w").write(self.dirstate.branch())

    # journal files are renamed to undo files after the transaction
    # completes (performed by aftertrans)
    renames = [(self.sjoin("journal"), self.sjoin("undo")),
               (self.join("journal.dirstate"), self.join("undo.dirstate")),
               (self.join("journal.branch"), self.join("undo.branch"))]
    tr = transaction.transaction(self.ui.warn, self.sopener,
                                 self.sjoin("journal"),
                                 aftertrans(renames),
                                 self.store.createmode)
    # hold only a weak reference so the transaction is aborted and
    # collected once the last strong reference goes away
    self._transref = weakref.ref(tr)
    return tr
584
588
def recover(self):
    """Recover from an interrupted transaction.

    Rolls back the journal left behind by an interrupted transaction.
    Returns True if a journal was found and rolled back, False if
    there was nothing to recover.
    """
    l = self.lock()
    try:
        if os.path.exists(self.sjoin("journal")):
            self.ui.status(_("rolling back interrupted transaction\n"))
            transaction.rollback(self.sopener, self.sjoin("journal"))
            # cached in-memory state may now be stale
            self.invalidate()
            return True
        else:
            self.ui.warn(_("no interrupted transaction available\n"))
            return False
    finally:
        # dropping our reference releases the lock
        del l
598
602
def rollback(self):
    """Undo the last transaction, restoring dirstate and branch.

    Uses the undo files written by transaction(); warns when no
    rollback information is available.
    """
    wlock = lock = None
    try:
        wlock = self.wlock()
        lock = self.lock()
        if os.path.exists(self.sjoin("undo")):
            self.ui.status(_("rolling back last transaction\n"))
            transaction.rollback(self.sopener, self.sjoin("undo"))
            util.rename(self.join("undo.dirstate"), self.join("dirstate"))
            try:
                branch = self.opener("undo.branch").read()
                self.dirstate.setbranch(branch)
            except IOError:
                # undo.branch is missing - leave the branch unchanged
                self.ui.warn(_("Named branch could not be reset, "
                               "current branch still is: %s\n")
                             % util.tolocal(self.dirstate.branch()))
            # throw away any state cached before the rollback
            self.invalidate()
            self.dirstate.invalidate()
        else:
            self.ui.warn(_("no rollback information available\n"))
    finally:
        # dropping the references releases both locks
        del lock, wlock
621
625
def invalidate(self):
    """Forget all cached repository state.

    Drops the lazily-loaded changelog/manifest attributes and resets
    every cache field so they are reloaded on next access.
    """
    for name in ('changelog', 'manifest'):
        if name in self.__dict__:
            delattr(self, name)
    for name in ('tagscache', '_tagstypecache', 'nodetagscache',
                 'branchcache', '_ubranchcache', '_branchcachetip'):
        setattr(self, name, None)
632
636
def _lock(self, lockname, wait, releasefn, acquirefn, desc):
    """Acquire the lock file 'lockname' and return the lock object.

    A non-blocking acquire is tried first.  If the lock is held and
    wait is true, retry with a timeout (ui.timeout, default 600s);
    if wait is false the LockHeld exception propagates.  acquirefn,
    if given, is called once the lock is held.
    """
    try:
        # non-blocking attempt first, so we can report who holds it
        l = lock.lock(lockname, 0, releasefn, desc=desc)
    except lock.LockHeld, inst:
        if not wait:
            raise
        self.ui.warn(_("waiting for lock on %s held by %r\n") %
                     (desc, inst.locker))
        # default to 600 seconds timeout
        l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                      releasefn, desc=desc)
    if acquirefn:
        acquirefn()
    return l
647
651
def lock(self, wait=True):
    """Acquire the repository store lock, reusing a live one if held.

    Cached state is invalidated on acquisition (via self.invalidate)
    so readers see the on-disk truth.
    """
    if self._lockref and self._lockref():
        # reuse the lock we already hold
        return self._lockref()

    l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
                   _('repository %s') % self.origroot)
    # weak reference: the lock is freed when callers drop theirs
    self._lockref = weakref.ref(l)
    return l
656
660
def wlock(self, wait=True):
    """Acquire the working-directory lock, reusing a live one if held.

    The dirstate is written back on release (releasefn) and its
    in-memory state is invalidated on acquisition.
    """
    if self._wlockref and self._wlockref():
        # reuse the wlock we already hold
        return self._wlockref()

    l = self._lock(self.join("wlock"), wait, self.dirstate.write,
                   self.dirstate.invalidate, _('working directory of %s') %
                   self.origroot)
    # weak reference: the lock is freed when callers drop theirs
    self._wlockref = weakref.ref(l)
    return l
666
670
def filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
    """
    commit an individual file as part of a larger transaction

    fctx: the file context to commit
    manifest1, manifest2: manifests of the two commit parents
    linkrev: changelog revision this file revision will link to
    tr: the active transaction (proxy)
    changelist: output list - the filename is appended when a new
        filelog revision is actually written

    Returns the (new or reused) filelog node for the file.
    """

    fn = fctx.path()
    t = fctx.data()
    fl = self.file(fn)
    fp1 = manifest1.get(fn, nullid)
    fp2 = manifest2.get(fn, nullid)

    meta = {}
    cp = fctx.renamed()
    if cp and cp[0] != fn:
        # Mark the new revision of this file as a copy of another
        # file.  This copy data will effectively act as a parent
        # of this new revision.  If this is a merge, the first
        # parent will be the nullid (meaning "look up the copy data")
        # and the second one will be the other parent.  For example:
        #
        # 0 --- 1 --- 3   rev1 changes file foo
        #   \       /     rev2 renames foo to bar and changes it
        #    \- 2 -/      rev3 should have bar with all changes and
        #                 should record that bar descends from
        #                 bar in rev2 and foo in rev1
        #
        # this allows this merge to succeed:
        #
        # 0 --- 1 --- 3   rev4 reverts the content change from rev2
        #   \       /     merging rev3 and rev4 should use bar@rev2
        #    \- 2 --- 4   as the merge base
        #

        cf = cp[0]
        cr = manifest1.get(cf)
        nfp = fp2

        if manifest2: # branch merge
            if fp2 == nullid: # copied on remote side
                if fp1 != nullid or cf in manifest2:
                    cr = manifest2[cf]
                    nfp = fp1

        # find source in nearest ancestor if we've lost track
        if not cr:
            self.ui.debug(_(" %s: searching for copy revision for %s\n") %
                          (fn, cf))
            for a in self['.'].ancestors():
                if cf in a:
                    cr = a[cf].filenode()
                    break

        self.ui.debug(_(" %s: copy %s:%s\n") % (fn, cf, hex(cr)))
        meta["copy"] = cf
        meta["copyrev"] = hex(cr)
        fp1, fp2 = nullid, nfp
    elif fp2 != nullid:
        # is one parent an ancestor of the other?
        fpa = fl.ancestor(fp1, fp2)
        if fpa == fp1:
            fp1, fp2 = fp2, nullid
        elif fpa == fp2:
            fp2 = nullid

    # is the file unmodified from the parent? report existing entry
    if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
        return fp1

    changelist.append(fn)
    return fl.add(t, meta, tr, linkrev, fp1, fp2)
737
741
def rawcommit(self, files, text, user, date, p1=None, p2=None, extra=None):
    """Commit the given files with explicit parents.

    If p1 is not given, the dirstate parents are used.  Delegates to
    commit() with empty_ok=True, so an empty message is accepted.
    Returns whatever commit() returns.
    """
    # use None as the default to avoid the shared-mutable-default
    # pitfall of 'extra={}'
    if extra is None:
        extra = {}
    if p1 is None:
        p1, p2 = self.dirstate.parents()
    return self.commit(files=files, text=text, user=user, date=date,
                       p1=p1, p2=p2, extra=extra, empty_ok=True)
743
747
def commit(self, files=None, text="", user=None, date=None,
           match=None, force=False, force_editor=False,
           p1=None, p2=None, extra={}, empty_ok=False):
    """Commit changes to the repository.

    With p1 unset (the normal case), the dirstate parents are used
    and the dirstate is consulted for what changed; otherwise
    (rawcommit) the given files and parents are taken as-is.
    Aborts on a partial merge commit or unresolved merge conflicts.
    Returns the new changelog node (via _commitctx), or None when
    there was nothing to commit.
    """
    wlock = lock = None
    if files:
        files = util.unique(files)
    try:
        wlock = self.wlock()
        lock = self.lock()
        use_dirstate = (p1 is None) # not rawcommit

        if use_dirstate:
            p1, p2 = self.dirstate.parents()
            update_dirstate = True

            # a merge must be committed in full: files/patterns are
            # only allowed when forced
            if (not force and p2 != nullid and
                (match and (match.files() or match.anypats()))):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            if files:
                # explicit file list: classify by dirstate status
                modified, removed = [], []
                for f in files:
                    s = self.dirstate[f]
                    if s in 'nma':
                        modified.append(f)
                    elif s == 'r':
                        removed.append(f)
                    else:
                        self.ui.warn(_("%s not tracked!\n") % f)
                changes = [modified, [], removed, [], []]
            else:
                changes = self.status(match=match)
        else:
            p1, p2 = p1, p2 or nullid
            update_dirstate = (self.dirstate.parents()[0] == p1)
            changes = [files, [], [], [], []]

        # refuse to commit files still marked unresolved by a merge
        ms = merge_.mergestate(self)
        for f in changes[0]:
            if f in ms and ms[f] == 'u':
                raise util.Abort(_("unresolved merge conflicts "
                                   "(see hg resolve)"))
        wctx = context.workingctx(self, (p1, p2), text, user, date,
                                  extra, changes)
        return self._commitctx(wctx, force, force_editor, empty_ok,
                               use_dirstate, update_dirstate)
    finally:
        del lock, wlock
793
797
def commitctx(self, ctx):
    """Add a new revision to current repository.

    Revision information is passed in the context.memctx argument.
    commitctx() does not touch the working directory.

    Returns the new changelog node (from _commitctx).
    """
    wlock = lock = None
    try:
        wlock = self.wlock()
        lock = self.lock()
        # force=True/empty_ok=True: the context fully describes the
        # commit; no dirstate involvement at all
        return self._commitctx(ctx, force=True, force_editor=False,
                               empty_ok=True, use_dirstate=False,
                               update_dirstate=False)
    finally:
        del lock, wlock
809
813
def _commitctx(self, wctx, force=False, force_editor=False, empty_ok=False,
               use_dirstate=True, update_dirstate=True):
    """Write the changeset described by wctx to the repository.

    Commits the changed filelogs, the manifest and the changelog
    entry inside one transaction, running the precommit,
    pretxncommit and commit hooks.  Returns the new changelog node,
    or None if there was nothing to commit.
    """
    tr = None
    valid = 0 # don't save the dirstate if this isn't set
    try:
        commit = util.sort(wctx.modified() + wctx.added())
        remove = wctx.removed()
        extra = wctx.extra().copy()
        branchname = extra['branch']
        user = wctx.user()
        text = wctx.description()

        p1, p2 = [p.node() for p in wctx.parents()]
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0]).copy()
        m2 = self.manifest.read(c2[0])

        if use_dirstate:
            oldname = c1[5].get("branch") # stored in UTF-8
            # nothing to commit: no file changes, not a merge, and
            # the branch name is unchanged
            if (not commit and not remove and not force and p2 == nullid
                and branchname == oldname):
                self.ui.status(_("nothing changed\n"))
                return None

        xp1 = hex(p1)
        if p2 == nullid: xp2 = ''
        else: xp2 = hex(p2)

        self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

        tr = self.transaction()
        # weak proxy: do not keep the transaction alive via filelogs
        trp = weakref.proxy(tr)

        # check in files
        new = {}
        changed = []
        linkrev = len(self)
        for f in commit:
            self.ui.note(f + "\n")
            try:
                fctx = wctx.filectx(f)
                newflags = fctx.flags()
                new[f] = self.filecommit(fctx, m1, m2, linkrev, trp, changed)
                if ((not changed or changed[-1] != f) and
                    m2.get(f) != new[f]):
                    # mention the file in the changelog if some
                    # flag changed, even if there was no content
                    # change.
                    if m1.flags(f) != newflags:
                        changed.append(f)
                m1.set(f, newflags)
                if use_dirstate:
                    self.dirstate.normal(f)

            except (OSError, IOError):
                if use_dirstate:
                    self.ui.warn(_("trouble committing %s!\n") % f)
                    raise
                else:
                    remove.append(f)

        # classify changed files for the editor template below
        updated, added = [], []
        for f in util.sort(changed):
            if f in m1 or f in m2:
                updated.append(f)
            else:
                added.append(f)

        # update manifest
        m1.update(new)
        removed = []

        for f in util.sort(remove):
            if f in m1:
                del m1[f]
                removed.append(f)
            elif f in m2:
                removed.append(f)
        mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
                               (new, removed))

        # add changeset
        if (not empty_ok and not text) or force_editor:
            edittext = []
            if text:
                edittext.append(text)
            edittext.append("")
            edittext.append("") # Empty line between message and comments.
            edittext.append(_("HG: Enter commit message."
                              " Lines beginning with 'HG:' are removed."))
            edittext.append("HG: --")
            edittext.append("HG: user: %s" % user)
            if p2 != nullid:
                edittext.append("HG: branch merge")
            if branchname:
                edittext.append("HG: branch '%s'" % util.tolocal(branchname))
            edittext.extend(["HG: added %s" % f for f in added])
            edittext.extend(["HG: changed %s" % f for f in updated])
            edittext.extend(["HG: removed %s" % f for f in removed])
            if not added and not updated and not removed:
                edittext.append("HG: no files changed")
            edittext.append("")
            # run editor in the repository root
            olddir = os.getcwd()
            os.chdir(self.root)
            text = self.ui.edit("\n".join(edittext), user)
            os.chdir(olddir)

        # strip trailing whitespace and leading blank lines from the
        # commit message
        lines = [line.rstrip() for line in text.rstrip().splitlines()]
        while lines and not lines[0]:
            del lines[0]
        if not lines and use_dirstate:
            raise util.Abort(_("empty commit message"))
        text = '\n'.join(lines)

        n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
                               user, wctx.date(), extra)
        self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                  parent2=xp2)
        tr.close()

        if self.branchcache:
            self.branchtags()

        if use_dirstate or update_dirstate:
            self.dirstate.setparents(n)
            if use_dirstate:
                for f in removed:
                    self.dirstate.forget(f)
        valid = 1 # our dirstate updates are complete

        self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
        return n
    finally:
        if not valid: # don't save our updated dirstate
            self.dirstate.invalidate()
        del tr
948
952
def walk(self, match, node=None):
    """Walk the working directory (node=None) or the given changeset
    and return all files accepted by the match function."""
    ctx = self[node]
    return ctx.walk(match)
956
960
def status(self, node1='.', node2=None, match=None,
           ignored=False, clean=False, unknown=False):
    """return status of files between two nodes or node and working directory

    If node1 is None, use the first dirstate parent instead.
    If node2 is None, compare node1 with working directory.

    Returns a 7-tuple of sorted lists:
    (modified, added, removed, deleted, unknown, ignored, clean).
    The ignored/clean/unknown lists are only populated when the
    corresponding flag is set.
    """

    def mfmatches(ctx):
        # manifest of ctx restricted to files accepted by match
        mf = ctx.manifest().copy()
        for fn in mf.keys():
            if not match(fn):
                del mf[fn]
        return mf

    if isinstance(node1, context.changectx):
        ctx1 = node1
    else:
        ctx1 = self[node1]
    if isinstance(node2, context.changectx):
        ctx2 = node2
    else:
        ctx2 = self[node2]

    working = ctx2 == self[None]
    parentworking = working and ctx1 == self['.']
    match = match or match_.always(self.root, self.getcwd())
    listignored, listclean, listunknown = ignored, clean, unknown

    # load earliest manifest first for caching reasons
    if not working and ctx2.rev() < ctx1.rev():
        ctx2.manifest()

    if not parentworking:
        def bad(f, msg):
            # only complain about files that don't exist in the base
            # revision either
            if f not in ctx1:
                self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            return False
        match.bad = bad

    if working: # we need to scan the working dir
        s = self.dirstate.status(match, listignored, listclean, listunknown)
        cmp, modified, added, removed, deleted, unknown, ignored, clean = s

        # check for any possibly clean files
        if parentworking and cmp:
            fixup = []
            # do a full compare of any files that might have changed
            for f in cmp:
                if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                    or ctx1[f].cmp(ctx2[f].data())):
                    modified.append(f)
                else:
                    fixup.append(f)

            if listclean:
                clean += fixup

            # update dirstate for files that are actually clean
            if fixup:
                wlock = None
                try:
                    try:
                        # non-blocking: a status should not wait on
                        # the working-directory lock
                        wlock = self.wlock(False)
                        for f in fixup:
                            self.dirstate.normal(f)
                    except lock.LockException:
                        pass
                finally:
                    del wlock

    if not parentworking:
        mf1 = mfmatches(ctx1)
        if working:
            # we are comparing working dir against non-parent
            # generate a pseudo-manifest for the working dir
            mf2 = mfmatches(self['.'])
            for f in cmp + modified + added:
                mf2[f] = None
                mf2.set(f, ctx2.flags(f))
            for f in removed:
                if f in mf2:
                    del mf2[f]
        else:
            # we are comparing two revisions
            deleted, unknown, ignored = [], [], []
            mf2 = mfmatches(ctx2)

        modified, added, clean = [], [], []
        for fn in mf2:
            if fn in mf1:
                # flag change, or node change with real content change
                if (mf1.flags(fn) != mf2.flags(fn) or
                    (mf1[fn] != mf2[fn] and
                     (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
                    modified.append(fn)
                elif listclean:
                    clean.append(fn)
                del mf1[fn]
            else:
                added.append(fn)
        # whatever is left in mf1 was not seen in mf2: removed
        removed = mf1.keys()

    r = modified, added, removed, deleted, unknown, ignored, clean
    [l.sort() for l in r]
    return r
1062
1066
def add(self, list):
    """Schedule the given files for tracking.

    Files that cannot be added are reported via ui.warn.  Returns
    the list of rejected file names.
    """
    wlock = self.wlock()
    try:
        rejected = []
        for f in list:
            p = self.wjoin(f)
            try:
                st = os.lstat(p)
            except OSError:
                # narrowed from a bare except: lstat failures are
                # OSError (missing file, permission, bad path)
                self.ui.warn(_("%s does not exist!\n") % f)
                rejected.append(f)
                continue
            if st.st_size > 10000000:
                # warn, but still allow the add
                self.ui.warn(_("%s: files over 10MB may cause memory and"
                               " performance problems\n"
                               "(use 'hg revert %s' to unadd the file)\n")
                               % (f, f))
            if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                self.ui.warn(_("%s not added: only files and symlinks "
                               "supported currently\n") % f)
                # report the repo-relative name, consistent with the
                # rejection above (previously appended the full path)
                rejected.append(f)
            elif self.dirstate[f] in 'amn':
                self.ui.warn(_("%s already tracked!\n") % f)
            elif self.dirstate[f] == 'r':
                # previously removed: resurrect rather than re-add
                self.dirstate.normallookup(f)
            else:
                self.dirstate.add(f)
        return rejected
    finally:
        del wlock
1093
1097
def forget(self, list):
    """Undo a pending add: stop tracking files in the 'a' state.

    Files not in the added state are reported via ui.warn and left
    untouched.
    """
    wlock = self.wlock()
    try:
        for f in list:
            if self.dirstate[f] == 'a':
                self.dirstate.forget(f)
            else:
                self.ui.warn(_("%s not added!\n") % f)
    finally:
        del wlock
1104
1108
def remove(self, list, unlink=False):
    """Schedule files for removal from tracking.

    If unlink is true, the files are also deleted from the working
    directory first; a file that still exists afterwards is not
    marked removed.
    """
    wlock = None
    try:
        if unlink:
            for f in list:
                try:
                    util.unlink(self.wjoin(f))
                except OSError, inst:
                    # already gone is fine; anything else is an error
                    if inst.errno != errno.ENOENT:
                        raise
        wlock = self.wlock()
        for f in list:
            if unlink and os.path.exists(self.wjoin(f)):
                # the unlink above did not take effect
                self.ui.warn(_("%s still exists!\n") % f)
            elif self.dirstate[f] == 'a':
                # never committed: just drop the pending add
                self.dirstate.forget(f)
            elif f not in self.dirstate:
                self.ui.warn(_("%s not tracked!\n") % f)
            else:
                self.dirstate.remove(f)
    finally:
        del wlock
1127
1131
1128 def undelete(self, list):
1132 def undelete(self, list):
1129 wlock = None
1133 wlock = None
1130 try:
1134 try:
1131 manifests = [self.manifest.read(self.changelog.read(p)[0])
1135 manifests = [self.manifest.read(self.changelog.read(p)[0])
1132 for p in self.dirstate.parents() if p != nullid]
1136 for p in self.dirstate.parents() if p != nullid]
1133 wlock = self.wlock()
1137 wlock = self.wlock()
1134 for f in list:
1138 for f in list:
1135 if self.dirstate[f] != 'r':
1139 if self.dirstate[f] != 'r':
1136 self.ui.warn(_("%s not removed!\n") % f)
1140 self.ui.warn(_("%s not removed!\n") % f)
1137 else:
1141 else:
1138 m = f in manifests[0] and manifests[0] or manifests[1]
1142 m = f in manifests[0] and manifests[0] or manifests[1]
1139 t = self.file(f).read(m[f])
1143 t = self.file(f).read(m[f])
1140 self.wwrite(f, t, m.flags(f))
1144 self.wwrite(f, t, m.flags(f))
1141 self.dirstate.normal(f)
1145 self.dirstate.normal(f)
1142 finally:
1146 finally:
1143 del wlock
1147 del wlock
1144
1148
1145 def copy(self, source, dest):
1149 def copy(self, source, dest):
1146 wlock = None
1150 wlock = None
1147 try:
1151 try:
1148 p = self.wjoin(dest)
1152 p = self.wjoin(dest)
1149 if not (os.path.exists(p) or os.path.islink(p)):
1153 if not (os.path.exists(p) or os.path.islink(p)):
1150 self.ui.warn(_("%s does not exist!\n") % dest)
1154 self.ui.warn(_("%s does not exist!\n") % dest)
1151 elif not (os.path.isfile(p) or os.path.islink(p)):
1155 elif not (os.path.isfile(p) or os.path.islink(p)):
1152 self.ui.warn(_("copy failed: %s is not a file or a "
1156 self.ui.warn(_("copy failed: %s is not a file or a "
1153 "symbolic link\n") % dest)
1157 "symbolic link\n") % dest)
1154 else:
1158 else:
1155 wlock = self.wlock()
1159 wlock = self.wlock()
1156 if self.dirstate[dest] in '?r':
1160 if self.dirstate[dest] in '?r':
1157 self.dirstate.add(dest)
1161 self.dirstate.add(dest)
1158 self.dirstate.copy(source, dest)
1162 self.dirstate.copy(source, dest)
1159 finally:
1163 finally:
1160 del wlock
1164 del wlock
1161
1165
1162 def heads(self, start=None):
1166 def heads(self, start=None):
1163 heads = self.changelog.heads(start)
1167 heads = self.changelog.heads(start)
1164 # sort the output in rev descending order
1168 # sort the output in rev descending order
1165 heads = [(-self.changelog.rev(h), h) for h in heads]
1169 heads = [(-self.changelog.rev(h), h) for h in heads]
1166 return [n for (r, n) in util.sort(heads)]
1170 return [n for (r, n) in util.sort(heads)]
1167
1171
1168 def branchheads(self, branch=None, start=None):
1172 def branchheads(self, branch=None, start=None):
1169 if branch is None:
1173 if branch is None:
1170 branch = self[None].branch()
1174 branch = self[None].branch()
1171 branches = self.branchtags()
1175 branches = self.branchtags()
1172 if branch not in branches:
1176 if branch not in branches:
1173 return []
1177 return []
1174 # The basic algorithm is this:
1178 # The basic algorithm is this:
1175 #
1179 #
1176 # Start from the branch tip since there are no later revisions that can
1180 # Start from the branch tip since there are no later revisions that can
1177 # possibly be in this branch, and the tip is a guaranteed head.
1181 # possibly be in this branch, and the tip is a guaranteed head.
1178 #
1182 #
1179 # Remember the tip's parents as the first ancestors, since these by
1183 # Remember the tip's parents as the first ancestors, since these by
1180 # definition are not heads.
1184 # definition are not heads.
1181 #
1185 #
1182 # Step backwards from the branch tip through all the revisions. We are
1186 # Step backwards from the branch tip through all the revisions. We are
1183 # guaranteed by the rules of Mercurial that we will now be visiting the
1187 # guaranteed by the rules of Mercurial that we will now be visiting the
1184 # nodes in reverse topological order (children before parents).
1188 # nodes in reverse topological order (children before parents).
1185 #
1189 #
1186 # If a revision is one of the ancestors of a head then we can toss it
1190 # If a revision is one of the ancestors of a head then we can toss it
1187 # out of the ancestors set (we've already found it and won't be
1191 # out of the ancestors set (we've already found it and won't be
1188 # visiting it again) and put its parents in the ancestors set.
1192 # visiting it again) and put its parents in the ancestors set.
1189 #
1193 #
1190 # Otherwise, if a revision is in the branch it's another head, since it
1194 # Otherwise, if a revision is in the branch it's another head, since it
1191 # wasn't in the ancestor list of an existing head. So add it to the
1195 # wasn't in the ancestor list of an existing head. So add it to the
1192 # head list, and add its parents to the ancestor list.
1196 # head list, and add its parents to the ancestor list.
1193 #
1197 #
1194 # If it is not in the branch ignore it.
1198 # If it is not in the branch ignore it.
1195 #
1199 #
1196 # Once we have a list of heads, use nodesbetween to filter out all the
1200 # Once we have a list of heads, use nodesbetween to filter out all the
1197 # heads that cannot be reached from startrev. There may be a more
1201 # heads that cannot be reached from startrev. There may be a more
1198 # efficient way to do this as part of the previous algorithm.
1202 # efficient way to do this as part of the previous algorithm.
1199
1203
1200 set = util.set
1204 set = util.set
1201 heads = [self.changelog.rev(branches[branch])]
1205 heads = [self.changelog.rev(branches[branch])]
1202 # Don't care if ancestors contains nullrev or not.
1206 # Don't care if ancestors contains nullrev or not.
1203 ancestors = set(self.changelog.parentrevs(heads[0]))
1207 ancestors = set(self.changelog.parentrevs(heads[0]))
1204 for rev in xrange(heads[0] - 1, nullrev, -1):
1208 for rev in xrange(heads[0] - 1, nullrev, -1):
1205 if rev in ancestors:
1209 if rev in ancestors:
1206 ancestors.update(self.changelog.parentrevs(rev))
1210 ancestors.update(self.changelog.parentrevs(rev))
1207 ancestors.remove(rev)
1211 ancestors.remove(rev)
1208 elif self[rev].branch() == branch:
1212 elif self[rev].branch() == branch:
1209 heads.append(rev)
1213 heads.append(rev)
1210 ancestors.update(self.changelog.parentrevs(rev))
1214 ancestors.update(self.changelog.parentrevs(rev))
1211 heads = [self.changelog.node(rev) for rev in heads]
1215 heads = [self.changelog.node(rev) for rev in heads]
1212 if start is not None:
1216 if start is not None:
1213 heads = self.changelog.nodesbetween([start], heads)[2]
1217 heads = self.changelog.nodesbetween([start], heads)[2]
1214 return heads
1218 return heads
1215
1219
1216 def branches(self, nodes):
1220 def branches(self, nodes):
1217 if not nodes:
1221 if not nodes:
1218 nodes = [self.changelog.tip()]
1222 nodes = [self.changelog.tip()]
1219 b = []
1223 b = []
1220 for n in nodes:
1224 for n in nodes:
1221 t = n
1225 t = n
1222 while 1:
1226 while 1:
1223 p = self.changelog.parents(n)
1227 p = self.changelog.parents(n)
1224 if p[1] != nullid or p[0] == nullid:
1228 if p[1] != nullid or p[0] == nullid:
1225 b.append((t, n, p[0], p[1]))
1229 b.append((t, n, p[0], p[1]))
1226 break
1230 break
1227 n = p[0]
1231 n = p[0]
1228 return b
1232 return b
1229
1233
1230 def between(self, pairs):
1234 def between(self, pairs):
1231 r = []
1235 r = []
1232
1236
1233 for top, bottom in pairs:
1237 for top, bottom in pairs:
1234 n, l, i = top, [], 0
1238 n, l, i = top, [], 0
1235 f = 1
1239 f = 1
1236
1240
1237 while n != bottom:
1241 while n != bottom:
1238 p = self.changelog.parents(n)[0]
1242 p = self.changelog.parents(n)[0]
1239 if i == f:
1243 if i == f:
1240 l.append(n)
1244 l.append(n)
1241 f = f * 2
1245 f = f * 2
1242 n = p
1246 n = p
1243 i += 1
1247 i += 1
1244
1248
1245 r.append(l)
1249 r.append(l)
1246
1250
1247 return r
1251 return r
1248
1252
1249 def findincoming(self, remote, base=None, heads=None, force=False):
1253 def findincoming(self, remote, base=None, heads=None, force=False):
1250 """Return list of roots of the subsets of missing nodes from remote
1254 """Return list of roots of the subsets of missing nodes from remote
1251
1255
1252 If base dict is specified, assume that these nodes and their parents
1256 If base dict is specified, assume that these nodes and their parents
1253 exist on the remote side and that no child of a node of base exists
1257 exist on the remote side and that no child of a node of base exists
1254 in both remote and self.
1258 in both remote and self.
1255 Furthermore base will be updated to include the nodes that exist
1259 Furthermore base will be updated to include the nodes that exist
1256 in self and remote but no children exist in self and remote.
1260 in self and remote but no children exist in self and remote.
1257 If a list of heads is specified, return only nodes which are heads
1261 If a list of heads is specified, return only nodes which are heads
1258 or ancestors of these heads.
1262 or ancestors of these heads.
1259
1263
1260 All the ancestors of base are in self and in remote.
1264 All the ancestors of base are in self and in remote.
1261 All the descendants of the list returned are missing in self.
1265 All the descendants of the list returned are missing in self.
1262 (and so we know that the rest of the nodes are missing in remote, see
1266 (and so we know that the rest of the nodes are missing in remote, see
1263 outgoing)
1267 outgoing)
1264 """
1268 """
1265 m = self.changelog.nodemap
1269 m = self.changelog.nodemap
1266 search = []
1270 search = []
1267 fetch = {}
1271 fetch = {}
1268 seen = {}
1272 seen = {}
1269 seenbranch = {}
1273 seenbranch = {}
1270 if base == None:
1274 if base == None:
1271 base = {}
1275 base = {}
1272
1276
1273 if not heads:
1277 if not heads:
1274 heads = remote.heads()
1278 heads = remote.heads()
1275
1279
1276 if self.changelog.tip() == nullid:
1280 if self.changelog.tip() == nullid:
1277 base[nullid] = 1
1281 base[nullid] = 1
1278 if heads != [nullid]:
1282 if heads != [nullid]:
1279 return [nullid]
1283 return [nullid]
1280 return []
1284 return []
1281
1285
1282 # assume we're closer to the tip than the root
1286 # assume we're closer to the tip than the root
1283 # and start by examining the heads
1287 # and start by examining the heads
1284 self.ui.status(_("searching for changes\n"))
1288 self.ui.status(_("searching for changes\n"))
1285
1289
1286 unknown = []
1290 unknown = []
1287 for h in heads:
1291 for h in heads:
1288 if h not in m:
1292 if h not in m:
1289 unknown.append(h)
1293 unknown.append(h)
1290 else:
1294 else:
1291 base[h] = 1
1295 base[h] = 1
1292
1296
1293 if not unknown:
1297 if not unknown:
1294 return []
1298 return []
1295
1299
1296 req = dict.fromkeys(unknown)
1300 req = dict.fromkeys(unknown)
1297 reqcnt = 0
1301 reqcnt = 0
1298
1302
1299 # search through remote branches
1303 # search through remote branches
1300 # a 'branch' here is a linear segment of history, with four parts:
1304 # a 'branch' here is a linear segment of history, with four parts:
1301 # head, root, first parent, second parent
1305 # head, root, first parent, second parent
1302 # (a branch always has two parents (or none) by definition)
1306 # (a branch always has two parents (or none) by definition)
1303 unknown = remote.branches(unknown)
1307 unknown = remote.branches(unknown)
1304 while unknown:
1308 while unknown:
1305 r = []
1309 r = []
1306 while unknown:
1310 while unknown:
1307 n = unknown.pop(0)
1311 n = unknown.pop(0)
1308 if n[0] in seen:
1312 if n[0] in seen:
1309 continue
1313 continue
1310
1314
1311 self.ui.debug(_("examining %s:%s\n")
1315 self.ui.debug(_("examining %s:%s\n")
1312 % (short(n[0]), short(n[1])))
1316 % (short(n[0]), short(n[1])))
1313 if n[0] == nullid: # found the end of the branch
1317 if n[0] == nullid: # found the end of the branch
1314 pass
1318 pass
1315 elif n in seenbranch:
1319 elif n in seenbranch:
1316 self.ui.debug(_("branch already found\n"))
1320 self.ui.debug(_("branch already found\n"))
1317 continue
1321 continue
1318 elif n[1] and n[1] in m: # do we know the base?
1322 elif n[1] and n[1] in m: # do we know the base?
1319 self.ui.debug(_("found incomplete branch %s:%s\n")
1323 self.ui.debug(_("found incomplete branch %s:%s\n")
1320 % (short(n[0]), short(n[1])))
1324 % (short(n[0]), short(n[1])))
1321 search.append(n[0:2]) # schedule branch range for scanning
1325 search.append(n[0:2]) # schedule branch range for scanning
1322 seenbranch[n] = 1
1326 seenbranch[n] = 1
1323 else:
1327 else:
1324 if n[1] not in seen and n[1] not in fetch:
1328 if n[1] not in seen and n[1] not in fetch:
1325 if n[2] in m and n[3] in m:
1329 if n[2] in m and n[3] in m:
1326 self.ui.debug(_("found new changeset %s\n") %
1330 self.ui.debug(_("found new changeset %s\n") %
1327 short(n[1]))
1331 short(n[1]))
1328 fetch[n[1]] = 1 # earliest unknown
1332 fetch[n[1]] = 1 # earliest unknown
1329 for p in n[2:4]:
1333 for p in n[2:4]:
1330 if p in m:
1334 if p in m:
1331 base[p] = 1 # latest known
1335 base[p] = 1 # latest known
1332
1336
1333 for p in n[2:4]:
1337 for p in n[2:4]:
1334 if p not in req and p not in m:
1338 if p not in req and p not in m:
1335 r.append(p)
1339 r.append(p)
1336 req[p] = 1
1340 req[p] = 1
1337 seen[n[0]] = 1
1341 seen[n[0]] = 1
1338
1342
1339 if r:
1343 if r:
1340 reqcnt += 1
1344 reqcnt += 1
1341 self.ui.debug(_("request %d: %s\n") %
1345 self.ui.debug(_("request %d: %s\n") %
1342 (reqcnt, " ".join(map(short, r))))
1346 (reqcnt, " ".join(map(short, r))))
1343 for p in xrange(0, len(r), 10):
1347 for p in xrange(0, len(r), 10):
1344 for b in remote.branches(r[p:p+10]):
1348 for b in remote.branches(r[p:p+10]):
1345 self.ui.debug(_("received %s:%s\n") %
1349 self.ui.debug(_("received %s:%s\n") %
1346 (short(b[0]), short(b[1])))
1350 (short(b[0]), short(b[1])))
1347 unknown.append(b)
1351 unknown.append(b)
1348
1352
1349 # do binary search on the branches we found
1353 # do binary search on the branches we found
1350 while search:
1354 while search:
1351 newsearch = []
1355 newsearch = []
1352 reqcnt += 1
1356 reqcnt += 1
1353 for n, l in zip(search, remote.between(search)):
1357 for n, l in zip(search, remote.between(search)):
1354 l.append(n[1])
1358 l.append(n[1])
1355 p = n[0]
1359 p = n[0]
1356 f = 1
1360 f = 1
1357 for i in l:
1361 for i in l:
1358 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1362 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1359 if i in m:
1363 if i in m:
1360 if f <= 2:
1364 if f <= 2:
1361 self.ui.debug(_("found new branch changeset %s\n") %
1365 self.ui.debug(_("found new branch changeset %s\n") %
1362 short(p))
1366 short(p))
1363 fetch[p] = 1
1367 fetch[p] = 1
1364 base[i] = 1
1368 base[i] = 1
1365 else:
1369 else:
1366 self.ui.debug(_("narrowed branch search to %s:%s\n")
1370 self.ui.debug(_("narrowed branch search to %s:%s\n")
1367 % (short(p), short(i)))
1371 % (short(p), short(i)))
1368 newsearch.append((p, i))
1372 newsearch.append((p, i))
1369 break
1373 break
1370 p, f = i, f * 2
1374 p, f = i, f * 2
1371 search = newsearch
1375 search = newsearch
1372
1376
1373 # sanity check our fetch list
1377 # sanity check our fetch list
1374 for f in fetch.keys():
1378 for f in fetch.keys():
1375 if f in m:
1379 if f in m:
1376 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1380 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1377
1381
1378 if base.keys() == [nullid]:
1382 if base.keys() == [nullid]:
1379 if force:
1383 if force:
1380 self.ui.warn(_("warning: repository is unrelated\n"))
1384 self.ui.warn(_("warning: repository is unrelated\n"))
1381 else:
1385 else:
1382 raise util.Abort(_("repository is unrelated"))
1386 raise util.Abort(_("repository is unrelated"))
1383
1387
1384 self.ui.debug(_("found new changesets starting at ") +
1388 self.ui.debug(_("found new changesets starting at ") +
1385 " ".join([short(f) for f in fetch]) + "\n")
1389 " ".join([short(f) for f in fetch]) + "\n")
1386
1390
1387 self.ui.debug(_("%d total queries\n") % reqcnt)
1391 self.ui.debug(_("%d total queries\n") % reqcnt)
1388
1392
1389 return fetch.keys()
1393 return fetch.keys()
1390
1394
1391 def findoutgoing(self, remote, base=None, heads=None, force=False):
1395 def findoutgoing(self, remote, base=None, heads=None, force=False):
1392 """Return list of nodes that are roots of subsets not in remote
1396 """Return list of nodes that are roots of subsets not in remote
1393
1397
1394 If base dict is specified, assume that these nodes and their parents
1398 If base dict is specified, assume that these nodes and their parents
1395 exist on the remote side.
1399 exist on the remote side.
1396 If a list of heads is specified, return only nodes which are heads
1400 If a list of heads is specified, return only nodes which are heads
1397 or ancestors of these heads, and return a second element which
1401 or ancestors of these heads, and return a second element which
1398 contains all remote heads which get new children.
1402 contains all remote heads which get new children.
1399 """
1403 """
1400 if base == None:
1404 if base == None:
1401 base = {}
1405 base = {}
1402 self.findincoming(remote, base, heads, force=force)
1406 self.findincoming(remote, base, heads, force=force)
1403
1407
1404 self.ui.debug(_("common changesets up to ")
1408 self.ui.debug(_("common changesets up to ")
1405 + " ".join(map(short, base.keys())) + "\n")
1409 + " ".join(map(short, base.keys())) + "\n")
1406
1410
1407 remain = dict.fromkeys(self.changelog.nodemap)
1411 remain = dict.fromkeys(self.changelog.nodemap)
1408
1412
1409 # prune everything remote has from the tree
1413 # prune everything remote has from the tree
1410 del remain[nullid]
1414 del remain[nullid]
1411 remove = base.keys()
1415 remove = base.keys()
1412 while remove:
1416 while remove:
1413 n = remove.pop(0)
1417 n = remove.pop(0)
1414 if n in remain:
1418 if n in remain:
1415 del remain[n]
1419 del remain[n]
1416 for p in self.changelog.parents(n):
1420 for p in self.changelog.parents(n):
1417 remove.append(p)
1421 remove.append(p)
1418
1422
1419 # find every node whose parents have been pruned
1423 # find every node whose parents have been pruned
1420 subset = []
1424 subset = []
1421 # find every remote head that will get new children
1425 # find every remote head that will get new children
1422 updated_heads = {}
1426 updated_heads = {}
1423 for n in remain:
1427 for n in remain:
1424 p1, p2 = self.changelog.parents(n)
1428 p1, p2 = self.changelog.parents(n)
1425 if p1 not in remain and p2 not in remain:
1429 if p1 not in remain and p2 not in remain:
1426 subset.append(n)
1430 subset.append(n)
1427 if heads:
1431 if heads:
1428 if p1 in heads:
1432 if p1 in heads:
1429 updated_heads[p1] = True
1433 updated_heads[p1] = True
1430 if p2 in heads:
1434 if p2 in heads:
1431 updated_heads[p2] = True
1435 updated_heads[p2] = True
1432
1436
1433 # this is the set of all roots we have to push
1437 # this is the set of all roots we have to push
1434 if heads:
1438 if heads:
1435 return subset, updated_heads.keys()
1439 return subset, updated_heads.keys()
1436 else:
1440 else:
1437 return subset
1441 return subset
1438
1442
1439 def pull(self, remote, heads=None, force=False):
1443 def pull(self, remote, heads=None, force=False):
1440 lock = self.lock()
1444 lock = self.lock()
1441 try:
1445 try:
1442 fetch = self.findincoming(remote, heads=heads, force=force)
1446 fetch = self.findincoming(remote, heads=heads, force=force)
1443 if fetch == [nullid]:
1447 if fetch == [nullid]:
1444 self.ui.status(_("requesting all changes\n"))
1448 self.ui.status(_("requesting all changes\n"))
1445
1449
1446 if not fetch:
1450 if not fetch:
1447 self.ui.status(_("no changes found\n"))
1451 self.ui.status(_("no changes found\n"))
1448 return 0
1452 return 0
1449
1453
1450 if heads is None:
1454 if heads is None:
1451 cg = remote.changegroup(fetch, 'pull')
1455 cg = remote.changegroup(fetch, 'pull')
1452 else:
1456 else:
1453 if 'changegroupsubset' not in remote.capabilities:
1457 if 'changegroupsubset' not in remote.capabilities:
1454 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1458 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1455 cg = remote.changegroupsubset(fetch, heads, 'pull')
1459 cg = remote.changegroupsubset(fetch, heads, 'pull')
1456 return self.addchangegroup(cg, 'pull', remote.url())
1460 return self.addchangegroup(cg, 'pull', remote.url())
1457 finally:
1461 finally:
1458 del lock
1462 del lock
1459
1463
1460 def push(self, remote, force=False, revs=None):
1464 def push(self, remote, force=False, revs=None):
1461 # there are two ways to push to remote repo:
1465 # there are two ways to push to remote repo:
1462 #
1466 #
1463 # addchangegroup assumes local user can lock remote
1467 # addchangegroup assumes local user can lock remote
1464 # repo (local filesystem, old ssh servers).
1468 # repo (local filesystem, old ssh servers).
1465 #
1469 #
1466 # unbundle assumes local user cannot lock remote repo (new ssh
1470 # unbundle assumes local user cannot lock remote repo (new ssh
1467 # servers, http servers).
1471 # servers, http servers).
1468
1472
1469 if remote.capable('unbundle'):
1473 if remote.capable('unbundle'):
1470 return self.push_unbundle(remote, force, revs)
1474 return self.push_unbundle(remote, force, revs)
1471 return self.push_addchangegroup(remote, force, revs)
1475 return self.push_addchangegroup(remote, force, revs)
1472
1476
1473 def prepush(self, remote, force, revs):
1477 def prepush(self, remote, force, revs):
1474 base = {}
1478 base = {}
1475 remote_heads = remote.heads()
1479 remote_heads = remote.heads()
1476 inc = self.findincoming(remote, base, remote_heads, force=force)
1480 inc = self.findincoming(remote, base, remote_heads, force=force)
1477
1481
1478 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1482 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1479 if revs is not None:
1483 if revs is not None:
1480 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1484 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1481 else:
1485 else:
1482 bases, heads = update, self.changelog.heads()
1486 bases, heads = update, self.changelog.heads()
1483
1487
1484 if not bases:
1488 if not bases:
1485 self.ui.status(_("no changes found\n"))
1489 self.ui.status(_("no changes found\n"))
1486 return None, 1
1490 return None, 1
1487 elif not force:
1491 elif not force:
1488 # check if we're creating new remote heads
1492 # check if we're creating new remote heads
1489 # to be a remote head after push, node must be either
1493 # to be a remote head after push, node must be either
1490 # - unknown locally
1494 # - unknown locally
1491 # - a local outgoing head descended from update
1495 # - a local outgoing head descended from update
1492 # - a remote head that's known locally and not
1496 # - a remote head that's known locally and not
1493 # ancestral to an outgoing head
1497 # ancestral to an outgoing head
1494
1498
1495 warn = 0
1499 warn = 0
1496
1500
1497 if remote_heads == [nullid]:
1501 if remote_heads == [nullid]:
1498 warn = 0
1502 warn = 0
1499 elif not revs and len(heads) > len(remote_heads):
1503 elif not revs and len(heads) > len(remote_heads):
1500 warn = 1
1504 warn = 1
1501 else:
1505 else:
1502 newheads = list(heads)
1506 newheads = list(heads)
1503 for r in remote_heads:
1507 for r in remote_heads:
1504 if r in self.changelog.nodemap:
1508 if r in self.changelog.nodemap:
1505 desc = self.changelog.heads(r, heads)
1509 desc = self.changelog.heads(r, heads)
1506 l = [h for h in heads if h in desc]
1510 l = [h for h in heads if h in desc]
1507 if not l:
1511 if not l:
1508 newheads.append(r)
1512 newheads.append(r)
1509 else:
1513 else:
1510 newheads.append(r)
1514 newheads.append(r)
1511 if len(newheads) > len(remote_heads):
1515 if len(newheads) > len(remote_heads):
1512 warn = 1
1516 warn = 1
1513
1517
1514 if warn:
1518 if warn:
1515 self.ui.warn(_("abort: push creates new remote heads!\n"))
1519 self.ui.warn(_("abort: push creates new remote heads!\n"))
1516 self.ui.status(_("(did you forget to merge?"
1520 self.ui.status(_("(did you forget to merge?"
1517 " use push -f to force)\n"))
1521 " use push -f to force)\n"))
1518 return None, 0
1522 return None, 0
1519 elif inc:
1523 elif inc:
1520 self.ui.warn(_("note: unsynced remote changes!\n"))
1524 self.ui.warn(_("note: unsynced remote changes!\n"))
1521
1525
1522
1526
1523 if revs is None:
1527 if revs is None:
1524 cg = self.changegroup(update, 'push')
1528 cg = self.changegroup(update, 'push')
1525 else:
1529 else:
1526 cg = self.changegroupsubset(update, revs, 'push')
1530 cg = self.changegroupsubset(update, revs, 'push')
1527 return cg, remote_heads
1531 return cg, remote_heads
1528
1532
1529 def push_addchangegroup(self, remote, force, revs):
1533 def push_addchangegroup(self, remote, force, revs):
1530 lock = remote.lock()
1534 lock = remote.lock()
1531 try:
1535 try:
1532 ret = self.prepush(remote, force, revs)
1536 ret = self.prepush(remote, force, revs)
1533 if ret[0] is not None:
1537 if ret[0] is not None:
1534 cg, remote_heads = ret
1538 cg, remote_heads = ret
1535 return remote.addchangegroup(cg, 'push', self.url())
1539 return remote.addchangegroup(cg, 'push', self.url())
1536 return ret[1]
1540 return ret[1]
1537 finally:
1541 finally:
1538 del lock
1542 del lock
1539
1543
1540 def push_unbundle(self, remote, force, revs):
1544 def push_unbundle(self, remote, force, revs):
1541 # local repo finds heads on server, finds out what revs it
1545 # local repo finds heads on server, finds out what revs it
1542 # must push. once revs transferred, if server finds it has
1546 # must push. once revs transferred, if server finds it has
1543 # different heads (someone else won commit/push race), server
1547 # different heads (someone else won commit/push race), server
1544 # aborts.
1548 # aborts.
1545
1549
1546 ret = self.prepush(remote, force, revs)
1550 ret = self.prepush(remote, force, revs)
1547 if ret[0] is not None:
1551 if ret[0] is not None:
1548 cg, remote_heads = ret
1552 cg, remote_heads = ret
1549 if force: remote_heads = ['force']
1553 if force: remote_heads = ['force']
1550 return remote.unbundle(cg, remote_heads, 'push')
1554 return remote.unbundle(cg, remote_heads, 'push')
1551 return ret[1]
1555 return ret[1]
1552
1556
1553 def changegroupinfo(self, nodes, source):
1557 def changegroupinfo(self, nodes, source):
1554 if self.ui.verbose or source == 'bundle':
1558 if self.ui.verbose or source == 'bundle':
1555 self.ui.status(_("%d changesets found\n") % len(nodes))
1559 self.ui.status(_("%d changesets found\n") % len(nodes))
1556 if self.ui.debugflag:
1560 if self.ui.debugflag:
1557 self.ui.debug(_("List of changesets:\n"))
1561 self.ui.debug(_("List of changesets:\n"))
1558 for node in nodes:
1562 for node in nodes:
1559 self.ui.debug("%s\n" % hex(node))
1563 self.ui.debug("%s\n" % hex(node))
1560
1564
1561 def changegroupsubset(self, bases, heads, source, extranodes=None):
1565 def changegroupsubset(self, bases, heads, source, extranodes=None):
1562 """This function generates a changegroup consisting of all the nodes
1566 """This function generates a changegroup consisting of all the nodes
1566 that are descendants of any of the bases, and ancestors of any of
1570 that are descendants of any of the bases, and ancestors of any of
1564 the heads.
1568 the heads.
1565
1569
1566 It is fairly complex as determining which filenodes and which
1570 It is fairly complex as determining which filenodes and which
1567 manifest nodes need to be included for the changeset to be complete
1571 manifest nodes need to be included for the changeset to be complete
1568 is non-trivial.
1572 is non-trivial.
1569
1573
1570 Another wrinkle is doing the reverse, figuring out which changeset in
1574 Another wrinkle is doing the reverse, figuring out which changeset in
1571 the changegroup a particular filenode or manifestnode belongs to.
1575 the changegroup a particular filenode or manifestnode belongs to.
1572
1576
1573 The caller can specify some nodes that must be included in the
1577 The caller can specify some nodes that must be included in the
1574 changegroup using the extranodes argument. It should be a dict
1578 changegroup using the extranodes argument. It should be a dict
1575 where the keys are the filenames (or 1 for the manifest), and the
1579 where the keys are the filenames (or 1 for the manifest), and the
1576 values are lists of (node, linknode) tuples, where node is a wanted
1580 values are lists of (node, linknode) tuples, where node is a wanted
1577 node and linknode is the changelog node that should be transmitted as
1581 node and linknode is the changelog node that should be transmitted as
1578 the linkrev.
1582 the linkrev.
1579 """
1583 """
1580
1584
1581 if extranodes is None:
1585 if extranodes is None:
1582 # can we go through the fast path ?
1586 # can we go through the fast path ?
1583 heads.sort()
1587 heads.sort()
1584 allheads = self.heads()
1588 allheads = self.heads()
1585 allheads.sort()
1589 allheads.sort()
1586 if heads == allheads:
1590 if heads == allheads:
1587 common = []
1591 common = []
1588 # parents of bases are known from both sides
1592 # parents of bases are known from both sides
1589 for n in bases:
1593 for n in bases:
1590 for p in self.changelog.parents(n):
1594 for p in self.changelog.parents(n):
1591 if p != nullid:
1595 if p != nullid:
1592 common.append(p)
1596 common.append(p)
1593 return self._changegroup(common, source)
1597 return self._changegroup(common, source)
1594
1598
1595 self.hook('preoutgoing', throw=True, source=source)
1599 self.hook('preoutgoing', throw=True, source=source)
1596
1600
1597 # Set up some initial variables
1601 # Set up some initial variables
1598 # Make it easy to refer to self.changelog
1602 # Make it easy to refer to self.changelog
1599 cl = self.changelog
1603 cl = self.changelog
1600 # msng is short for missing - compute the list of changesets in this
1604 # msng is short for missing - compute the list of changesets in this
1601 # changegroup.
1605 # changegroup.
1602 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1606 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1603 self.changegroupinfo(msng_cl_lst, source)
1607 self.changegroupinfo(msng_cl_lst, source)
1604 # Some bases may turn out to be superfluous, and some heads may be
1608 # Some bases may turn out to be superfluous, and some heads may be
1605 # too. nodesbetween will return the minimal set of bases and heads
1609 # too. nodesbetween will return the minimal set of bases and heads
1606 # necessary to re-create the changegroup.
1610 # necessary to re-create the changegroup.
1607
1611
1608 # Known heads are the list of heads that it is assumed the recipient
1612 # Known heads are the list of heads that it is assumed the recipient
1609 # of this changegroup will know about.
1613 # of this changegroup will know about.
1610 knownheads = {}
1614 knownheads = {}
1611 # We assume that all parents of bases are known heads.
1615 # We assume that all parents of bases are known heads.
1612 for n in bases:
1616 for n in bases:
1613 for p in cl.parents(n):
1617 for p in cl.parents(n):
1614 if p != nullid:
1618 if p != nullid:
1615 knownheads[p] = 1
1619 knownheads[p] = 1
1616 knownheads = knownheads.keys()
1620 knownheads = knownheads.keys()
1617 if knownheads:
1621 if knownheads:
1618 # Now that we know what heads are known, we can compute which
1622 # Now that we know what heads are known, we can compute which
1619 # changesets are known. The recipient must know about all
1623 # changesets are known. The recipient must know about all
1620 # changesets required to reach the known heads from the null
1624 # changesets required to reach the known heads from the null
1621 # changeset.
1625 # changeset.
1622 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1626 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1623 junk = None
1627 junk = None
1624 # Transform the list into an ersatz set.
1628 # Transform the list into an ersatz set.
1625 has_cl_set = dict.fromkeys(has_cl_set)
1629 has_cl_set = dict.fromkeys(has_cl_set)
1626 else:
1630 else:
1627 # If there were no known heads, the recipient cannot be assumed to
1631 # If there were no known heads, the recipient cannot be assumed to
1628 # know about any changesets.
1632 # know about any changesets.
1629 has_cl_set = {}
1633 has_cl_set = {}
1630
1634
1631 # Make it easy to refer to self.manifest
1635 # Make it easy to refer to self.manifest
1632 mnfst = self.manifest
1636 mnfst = self.manifest
1633 # We don't know which manifests are missing yet
1637 # We don't know which manifests are missing yet
1634 msng_mnfst_set = {}
1638 msng_mnfst_set = {}
1635 # Nor do we know which filenodes are missing.
1639 # Nor do we know which filenodes are missing.
1636 msng_filenode_set = {}
1640 msng_filenode_set = {}
1637
1641
1638 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1642 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1639 junk = None
1643 junk = None
1640
1644
1641 # A changeset always belongs to itself, so the changenode lookup
1645 # A changeset always belongs to itself, so the changenode lookup
1642 # function for a changenode is identity.
1646 # function for a changenode is identity.
1643 def identity(x):
1647 def identity(x):
1644 return x
1648 return x
1645
1649
1646 # A function generating function. Sets up an environment for the
1650 # A function generating function. Sets up an environment for the
1647 # inner function.
1651 # inner function.
1648 def cmp_by_rev_func(revlog):
1652 def cmp_by_rev_func(revlog):
1649 # Compare two nodes by their revision number in the environment's
1653 # Compare two nodes by their revision number in the environment's
1650 # revision history. Since the revision number both represents the
1654 # revision history. Since the revision number both represents the
1651 # most efficient order to read the nodes in, and represents a
1655 # most efficient order to read the nodes in, and represents a
1652 # topological sorting of the nodes, this function is often useful.
1656 # topological sorting of the nodes, this function is often useful.
1653 def cmp_by_rev(a, b):
1657 def cmp_by_rev(a, b):
1654 return cmp(revlog.rev(a), revlog.rev(b))
1658 return cmp(revlog.rev(a), revlog.rev(b))
1655 return cmp_by_rev
1659 return cmp_by_rev
1656
1660
1657 # If we determine that a particular file or manifest node must be a
1661 # If we determine that a particular file or manifest node must be a
1658 # node that the recipient of the changegroup will already have, we can
1662 # node that the recipient of the changegroup will already have, we can
1659 # also assume the recipient will have all the parents. This function
1663 # also assume the recipient will have all the parents. This function
1660 # prunes them from the set of missing nodes.
1664 # prunes them from the set of missing nodes.
1661 def prune_parents(revlog, hasset, msngset):
1665 def prune_parents(revlog, hasset, msngset):
1662 haslst = hasset.keys()
1666 haslst = hasset.keys()
1663 haslst.sort(cmp_by_rev_func(revlog))
1667 haslst.sort(cmp_by_rev_func(revlog))
1664 for node in haslst:
1668 for node in haslst:
1665 parentlst = [p for p in revlog.parents(node) if p != nullid]
1669 parentlst = [p for p in revlog.parents(node) if p != nullid]
1666 while parentlst:
1670 while parentlst:
1667 n = parentlst.pop()
1671 n = parentlst.pop()
1668 if n not in hasset:
1672 if n not in hasset:
1669 hasset[n] = 1
1673 hasset[n] = 1
1670 p = [p for p in revlog.parents(n) if p != nullid]
1674 p = [p for p in revlog.parents(n) if p != nullid]
1671 parentlst.extend(p)
1675 parentlst.extend(p)
1672 for n in hasset:
1676 for n in hasset:
1673 msngset.pop(n, None)
1677 msngset.pop(n, None)
1674
1678
1675 # This is a function generating function used to set up an environment
1679 # This is a function generating function used to set up an environment
1676 # for the inner function to execute in.
1680 # for the inner function to execute in.
1677 def manifest_and_file_collector(changedfileset):
1681 def manifest_and_file_collector(changedfileset):
1678 # This is an information gathering function that gathers
1682 # This is an information gathering function that gathers
1679 # information from each changeset node that goes out as part of
1683 # information from each changeset node that goes out as part of
1680 # the changegroup. The information gathered is a list of which
1684 # the changegroup. The information gathered is a list of which
1681 # manifest nodes are potentially required (the recipient may
1685 # manifest nodes are potentially required (the recipient may
1682 # already have them) and total list of all files which were
1686 # already have them) and total list of all files which were
1683 # changed in any changeset in the changegroup.
1687 # changed in any changeset in the changegroup.
1684 #
1688 #
1685 # We also remember the first changenode we saw any manifest
1689 # We also remember the first changenode we saw any manifest
1686 # referenced by so we can later determine which changenode 'owns'
1690 # referenced by so we can later determine which changenode 'owns'
1687 # the manifest.
1691 # the manifest.
1688 def collect_manifests_and_files(clnode):
1692 def collect_manifests_and_files(clnode):
1689 c = cl.read(clnode)
1693 c = cl.read(clnode)
1690 for f in c[3]:
1694 for f in c[3]:
1691 # This is to make sure we only have one instance of each
1695 # This is to make sure we only have one instance of each
1692 # filename string for each filename.
1696 # filename string for each filename.
1693 changedfileset.setdefault(f, f)
1697 changedfileset.setdefault(f, f)
1694 msng_mnfst_set.setdefault(c[0], clnode)
1698 msng_mnfst_set.setdefault(c[0], clnode)
1695 return collect_manifests_and_files
1699 return collect_manifests_and_files
1696
1700
1697 # Figure out which manifest nodes (of the ones we think might be part
1701 # Figure out which manifest nodes (of the ones we think might be part
1698 # of the changegroup) the recipient must know about and remove them
1702 # of the changegroup) the recipient must know about and remove them
1699 # from the changegroup.
1703 # from the changegroup.
1700 def prune_manifests():
1704 def prune_manifests():
1701 has_mnfst_set = {}
1705 has_mnfst_set = {}
1702 for n in msng_mnfst_set:
1706 for n in msng_mnfst_set:
1703 # If a 'missing' manifest thinks it belongs to a changenode
1707 # If a 'missing' manifest thinks it belongs to a changenode
1704 # the recipient is assumed to have, obviously the recipient
1708 # the recipient is assumed to have, obviously the recipient
1705 # must have that manifest.
1709 # must have that manifest.
1706 linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
1710 linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
1707 if linknode in has_cl_set:
1711 if linknode in has_cl_set:
1708 has_mnfst_set[n] = 1
1712 has_mnfst_set[n] = 1
1709 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1713 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1710
1714
1711 # Use the information collected in collect_manifests_and_files to say
1715 # Use the information collected in collect_manifests_and_files to say
1712 # which changenode any manifestnode belongs to.
1716 # which changenode any manifestnode belongs to.
1713 def lookup_manifest_link(mnfstnode):
1717 def lookup_manifest_link(mnfstnode):
1714 return msng_mnfst_set[mnfstnode]
1718 return msng_mnfst_set[mnfstnode]
1715
1719
1716 # A function generating function that sets up the initial environment
1720 # A function generating function that sets up the initial environment
1717 # for the inner function.
1721 # for the inner function.
1718 def filenode_collector(changedfiles):
1722 def filenode_collector(changedfiles):
1719 next_rev = [0]
1723 next_rev = [0]
1720 # This gathers information from each manifestnode included in the
1724 # This gathers information from each manifestnode included in the
1721 # changegroup about which filenodes the manifest node references
1725 # changegroup about which filenodes the manifest node references
1722 # so we can include those in the changegroup too.
1726 # so we can include those in the changegroup too.
1723 #
1727 #
1724 # It also remembers which changenode each filenode belongs to. It
1728 # It also remembers which changenode each filenode belongs to. It
1725 # does this by assuming a filenode belongs to the changenode
1729 # does this by assuming a filenode belongs to the changenode
1726 # the first manifest that references it belongs to.
1730 # the first manifest that references it belongs to.
1727 def collect_msng_filenodes(mnfstnode):
1731 def collect_msng_filenodes(mnfstnode):
1728 r = mnfst.rev(mnfstnode)
1732 r = mnfst.rev(mnfstnode)
1729 if r == next_rev[0]:
1733 if r == next_rev[0]:
1730 # If the last rev we looked at was the one just previous,
1734 # If the last rev we looked at was the one just previous,
1731 # we only need to see a diff.
1735 # we only need to see a diff.
1732 deltamf = mnfst.readdelta(mnfstnode)
1736 deltamf = mnfst.readdelta(mnfstnode)
1733 # For each line in the delta
1737 # For each line in the delta
1734 for f, fnode in deltamf.items():
1738 for f, fnode in deltamf.items():
1735 f = changedfiles.get(f, None)
1739 f = changedfiles.get(f, None)
1736 # And if the file is in the list of files we care
1740 # And if the file is in the list of files we care
1737 # about.
1741 # about.
1738 if f is not None:
1742 if f is not None:
1739 # Get the changenode this manifest belongs to
1743 # Get the changenode this manifest belongs to
1740 clnode = msng_mnfst_set[mnfstnode]
1744 clnode = msng_mnfst_set[mnfstnode]
1741 # Create the set of filenodes for the file if
1745 # Create the set of filenodes for the file if
1742 # there isn't one already.
1746 # there isn't one already.
1743 ndset = msng_filenode_set.setdefault(f, {})
1747 ndset = msng_filenode_set.setdefault(f, {})
1744 # And set the filenode's changelog node to the
1748 # And set the filenode's changelog node to the
1745 # manifest's if it hasn't been set already.
1749 # manifest's if it hasn't been set already.
1746 ndset.setdefault(fnode, clnode)
1750 ndset.setdefault(fnode, clnode)
1747 else:
1751 else:
1748 # Otherwise we need a full manifest.
1752 # Otherwise we need a full manifest.
1749 m = mnfst.read(mnfstnode)
1753 m = mnfst.read(mnfstnode)
1750 # For every file in we care about.
1754 # For every file in we care about.
1751 for f in changedfiles:
1755 for f in changedfiles:
1752 fnode = m.get(f, None)
1756 fnode = m.get(f, None)
1753 # If it's in the manifest
1757 # If it's in the manifest
1754 if fnode is not None:
1758 if fnode is not None:
1755 # See comments above.
1759 # See comments above.
1756 clnode = msng_mnfst_set[mnfstnode]
1760 clnode = msng_mnfst_set[mnfstnode]
1757 ndset = msng_filenode_set.setdefault(f, {})
1761 ndset = msng_filenode_set.setdefault(f, {})
1758 ndset.setdefault(fnode, clnode)
1762 ndset.setdefault(fnode, clnode)
1759 # Remember the revision we hope to see next.
1763 # Remember the revision we hope to see next.
1760 next_rev[0] = r + 1
1764 next_rev[0] = r + 1
1761 return collect_msng_filenodes
1765 return collect_msng_filenodes
1762
1766
1763 # We have a list of filenodes we think we need for a file, let's remove
1767 # We have a list of filenodes we think we need for a file, let's remove
1764 # all those we know the recipient must have.
1768 # all those we know the recipient must have.
1765 def prune_filenodes(f, filerevlog):
1769 def prune_filenodes(f, filerevlog):
1766 msngset = msng_filenode_set[f]
1770 msngset = msng_filenode_set[f]
1767 hasset = {}
1771 hasset = {}
1768 # If a 'missing' filenode thinks it belongs to a changenode we
1772 # If a 'missing' filenode thinks it belongs to a changenode we
1769 # assume the recipient must have, then the recipient must have
1773 # assume the recipient must have, then the recipient must have
1770 # that filenode.
1774 # that filenode.
1771 for n in msngset:
1775 for n in msngset:
1772 clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
1776 clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
1773 if clnode in has_cl_set:
1777 if clnode in has_cl_set:
1774 hasset[n] = 1
1778 hasset[n] = 1
1775 prune_parents(filerevlog, hasset, msngset)
1779 prune_parents(filerevlog, hasset, msngset)
1776
1780
1777 # A function generator function that sets up a context for the
1781 # A function generator function that sets up a context for the
1778 # inner function.
1782 # inner function.
1779 def lookup_filenode_link_func(fname):
1783 def lookup_filenode_link_func(fname):
1780 msngset = msng_filenode_set[fname]
1784 msngset = msng_filenode_set[fname]
1781 # Lookup the changenode the filenode belongs to.
1785 # Lookup the changenode the filenode belongs to.
1782 def lookup_filenode_link(fnode):
1786 def lookup_filenode_link(fnode):
1783 return msngset[fnode]
1787 return msngset[fnode]
1784 return lookup_filenode_link
1788 return lookup_filenode_link
1785
1789
1786 # Add the nodes that were explicitly requested.
1790 # Add the nodes that were explicitly requested.
1787 def add_extra_nodes(name, nodes):
1791 def add_extra_nodes(name, nodes):
1788 if not extranodes or name not in extranodes:
1792 if not extranodes or name not in extranodes:
1789 return
1793 return
1790
1794
1791 for node, linknode in extranodes[name]:
1795 for node, linknode in extranodes[name]:
1792 if node not in nodes:
1796 if node not in nodes:
1793 nodes[node] = linknode
1797 nodes[node] = linknode
1794
1798
1795 # Now that we have all these utility functions to help out and
1799 # Now that we have all these utility functions to help out and
1796 # logically divide up the task, generate the group.
1800 # logically divide up the task, generate the group.
1797 def gengroup():
1801 def gengroup():
1798 # The set of changed files starts empty.
1802 # The set of changed files starts empty.
1799 changedfiles = {}
1803 changedfiles = {}
1800 # Create a changenode group generator that will call our functions
1804 # Create a changenode group generator that will call our functions
1801 # back to lookup the owning changenode and collect information.
1805 # back to lookup the owning changenode and collect information.
1802 group = cl.group(msng_cl_lst, identity,
1806 group = cl.group(msng_cl_lst, identity,
1803 manifest_and_file_collector(changedfiles))
1807 manifest_and_file_collector(changedfiles))
1804 for chnk in group:
1808 for chnk in group:
1805 yield chnk
1809 yield chnk
1806
1810
1807 # The list of manifests has been collected by the generator
1811 # The list of manifests has been collected by the generator
1808 # calling our functions back.
1812 # calling our functions back.
1809 prune_manifests()
1813 prune_manifests()
1810 add_extra_nodes(1, msng_mnfst_set)
1814 add_extra_nodes(1, msng_mnfst_set)
1811 msng_mnfst_lst = msng_mnfst_set.keys()
1815 msng_mnfst_lst = msng_mnfst_set.keys()
1812 # Sort the manifestnodes by revision number.
1816 # Sort the manifestnodes by revision number.
1813 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1817 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1814 # Create a generator for the manifestnodes that calls our lookup
1818 # Create a generator for the manifestnodes that calls our lookup
1815 # and data collection functions back.
1819 # and data collection functions back.
1816 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1820 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1817 filenode_collector(changedfiles))
1821 filenode_collector(changedfiles))
1818 for chnk in group:
1822 for chnk in group:
1819 yield chnk
1823 yield chnk
1820
1824
1821 # These are no longer needed, dereference and toss the memory for
1825 # These are no longer needed, dereference and toss the memory for
1822 # them.
1826 # them.
1823 msng_mnfst_lst = None
1827 msng_mnfst_lst = None
1824 msng_mnfst_set.clear()
1828 msng_mnfst_set.clear()
1825
1829
1826 if extranodes:
1830 if extranodes:
1827 for fname in extranodes:
1831 for fname in extranodes:
1828 if isinstance(fname, int):
1832 if isinstance(fname, int):
1829 continue
1833 continue
1830 msng_filenode_set.setdefault(fname, {})
1834 msng_filenode_set.setdefault(fname, {})
1831 changedfiles[fname] = 1
1835 changedfiles[fname] = 1
1832 # Go through all our files in order sorted by name.
1836 # Go through all our files in order sorted by name.
1833 for fname in util.sort(changedfiles):
1837 for fname in util.sort(changedfiles):
1834 filerevlog = self.file(fname)
1838 filerevlog = self.file(fname)
1835 if not len(filerevlog):
1839 if not len(filerevlog):
1836 raise util.Abort(_("empty or missing revlog for %s") % fname)
1840 raise util.Abort(_("empty or missing revlog for %s") % fname)
1837 # Toss out the filenodes that the recipient isn't really
1841 # Toss out the filenodes that the recipient isn't really
1838 # missing.
1842 # missing.
1839 if fname in msng_filenode_set:
1843 if fname in msng_filenode_set:
1840 prune_filenodes(fname, filerevlog)
1844 prune_filenodes(fname, filerevlog)
1841 add_extra_nodes(fname, msng_filenode_set[fname])
1845 add_extra_nodes(fname, msng_filenode_set[fname])
1842 msng_filenode_lst = msng_filenode_set[fname].keys()
1846 msng_filenode_lst = msng_filenode_set[fname].keys()
1843 else:
1847 else:
1844 msng_filenode_lst = []
1848 msng_filenode_lst = []
1845 # If any filenodes are left, generate the group for them,
1849 # If any filenodes are left, generate the group for them,
1846 # otherwise don't bother.
1850 # otherwise don't bother.
1847 if len(msng_filenode_lst) > 0:
1851 if len(msng_filenode_lst) > 0:
1848 yield changegroup.chunkheader(len(fname))
1852 yield changegroup.chunkheader(len(fname))
1849 yield fname
1853 yield fname
1850 # Sort the filenodes by their revision #
1854 # Sort the filenodes by their revision #
1851 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1855 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1852 # Create a group generator and only pass in a changenode
1856 # Create a group generator and only pass in a changenode
1853 # lookup function as we need to collect no information
1857 # lookup function as we need to collect no information
1854 # from filenodes.
1858 # from filenodes.
1855 group = filerevlog.group(msng_filenode_lst,
1859 group = filerevlog.group(msng_filenode_lst,
1856 lookup_filenode_link_func(fname))
1860 lookup_filenode_link_func(fname))
1857 for chnk in group:
1861 for chnk in group:
1858 yield chnk
1862 yield chnk
1859 if fname in msng_filenode_set:
1863 if fname in msng_filenode_set:
1860 # Don't need this anymore, toss it to free memory.
1864 # Don't need this anymore, toss it to free memory.
1861 del msng_filenode_set[fname]
1865 del msng_filenode_set[fname]
1862 # Signal that no more groups are left.
1866 # Signal that no more groups are left.
1863 yield changegroup.closechunk()
1867 yield changegroup.closechunk()
1864
1868
1865 if msng_cl_lst:
1869 if msng_cl_lst:
1866 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1870 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1867
1871
1868 return util.chunkbuffer(gengroup())
1872 return util.chunkbuffer(gengroup())
1869
1873
1870 def changegroup(self, basenodes, source):
1874 def changegroup(self, basenodes, source):
1871 # to avoid a race we use changegroupsubset() (issue1320)
1875 # to avoid a race we use changegroupsubset() (issue1320)
1872 return self.changegroupsubset(basenodes, self.heads(), source)
1876 return self.changegroupsubset(basenodes, self.heads(), source)
1873
1877
    def _changegroup(self, common, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        common is the set of common nodes between remote and self"""

        # Give hooks/extensions a chance to veto the outgoing operation.
        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        # Every changelog node the recipient is missing, given 'common'.
        nodes = cl.findmissing(common)
        # Corresponding revision numbers, as an ersatz set for O(1)
        # membership tests in gennodelst() below.
        revset = dict.fromkeys([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes, source)

        # A changeset "belongs" to itself, so its group-lookup function
        # is the identity.
        def identity(x):
            return x

        # Yield, in revlog order, every node of 'log' whose linked
        # changeset revision is part of the outgoing set.
        def gennodelst(log):
            for r in log:
                if log.linkrev(r) in revset:
                    yield log.node(r)

        # Build a callback that records in 'changedfileset' (a dict used
        # as a set) every file touched by each outgoing changeset.
        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        # Build a lookup mapping a node of 'revlog' back to the changelog
        # node of the changeset that introduced it.
        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(revlog.rev(n)))
            return lookuprevlink

        # Lazily produce the raw changegroup chunks: the changelog group,
        # then the manifest group, then one group per changed file.
        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in util.sort(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                nodeiter = gennodelst(filerevlog)
                # Materialize so an empty file section can be skipped
                # entirely (no header is emitted for it).
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            # Zero-length chunk marks the end of the stream.
            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())
1942
1946
    def addchangegroup(self, source, srctype, url, emptyok=False):
        """add changegroup to repo.

        source is a stream of changegroup chunks; srctype names the
        operation for the hooks (e.g. 'push', 'pull'); url is passed
        through to the hooks; emptyok permits an empty changelog group.

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - less heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        # Map an incoming changeset node to the revision number it will
        # receive (the current length of the changelog).
        def csmap(x):
            self.ui.debug(_("add changeset %s\n") % short(x))
            return len(cl)

        # Map a node to its (already assigned) changelog revision.
        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

        tr = self.transaction()
        try:
            # Weak proxy: dropping 'tr' in the finally clause must be able
            # to release (and thus abort) a transaction left unclosed by
            # an error, despite references held by the revlogs below.
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            cor = len(cl) - 1  # last revision number before the pull
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
                raise util.Abort(_("received changelog group is empty"))
            cnr = len(cl) - 1  # last revision number after the pull
            changesets = cnr - cor

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, trp)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while 1:
                # An empty chunk terminates the sequence of file groups.
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = len(fl)
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1

            # make changelog see real files again
            cl.finalize(trp)

            newheads = len(self.changelog.heads())
            heads = ""
            if oldheads and newheads != oldheads:
                heads = _(" (%+d heads)") % (newheads - oldheads)

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, heads))

            if changesets > 0:
                # pretxnchangegroup may raise to abort the whole pull
                # before the transaction is committed.
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(self.changelog.node(cor+1)), source=srctype,
                          url=url)

            tr.close()
        finally:
            del tr

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug(_("updating the branch cache\n"))
            self.branchtags()
            # changegroup fires once for the whole group, incoming once
            # per new changeset.
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype, url=url)

            for i in xrange(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1
2045
2049
2046
2050
def stream_in(self, remote):
    """Clone by copying raw store files streamed from the remote.

    Wire format (as consumed here): first a status line holding an
    integer response code (0 = ok, 1 = forbidden, 2 = remote lock
    failed, anything else = unknown error), then a line with
    "<total_files> <total_bytes>", then for each file a
    "<name>\\0<size>" header line followed by exactly <size> bytes of
    raw file data.

    Returns len(self.heads()) + 1 — callers treat the return value of a
    clone/pull as an error count where 0 is ambiguous, so this is
    always >= 1 (matches the "never return 0" convention used by
    addchangegroup above).

    Raises util.Abort on a server-reported error and
    util.UnexpectedOutput on a malformed response.
    """
    fp = remote.stream_out()
    l = fp.readline()
    try:
        resp = int(l)
    except ValueError:
        raise util.UnexpectedOutput(
            _('Unexpected response from remote server:'), l)
    if resp == 1:
        raise util.Abort(_('operation forbidden by server'))
    elif resp == 2:
        raise util.Abort(_('locking the remote repository failed'))
    elif resp != 0:
        raise util.Abort(_('the server sent an unknown error code'))
    self.ui.status(_('streaming all changes\n'))
    l = fp.readline()
    try:
        total_files, total_bytes = map(int, l.split(' ', 1))
    except (ValueError, TypeError):
        raise util.UnexpectedOutput(
            _('Unexpected response from remote server:'), l)
    self.ui.status(_('%d files to transfer, %s of data\n') %
                   (total_files, util.bytecount(total_bytes)))
    start = time.time()
    for i in xrange(total_files):
        # XXX doesn't support '\n' or '\r' in filenames
        l = fp.readline()
        try:
            name, size = l.split('\0', 1)
            size = int(size)
        except (ValueError, TypeError):
            raise util.UnexpectedOutput(
                _('Unexpected response from remote server:'), l)
        self.ui.debug(_('adding %s (%s)\n') % (name, util.bytecount(size)))
        ofp = self.sopener(name, 'w')
        try:
            for chunk in util.filechunkiter(fp, limit=size):
                ofp.write(chunk)
        finally:
            # close even when a write fails, so an aborted stream
            # doesn't leak one open handle per already-started file
            ofp.close()
    elapsed = time.time() - start
    if elapsed <= 0:
        # guard against a zero/negative clock delta so the rate
        # division below can't raise ZeroDivisionError
        elapsed = 0.001
    self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                   (util.bytecount(total_bytes), elapsed,
                    util.bytecount(total_bytes / elapsed)))
    # drop cached state so the freshly written store is re-read
    self.invalidate()
    return len(self.heads()) + 1
2093
2097
def clone(self, remote, heads=None, stream=False):
    '''clone remote repository.

    keyword arguments:
    heads: list of revs to clone (forces use of pull)
    stream: use streaming clone if possible'''

    # `heads` used to default to a mutable [] — the shared-default
    # pitfall.  Default to None and normalize, which is behaviorally
    # identical for every caller (both are falsy in the test below,
    # and pull() receives the same empty list as before).
    if heads is None:
        heads = []

    # now, all clients that can request uncompressed clones can
    # read repo formats supported by all servers that can serve
    # them.

    # if revlog format changes, client will have to check version
    # and format flags on "stream" capability, and use
    # uncompressed only if compatible.

    if stream and not heads and remote.capable('stream'):
        return self.stream_in(remote)
    return self.pull(remote, heads)
2112
2116
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback that performs the queued (src, dest) renames.

    The pairs are snapshotted into fresh tuples up front, so later
    mutation of `files` by the caller cannot change what gets renamed.
    Returning a plain closure (rather than a bound method) keeps the
    transaction free of references back into the repository object.
    """
    pending = [tuple(pair) for pair in files]

    def a():
        for source, destination in pending:
            util.rename(source, destination)
    return a
2120
2124
def instance(ui, path, create):
    """Open (or create) a local repository for a 'file:'-scheme path.

    Strips any leading 'file' scheme from `path` before handing it to
    localrepository; entry point used by the generic repo dispatcher.
    """
    local_path = util.drop_scheme('file', path)
    return localrepository(ui, local_path, create)
2123
2127
def islocal(path):
    """Report whether this repo type is local — always True here."""
    return True
General Comments 0
You need to be logged in to leave comments. Login now